text (string, lengths 78-104k) | score (float64, 0-0.18)
---|---|
def remove_peer_from_group(self, group_jid, peer_jid):
"""
Kicks someone out of a group
:param group_jid: The group JID from which to remove the user
:param peer_jid: The JID of the user to remove
"""
log.info("[+] Requesting removal of user {} from group {}".format(peer_jid, group_jid))
return self._send_xmpp_element(group_adminship.RemoveFromGroupRequest(group_jid, peer_jid)) | 0.009153 |
def duplicate(self):
'''
Returns a copy of the current contact element.
@returns: Contact
'''
return self.__class__(name=self.name, identifier=self.identifier,
phone=self.phone, require_id=self.__require_id,
address=self.address.duplicate()) | 0.005865 |
def OpenFile(filename, binary=False, newline=None, encoding=None):
'''
Open a file and returns it.
Consider the possibility of a remote file (HTTP, HTTPS, FTP)
:param unicode filename:
Local or remote filename.
:param bool binary:
If True returns the file as is, ignore any EOL conversion.
If set, ignores the universal_newlines parameter.
:param None|''|'\n'|'\r'|'\r\n' newline:
Controls universal newlines.
See 'io.open' newline parameter documentation for more details.
:param unicode encoding:
File's encoding. If not None, contents obtained from file will be decoded using this
`encoding`.
:returns file:
The open file, it must be closed by the caller
@raise: FileNotFoundError
When the given filename cannot be found
.. seealso:: FTP LIMITATIONS at this module's doc for performance issues information
'''
from six.moves.urllib.parse import urlparse
filename_url = urlparse(filename)
# Check if file is local
if _UrlIsLocal(filename_url):
if not os.path.isfile(filename):
from ._exceptions import FileNotFoundError
raise FileNotFoundError(filename)
mode = 'rb' if binary else 'r'
return io.open(filename, mode, encoding=encoding, newline=newline)
# Not local
from ._exceptions import NotImplementedProtocol
raise NotImplementedProtocol(filename_url.scheme) | 0.002059 |
def parse(self, xml_file, view_name=None) -> XmlNode:
"""Parses xml file with xml_path and returns XmlNode"""
self._setup_parser()
try:
self._view_name = view_name
self._parser.ParseFile(xml_file)
except ExpatError as error:
# pylint: disable=E1101
raise XmlError(errors.messages[error.code], ViewInfo(view_name, error.lineno))
root = self._root
self._reset()
return root | 0.006303 |
def upload_image(vol, img, offset, parallel=1,
manual_shared_memory_id=None, manual_shared_memory_bbox=None, manual_shared_memory_order='F'):
"""Upload img to vol with offset. This is the primary entry point for uploads."""
global NON_ALIGNED_WRITE
if not np.issubdtype(img.dtype, np.dtype(vol.dtype).type):
raise ValueError('The uploaded image data type must match the volume data type. volume: {}, image: {}'.format(vol.dtype, img.dtype))
(is_aligned, bounds, expanded) = check_grid_aligned(vol, img, offset)
if is_aligned:
upload_aligned(vol, img, offset, parallel=parallel,
manual_shared_memory_id=manual_shared_memory_id, manual_shared_memory_bbox=manual_shared_memory_bbox,
manual_shared_memory_order=manual_shared_memory_order)
return
elif vol.non_aligned_writes == False:
msg = NON_ALIGNED_WRITE.format(mip=vol.mip, chunk_size=vol.chunk_size, offset=vol.voxel_offset, got=bounds, check=expanded)
raise AlignmentError(msg)
# Upload the aligned core
retracted = bounds.shrink_to_chunk_size(vol.underlying, vol.voxel_offset)
core_bbox = retracted.clone() - bounds.minpt
if not core_bbox.subvoxel():
core_img = img[ core_bbox.to_slices() ]
upload_aligned(vol, core_img, retracted.minpt, parallel=parallel,
manual_shared_memory_id=manual_shared_memory_id, manual_shared_memory_bbox=manual_shared_memory_bbox,
manual_shared_memory_order=manual_shared_memory_order)
# Download the shell, paint, and upload
all_chunks = set(chunknames(expanded, vol.bounds, vol.key, vol.underlying))
core_chunks = set(chunknames(retracted, vol.bounds, vol.key, vol.underlying))
shell_chunks = all_chunks.difference(core_chunks)
def shade_and_upload(img3d, bbox):
# decode is returning non-writable chunk
# we're throwing them away so safe to write
img3d.setflags(write=1)
shade(img3d, bbox, img, bounds)
single_process_upload(vol, img3d, (( Vec(0,0,0), Vec(*img3d.shape[:3]), bbox.minpt, bbox.maxpt),), n_threads=0)
download_multiple(vol, shell_chunks, fn=shade_and_upload) | 0.019277 |
def run_bwa(job, fastqs, sample_type, univ_options, bwa_options):
"""
This module aligns the SAMPLE_TYPE dna fastqs to the reference
ARGUMENTS -- <ST> depicts the sample type. Substitute with 'tumor'/'normal'
1. fastqs: Dict of list of input WGS/WXS fastqs
fastqs
+- '<ST>_dna': [<JSid for 1.fastq> , <JSid for 2.fastq>]
2. sample_type: string of 'tumor_dna' or 'normal_dna'
3. univ_options: Dict of universal arguments used by almost all tools
univ_options
+- 'dockerhub': <dockerhub to use>
4. bwa_options: Dict of parameters specific to bwa
bwa_options
|- 'index_tar': <JSid for the bwa index tarball>
+- 'n': <number of threads to allocate>
RETURN VALUES
1. output_files: Dict of aligned bam + reference (nested return)
output_files
|- '<ST>_fix_pg_sorted.bam': <JSid>
+- '<ST>_fix_pg_sorted.bam.bai': <JSid>
This module corresponds to nodes 3 and 4 on the tree
"""
job.fileStore.logToMaster('Running bwa on %s:%s' % (univ_options['patient'], sample_type))
work_dir = job.fileStore.getLocalTempDir()
fq_extn = '.gz' if fastqs['gzipped'] else ''
input_files = {
'dna_1.fastq' + fq_extn: fastqs[sample_type][0],
'dna_2.fastq' + fq_extn: fastqs[sample_type][1],
'bwa_index.tar.gz': bwa_options['index_tar']}
input_files = get_files_from_filestore(job, input_files, work_dir, docker=True)
parameters = ['mem',
'-t', str(bwa_options['n']),
'-v', '1', # Don't print INFO messages to the stderr
'/'.join([input_files['bwa_index'], 'hg19.fa']),
input_files['dna_1.fastq'],
input_files['dna_2.fastq']]
with open(''.join([work_dir, '/', sample_type, '_aligned.sam']), 'w') as samfile:
docker_call(tool='bwa', tool_parameters=parameters, work_dir=work_dir,
dockerhub=univ_options['dockerhub'], outfile=samfile)
# samfile.name retains the path info
output_file = job.fileStore.writeGlobalFile(samfile.name)
samfile_processing = job.wrapJobFn(bam_conversion, output_file, sample_type, univ_options,
disk='60G')
job.addChild(samfile_processing)
# Return values get passed up the chain to here. The return value will be a dict with
# SAMPLE_TYPE_fix_pg_sorted.bam: jobStoreID
# SAMPLE_TYPE_fix_pg_sorted.bam.bai: jobStoreID
return samfile_processing.rv() | 0.00235 |
def ijk_to_xyz(dset,ijk):
'''convert the dset indices ``ijk`` to RAI coordinates ``xyz``'''
i = nl.dset_info(dset)
orient_codes = [int(x) for x in nl.run(['@AfniOrient2RAImap',i.orient]).output.split()]
orient_is = [abs(x)-1 for x in orient_codes]
rai = []
for rai_i in xrange(3):
ijk_i = orient_is[rai_i]
if orient_codes[rai_i] > 0:
rai.append(ijk[ijk_i]*i.voxel_size[rai_i] + i.spatial_from[rai_i])
else:
rai.append(i.spatial_to[rai_i] - ijk[ijk_i]*i.voxel_size[rai_i])
return rai | 0.017762 |
def base(ctx, verbose, config):
"""Puzzle: manage DNA variant resources."""
# configure root logger to print to STDERR
loglevel = LEVELS.get(min(verbose, 3))
configure_stream(level=loglevel)
ctx.obj = {}
if config and os.path.exists(config):
ctx.obj = yaml.load(open(config, 'r')) or {}
ctx.obj['config_path'] = config
# launch the command line interface
logger.debug('Booting up command line interface') | 0.002212 |
def make_export_strategy(
args,
keep_target,
assets_extra,
features,
schema,
stats):
"""Makes prediction graph that takes json input.
Args:
args: command line args
keep_target: If true, target column is returned in prediction graph. Target
column must also exist in input data
assets_extra: other files to copy to the output folder
job_dir: root job folder
features: features dict
schema: schema list
stats: stats dict
"""
target_name = feature_transforms.get_target_name(features)
csv_header = [col['name'] for col in schema]
if not keep_target:
csv_header.remove(target_name)
def export_fn(estimator, export_dir_base, checkpoint_path=None, eval_result=None):
with ops.Graph().as_default() as g:
contrib_variables.create_global_step(g)
input_ops = feature_transforms.build_csv_serving_tensors_for_training_step(
args.analysis, features, schema, stats, keep_target)
model_fn_ops = estimator._call_model_fn(input_ops.features,
None,
model_fn_lib.ModeKeys.INFER)
output_fetch_tensors = make_prediction_output_tensors(
args=args,
features=features,
input_ops=input_ops,
model_fn_ops=model_fn_ops,
keep_target=keep_target)
# Don't use signature_def_utils.predict_signature_def as that renames
# tensor names if there is only 1 input/output tensor!
signature_inputs = {key: tf.saved_model.utils.build_tensor_info(tensor)
for key, tensor in six.iteritems(input_ops.default_inputs)}
signature_outputs = {key: tf.saved_model.utils.build_tensor_info(tensor)
for key, tensor in six.iteritems(output_fetch_tensors)}
signature_def_map = {
'serving_default':
signature_def_utils.build_signature_def(
signature_inputs,
signature_outputs,
tf.saved_model.signature_constants.PREDICT_METHOD_NAME)}
if not checkpoint_path:
# Locate the latest checkpoint
checkpoint_path = saver.latest_checkpoint(estimator._model_dir)
if not checkpoint_path:
raise ValueError("Couldn't find trained model at %s."
% estimator._model_dir)
export_dir = saved_model_export_utils.get_timestamped_export_dir(
export_dir_base)
if (model_fn_ops.scaffold is not None and
model_fn_ops.scaffold.saver is not None):
saver_for_restore = model_fn_ops.scaffold.saver
else:
saver_for_restore = saver.Saver(sharded=True)
with tf_session.Session('') as session:
saver_for_restore.restore(session, checkpoint_path)
init_op = control_flow_ops.group(
variables.local_variables_initializer(),
resources.initialize_resources(resources.shared_resources()),
tf.tables_initializer())
# Perform the export
builder = saved_model_builder.SavedModelBuilder(export_dir)
builder.add_meta_graph_and_variables(
session, [tag_constants.SERVING],
signature_def_map=signature_def_map,
assets_collection=ops.get_collection(
ops.GraphKeys.ASSET_FILEPATHS),
legacy_init_op=init_op)
builder.save(False)
# Add the extra assets
if assets_extra:
assets_extra_path = os.path.join(compat.as_bytes(export_dir),
compat.as_bytes('assets.extra'))
for dest_relative, source in assets_extra.items():
dest_absolute = os.path.join(compat.as_bytes(assets_extra_path),
compat.as_bytes(dest_relative))
dest_path = os.path.dirname(dest_absolute)
file_io.recursive_create_dir(dest_path)
file_io.copy(source, dest_absolute)
# only keep the last 3 models
saved_model_export_utils.garbage_collect_exports(
export_dir_base,
exports_to_keep=3)
# save the last model to the model folder.
# export_dir_base = A/B/intermediate_models/
if keep_target:
final_dir = os.path.join(args.job_dir, 'evaluation_model')
else:
final_dir = os.path.join(args.job_dir, 'model')
if file_io.is_directory(final_dir):
file_io.delete_recursively(final_dir)
file_io.recursive_create_dir(final_dir)
recursive_copy(export_dir, final_dir)
return export_dir
if keep_target:
intermediate_dir = 'intermediate_evaluation_models'
else:
intermediate_dir = 'intermediate_prediction_models'
return export_strategy.ExportStrategy(intermediate_dir, export_fn) | 0.007731 |
def get_members(self, selector):
"""
Returns the members that satisfy the given selector.
:param selector: (:class:`~hazelcast.core.MemberSelector`), Selector to be applied to the members.
:return: (List), List of members.
"""
members = []
for member in self.get_member_list():
if selector.select(member):
members.append(member)
return members | 0.006865 |
def create(self, doc_details):
'''
a method to create a new document in the collection
:param doc_details: dictionary with document details and user id value
:return: dictionary with document details and _id and _rev values
'''
# https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/document/post__db___doc_
title = '%s.create' % self.__class__.__name__
# validate input
if self.model:
doc_details = self.model.validate(doc_details, path_to_root='', object_title='%s(doc_details={...}' % title)
# define request fields
from copy import deepcopy
new_record = deepcopy(doc_details)
url = self.bucket_url + '/'
# send request and construct output
response = requests.post(url, json=new_record)
if response.status_code not in (200, 201):
response = response.json()
raise Exception('%s() error: %s' % (title, response))
response = response.json()
new_record['_id'] = response['id']
new_record['_rev'] = response['rev']
return new_record | 0.004188 |
def new_comment(self, string, start, end, line):
""" Possibly add a new comment.
Only adds a new comment if this comment is the only thing on the line.
Otherwise, it extends the noncomment block.
"""
prefix = line[:start[1]]
if prefix.strip():
# Oops! Trailing comment, not a comment block.
self.current_block.add(string, start, end, line)
else:
# A comment block.
block = Comment(start[0], end[0], string)
self.blocks.append(block)
self.current_block = block | 0.003401 |
def recipe_status(backend):
"""
Compare local recipe to remote recipe for the current recipe.
"""
kitchen = DKCloudCommandRunner.which_kitchen_name()
if kitchen is None:
raise click.ClickException('You are not in a Kitchen')
recipe_dir = DKRecipeDisk.find_recipe_root_dir()
if recipe_dir is None:
raise click.ClickException('You must be in a Recipe folder')
recipe_name = DKRecipeDisk.find_recipe_name()
click.secho("%s - Getting the status of Recipe '%s' in Kitchen '%s'\n\tversus directory '%s'" % (
get_datetime(), recipe_name, kitchen, recipe_dir), fg='green')
check_and_print(DKCloudCommandRunner.recipe_status(backend.dki, kitchen, recipe_name, recipe_dir)) | 0.004127 |
def set_out(self, que_out, num_followers):
"""Set the queue in output and the number of parallel tasks that follow"""
self._que_out = que_out
self._num_followers = num_followers | 0.014925 |
def loadOrInitSettings(self, groupName=None):
""" Reads the registry items from the persistent settings store, falls back on the
default plugins if there are no settings in the store for this registry.
"""
groupName = groupName if groupName else self.settingsGroupName
settings = QtCore.QSettings()
#for key in sorted(settings.allKeys()):
# print(key)
if containsSettingsGroup(groupName, settings):
self.loadSettings(groupName)
else:
logger.info("Group {!r} not found, falling back on default settings".format(groupName))
for item in self.getDefaultItems():
self.registerItem(item)
self.saveSettings(groupName)
assert containsSettingsGroup(groupName, settings), \
"Sanity check failed. {} not found".format(groupName) | 0.006742 |
def type(self, s, enter=False, clear=False):
"""Input some text, this method has been tested not very stable on some device.
"Hi world" maybe spell into "H iworld"
Args:
- s: string (text to input), better to be unicode
- enter(bool): input enter at last
- next(bool): perform editor action Next
- clear(bool): clear text before type
- ui_select_kwargs(**): tap then type
The Android source code shows that
a space needs to be encoded as %s;
interestingly, this makes typing a literal %s rather inconvenient.
The relevant Android source code can be found here:
https://android.googlesource.com/platform/frameworks/base/+/android-4.4.2_r1/cmds/input/src/com/android/commands/input/Input.java#159
app source see here: https://github.com/openatx/android-unicode
"""
if clear:
self.clear_text()
self._uiauto.send_keys(s)
if enter:
self.keyevent('KEYCODE_ENTER') | 0.003865 |
def seq_seqhash(seq, normalize=True):
"""returns 24-byte Truncated Digest sequence `seq`
>>> seq_seqhash("")
'z4PhNX7vuL3xVChQ1m2AB9Yg5AULVxXc'
>>> seq_seqhash("ACGT")
'aKF498dAxcJAqme6QYQ7EZ07-fiw8Kw2'
>>> seq_seqhash("acgt")
'aKF498dAxcJAqme6QYQ7EZ07-fiw8Kw2'
>>> seq_seqhash("acgt", normalize=False)
'eFwawHHdibaZBDcs9kW3gm31h1NNJcQe'
"""
seq = normalize_sequence(seq) if normalize else seq
return str(vmc_digest(seq, digest_size=24)) | 0.002037 |
def clean_config(config):
"""Check if all values have defaults and replace errors with their default value
:param config: the configobj to clean
:type config: ConfigObj
:returns: None
:raises: ConfigError
The object is validated, so we need a spec file. All failed values will be replaced
by their default values. If default values are not specified in the spec, a
MissingDefaultError will be raised. If the replaced values still fail validation,
a ValueError is raised. This can occur if the default is of the wrong type.
If the object does not have a config spec, this function does nothing.
You are on your own then.
"""
if config.configspec is None:
return
vld = Validator()
validation = config.validate(vld, copy=True)
config.configspec.walk(check_default_values, validator=vld)
fix_errors(config, validation)
validation = config.validate(vld, copy=True)
if not (validation == True): # NOQA seems unpythonic but this validation evaluates that way only
msg = 'The config could not be fixed. Make sure that all default values have the right type!'
log.debug(msg)
raise ConfigError(msg) | 0.004167 |
def map_lrepr(
entries: Callable[[], Iterable[Tuple[Any, Any]]],
start: str,
end: str,
meta=None,
**kwargs,
) -> str:
"""Produce a Lisp representation of an associative collection, bookended
with the start and end string supplied. The entries argument must be a
callable which will produce tuples of key-value pairs.
The keyword arguments will be passed along to lrepr for the sequence
elements."""
print_level = kwargs["print_level"]
if isinstance(print_level, int) and print_level < 1:
return SURPASSED_PRINT_LEVEL
kwargs = _process_kwargs(**kwargs)
def entry_reprs():
for k, v in entries():
yield "{k} {v}".format(k=lrepr(k, **kwargs), v=lrepr(v, **kwargs))
trailer = []
print_dup = kwargs["print_dup"]
print_length = kwargs["print_length"]
if not print_dup and isinstance(print_length, int):
items = seq(entry_reprs()).take(print_length + 1).to_list()
if len(items) > print_length:
items.pop()
trailer.append(SURPASSED_PRINT_LENGTH)
else:
items = list(entry_reprs())
seq_lrepr = PRINT_SEPARATOR.join(items + trailer)
print_meta = kwargs["print_meta"]
if print_meta and meta:
return f"^{lrepr(meta, **kwargs)} {start}{seq_lrepr}{end}"
return f"{start}{seq_lrepr}{end}" | 0.000739 |
def _reset(self):
'''
_reset - reset this object. Assigned to .reset after __init__ call.
'''
HTMLParser.reset(self)
self.root = None
self.doctype = None
self._inTag = [] | 0.008658 |
def namedb_offset_count_predicate( offset=None, count=None ):
"""
Make an offset/count predicate
even if offset=None or count=None.
Return (query, args)
"""
offset_count_query = ""
offset_count_args = ()
if count is not None:
offset_count_query += "LIMIT ? "
offset_count_args += (count,)
if count is not None and offset is not None:
offset_count_query += "OFFSET ? "
offset_count_args += (offset,)
return (offset_count_query, offset_count_args) | 0.005758 |
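A brief usage illustration of the predicate builder above (the surrounding SQL statement is hypothetical):
query, args = namedb_offset_count_predicate(offset=50, count=20)
# query == "LIMIT ? OFFSET ? "
# args  == (20, 50)
# Typical use: append `query` to a base statement and bind `args` as parameters,
# e.g. "SELECT * FROM name_records " + query   (table name made up for illustration)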
def widgetForName(self, name):
"""Gets a widget with *name*
:param name: the widgets in this container should all have
a name() method. This is the string to match to that result
:type name: str
"""
for iwidget in range(len(self)):
if self.widget(iwidget).name() == name:
return self.widget(iwidget) | 0.005305 |
def _validate_pixel_density(self):
"""
Validate image pixel density
See `spec <https://ocr-d.github.io/mets#pixel-density-of-images-must-be-explicit-and-high-enough>`_.
"""
for f in [f for f in self.mets.find_files() if f.mimetype.startswith('image/')]:
if not f.local_filename and not self.download:
self.report.add_notice("Won't download remote image <%s>" % f.url)
continue
exif = self.workspace.resolve_image_exif(f.url)
for k in ['xResolution', 'yResolution']:
v = exif.__dict__.get(k)
if v is None or v <= 72:
self.report.add_error("Image %s: %s (%s pixels per %s) is too low" % (f.ID, k, v, exif.resolutionUnit)) | 0.007692 |
def findfirst(f, coll):
"""Return first occurrence matching f, otherwise None"""
result = list(dropwhile(f, coll))
return result[0] if result else None | 0.006024 |
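A minimal usage sketch of the dropwhile-based helper above: itertools.dropwhile skips leading elements while the predicate is truthy, so the value returned is the first element for which f evaluates to False.
from itertools import dropwhile

coll = [1, 2, 3, 4]
remaining = list(dropwhile(lambda x: x < 3, coll))
print(remaining[0] if remaining else None)       # -> 3, the first element where the predicate is False
print(list(dropwhile(lambda x: x < 3, [1, 2])))  # -> [], so findfirst would return None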
def selectedConnections( self ):
"""
Returns a list of the selected connections in a scene.
:return <list> [ <XNodeConnection>, .. ]
"""
output = []
for item in self.selectedItems():
if ( isinstance(item, XNodeConnection) ):
output.append(item)
return output | 0.019718 |
def destroy_comment(self, access_token, comment_id):
"""doc: http://open.youku.com/docs/doc?id=42
"""
url = 'https://openapi.youku.com/v2/comments/destroy.json'
data = {
'client_id': self.client_id,
'access_token': access_token,
'comment_id': comment_id
}
r = requests.post(url, data=data)
check_error(r)
return r.json()['id'] | 0.004695 |
def count_integers(arr,
weights=None,
minlength=None,
maxlength=None,
axis=None,
dtype=tf.int32,
name=None):
"""Counts the number of occurrences of each value in an integer array `arr`.
Works like `tf.math.bincount`, but provides an `axis` kwarg that specifies
dimensions to reduce over. With
`~axis = [i for i in range(arr.ndim) if i not in axis]`,
this function returns a `Tensor` of shape `[K] + arr.shape[~axis]`.
If `minlength` and `maxlength` are not given, `K = tf.reduce_max(arr) + 1`
if `arr` is non-empty, and 0 otherwise.
If `weights` are non-None, then index `i` of the output stores the sum of the
value in `weights` at each index where the corresponding value in `arr` is
`i`.
Args:
arr: An `int32` `Tensor` of non-negative values.
weights: If non-None, must be the same shape as arr. For each value in
`arr`, the bin will be incremented by the corresponding weight instead of
1.
minlength: If given, ensures the output has length at least `minlength`,
padding with zeros at the end if necessary.
maxlength: If given, skips values in `arr` that are equal or greater than
`maxlength`, ensuring that the output has length at most `maxlength`.
axis: A `0-D` or `1-D` `int32` `Tensor` (with static values) designating
dimensions in `arr` to reduce over.
`Default value:` `None`, meaning reduce over all dimensions.
dtype: If `weights` is None, determines the type of the output bins.
name: A name scope for the associated operations (optional).
Returns:
A vector with the same dtype as `weights` or the given `dtype`. The bin
values.
"""
with tf.compat.v1.name_scope(
name, 'count_integers', values=[arr, weights, minlength, maxlength,
axis]):
if axis is None:
return tf.math.bincount(
arr,
weights=weights,
minlength=minlength,
maxlength=maxlength,
dtype=dtype)
arr = tf.convert_to_tensor(value=arr, dtype=tf.int32, name='arr')
arr_ndims = _get_static_ndims(arr, expect_static=True)
axis = _make_static_axis_non_negative_list(axis, arr_ndims)
# ~axis from docstring. Dims in arr that are not in axis.
not_axis = sorted(set(range(arr_ndims)).difference(axis))
# If we're reducing over everything, just use standard bincount.
if not not_axis:
return tf.math.bincount(
arr,
weights=weights,
minlength=minlength,
maxlength=maxlength,
dtype=dtype)
# Move dims in ~axis to the left, so we can tf.map_fn bincount over them,
# Producing counts for every index I in ~axis.
# Thus, flat_arr is not totally flat, it just has the dims in ~axis
# flattened.
flat_arr = _move_dims_to_flat_end(arr, not_axis, arr_ndims, right_end=False)
# tf.map_fn over dim 0.
if weights is None:
def one_bincount(arr_slice):
return tf.math.bincount(
arr_slice,
weights=None,
minlength=minlength,
maxlength=maxlength,
dtype=dtype)
flat_counts = tf.map_fn(one_bincount, elems=flat_arr, dtype=dtype)
else:
weights = tf.convert_to_tensor(value=weights, name='weights')
_get_static_ndims(weights, expect_static=True, expect_ndims=arr_ndims)
flat_weights = _move_dims_to_flat_end(
weights, not_axis, arr_ndims, right_end=False)
def one_bincount(arr_and_weights_slices):
arr_slice, weights_slice = arr_and_weights_slices
return tf.math.bincount(
arr_slice,
weights=weights_slice,
minlength=minlength,
maxlength=maxlength,
dtype=dtype)
flat_counts = tf.map_fn(
one_bincount, elems=[flat_arr, flat_weights], dtype=weights.dtype)
# flat_counts.shape = [prod(~axis), K], because map_fn stacked on axis 0.
# bincount needs to have the K bins in axis 0, so transpose...
flat_counts_t = tf.transpose(a=flat_counts, perm=[1, 0])
# Throw in this assert, to ensure shape assumptions are correct.
_get_static_ndims(flat_counts_t, expect_ndims=2, expect_static=True)
# not_axis_shape = arr.shape[~axis]
not_axis_shape = tf.gather(tf.shape(input=arr), indices=not_axis)
# The first index of flat_counts_t indexes bins 0,..,K-1, the rest are ~axis
out_shape = tf.concat([[-1], not_axis_shape], axis=0)
return tf.reshape(flat_counts_t, out_shape) | 0.003038 |
def SegmentSum(a, ids, *args):
"""
Segmented sum op.
"""
func = lambda idxs: reduce(np.add, a[idxs])
return seg_map(func, a, ids) | 0.013333 |
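The seg_map helper is not shown in the snippet, so here is a rough standalone sketch of the intended behaviour, assuming ids assigns each element of a to a segment:
import numpy as np
from functools import reduce

a = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
ids = np.array([0, 0, 1, 1, 1])
# Sum the elements of `a` belonging to each distinct segment id.
segment_sums = [reduce(np.add, a[ids == i]) for i in np.unique(ids)]
print(segment_sums)  # -> [3.0, 12.0]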
def _get_ref_from_galaxy_loc(name, genome_build, loc_file, galaxy_dt, need_remap,
galaxy_config, data):
"""Retrieve reference genome file from Galaxy *.loc file.
Reads from tool_data_table_conf.xml information for the index if it
exists, otherwise uses heuristics to find line based on most common setups.
"""
refs = [ref for dbkey, ref in _galaxy_loc_iter(loc_file, galaxy_dt, need_remap)
if dbkey == genome_build]
remap_fn = alignment.TOOLS[name].remap_index_fn
need_remap = remap_fn is not None
if len(refs) == 0:
raise ValueError("Did not find genome build %s in bcbio installation: %s" %
(genome_build, os.path.normpath(loc_file)))
else:
cur_ref = refs[-1]
# Find genome directory and check for packed wf tarballs
cur_ref_norm = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"]))
base_dir_i = cur_ref_norm.find("/%s/" % genome_build)
base_dir = os.path.join(cur_ref_norm[:base_dir_i], genome_build)
for tarball in glob.glob(os.path.join(base_dir, "*-wf.tar.gz")):
cwlutils.unpack_tarballs(tarball, {"dirs": {"work": base_dir}}, use_subdir=False)
if need_remap:
assert remap_fn is not None, "%s requires remapping function from base location file" % name
cur_ref = os.path.normpath(utils.add_full_path(cur_ref, galaxy_config["tool_data_path"]))
cur_ref = remap_fn(os.path.abspath(cur_ref))
return cur_ref | 0.005277 |
def _declare(self, var):
""" Declare the variable `var` """
if var.name in self._declarations:
raise ValueError('Variable already declared')
self._declarations[var.name] = var
return var | 0.008696 |
def _Backward3_P_hs(h, s):
"""Backward equation for region 3, P=f(h,s)
Parameters
----------
h : float
Specific enthalpy, [kJ/kg]
s : float
Specific entropy, [kJ/kgK]
Returns
-------
P : float
Pressure, [MPa]
"""
sc = 4.41202148223476
if s <= sc:
return _Backward3a_P_hs(h, s)
else:
return _Backward3b_P_hs(h, s) | 0.002488 |
def register():
"""Plugin registration."""
from pelican import signals
signals.initialized.connect(setup_git)
signals.article_generator_finalized.connect(replace_git_url) | 0.005319 |
def hiveconfs(self):
"""
Returns a dict of key=value settings to be passed along
to the hive command line via --hiveconf. By default, sets
mapred.job.name to task_id and if not None, sets:
* mapred.reduce.tasks (n_reduce_tasks)
* mapred.fairscheduler.pool (pool) or mapred.job.queue.name (pool)
* hive.exec.reducers.bytes.per.reducer (bytes_per_reducer)
* hive.exec.reducers.max (reducers_max)
"""
jcs = {}
jcs['mapred.job.name'] = "'" + self.task_id + "'"
if self.n_reduce_tasks is not None:
jcs['mapred.reduce.tasks'] = self.n_reduce_tasks
if self.pool is not None:
# Supporting two schedulers: fair (default) and capacity using the same option
scheduler_type = luigi.configuration.get_config().get('hadoop', 'scheduler', 'fair')
if scheduler_type == 'fair':
jcs['mapred.fairscheduler.pool'] = self.pool
elif scheduler_type == 'capacity':
jcs['mapred.job.queue.name'] = self.pool
if self.bytes_per_reducer is not None:
jcs['hive.exec.reducers.bytes.per.reducer'] = self.bytes_per_reducer
if self.reducers_max is not None:
jcs['hive.exec.reducers.max'] = self.reducers_max
return jcs | 0.003762 |
def dumps(self, indent=1):
""" Returns nested string representation of the dictionary (like json.dumps).
:param indent: indentation level.
"""
str_keys_dict = OrderedDict({str(k): v for k, v in self.items()})
for k, v in str_keys_dict.items():
if isinstance(v, dict):
str_keys_dict[k] = OrderedDict({str(k1): v1 for k1, v1 in v.items()})
for k1, v1 in str_keys_dict[k].items():
if isinstance(v1, dict):
str_keys_dict[k][k1] = OrderedDict({str(k2): v2 for k2, v2 in v1.items()})
return json.dumps(str_keys_dict, indent=indent) | 0.007541 |
def quantile_binning(data=None, bins=10, *, qrange=(0.0, 1.0), **kwargs) -> StaticBinning:
"""Binning schema based on quantile ranges.
This binning finds equally spaced quantiles. This should lead to
all bins having roughly the same frequencies.
Note: weights are not (yet) taken into account for calculating
quantiles.
Parameters
----------
bins: sequence or Optional[int]
Number of bins
qrange: Optional[tuple]
Two floats as minimum and maximum quantile (default: 0.0, 1.0)
Returns
-------
StaticBinning
"""
if np.isscalar(bins):
bins = np.linspace(qrange[0] * 100, qrange[1] * 100, bins + 1)
bins = np.percentile(data, bins)
return static_binning(bins=make_bin_array(bins), includes_right_edge=True) | 0.002519 |
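A small standalone illustration of the percentile-based edge computation used above (plain numpy only; static_binning and make_bin_array from the snippet are not reproduced here):
import numpy as np

rng = np.random.default_rng(0)
data = rng.normal(size=1000)

bins, qrange = 4, (0.0, 1.0)
quantiles = np.linspace(qrange[0] * 100, qrange[1] * 100, bins + 1)
edges = np.percentile(data, quantiles)
# With edges at the 0/25/50/75/100th percentiles, each bin holds roughly 250 samples.
counts, _ = np.histogram(data, bins=edges)
print(edges, counts)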
def build_map(function: Callable[[Any], Any] = None,
unpack: bool = False):
""" Decorator to wrap a function to return a Map operator.
:param function: function to be wrapped
:param unpack: value from emits will be unpacked (*value)
"""
def _build_map(function: Callable[[Any], Any]):
@wraps(function)
def _wrapper(*args, **kwargs) -> Map:
if 'unpack' in kwargs:
raise TypeError('"unpack" has to be defined by decorator')
return Map(function, *args, unpack=unpack, **kwargs)
return _wrapper
if function:
return _build_map(function)
return _build_map | 0.001502 |
def make_node(can_device_name, **kwargs):
"""Constructs a node instance with specified CAN device.
:param can_device_name: CAN device name, e.g. "/dev/ttyACM0", "COM9", "can0".
:param kwargs: These arguments will be supplied to the CAN driver factory and to the node constructor.
"""
can = driver.make_driver(can_device_name, **kwargs)
return Node(can, **kwargs) | 0.007772 |
def mark(self, lineno, count=1):
"""Mark a given source line as executed count times.
Multiple calls to mark for the same lineno add up.
"""
self.sourcelines[lineno] = self.sourcelines.get(lineno, 0) + count | 0.008333 |
def get_region(service, region, profile):
"""
Retrieve the region for a particular AWS service based on configured region and/or profile.
"""
_, region, _, _ = _get_profile(service, region, None, None, profile)
return region | 0.008163 |
def _handle_error(self, data, params):
"""Handle an error response from the SABnzbd API"""
error = data.get('error', 'API call failed')
mode = params.get('mode')
raise SabnzbdApiException(error, mode=mode) | 0.008439 |
def addCurvatureScalars(self, method=0, lut=None):
"""
Build an ``Actor`` that contains the color coded surface
curvature calculated in three different ways.
:param int method: 0-gaussian, 1-mean, 2-max, 3-min curvature.
:param float lut: optional look up table.
:Example:
.. code-block:: python
from vtkplotter import *
t = Torus().addCurvatureScalars()
show(t)
|curvature|
"""
curve = vtk.vtkCurvatures()
curve.SetInputData(self.poly)
curve.SetCurvatureType(method)
curve.Update()
self.poly = curve.GetOutput()
scls = self.poly.GetPointData().GetScalars().GetRange()
print("curvature(): scalar range is", scls)
self.mapper.SetInputData(self.poly)
if lut:
self.mapper.SetLookupTable(lut)
self.mapper.SetUseLookupTableScalarRange(1)
self.mapper.Update()
self.Modified()
self.mapper.ScalarVisibilityOn()
return self | 0.002757 |
def span_in_context(span):
"""
Create a context manager that stores the given span in the thread-local
request context. This function should only be used in single-threaded
applications like Flask / uWSGI.
## Usage example in WSGI middleware:
.. code-block:: python
from opentracing_instrumentation.http_server import WSGIRequestWrapper
from opentracing_instrumentation.http_server import before_request
from opentracing_instrumentation import request_context
def create_wsgi_tracing_middleware(other_wsgi):
def wsgi_tracing_middleware(environ, start_response):
request = WSGIRequestWrapper.from_wsgi_environ(environ)
span = before_request(request=request, tracer=tracer)
# Wrapper around the real start_response object to log
# additional information to opentracing Span
def start_response_wrapper(status, response_headers,
exc_info=None):
if exc_info is not None:
span.log(event='exception', payload=exc_info)
span.finish()
return start_response(status, response_headers)
with request_context.span_in_context(span):
return other_wsgi(environ, start_response_wrapper)
return wsgi_tracing_middleware
:param span: OpenTracing Span
:return:
Return context manager that wraps the request context.
"""
# Return a no-op Scope if None was specified.
if span is None:
return opentracing.Scope(None, None)
return opentracing.tracer.scope_manager.activate(span, False) | 0.000577 |
def _treat_devices_removed(self):
"""Process the removed devices."""
for device in self._removed_ports.copy():
eventlet.spawn_n(self._process_removed_port, device) | 0.010471 |
def grad_local_log_likelihood(self, x):
"""
d/d \psi y \psi - log (1 + exp(\psi))
= y - exp(\psi) / (1 + exp(\psi))
= y - sigma(psi)
= y - p
d \psi / dx = C
d / dx = (y - sigma(psi)) * C
"""
C, D, u, y = self.C, self.D, self.inputs, self.data
psi = x.dot(C.T) + u.dot(D.T)
p = 1. / (1 + np.exp(-psi))
return (y - p).dot(C) | 0.018476 |
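The docstring's derivation, restated in LaTeX for readability (Bernoulli observations with logit \psi = xC^\top + uD^\top, exactly as computed in the code):
\[
\frac{\partial}{\partial \psi}\Bigl[y\,\psi - \log\bigl(1 + e^{\psi}\bigr)\Bigr]
  = y - \frac{e^{\psi}}{1 + e^{\psi}} = y - \sigma(\psi) = y - p,
\qquad
\frac{\partial \psi}{\partial x} = C
\;\Longrightarrow\;
\nabla_x \log p(y \mid x) = \bigl(y - \sigma(\psi)\bigr)\,C .
\]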
def record(self):
# type: () -> bytes
'''
Generate a string representing the Rock Ridge Symbolic Link record.
Parameters:
None.
Returns:
String containing the Rock Ridge record.
'''
if not self._initialized:
raise pycdlibexception.PyCdlibInternalError('SL record not yet initialized!')
outlist = [b'SL', struct.pack('=BBB', self.current_length(), SU_ENTRY_VERSION, self.flags)]
for comp in self.symlink_components:
outlist.append(comp.record())
return b''.join(outlist) | 0.008418 |
def setValueSafe(self, row, value):
'setValue and ignore exceptions'
try:
return self.setValue(row, value)
except Exception as e:
exceptionCaught(e) | 0.010204 |
def compile_proto(source, python_out, proto_path):
"""Invoke Protocol Compiler to generate python from given source .proto."""
if not protoc:
sys.exit('protoc not found. Is the protobuf-compiler installed?\n')
protoc_command = [
protoc,
'--proto_path', proto_path,
'--python_out', python_out,
source,
]
if subprocess.call(protoc_command) != 0:
sys.exit('Make sure your protoc version >= 2.6. You can use a custom '
'protoc by setting the PROTOC environment variable.') | 0.009542 |
def _compute_quads(self, element, data, mapping):
"""
Computes the node quad glyph data.
"""
quad_mapping = {'left': 'x0', 'right': 'x1', 'bottom': 'y0', 'top': 'y1'}
quad_data = dict(data['scatter_1'])
quad_data.update({'x0': [], 'x1': [], 'y0': [], 'y1': []})
for node in element._sankey['nodes']:
quad_data['x0'].append(node['x0'])
quad_data['y0'].append(node['y0'])
quad_data['x1'].append(node['x1'])
quad_data['y1'].append(node['y1'])
data['scatter_1'].update(quad_data)
data['quad_1'] = data['scatter_1']
mapping['quad_1'] = quad_mapping | 0.004438 |
def get_translations(self, context_id):
"""Retrieves all translation entries for a tunnel context.
:param int context_id: The id-value representing the context instance.
:return list(dict): Translations associated with the given context
"""
_mask = ('[mask[addressTranslations[customerIpAddressRecord,'
'internalIpAddressRecord]]]')
context = self.get_tunnel_context(context_id, mask=_mask)
# Pull the internal and remote IP addresses into the translation
for translation in context.get('addressTranslations', []):
remote_ip = translation.get('customerIpAddressRecord', {})
internal_ip = translation.get('internalIpAddressRecord', {})
translation['customerIpAddress'] = remote_ip.get('ipAddress', '')
translation['internalIpAddress'] = internal_ip.get('ipAddress', '')
translation.pop('customerIpAddressRecord', None)
translation.pop('internalIpAddressRecord', None)
return context['addressTranslations'] | 0.001876 |
def mmInformation(NetworkName_presence=0, NetworkName_presence1=0,
TimeZone_presence=0, TimeZoneAndTime_presence=0,
LsaIdentifier_presence=0):
"""MM INFORMATION Section 9.2.15a"""
a = TpPd(pd=0x5)
b = MessageType(mesType=0x32) # 00110010
packet = a / b
if NetworkName_presence == 1:
c = NetworkNameHdr(ieiNN=0x43, eightBitNN=0x0)
packet = packet / c
if NetworkName_presence1 == 1:
d = NetworkNameHdr(ieiNN=0x45, eightBitNN=0x0)
packet = packet / d
if TimeZone_presence == 1:
e = TimeZoneHdr(ieiTZ=0x46, eightBitTZ=0x0)
packet = packet / e
if TimeZoneAndTime_presence == 1:
f = TimeZoneAndTimeHdr(ieiTZAT=0x47, eightBitTZAT=0x0)
packet = packet / f
if LsaIdentifier_presence == 1:
g = LsaIdentifierHdr(ieiLI=0x48, eightBitLI=0x0)
packet = packet / g
return packet | 0.001088 |
def share_application_with_accounts(application_id, account_ids, sar_client=None):
"""
Share the application privately with given AWS account IDs.
:param application_id: The Amazon Resource Name (ARN) of the application
:type application_id: str
:param account_ids: List of AWS account IDs, or *
:type account_ids: list of str
:param sar_client: The boto3 client used to access SAR
:type sar_client: boto3.client
:raises ValueError
"""
if not application_id or not account_ids:
raise ValueError('Require application id and list of AWS account IDs to share the app')
if not sar_client:
sar_client = boto3.client('serverlessrepo')
application_policy = ApplicationPolicy(account_ids, [ApplicationPolicy.DEPLOY])
application_policy.validate()
sar_client.put_application_policy(
ApplicationId=application_id,
Statements=[application_policy.to_statement()]
) | 0.004206 |
def _create(self):
"""Create the Whisper file on disk"""
if not os.path.exists(settings.SALMON_WHISPER_DB_PATH):
os.makedirs(settings.SALMON_WHISPER_DB_PATH)
archives = [whisper.parseRetentionDef(retentionDef)
for retentionDef in settings.ARCHIVES.split(",")]
whisper.create(self.path, archives,
xFilesFactor=settings.XFILEFACTOR,
aggregationMethod=settings.AGGREGATION_METHOD) | 0.004107 |
def update(self, i):
"""D.update(E) -> None. Update D from iterable E with pre-existing
items being overwritten.
Elements in E are assumed to be dicts containing the primary key to
allow the equivalent of:
for k in E: D[k.primary_key] = k
"""
key_list = self.key_list
keynone = {key:None for key in key_list}
# Generator which fills in missing data from the original iterator
def datagen(i):
for datum in i:
tmp = keynone.copy()
tmp.update(datum)
yield tmp
with self._connection as con:
con.executemany(
"""INSERT OR REPLACE INTO {table} ({keylist})
VALUES (:{vallist});
""".format(table=self.table,
keylist=", ".join(self.key_list),
vallist=", :".join(self.key_list)
), datagen(i)) | 0.008368 |
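For concreteness, the statement produced by the format call above for a hypothetical table and key list (both names made up for illustration):
table = 'records'            # hypothetical
key_list = ['id', 'name']    # hypothetical
sql = """INSERT OR REPLACE INTO {table} ({keylist})
VALUES (:{vallist});
""".format(table=table,
           keylist=", ".join(key_list),
           vallist=", :".join(key_list))
print(sql)
# INSERT OR REPLACE INTO records (id, name)
# VALUES (:id, :name);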
def my_import(name):
""" dynamic importing """
module, attr = name.rsplit('.', 1)
mod = __import__(module, fromlist=[attr])
klass = getattr(mod, attr)
return klass() | 0.005405 |
def index():
"""Generate a list of all crawlers, alphabetically, with op counts."""
crawlers = []
for crawler in manager:
data = Event.get_counts(crawler)
data['last_active'] = crawler.last_run
data['total_ops'] = crawler.op_count
data['running'] = crawler.is_running
data['crawler'] = crawler
crawlers.append(data)
return render_template('index.html', crawlers=crawlers) | 0.002299 |
def checkCAS(CASRN):
'''Checks if a CAS number is valid. Returns False if the parser cannot
parse the given string.
Parameters
----------
CASRN : string
A three-piece, dash-separated set of numbers
Returns
-------
result : bool
Boolean value if CASRN was valid. If parsing fails, return False also.
Notes
-----
Check method is according to Chemical Abstract Society. However, no lookup
to their service is performed; therefore, this function cannot detect
false positives.
Function also does not support additional separators, apart from '-'.
CAS numbers up to the series 1 XXX XXX-XX-X are now being issued.
A long can hold CAS numbers up to 2 147 483-64-7
Examples
--------
>>> checkCAS('7732-18-5')
True
>>> checkCAS('77332-18-5')
False
'''
try:
check = CASRN[-1]
CASRN = CASRN[::-1][1:]
productsum = 0
i = 1
for num in CASRN:
if num == '-':
pass
else:
productsum += i*int(num)
i += 1
return (productsum % 10 == int(check))
except:
return False | 0.004139 |
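A worked check of the digit arithmetic above for the water CAS number 7732-18-5:
# Digits excluding the check digit, read right-to-left: 8 1 2 3 7 7
# Weighted sum: 1*8 + 2*1 + 3*2 + 4*3 + 5*7 + 6*7 = 105, and 105 % 10 == 5,
# which matches the final check digit, so '7732-18-5' is valid.
digits = [8, 1, 2, 3, 7, 7]
weighted = sum(i * d for i, d in enumerate(digits, start=1))
print(weighted, weighted % 10)  # -> 105 5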
def remote_unpack(self):
"""
Called by remote worker to state that no more data will be transferred
"""
# Make sure remote_close is called, otherwise atomic rename won't happen
self.remote_close()
# Map configured compression to a TarFile setting
if self.compress == 'bz2':
mode = 'r|bz2'
elif self.compress == 'gz':
mode = 'r|gz'
else:
mode = 'r'
# Unpack archive and clean up after self
archive = tarfile.open(name=self.tarname, mode=mode)
archive.extractall(path=self.destroot)
archive.close()
os.remove(self.tarname) | 0.004478 |
def send_to_back(self):
"""adjusts sprite's z-order so that the sprite is behind it's
siblings"""
if not self.parent:
return
self.z_order = self.parent._z_ordered_sprites[0].z_order - 1 | 0.008734 |
def log_errors(f, self, *args, **kwargs):
"""decorator to log unhandled exceptions raised in a method.
For use wrapping on_recv callbacks, so that exceptions
do not cause the stream to be closed.
"""
try:
return f(self, *args, **kwargs)
except Exception:
self.log.error("Uncaught exception in %r" % f, exc_info=True) | 0.00554 |
def update_backend(use_pypi=False, index='dev', build=True, user=None, version=None):
"""
Install the backend from the given devpi index at the given version on the target host and restart the service.
If version is None, it defaults to the latest version
Optionally, build and upload the application first from local sources. This requires a
full backend development environment on the machine running this command (pyramid etc.)
"""
get_vars()
if value_asbool(build):
upload_backend(index=index, user=user)
with fab.cd('{apphome}'.format(**AV)):
if value_asbool(use_pypi):
command = 'bin/pip install --upgrade briefkasten'
else:
command = 'bin/pip install --upgrade --pre -i {ploy_default_publish_devpi}/briefkasten/{index}/+simple/ briefkasten'.format(
index=index,
user=user,
**AV)
if version:
command = '%s==%s' % (command, version)
fab.sudo(command)
briefkasten_ctl('restart') | 0.00572 |
def stft(self, samples: np.ndarray):
"""
Perform Short-time Fourier transform to get the spectrogram for the given samples
:return: short-time Fourier transform of the given signal
"""
window = self.window_function(self.window_size)
hop_size = self.hop_size
if len(samples) < self.window_size:
samples = np.append(samples, np.zeros(self.window_size - len(samples)))
num_frames = max(1, (len(samples) - self.window_size) // hop_size + 1)
# Get frames as numpy view with stride_tricks to save RAM
# Same as: frames = [padded_samples[i*hop_size:i*hop_size+self.window_size] for i in range(num_frames)]
shape = (num_frames, self.window_size)
strides = (hop_size * samples.strides[-1], samples.strides[-1])
frames = np.lib.stride_tricks.as_strided(samples, shape=shape, strides=strides)
result = np.fft.fft(frames * window, self.window_size) / np.atleast_1d(self.window_size)
return result | 0.006863 |
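A quick sanity check of the frame count and stride layout used above (example sizes chosen arbitrarily):
import numpy as np

samples = np.arange(1024, dtype=np.float64)
window_size, hop_size = 256, 128
num_frames = max(1, (len(samples) - window_size) // hop_size + 1)  # -> 7
shape = (num_frames, window_size)
strides = (hop_size * samples.strides[-1], samples.strides[-1])
frames = np.lib.stride_tricks.as_strided(samples, shape=shape, strides=strides)
# Row i is a zero-copy view of samples[i*hop_size : i*hop_size + window_size].
assert np.array_equal(frames[2], samples[2 * hop_size:2 * hop_size + window_size])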
def add_projection(query_proto, *projection):
"""Add projection properties to the given datatstore.Query proto message."""
for p in projection:
proto = query_proto.projection.add()
proto.property.name = p | 0.013889 |
def _get_site(self, url, headers, cookies, timeout, driver_args, driver_kwargs):
"""
Try and return page content in the requested format using requests
"""
try:
# Headers and cookies are combined to the ones stored in the requests session
# Ones passed in here will override the ones in the session if they are the same key
response = self.driver.get(url,
*driver_args,
headers=headers,
cookies=cookies,
timeout=timeout,
**driver_kwargs)
# Set data to access from script
self.status_code = response.status_code
self.url = response.url
self.response = response
if response.status_code == requests.codes.ok:
# Return the correct format
return response.text
response.raise_for_status()
except Exception as e:
raise e.with_traceback(sys.exc_info()[2]) | 0.00439 |
def p_primary_expr_no_brace_4(self, p):
"""primary_expr_no_brace : LPAREN expr RPAREN"""
if isinstance(p[2], self.asttypes.GroupingOp):
# this reduces the grouping operator to one.
p[0] = p[2]
else:
p[0] = self.asttypes.GroupingOp(expr=p[2])
p[0].setpos(p) | 0.006098 |
def assert_shape_match(shape1, shape2):
"""Ensure the shape1 match the pattern given by shape2.
Ex:
assert_shape_match((64, 64, 3), (None, None, 3))
Args:
shape1 (tuple): Static shape
shape2 (tuple): Dynamic shape (can contain None)
"""
shape1 = tf.TensorShape(shape1)
shape2 = tf.TensorShape(shape2)
if shape1.ndims is None or shape2.ndims is None:
raise ValueError('Shapes must have known rank. Got %s and %s.' %
(shape1.ndims, shape2.ndims))
shape1.assert_same_rank(shape2)
shape1.assert_is_compatible_with(shape2) | 0.012195 |
async def json(self, *, encoding: Optional[str]=None) -> Any:
"""Like read(), but assumes that body parts contains JSON data."""
data = await self.read(decode=True)
if not data:
return None
encoding = encoding or self.get_charset(default='utf-8')
return json.loads(data.decode(encoding)) | 0.011799 |
def fetch(self, recursive=1, exclude_children=False, exclude_back_refs=False):
"""Fetch resource from the API server
:param recursive: level of recursion for fetching resources
:type recursive: int
:param exclude_children: don't get children references
:type exclude_children: bool
:param exclude_back_refs: don't get back_refs references
:type exclude_back_refs: bool
:rtype: Resource
"""
if not self.path.is_resource and not self.path.is_uuid:
self.check()
params = {}
# even if the param is False the API will exclude resources
if exclude_children:
params['exclude_children'] = True
if exclude_back_refs:
params['exclude_back_refs'] = True
data = self.session.get_json(self.href, **params)[self.type]
self.from_dict(data)
return self | 0.002198 |
def open_image(fname_or_instance: Union[str, IO[bytes]]):
"""Opens a Image and returns it.
:param fname_or_instance: Can either be the location of the image as a
string or the Image.Image instance itself.
"""
if isinstance(fname_or_instance, Image.Image):
return fname_or_instance
return Image.open(fname_or_instance) | 0.002653 |
def get_ngroups(self, field=None):
'''
Returns ngroups count if it was specified in the query, otherwise ValueError.
If grouping on more than one field, provide the field argument to specify which count you are looking for.
'''
field = field if field else self._determine_group_field(field)
if 'ngroups' in self.data['grouped'][field]:
return self.data['grouped'][field]['ngroups']
raise ValueError("ngroups not found in response. specify group.ngroups in the query.") | 0.009311 |
def egg_info_writer(cmd, basename, filename):
# type: (setuptools.command.egg_info.egg_info, str, str) -> None
"""Read rcli configuration and write it out to the egg info.
Args:
cmd: An egg info command instance to use for writing.
basename: The basename of the file to write.
filename: The full path of the file to write into the egg info.
"""
setupcfg = next((f for f in setuptools.findall()
if os.path.basename(f) == 'setup.cfg'), None)
if not setupcfg:
return
parser = six.moves.configparser.ConfigParser() # type: ignore
parser.read(setupcfg)
if not parser.has_section('rcli') or not parser.items('rcli'):
return
config = dict(parser.items('rcli')) # type: typing.Dict[str, typing.Any]
for k, v in six.iteritems(config):
if v.lower() in ('y', 'yes', 'true'):
config[k] = True
elif v.lower() in ('n', 'no', 'false'):
config[k] = False
else:
try:
config[k] = json.loads(v)
except ValueError:
pass
cmd.write_file(basename, filename, json.dumps(config)) | 0.000855 |
def validateBool(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, trueVal='True', falseVal='False', caseSensitive=False, excMsg=None):
"""Raises ValidationException if value is not an email address.
Returns the yesVal or noVal argument, not value.
* value (str): The value being validated as an email address.
* blank (bool): If True, a blank string will be accepted. Defaults to False.
* strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
* allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
* blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
* excMsg (str): A custom message to use in the raised ValidationException.
>>> import pysimplevalidate as pysv
>>> pysv.validateYesNo('y')
'yes'
>>> pysv.validateYesNo('YES')
'yes'
>>> pysv.validateYesNo('No')
'no'
>>> pysv.validateYesNo('OUI', yesVal='oui', noVal='no')
'oui'
"""
# Validate parameters. TODO - can probably improve this to remove the duplication.
_validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)
returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
if returnNow:
return value
# Replace the exception messages used in validateYesNo():
trueVal = str(trueVal)
falseVal = str(falseVal)
if len(trueVal) == 0:
raise PySimpleValidateException('trueVal argument must be a non-empty string.')
if len(falseVal) == 0:
raise PySimpleValidateException('falseVal argument must be a non-empty string.')
if (trueVal == falseVal) or (not caseSensitive and trueVal.upper() == falseVal.upper()):
raise PySimpleValidateException('trueVal and noVal arguments must be different.')
if (trueVal[0] == falseVal[0]) or (not caseSensitive and trueVal[0].upper() == falseVal[0].upper()):
raise PySimpleValidateException('first character of trueVal and noVal arguments must be different')
result = validateYesNo(value, blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes, yesVal=trueVal, noVal=falseVal, caseSensitive=caseSensitive, excMsg=None)
# Return a bool value instead of a string.
if result == trueVal:
return True
elif result == falseVal:
return False
else:
assert False, 'inner validateYesNo() call returned something that was not yesVal or noVal. This should never happen.' | 0.006076 |
def _serve_clients(self):
'''
Accept clients and serve them, one separate thread per client.
'''
self.__up = True
while self.__up:
log.debug('Waiting for a client to connect')
try:
conn, addr = self.skt.accept()
log.debug('Received connection from %s:%d', addr[0], addr[1])
except socket.error as error:
if not self.__up:
return
msg = 'Received listener socket error: {}'.format(error)
log.error(msg, exc_info=True)
raise ListenerException(msg)
client_thread = threading.Thread(target=self._client_connection, args=(conn, addr,))
client_thread.start() | 0.003942 |
def specwindow_lsp_value(times, mags, errs, omega):
'''This calculates the peak associated with the spectral window function
for times and at the specified omega.
NOTE: this is classical Lomb-Scargle, not the Generalized
Lomb-Scargle. `mags` and `errs` are silently ignored since we're calculating
the periodogram of the observing window function. These are kept to present
a consistent external API so the `pgen_lsp` function below can call this
transparently.
Parameters
----------
times,mags,errs : np.array
The time-series to calculate the periodogram value for.
omega : float
The frequency to calculate the periodogram value at.
Returns
-------
periodogramvalue : float
The normalized periodogram at the specified test frequency `omega`.
'''
norm_times = times - times.min()
tau = (
(1.0/(2.0*omega)) *
nparctan( npsum(npsin(2.0*omega*norm_times)) /
npsum(npcos(2.0*omega*norm_times)) )
)
lspval_top_cos = (npsum(1.0 * npcos(omega*(norm_times-tau))) *
npsum(1.0 * npcos(omega*(norm_times-tau))))
lspval_bot_cos = npsum( (npcos(omega*(norm_times-tau))) *
(npcos(omega*(norm_times-tau))) )
lspval_top_sin = (npsum(1.0 * npsin(omega*(norm_times-tau))) *
npsum(1.0 * npsin(omega*(norm_times-tau))))
lspval_bot_sin = npsum( (npsin(omega*(norm_times-tau))) *
(npsin(omega*(norm_times-tau))) )
lspval = 0.5 * ( (lspval_top_cos/lspval_bot_cos) +
(lspval_top_sin/lspval_bot_sin) )
return lspval | 0.005959 |
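For reference, the classical Lomb-Scargle quantities evaluated by the code above, with all data values set to 1 because this is the window function (standard textbook form, restated rather than taken from the snippet):
\[
\tan(2\omega\tau) = \frac{\sum_i \sin(2\omega t_i)}{\sum_i \cos(2\omega t_i)},
\qquad
P(\omega) = \frac{1}{2}\left\{
  \frac{\bigl[\sum_i \cos\omega(t_i-\tau)\bigr]^2}{\sum_i \cos^2\omega(t_i-\tau)}
  + \frac{\bigl[\sum_i \sin\omega(t_i-\tau)\bigr]^2}{\sum_i \sin^2\omega(t_i-\tau)}
\right\}.
\]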
def _update_gecos(name, key, value, root=None):
'''
Common code to change a user's GECOS information
'''
if value is None:
value = ''
elif not isinstance(value, six.string_types):
value = six.text_type(value)
else:
value = salt.utils.stringutils.to_unicode(value)
pre_info = _get_gecos(name, root=root)
if not pre_info:
return False
if value == pre_info[key]:
return True
gecos_data = copy.deepcopy(pre_info)
gecos_data[key] = value
cmd = ['usermod']
if root is not None and __grains__['kernel'] != 'AIX':
cmd.extend(('-R', root))
cmd.extend(('-c', _build_gecos(gecos_data), name))
__salt__['cmd.run'](cmd, python_shell=False)
return _get_gecos(name, root=root).get(key) == value | 0.001261 |
def info(*packages, **kwargs):
'''
Returns a detailed summary of package information for provided package names.
If no packages are specified, all packages will be returned.
.. versionadded:: 2015.8.1
packages
The names of the packages for which to return information.
failhard
Whether to throw an exception if none of the packages are installed.
Defaults to True.
.. versionadded:: 2016.11.3
attr
Comma-separated package attributes. If no 'attr' is specified, all available attributes returned.
Valid attributes are:
version, vendor, release, build_date, build_date_time_t, install_date, install_date_time_t,
build_host, group, source_rpm, arch, epoch, size, license, signature, packager, url, summary, description.
.. versionadded:: Neon
CLI example:
.. code-block:: bash
salt '*' lowpkg.info
salt '*' lowpkg.info apache2 bash
salt '*' lowpkg.info 'php5*' failhard=false
'''
# Get the missing information from the /var/lib/dpkg/available, if it is there.
# However, this file is operated by dselect which has to be installed.
dselect_pkg_avail = _get_pkg_ds_avail()
kwargs = salt.utils.args.clean_kwargs(**kwargs)
failhard = kwargs.pop('failhard', True)
attr = kwargs.pop('attr', None) or None
if attr:
attr = attr.split(',')
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
ret = dict()
for pkg in _get_pkg_info(*packages, failhard=failhard):
# Merge extra information from the dselect, if available
for pkg_ext_k, pkg_ext_v in dselect_pkg_avail.get(pkg['package'], {}).items():
if pkg_ext_k not in pkg:
pkg[pkg_ext_k] = pkg_ext_v
# Remove "technical" keys
for t_key in ['installed_size', 'depends', 'recommends',
'provides', 'replaces', 'conflicts', 'bugs',
'description-md5', 'task']:
if t_key in pkg:
del pkg[t_key]
lic = _get_pkg_license(pkg['package'])
if lic:
pkg['license'] = lic
# Remove keys that aren't in attrs
pkg_name = pkg['package']
if attr:
for k in list(pkg.keys())[:]:
if k not in attr:
del pkg[k]
ret[pkg_name] = pkg
return ret | 0.002903 |
def _handle_actionconstantpool(self, _):
"""Handle the ActionConstantPool action."""
obj = _make_object("ActionConstantPool")
obj.Count = count = unpack_ui16(self._src)
obj.ConstantPool = pool = []
for _ in range(count):
pool.append(self._get_struct_string())
yield obj | 0.006079 |
async def on_error(self, event_method, *args, **kwargs):
"""|coro|
The default error handler provided by the client.
By default this prints to :data:`sys.stderr` however it could be
overridden to have a different implementation.
Check :func:`discord.on_error` for more details.
"""
print('Ignoring exception in {}'.format(event_method), file=sys.stderr)
traceback.print_exc() | 0.004535 |
def checksum(digits, scale):
"""
Calculate checksum of Norwegian personal identity code.
Checksum is calculated with "Module 11" method using a scale.
The digits of the personal code are multiplied by the corresponding
number in the scale and summed;
if remainder of module 11 of the sum is less than 10, checksum is the
remainder.
If remainder is 0, the checksum is 0.
https://no.wikipedia.org/wiki/F%C3%B8dselsnummer
"""
chk_nbr = 11 - (sum(map(operator.mul, digits, scale)) % 11)
if chk_nbr == 11:
return 0
return chk_nbr | 0.001706 |
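A small usage sketch, assuming the commonly documented Module-11 weights for the first control digit of a fødselsnummer (the weights and digits below are illustrative assumptions, not taken from the snippet):
import operator

def checksum(digits, scale):
    chk_nbr = 11 - (sum(map(operator.mul, digits, scale)) % 11)
    return 0 if chk_nbr == 11 else chk_nbr

digits = [0, 1, 0, 1, 0, 0, 0, 0, 1]   # hypothetical first nine digits
scale = [3, 7, 6, 1, 8, 9, 4, 5, 2]    # assumed weights for control digit 1
print(checksum(digits, scale))          # -> 1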
def generate_settings():
"""
This command is run when ``default_path`` doesn't exist, or ``init`` is
run and returns a string representing the default data to put into their
settings file.
"""
conf_file = os.path.join(os.path.dirname(base_settings.__file__),
'example', 'conf.py')
conf_template = open(conf_file).read()
default_url = 'http://salmon.example.com'
site_url = raw_input("What will be the URL for Salmon? [{0}]".format(
default_url))
site_url = site_url or default_url
secret_key = base64.b64encode(os.urandom(KEY_LENGTH))
api_key = base64.b64encode(os.urandom(KEY_LENGTH))
output = conf_template.format(api_key=api_key, secret_key=secret_key,
site_url=site_url)
return output | 0.001227 |
def add_untagged_ok(self, text: MaybeBytes,
code: Optional[ResponseCode] = None) -> None:
"""Add an untagged ``OK`` response.
See Also:
:meth:`.add_untagged`, :class:`ResponseOk`
Args:
text: The response text.
code: Optional response code.
"""
response = ResponseOk(b'*', text, code)
self.add_untagged(response) | 0.007109 |
def security_rule_exists(name,
rulename=None,
vsys='1',
action=None,
disabled=None,
sourcezone=None,
destinationzone=None,
source=None,
destination=None,
application=None,
service=None,
description=None,
logsetting=None,
logstart=None,
logend=None,
negatesource=None,
negatedestination=None,
profilegroup=None,
datafilter=None,
fileblock=None,
spyware=None,
urlfilter=None,
virus=None,
vulnerability=None,
wildfire=None,
move=None,
movetarget=None,
commit=False):
'''
Ensures that a security rule exists on the device. Also, ensure that all configurations are set appropriately.
This method will create the rule if it does not exist. If the rule does exist, it will ensure that the
configurations are set appropriately.
    If the rule does not exist and is created, any value that is not provided will be set to its default.
The action, to, from, source, destination, application, and service fields are mandatory and must be provided.
This will enforce the exact match of the rule. For example, if the rule is currently configured with the log-end
option, but this option is not specified in the state method, it will be removed and reset to the system default.
It is strongly recommended to specify all options to ensure proper operation.
When defining the profile group settings, the device can only support either a profile group or individual settings.
If both are specified, the profile group will be preferred and the individual settings are ignored. If neither are
specified, the value will be set to system default of none.
name: The name of the module function to execute.
rulename(str): The name of the security rule. The name is case-sensitive and can have up to 31 characters, which
can be letters, numbers, spaces, hyphens, and underscores. The name must be unique on a firewall and, on Panorama,
unique within its device group and any ancestor or descendant device groups.
vsys(str): The string representation of the VSYS ID. Defaults to VSYS 1.
action(str): The action that the security rule will enforce. Valid options are: allow, deny, drop, reset-client,
reset-server, reset-both.
disabled(bool): Controls if the rule is disabled. Set 'True' to disable and 'False' to enable.
sourcezone(str, list): The source zone(s). The value 'any' will match all zones.
destinationzone(str, list): The destination zone(s). The value 'any' will match all zones.
source(str, list): The source address(es). The value 'any' will match all addresses.
destination(str, list): The destination address(es). The value 'any' will match all addresses.
application(str, list): The application(s) matched. The value 'any' will match all applications.
service(str, list): The service(s) matched. The value 'any' will match all services. The value
'application-default' will match based upon the application defined ports.
description(str): A description for the policy (up to 255 characters).
logsetting(str): The name of a valid log forwarding profile.
logstart(bool): Generates a traffic log entry for the start of a session (disabled by default).
logend(bool): Generates a traffic log entry for the end of a session (enabled by default).
negatesource(bool): Match all but the specified source addresses.
negatedestination(bool): Match all but the specified destination addresses.
profilegroup(str): A valid profile group name.
datafilter(str): A valid data filter profile name. Ignored with the profilegroup option set.
fileblock(str): A valid file blocking profile name. Ignored with the profilegroup option set.
spyware(str): A valid spyware profile name. Ignored with the profilegroup option set.
urlfilter(str): A valid URL filtering profile name. Ignored with the profilegroup option set.
virus(str): A valid virus profile name. Ignored with the profilegroup option set.
vulnerability(str): A valid vulnerability profile name. Ignored with the profilegroup option set.
    wildfire(str): A valid WildFire analysis profile name. Ignored with the profilegroup option set.
    move(str): An optional argument that ensures the rule is moved to a specific location. Valid options are 'top',
'bottom', 'before', or 'after'. The 'before' and 'after' options require the use of the 'movetarget' argument
to define the location of the move request.
movetarget(str): An optional argument that defines the target of the move operation if the move argument is
set to 'before' or 'after'.
commit(bool): If true the firewall will commit the changes, if false do not commit changes.
SLS Example:
.. code-block:: yaml
panos/rulebase/security/rule01:
panos.security_rule_exists:
- rulename: rule01
- vsys: 1
- action: allow
- disabled: False
- sourcezone: untrust
- destinationzone: trust
- source:
- 10.10.10.0/24
- 1.1.1.1
- destination:
- 2.2.2.2-2.2.2.4
- application:
- any
- service:
- tcp-25
- description: My test security rule
- logsetting: logprofile
- logstart: False
- logend: True
- negatesource: False
- negatedestination: False
- profilegroup: myprofilegroup
- move: top
- commit: False
panos/rulebase/security/rule01:
panos.security_rule_exists:
- rulename: rule01
- vsys: 1
- action: allow
- disabled: False
- sourcezone: untrust
- destinationzone: trust
- source:
- 10.10.10.0/24
- 1.1.1.1
- destination:
- 2.2.2.2-2.2.2.4
- application:
- any
- service:
- tcp-25
- description: My test security rule
- logsetting: logprofile
- logstart: False
- logend: False
- datafilter: foobar
- fileblock: foobar
- spyware: foobar
- urlfilter: foobar
- virus: foobar
- vulnerability: foobar
- wildfire: foobar
- move: after
- movetarget: rule02
- commit: False
'''
ret = _default_ret(name)
if not rulename:
return ret
# Check if rule currently exists
rule = __salt__['panos.get_security_rule'](rulename, vsys)['result']
if rule and 'entry' in rule:
rule = rule['entry']
else:
rule = {}
# Build the rule element
element = ""
if sourcezone:
element += "<from>{0}</from>".format(_build_members(sourcezone, True))
else:
ret.update({'comment': "The sourcezone field must be provided."})
return ret
if destinationzone:
element += "<to>{0}</to>".format(_build_members(destinationzone, True))
else:
ret.update({'comment': "The destinationzone field must be provided."})
return ret
if source:
element += "<source>{0}</source>".format(_build_members(source, True))
else:
ret.update({'comment': "The source field must be provided."})
        return ret
if destination:
element += "<destination>{0}</destination>".format(_build_members(destination, True))
else:
ret.update({'comment': "The destination field must be provided."})
return ret
if application:
element += "<application>{0}</application>".format(_build_members(application, True))
else:
ret.update({'comment': "The application field must be provided."})
return ret
if service:
element += "<service>{0}</service>".format(_build_members(service, True))
else:
ret.update({'comment': "The service field must be provided."})
return ret
if action:
element += "<action>{0}</action>".format(action)
else:
ret.update({'comment': "The action field must be provided."})
return ret
if disabled is not None:
if disabled:
element += "<disabled>yes</disabled>"
else:
element += "<disabled>no</disabled>"
if description:
element += "<description>{0}</description>".format(description)
if logsetting:
element += "<log-setting>{0}</log-setting>".format(logsetting)
if logstart is not None:
if logstart:
element += "<log-start>yes</log-start>"
else:
element += "<log-start>no</log-start>"
if logend is not None:
if logend:
element += "<log-end>yes</log-end>"
else:
element += "<log-end>no</log-end>"
if negatesource is not None:
if negatesource:
element += "<negate-source>yes</negate-source>"
else:
element += "<negate-source>no</negate-source>"
if negatedestination is not None:
if negatedestination:
element += "<negate-destination>yes</negate-destination>"
else:
element += "<negate-destination>no</negate-destination>"
# Build the profile settings
profile_string = None
if profilegroup:
profile_string = "<group><member>{0}</member></group>".format(profilegroup)
else:
member_string = ""
if datafilter:
member_string += "<data-filtering><member>{0}</member></data-filtering>".format(datafilter)
if fileblock:
member_string += "<file-blocking><member>{0}</member></file-blocking>".format(fileblock)
if spyware:
member_string += "<spyware><member>{0}</member></spyware>".format(spyware)
if urlfilter:
member_string += "<url-filtering><member>{0}</member></url-filtering>".format(urlfilter)
if virus:
member_string += "<virus><member>{0}</member></virus>".format(virus)
if vulnerability:
member_string += "<vulnerability><member>{0}</member></vulnerability>".format(vulnerability)
if wildfire:
member_string += "<wildfire-analysis><member>{0}</member></wildfire-analysis>".format(wildfire)
if member_string != "":
profile_string = "<profiles>{0}</profiles>".format(member_string)
if profile_string:
element += "<profile-setting>{0}</profile-setting>".format(profile_string)
full_element = "<entry name='{0}'>{1}</entry>".format(rulename, element)
new_rule = xml.to_dict(ET.fromstring(full_element), True)
config_change = False
if rule == new_rule:
ret.update({
'comment': 'Security rule already exists. No changes required.'
})
else:
config_change = True
xpath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/rulebase/" \
"security/rules/entry[@name=\'{1}\']".format(vsys, rulename)
result, msg = _edit_config(xpath, full_element)
if not result:
ret.update({
'comment': msg
})
return ret
ret.update({
'changes': {'before': rule, 'after': new_rule},
'comment': 'Security rule verified successfully.'
})
if move:
movepath = "/config/devices/entry[@name=\'localhost.localdomain\']/vsys/entry[@name=\'vsys{0}\']/rulebase/" \
"security/rules/entry[@name=\'{1}\']".format(vsys, rulename)
move_result = False
move_msg = ''
if move == "before" and movetarget:
move_result, move_msg = _move_before(movepath, movetarget)
elif move == "after":
move_result, move_msg = _move_after(movepath, movetarget)
elif move == "top":
move_result, move_msg = _move_top(movepath)
elif move == "bottom":
move_result, move_msg = _move_bottom(movepath)
if config_change:
ret.update({
'changes': {'before': rule, 'after': new_rule, 'move': move_msg}
})
else:
ret.update({
'changes': {'move': move_msg}
})
if not move_result:
ret.update({
'comment': move_msg
})
return ret
if commit is True:
ret.update({
'commit': __salt__['panos.commit'](),
'result': True
})
else:
ret.update({
'result': True
})
return ret | 0.003646 |
def _pool_one_shape(features_2d, area_width, area_height, batch_size,
width, height, depth, fn=tf.reduce_max, name=None):
"""Pools for an area in features_2d.
Args:
features_2d: a Tensor in a shape of [batch_size, height, width, depth].
area_width: the max width allowed for an area.
area_height: the max height allowed for an area.
batch_size: the batch size.
width: the width of the memory.
height: the height of the memory.
depth: the depth of the features.
fn: the TF function for the pooling.
name: the op name.
Returns:
pool_tensor: A Tensor of shape [batch_size, num_areas, depth]
"""
with tf.name_scope(name, default_name="pool_one_shape"):
images = []
for y_shift in range(area_height):
image_height = tf.maximum(height - area_height + 1 + y_shift, 0)
for x_shift in range(area_width):
image_width = tf.maximum(width - area_width + 1 + x_shift, 0)
area = features_2d[:, y_shift:image_height, x_shift:image_width, :]
flatten_area = tf.reshape(area, [batch_size, -1, depth, 1])
images.append(flatten_area)
image_tensor = tf.concat(images, axis=3)
max_tensor = fn(image_tensor, axis=3)
return max_tensor | 0.004831 |
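
A rough usage sketch in the same TF1-style graph API as the snippet; the example sizes are arbitrary and the output shape follows from the slicing above:

import tensorflow as tf  # TF1-style API assumed

batch_size, height, width, depth = 2, 4, 5, 8
features_2d = tf.random_normal([batch_size, height, width, depth])
# Max-pool every 2x3 area anchored at each valid top-left position.
pooled = _pool_one_shape(features_2d, area_width=3, area_height=2,
                         batch_size=batch_size, width=width, height=height,
                         depth=depth, fn=tf.reduce_max)
# Expected shape: [batch_size, (height - 2 + 1) * (width - 3 + 1), depth] == [2, 9, 8]
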
def pack(self, topic_dims, t):
""" Packs selected logs into a numpy array
        :param list topic_dims: list of (topic, dims) tuples, where topic is a string and dims is a list of dimensions to be packed for that topic
        :param int t: time index to be packed
"""
data = []
for topic, dims in topic_dims:
for d in dims:
data.append(self.logs[topic][t, d])
return array(data).T | 0.006579 |
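
For example, a hypothetical call (the log object and topic names are assumptions):

# Pack dims 0 and 2 of the 'motor' topic and dim 1 of the 'sensor' topic
# at time step 10 into a single flat vector.
row = experiment_log.pack([('motor', [0, 2]), ('sensor', [1])], t=10)
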
def u2open(self, u2request):
"""
Open a connection.
@param u2request: A urllib2 request.
        @type u2request: urllib2.Request.
@return: The opened file-like urllib2 object.
@rtype: fp
"""
tm = self.options.timeout
        opener = build_opener(HTTPSClientAuthHandler(self.context))
        if self.u2ver() < 2.6:
            socket.setdefaulttimeout(tm)
            return opener.open(u2request)
        else:
            return opener.open(u2request, timeout=tm)
def should_build(self, fpath, meta):
"""
Checks if the file should be built or not
Only skips layouts which are tagged as INCREMENTAL
Rebuilds only those files with mtime changed since previous build
"""
if meta.get('layout', self.default_template) in self.inc_layout:
if self.prev_mtime.get(fpath, 0) == os.path.getmtime(fpath):
return False
else:
return True
return True | 0.004132 |
def transcript_to_fake_psl_line(self,ref):
"""Convert a mapping to a fake PSL line
:param ref: reference genome dictionary
:type ref: dict()
:return: psl line
:rtype: string
"""
self._initialize()
e = self
mylen = 0
matches = 0
qstartslist = []
for exon in self.exons:
mylen = exon.rng.length()
matches += mylen
qstartslist.append(matches-mylen)
qstarts = ','.join([str(x) for x in qstartslist])+','
oline = str(matches)+"\t" # 1
oline += "0\t" # 2
oline += "0\t" # 3
oline += "0\t" # 4
oline += "0\t" # 5
oline += "0\t" # 6
oline += "0\t" # 7
oline += "0\t" # 8
oline += e.get_strand()+"\t" # 9
oline += e.get_transcript_name()+"\t" # 10
oline += str(matches)+"\t" # 11
oline += "0\t" # 12
oline += str(matches)+"\t" # 13
oline += e.get_chrom()+"\t" # 14
oline += str(len(ref[e.get_chrom()]))+"\t" # 15
oline += str(e.exons[0].rng.start-1)+"\t" # 16
oline += str(e.exons[-1].rng.end)+"\t" # 17
oline += str(len(e.exons))+"\t" # 18
oline += ','.join([str(e.exons[x].rng.end-(e.exons[x].rng.start-1)) for x in range(0,len(e.exons))])+','+"\t" # 19
oline += qstarts + "\t" # 20
oline += ','.join([str(x.rng.start-1) for x in e.exons])+',' # 21
return oline | 0.02207 |
def _extern_decl(return_type, arg_types):
"""A decorator for methods corresponding to extern functions. All types should be strings.
The _FFISpecification class is able to automatically convert these into method declarations for
cffi.
"""
def wrapper(func):
signature = _ExternSignature(
return_type=str(return_type),
method_name=str(func.__name__),
arg_types=tuple(arg_types))
func.extern_signature = signature
return func
return wrapper | 0.012474 |
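
For illustration, a sketch of how such a decorator is consumed; the class, method name, and C types are hypothetical, and ``_ExternSignature`` is assumed to expose the fields it is constructed with:

class _FFISpecificationSketch:
    @_extern_decl('uint64_t', ['void*', 'uint8_t*'])
    def extern_identify(self, context_ptr, val_ptr):
        ...

# Assumes _ExternSignature exposes its fields as attributes (e.g. a namedtuple).
sig = _FFISpecificationSketch.extern_identify.extern_signature
print(sig.method_name, sig.return_type, sig.arg_types)
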
def conversations(self, getsrcdst=None, **kargs):
"""Graphes a conversations between sources and destinations and display it
(using graphviz and imagemagick)
getsrcdst: a function that takes an element of the list and
returns the source, the destination and optionally
a label. By default, returns the IP source and
destination from IP and ARP layers
type: output type (svg, ps, gif, jpg, etc.), passed to dot's "-T" option # noqa: E501
target: filename or redirect. Defaults pipe to Imagemagick's display program # noqa: E501
prog: which graphviz program to use"""
if getsrcdst is None:
def getsrcdst(pkt):
"""Extract src and dst addresses"""
if 'IP' in pkt:
return (pkt['IP'].src, pkt['IP'].dst)
if 'IPv6' in pkt:
return (pkt['IPv6'].src, pkt['IPv6'].dst)
if 'ARP' in pkt:
return (pkt['ARP'].psrc, pkt['ARP'].pdst)
raise TypeError()
conv = {}
for p in self.res:
p = self._elt2pkt(p)
try:
c = getsrcdst(p)
except Exception:
# No warning here: it's OK that getsrcdst() raises an
# exception, since it might be, for example, a
# function that expects a specific layer in each
# packet. The try/except approach is faster and
# considered more Pythonic than adding tests.
continue
if len(c) == 3:
conv.setdefault(c[:2], set()).add(c[2])
else:
conv[c] = conv.get(c, 0) + 1
gr = 'digraph "conv" {\n'
for (s, d), l in six.iteritems(conv):
gr += '\t "%s" -> "%s" [label="%s"]\n' % (
s, d, ', '.join(str(x) for x in l) if isinstance(l, set) else l
)
gr += "}\n"
return do_graph(gr, **kargs) | 0.00098 |
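
As an example, a sketch of a custom ``getsrcdst`` that groups TCP flows and labels edges with the destination port (the capture file name is a placeholder):

from scapy.all import rdpcap, TCP

pkts = rdpcap("capture.pcap")  # placeholder path

def by_tcp_port(pkt):
    # Returning a 3-tuple makes the third element an edge label in the graph.
    if 'IP' in pkt and TCP in pkt:
        return (pkt['IP'].src, pkt['IP'].dst, pkt[TCP].dport)
    raise TypeError()

pkts.conversations(getsrcdst=by_tcp_port, type="png", target="tcp_conv.png")
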
def concretize(self, symbolic, policy, maxcount=7):
""" This finds a set of solutions for symbolic using policy.
        This raises TooManySolutions if there are more solutions than maxcount.
"""
assert self.constraints == self.platform.constraints
symbolic = self.migrate_expression(symbolic)
vals = []
if policy == 'MINMAX':
vals = self._solver.minmax(self._constraints, symbolic)
elif policy == 'MAX':
vals = self._solver.max(self._constraints, symbolic)
elif policy == 'MIN':
vals = self._solver.min(self._constraints, symbolic)
elif policy == 'SAMPLED':
m, M = self._solver.minmax(self._constraints, symbolic)
vals += [m, M]
if M - m > 3:
if self._solver.can_be_true(self._constraints, symbolic == (m + M) // 2):
vals.append((m + M) // 2)
if M - m > 100:
for i in (0, 1, 2, 5, 32, 64, 128, 320):
if self._solver.can_be_true(self._constraints, symbolic == m + i):
vals.append(m + i)
if maxcount <= len(vals):
break
if M - m > 1000 and maxcount > len(vals):
vals += self._solver.get_all_values(self._constraints, symbolic,
maxcnt=maxcount - len(vals), silent=True)
elif policy == 'ONE':
vals = [self._solver.get_value(self._constraints, symbolic)]
else:
assert policy == 'ALL'
            vals = self._solver.get_all_values(self._constraints, symbolic, maxcnt=maxcount, silent=True)
return tuple(set(vals)) | 0.004063 |
def list_zones():
'''
Displays a list of available time zones. Use this list when setting a
time zone using ``timezone.set_zone``
:return: a list of time zones
:rtype: list
CLI Example:
.. code-block:: bash
salt '*' timezone.list_zones
'''
ret = salt.utils.mac_utils.execute_return_result(
'systemsetup -listtimezones')
zones = salt.utils.mac_utils.parse_return(ret)
return [x.strip() for x in zones.splitlines()] | 0.002092 |
def set_tensor_final(self, tensor_name):
"""Denotes a tensor as a final output of the computation.
Args:
tensor_name: a string, name of a tensor in the graph.
"""
tensor = self._name_to_tensor(tensor_name)
self._final_tensors.add(tensor) | 0.003788 |
def return_dat(self, chan, begsam, endsam):
"""Return the data as 2D numpy.ndarray.
Parameters
----------
chan : int or list
index (indices) of the channels to read
begsam : int
index of the first sample
endsam : int
index of the last sample
Returns
-------
numpy.ndarray
A 2d matrix, with dimension chan X samples
"""
dat_begsam = max(begsam, 0)
dat_endsam = min(endsam, self.n_samples)
dur = dat_endsam - dat_begsam
dtype_onlychan = dtype({k: v for k, v in self.dtype.fields.items() if v[0].kind != 'S'})
# make sure we read some data at least, otherwise segfault
if dat_begsam < self.n_samples and dat_endsam > 0:
with self.filename.open('rb') as f:
f.seek(self.header_len, SEEK_SET) # skip header
f.seek(self.dtype.itemsize * dat_begsam, SEEK_CUR)
dat = fromfile(f, dtype=self.dtype, count=dur)
dat = ndarray(dat.shape, dtype_onlychan, dat, 0, dat.strides).view((dtype_onlychan[0], len(dtype_onlychan.names))).T
else:
n_chan = len(dtype_onlychan.names)
dat = empty((n_chan, 0))
if begsam < 0:
pad = empty((dat.shape[0], 0 - begsam))
pad.fill(NaN)
dat = c_[pad, dat]
if endsam >= self.n_samples:
pad = empty((dat.shape[0], endsam - self.n_samples))
pad.fill(NaN)
dat = c_[dat, pad]
return dat[chan, :] * self.gain[chan][:, None] | 0.002468 |
def GetConfigPolicy(self, request, context):
"""Dispatches the request to the plugins get_config_policy method"""
try:
policy = self.plugin.get_config_policy()
return policy._pb
except Exception as err:
msg = "message: {}\n\nstack trace: {}".format(
err.message, traceback.format_exc())
return GetConfigPolicyReply(error=msg) | 0.004843 |
def post(self, url, postParameters=None, urlParameters=None):
"""
        Convenience method for sending a POST request to Google with the proper auth header and parameters.
"""
if urlParameters:
url = url + "?" + self.getParameters(urlParameters)
headers = {'Authorization':'GoogleLogin auth=%s' % self.auth_token,
'Content-Type': 'application/x-www-form-urlencoded'
}
postString = self.postParameters(postParameters)
req = requests.post(url, data=postString, headers=headers)
return req.text | 0.008726 |
def created(self, instance, commit=True):
"""
Convenience method for saving a model (automatically commits it to
the database and returns the object with an HTTP 201 status code)
"""
if commit:
self.session_manager.save(instance, commit=True)
return instance, HTTPStatus.CREATED | 0.005917 |
def clear(self):
"""
Clears the context.
"""
self._objects.clear()
self._class_aliases = {}
self._unicodes = {}
self.extra = {} | 0.010929 |
def removeUnreferencedIDs(referencedIDs, identifiedElements):
"""
Removes the unreferenced ID attributes.
Returns the number of ID attributes removed
"""
global _num_ids_removed
keepTags = ['font']
num = 0
for id in identifiedElements:
node = identifiedElements[id]
if id not in referencedIDs and node.nodeName not in keepTags:
node.removeAttribute('id')
_num_ids_removed += 1
num += 1
return num | 0.002062 |
def copy( self ):
"""
Returns a new copy of a C{ParseResults} object.
"""
ret = ParseResults( self.__toklist )
ret.__tokdict = self.__tokdict.copy()
ret.__parent = self.__parent
ret.__accumNames.update( self.__accumNames )
ret.__name = self.__name
return ret | 0.023599 |
def _wrap_parse(code, filename):
"""
async wrapper is required to avoid await calls raising a SyntaxError
"""
code = 'async def wrapper():\n' + indent(code, ' ')
return ast.parse(code, filename=filename).body[0].body[0].value | 0.007547 |
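
A small sketch of the effect; the expression is hypothetical and the helper's own ``ast``/``indent`` imports are assumed to be present at module level:

node = _wrap_parse("await fetch_data()", "<expr>")
print(type(node).__name__)  # Await -- the wrapper lets a top-level `await` parse
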
def volcano(differential_dfs, title='Axial Volcano Plot', scripts_mode="CDN", data_mode="directory",
organism="human", q_value_column_name="q", log2FC_column_name="logFC",
output_dir=".", filename="volcano.html", version=this_version):
"""
Arguments:
        differential_dfs (dict or pandas.DataFrame): python dict of names to pandas dataframes, or a single dataframe, indexed by gene symbols, which must have a q-value column and a log2 fold-change column (named by `q_value_column_name` and `log2FC_column_name`).
title (str): The title of the plot (to be embedded in the html).
scripts_mode (str): Choose from [`"CDN"`, `"directory"`, `"inline"`]:
- `"CDN"` compiles a single HTML page with links to scripts hosted on a CDN,
- `"directory"` compiles a directory with all scripts locally cached,
- `"inline"` compiles a single HTML file with all scripts/styles inlined.
data_mode (str): Choose from ["directory", "inline"]:
- "directory" compiles a directory with all data locally cached,
- "inline" compiles a single HTML file with all data inlined.
organism (str): `"human"` or `"mouse"`
        q_value_column_name (str): name of the column containing q-values (default "q").
        log2FC_column_name (str): name of the column containing log2 fold-changes (default "logFC").
output_dir (str): the directory in which to output the file
filename (str): the filename of the output file
version (str): the version of the javascripts to use.
Leave the default to pin the version, or choose "latest" to get updates,
or choose part of the version string to get minor updates.
Returns:
Path: The filepath which the html was outputted to.
"""
output_dir = Path(output_dir)
output_dir.mkdir(exist_ok=True, parents=True)
# Data =======================
if isinstance(differential_dfs, pd.DataFrame):
differential_dfs = {'differential': differential_dfs}
    for name, df in list(differential_dfs.items()):
df = df[[q_value_column_name, log2FC_column_name]]
df.columns = ['q', 'logFC']
df = df.round(2)
# TODO drop all zero rows
_verify_differential_df(df)
del differential_dfs[name]
differential_dfs[_sanitize(name)] = df
names_and_differentials = f"var names_and_differentials = { '{'+ ','.join([_quote(name)+': '+df.to_json(orient='index') for name, df in differential_dfs.items()]) +'}' };"
data_block = _data_block(data_mode, [('names_and_differentials', names_and_differentials)], output_dir, include_gene_sets=False, organism=organism)
# Scripts =======================
scripts = third_party_scripts + [CDN_url(version)+"js/util.js", CDN_url(version)+"js/GOrilla.js", CDN_url(version)+"js/volcano.js"]
scripts_block = _scripts_block(scripts, scripts_mode, output_dir)
html = templateEnv.get_template('volcano.html.j2').render(title=title, scripts_block=scripts_block+'\n'+data_block, organism="HOMO_SAPIENS")
(output_dir / filename).write_text(html)
return (output_dir / filename).resolve() | 0.004647 |
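
A usage sketch; the gene symbols and values are made up, and only the keyword arguments documented above are relied on:

import pandas as pd

df = pd.DataFrame(
    {"qval": [0.01, 0.20, 0.0005], "log2FC": [2.1, -0.3, -3.4]},
    index=["TP53", "EGFR", "MYC"],
)
out_path = volcano(
    {"treated_vs_control": df},
    title="Treated vs Control",
    q_value_column_name="qval",
    log2FC_column_name="log2FC",
    output_dir="reports",
    filename="treated_vs_control_volcano.html",
)
print(out_path)  # absolute path of the generated HTML
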