text (string, 78 to 104k chars) | score (float64, 0 to 0.18)
---|---
def set_data_points(self, points):
"""
Input `points` must be in data coordinates, will be converted
to the coordinate space of the object and stored.
"""
self.points = np.asarray(self.crdmap.data_to(points)) | 0.008065 |
def mnist(training):
"""Downloads MNIST and loads it into numpy arrays."""
if training:
data_filename = 'train-images-idx3-ubyte.gz'
labels_filename = 'train-labels-idx1-ubyte.gz'
count = 60000
else:
data_filename = 't10k-images-idx3-ubyte.gz'
labels_filename = 't10k-labels-idx1-ubyte.gz'
count = 10000
data_filename = maybe_download(MNIST_URL, data_filename)
labels_filename = maybe_download(MNIST_URL, labels_filename)
return (mnist_extract_data(data_filename, count),
mnist_extract_labels(labels_filename, count)) | 0.012411 |
def update_fact(self, fact_id, fact, temporary_activity = False):
"""Update fact values. See add_fact for rules.
Update is performed via remove/insert, so the
fact_id after update should not be used anymore. Instead use the ID
from the fact dict that is returned by this function"""
start_time = timegm((fact.start_time or dt.datetime.now()).timetuple())
end_time = fact.end_time or 0
if end_time:
end_time = timegm(end_time.timetuple())
new_id = self.conn.UpdateFact(fact_id,
fact.serialized_name(),
start_time,
end_time,
temporary_activity)
return new_id | 0.007491 |
def _sentence(self, words):
"""Generate a sentence"""
db = self.database
# Generate 2 words to start a sentence with
seed = random.randint(0, db['word_count'] - 3)
seed_word, next_word = db['words'][seed], db['words'][seed + 1]
w1, w2 = seed_word, next_word
# Generate the complete sentence
sentence = []
for i in range(0, words - 1):
sentence.append(w1)
w1, w2 = w2, random.choice(db['freqs'][(w1, w2)])
sentence.append(w2)
# Make the sentence respectable
sentence = ' '.join(sentence)
# Capitalize the sentence
sentence = sentence.capitalize()
        # Remove additional sentence-ending punctuation
        sentence = sentence.replace('.', '')
        sentence = sentence.replace('!', '')
        sentence = sentence.replace('?', '')
        sentence = sentence.replace(':', '')
        # Remove quote tags
        sentence = sentence.replace('"', '')
# If the last character is not an alphanumeric remove it
sentence = re.sub('[^a-zA-Z0-9]$', '', sentence)
# Remove excess space
        sentence = re.sub(r'\s+', ' ', sentence)
# Add a full stop
sentence += '.'
return sentence | 0.002065 |
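The generator above reads its corpus from `self.database`, a dict whose `words`, `word_count` and `freqs` keys are used in the body: `words` is the ordered token stream and `freqs` maps each bigram to the list of words observed after it. A minimal sketch of building such a table from raw text (an illustration, not this project's actual loader) could look like:

```python
from collections import defaultdict

def build_markov_db(text):
    # Tokenize and record, for every consecutive word pair, which words follow it.
    words = text.split()
    freqs = defaultdict(list)
    for w1, w2, w3 in zip(words, words[1:], words[2:]):
        freqs[(w1, w2)].append(w3)
    return {'words': words, 'word_count': len(words), 'freqs': dict(freqs)}
```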
def build(args):
"""Build the documentation for the projects specified in the CLI.
It will do 4 different things for each project the
user asks for (see flags):
1. Update mkdocs's index.md file with links to project
documentations
2. Build these documentations
3. Update the documentations' index.html file to add a link
back to the home of all documentations
    4. Build mkdocs's home documentation
Args:
args (ArgumentParser): parsed args from an ArgumentParser
"""
# Proceed?
go = False
# Current working directory
dir_path = Path().resolve()
# Set of all available projects in the dir
# Projects must contain a PROJECT_MARKER file.
all_projects = {
m
for m in os.listdir(dir_path)
if os.path.isdir(m) and "source" in os.listdir(dir_path / m)
}
if args.all and args.projects:
print(
"{}Can't use both the 'projects' and 'all' flags{}".format(
utils.colors.FAIL, utils.colors.ENDC
)
)
return
if not args.all and not args.projects:
print(
"{}You have to specify at least one project (or all){}".format(
utils.colors.FAIL, utils.colors.ENDC
)
)
return
if args.force:
go = True
projects = (
all_projects if args.all else all_projects.intersection(set(args.projects))
)
elif args.projects:
s = "You are about to build the docs for: "
s += "\n- {}\nContinue? (y/n) ".format("\n- ".join(args.projects))
if "y" in input(s):
go = True
projects = all_projects.intersection(set(args.projects))
elif args.all:
s = "You're about to build the docs for ALL projects."
s += "\nContinue? (y/n) "
if "y" in input(s):
go = True
projects = all_projects
if go:
# Update projects links
listed_projects = utils.get_listed_projects()
# Don't update projects which are not listed in the Documentation's
# Home if the -o flag was used
if args.only_index:
projects = listed_projects.intersection(projects)
print("projects", projects)
for project_to_build in projects:
# Re-build documentation
warnings.warn("[sphinx]")
if args.verbose:
os.system(
"cd {} && make clean && make html".format(
dir_path / project_to_build
)
)
else:
os.system(
"cd {} && make clean && make html > /dev/null".format(
dir_path / project_to_build
)
)
# Add link to Documentation's Home
utils.overwrite_view_source(project_to_build, dir_path)
if args.verbose:
print("\n>>>>>> Done {}\n\n\n".format(project_to_build))
# Build Documentation
if args.verbose:
os.system("mkdocs build")
print("\n\n>>>>>> Build Complete.")
else:
warnings.warn("[mkdocs]")
os.system("mkdocs build > /dev/null")
if args.offline:
utils.make_offline() | 0.000596 |
def detach_screens(self, screen_ids):
"""Unplugs monitors from the virtual graphics card.
in screen_ids of type int
"""
if not isinstance(screen_ids, list):
raise TypeError("screen_ids can only be an instance of type list")
for a in screen_ids[:10]:
if not isinstance(a, baseinteger):
raise TypeError(
"array can only contain objects of type baseinteger")
self._call("detachScreens",
in_p=[screen_ids]) | 0.005587 |
def remove(self, elem):
"""Removes _elem_ from the collection, will raise a KeyError is _elem_ is missing
# Parameters
_elem_ : `object`
> The object to be removed
"""
try:
return self._collection.remove(elem)
except KeyError:
raise KeyError("'{}' was not found in the {}: '{}'.".format(elem, type(self).__name__, self)) from None | 0.009685 |
def _write_branch_and_tag_to_meta_yaml(self):
"""
Write branch and tag to meta.yaml by editing in place
"""
## set the branch to pull source from
with open(self.meta_yaml.replace("meta", "template"), 'r') as infile:
dat = infile.read()
newdat = dat.format(**{'tag': self.tag, 'branch': self.branch})
with open(self.meta_yaml, 'w') as outfile:
outfile.write(newdat) | 0.006682 |
def framers(self):
"""
Reset the framers in use for the connection to be a
tendril.IdentityFramer. The framer states will be reset next
time their respective framer is used.
"""
f = self.default_framer()
self._send_framer = f
self._recv_framer = f | 0.00639 |
def get_group(self, t, i):
"""Get group number."""
try:
value = []
if t in _DIGIT and t != '0':
value.append(t)
t = next(i)
if t in _DIGIT:
value.append(t)
else:
i.rewind(1)
except StopIteration:
pass
return ''.join(value) if value else None | 0.004831 |
def to_cnf(self):
"""Return an equivalent expression in conjunctive normal form."""
node = self.node.to_cnf()
if node is self.node:
return self
else:
return _expr(node) | 0.008929 |
def Disconnect(self, reason=None, isDead=True):
"""Close the connection with the remote node client."""
self.disconnecting = True
self.expect_verack_next = False
if reason:
logger.debug(f"Disconnecting with reason: {reason}")
self.stop_block_loop()
self.stop_header_loop()
self.stop_peerinfo_loop()
if isDead:
self.leader.AddDeadAddress(self.address, reason=f"{self.prefix} Forced disconnect by us")
self.leader.forced_disconnect_by_us += 1
self.disconnect_deferred = defer.Deferred()
self.disconnect_deferred.debug = True
# force disconnection without waiting on the other side
# calling later to give func caller time to add callbacks to the deferred
reactor.callLater(1, self.transport.abortConnection)
return self.disconnect_deferred | 0.00453 |
def nFreeParams(self):
"""Count the number of free parameters in the active model."""
nF = 0
pars = self.params()
for par in pars:
if par.isFree():
nF += 1
return nF | 0.008584 |
def splitFile(inputFileName, linePerFile, outPrefix):
"""Split a file.
:param inputFileName: the name of the input file.
:param linePerFile: the number of line per file (after splitting).
:param outPrefix: the prefix of the output files.
:type inputFileName: str
:type linePerFile: int
:type outPrefix: str
:returns: the number of created temporary files.
    Splits a file (``inputFileName``) into multiple files containing at most
``linePerFile`` lines.
"""
nbTmpFile = 1
nbLine = 0
tmpFile = None
try:
with open(inputFileName, "r") as inputFile:
for line in inputFile:
row = line.rstrip("\r\n").split(" ")
nbLine += 1
if tmpFile is None:
try:
tmpFile = open(
outPrefix + "_tmp.list%d" % nbTmpFile,
"w",
)
except IOError:
msg = "tmp.list%d: can't write file" % nbTmpFile
raise ProgramError(msg)
print >>tmpFile, " ".join(row[:2])
if nbLine == linePerFile:
nbLine = 0
nbTmpFile += 1
tmpFile.close()
try:
tmpFile = open(
outPrefix + "_tmp.list%d" % nbTmpFile,
"w",
)
except IOError:
msg = "tmp.list%d: can't write file" % nbTmpFile
raise ProgramError(msg)
tmpFile.close()
# Check if the number of line is zero (hence the last file is empty)
if nbLine == 0:
# We delete the last file
file_name = outPrefix + "_tmp.list{}".format(nbTmpFile)
if os.path.isfile(file_name):
os.remove(file_name)
nbTmpFile -= 1
except IOError:
msg = "%s: no such file" % inputFileName
raise ProgramError(msg)
return nbTmpFile | 0.000471 |
def get_template(name):
"""
Look for 'name' in the vr.runners.templates folder. Return its contents.
>>> import six
>>> tmpl = get_template('base_image.lxc')
>>> isinstance(tmpl, six.string_types)
True
"""
path = 'templates/' + name
b_stream = pkg_resources.resource_stream('vr.imager', path)
return b_stream.read().decode('utf-8') | 0.002681 |
def _trna_annotation(data):
"""
use tDRmapper to quantify tRNAs
"""
trna_ref = op.join(dd.get_srna_trna_file(data))
name = dd.get_sample_name(data)
work_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(data), "trna", name))
in_file = op.basename(data["clean_fastq"])
tdrmapper = os.path.join(os.path.dirname(sys.executable), "TdrMappingScripts.pl")
perl_export = utils.get_perl_exports()
if not file_exists(trna_ref) or not file_exists(tdrmapper):
logger.info("There is no tRNA annotation to run TdrMapper.")
return work_dir
out_file = op.join(work_dir, in_file + ".hq_cs.mapped")
if not file_exists(out_file):
with tx_tmpdir(data) as txdir:
with utils.chdir(txdir):
utils.symlink_plus(data["clean_fastq"], op.join(txdir, in_file))
cmd = ("{perl_export} && perl {tdrmapper} {trna_ref} {in_file}").format(**locals())
do.run(cmd, "tRNA for %s" % name)
for filename in glob.glob("*mapped*"):
shutil.move(filename, work_dir)
return work_dir | 0.004492 |
def read_dir(self, path):
"""
Reads the given path into the tree
"""
self.tree = {}
self.file_count = 0
self.path = path
for root, _, filelist in os.walk(path):
rel = root[len(path):].lstrip('/\\')
# empty rel, means file is in root dir
if not rel:
rel = ' '
for filename in filelist:
filename = filename.split('.')
if len(filename) <= 1:
raise RuntimeError("Files without an extension are not supported: {0}".format(
repr(os.path.join(root, '.'.join(filename))),
))
ext = filename[-1]
filename = '.'.join(filename[:-1])
if ext not in self.tree:
self.tree[ext] = {}
if rel not in self.tree[ext]:
self.tree[ext][rel] = []
self.tree[ext][rel].append(filename)
self.file_count += 1
self.tree_length = self.calculate_tree_length() | 0.003549 |
def init_gl(self):
"""
Perform the magic incantations to create an
OpenGL scene using pyglet.
"""
# default background color is white-ish
background = [.99, .99, .99, 1.0]
# if user passed a background color use it
if 'background' in self.kwargs:
try:
# convert to (4,) uint8 RGBA
background = to_rgba(self.kwargs['background'])
# convert to 0.0 - 1.0 float
background = background.astype(np.float64) / 255.0
except BaseException:
log.error('background color set but wrong!',
exc_info=True)
self._gl_set_background(background)
self._gl_enable_depth(self.scene)
self._gl_enable_color_material()
self._gl_enable_blending()
self._gl_enable_smooth_lines()
self._gl_enable_lighting(self.scene) | 0.002146 |
def convert_descriptor(self, descriptor):
"""Convert descriptor to BigQuery
"""
# Fields
fields = []
fallbacks = []
schema = tableschema.Schema(descriptor)
for index, field in enumerate(schema.fields):
converted_type = self.convert_type(field.type)
if not converted_type:
converted_type = 'STRING'
fallbacks.append(index)
mode = 'NULLABLE'
if field.required:
mode = 'REQUIRED'
fields.append({
'name': _slugify_field_name(field.name),
'type': converted_type,
'mode': mode,
})
# Descriptor
converted_descriptor = {
'fields': fields,
}
return (converted_descriptor, fallbacks) | 0.00237 |
def from_record(self, record):
"""
Constructs and returns a sequenced item object, from given ORM object.
"""
kwargs = self.get_field_kwargs(record)
return self.sequenced_item_class(**kwargs) | 0.008658 |
def get_result_xml(result):
""" Formats a scan result to XML format.
Arguments:
result (dict): Dictionary with a scan result.
Return:
Result as xml element object.
"""
result_xml = Element('result')
for name, value in [('name', result['name']),
('type', ResultType.get_str(result['type'])),
('severity', result['severity']),
('host', result['host']),
('test_id', result['test_id']),
('port', result['port']),
('qod', result['qod'])]:
result_xml.set(name, str(value))
result_xml.text = result['value']
return result_xml | 0.001389 |
def _apply_updates(self, gradients):
"""Apply AdaGrad update to parameters.
Parameters
----------
gradients
Returns
-------
"""
if not hasattr(self, 'optimizers'):
self.optimizers = \
{obj: AdaGradOptimizer(self.learning_rate)
for obj in ['W', 'C', 'bw', 'bc']}
self.W -= self.optimizers['W'].get_step(gradients['W'])
self.C -= self.optimizers['C'].get_step(gradients['C'])
self.bw -= self.optimizers['bw'].get_step(gradients['bw'])
self.bc -= self.optimizers['bc'].get_step(gradients['bc']) | 0.00315 |
def route(self, uri, *args, **kwargs):
"""Create a plugin route from a decorated function.
:param uri: endpoint at which the route will be accessible.
:type uri: str
:param args: captures all of the positional arguments passed in
:type args: tuple(Any)
:param kwargs: captures the keyword arguments passed in
:type kwargs: dict(Any)
:return: The exception function to use as the decorator
:rtype: fn
"""
if len(args) == 0 and callable(uri): # pragma: no cover
raise RuntimeError("Cannot use the @route decorator without "
"arguments.")
kwargs.setdefault('methods', frozenset({'GET'}))
kwargs.setdefault('host', None)
kwargs.setdefault('strict_slashes', False)
kwargs.setdefault('stream', False)
kwargs.setdefault('name', None)
def wrapper(handler_f):
self._routes.append(FutureRoute(handler_f, uri, args, kwargs))
return handler_f
return wrapper | 0.00189 |
def deploy_ext(self):
'''
Deploy the ext_mods tarball
'''
if self.mods.get('file'):
self.shell.send(
self.mods['file'],
os.path.join(self.thin_dir, 'salt-ext_mods.tgz'),
)
return True | 0.007168 |
def remove_colormap(self, removal_type):
"""Remove a palette (colormap); if no colormap, returns a copy of this
image
removal_type - any of lept.REMOVE_CMAP_*
"""
with _LeptonicaErrorTrap():
return Pix(
lept.pixRemoveColormapGeneral(self._cdata, removal_type, lept.L_COPY)
) | 0.008333 |
def get_package_for_module(module):
"""Get package name for a module.
Helper calculates the package name of a module.
Args:
module: Module to get name for. If module is a string, try to find
module in sys.modules.
Returns:
If module contains 'package' attribute, uses that as package name.
Else, if module is not the '__main__' module, the module __name__.
Else, the base name of the module file name. Else None.
"""
if isinstance(module, six.string_types):
try:
module = sys.modules[module]
except KeyError:
return None
try:
return six.text_type(module.package)
except AttributeError:
if module.__name__ == '__main__':
try:
file_name = module.__file__
except AttributeError:
pass
else:
base_name = os.path.basename(file_name)
split_name = os.path.splitext(base_name)
if len(split_name) == 1:
return six.text_type(base_name)
return u'.'.join(split_name[:-1])
return six.text_type(module.__name__) | 0.000845 |
def _none_subst(self, *args):
""" Helper function to insert full ranges for |None| for X_iter methods.
Custom method, specifically tailored, taking in the arguments from
an X_iter method and performing the replacement of |None| after
error-checking the arguments for a max of one |None| value, and ensuring
that if a |None| is present, no other non-|str| iterables are present.
Parameters
----------
args : 3-5 arguments of |int| or iterable |int|, or |None|
First argument is always the indices for the geometries; all
following are for the atoms in sequence as required for the
particular :samp:`{x}_iter` method
Returns
-------
arglist : 3-5 arguments, matching input params
Argument list, with |None| substituted if validly present
Raises
------
~exceptions.ValueError : If more than one |None| argument is present
~exceptions.ValueError : If an arg is non-|str| iterable when one
|None| is present
"""
# Imports
import numpy as np
# Initialize argument list return value, and as None not found
arglist = [a for a in args]
none_found = False
# Check for None values
none_vals = list(map(lambda e: isinstance(e, type(None)), arglist))
# Error if more than one None; handle if exactly one; pass through if
# none.
if np.count_nonzero(none_vals) > 1:
raise ValueError(
"Multiple 'None' values [indices {0}] not supported"
.format(tuple(np.nonzero(none_vals)[0])))
elif np.count_nonzero(none_vals) == 1:
# Must be no iterables that are not strings. Thus, an element-wise
# test for iterability and an element-wise test for stringiness
# must give matching arrays
if not all(np.equal(list(map(np.iterable, arglist)),
list(map(lambda e: isinstance(e, str), arglist)))):
raise ValueError(
"'None' as parameter invalid with non-str iterables")
## end if
# Parameters okay; replace the None with the appropriate range()
none_found = True
none_loc = np.nonzero(none_vals)[0][0]
arglist[none_loc] = \
range(self.num_geoms if none_loc == 0 else self.num_atoms)
## end if
# Return the arguments list and the none-found value
return arglist | 0.003089 |
def collect_segment_partitions(self):
"""Return a dict of segments partitions, keyed on the name of the parent partition
"""
from collections import defaultdict
# Group the segments by their parent partition name, which is the
# same name, but without the segment.
partitions = defaultdict(set)
for p in self.dataset.partitions:
if p.type == p.TYPE.SEGMENT:
name = p.identity.name
name.segment = None
partitions[name].add(p)
return partitions | 0.005282 |
def _save_notebook(self, os_path, nb):
"""Save a notebook to an os_path."""
with self.atomic_writing(os_path, encoding='utf-8') as f:
if ftdetect(os_path) == 'notebook':
nbformat.write(nb, f, version=nbformat.NO_CONVERT)
elif ftdetect(os_path) == 'markdown':
nbjson = nbformat.writes(nb, version=nbformat.NO_CONVERT)
markdown = convert(nbjson,
informat='notebook',
outformat='markdown',
strip_outputs=self.strip_outputs)
f.write(markdown) | 0.003091 |
def schema_to_json(self, schema_list, destination):
"""Takes a list of schema field objects.
Serializes the list of schema field objects as json to a file.
Destination is a file path or a file object.
"""
json_schema_list = [f.to_api_repr() for f in schema_list]
if isinstance(destination, io.IOBase):
return self._schema_to_json_file_object(json_schema_list, destination)
with open(destination, mode="w") as file_obj:
return self._schema_to_json_file_object(json_schema_list, file_obj) | 0.005263 |
def get_client_by_appid(self, authorizer_appid):
"""
        Get a Client object by authorizer_appid
        :param authorizer_appid: appid of the authorized official account
"""
access_token_key = '{0}_access_token'.format(authorizer_appid)
refresh_token_key = '{0}_refresh_token'.format(authorizer_appid)
access_token = self.session.get(access_token_key)
refresh_token = self.session.get(refresh_token_key)
assert refresh_token
if not access_token:
ret = self.refresh_authorizer_token(
authorizer_appid,
refresh_token
)
access_token = ret['authorizer_access_token']
refresh_token = ret['authorizer_refresh_token']
access_token_key = '{0}_access_token'.format(authorizer_appid)
expires_in = 7200
if 'expires_in' in ret:
expires_in = ret['expires_in']
self.session.set(access_token_key, access_token, expires_in)
return WeChatComponentClient(
authorizer_appid,
self,
session=self.session
) | 0.001792 |
def expr_tokenizer(expr, operator_tokens):
"""expr_tokenizer yields the components ("tokens") forming the expression.
Tokens are split by whitespace which is never considered a token in its
    own right. operator_tokens should likely include "(" and ")"; operators
    split the expression strictly, meaning the word 'test' will be split into
    ['t', 'e', 'st'] if 'e' is an operator.
:param expr: The expression to break into tokens.
:param operator_tokens: A list of operators to extract as tokens.
"""
operator_tokens.sort(key=len, reverse=True)
for m in re.finditer(
r"""(\s+) | # Whitespace
({0}) | # Operators
(.+?)(?={0}|\s|$) # Patterns
""".format("|".join(re.escape(op) for op in operator_tokens)),
expr, re.X
):
token = m.group(2) or m.group(3)
if token:
yield token | 0.001098 |
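Assuming `re` is imported in the surrounding module (the tokenizer uses it), the generator can be exercised directly; the split below follows from the regex: longer operators are preferred, whitespace is dropped, and everything else up to the next operator or whitespace becomes a single token.

```python
import re  # expr_tokenizer relies on the re module being in scope

tokens = list(expr_tokenizer("(a + bc)", ["(", ")", "+"]))
print(tokens)  # ['(', 'a', '+', 'bc', ')']
```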
def read(self, subpath=None):
"""
Returns the UTF-8 Readme content.
Raises ReadmeNotFoundError if subpath is specified since
subpaths are not supported for text readers.
"""
# Lazily read STDIN
if self.text is None and subpath is None:
self.text = self.read_stdin()
return super(StdinReader, self).read(subpath) | 0.005141 |
def palette(hues, saturations, values):
"""Generate a palette.
Parameters
----------
hues : `int`
Number of hues.
saturations : `int`
Number of saturations.
values : `int`
Number of values.
Raises
------
ValueError
If `hues` * `saturations` * `values` > 256
or min(`hues`, `saturations`, `values`) < 1.
Returns
-------
`list` of `int`
Palette for `PIL.Image.putpalette`.
"""
size = hues * saturations * values
if size > 256:
raise ValueError('palette size > 256: {0}'.format(size))
if min(hues, saturations, values) < 1:
raise ValueError('invalid palette size: {0} {1} {2}'
.format(hues, saturations, values))
ret = []
if hues == 1 and saturations == 1:
if values == 1:
size = 0
else:
nvalues = values - 1
for value in range(values):
value1 = value * 255 // nvalues
ret.extend((value1, value1, value1))
else:
for saturation in range(1, saturations + 1):
saturation1 = saturation / saturations
for hue in range(1, hues + 1):
hue1 = hue / hues
for value in range(1, values + 1):
value1 = value / values
ret.extend(floor(x * 255)
for x in hsv_to_rgb(hue1, saturation1, value1))
ret.extend(0 for _ in range((256 - size) * 3))
return ret | 0.000654 |
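The function expects `floor` (from `math`) and `hsv_to_rgb` (from `colorsys`) to be in scope. A short usage sketch under those assumptions, handing the result to Pillow:

```python
from math import floor
from colorsys import hsv_to_rgb  # names palette() expects at module level
from PIL import Image

pal = palette(hues=6, saturations=2, values=4)  # 48 colours
assert len(pal) == 256 * 3                      # always padded to a full 256-entry palette
img = Image.new('P', (64, 64))
img.putpalette(pal)
```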
def _inertia_from_labels(X, centers, labels):
"""Compute inertia with cosine distance using known labels.
"""
n_examples, n_features = X.shape
inertia = np.zeros((n_examples,))
for ee in range(n_examples):
inertia[ee] = 1 - X[ee, :].dot(centers[int(labels[ee]), :].T)
return np.sum(inertia) | 0.003096 |
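Because the rows are assumed to be unit-normalized, a point assigned to a centre pointing in the same direction contributes zero inertia. A quick check with toy data (assuming `numpy` is imported as `np`, as the snippet expects):

```python
import numpy as np

X = np.array([[1.0, 0.0], [0.0, 1.0]])        # two unit vectors
centers = np.array([[1.0, 0.0], [0.0, 1.0]])  # matching cluster directions
labels = np.array([0, 1])
print(_inertia_from_labels(X, centers, labels))  # 0.0: a perfect cosine match
```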
def unsubscribe(self, jid, node=None, *,
subscription_jid=None,
subid=None):
"""
Unsubscribe from a node.
:param jid: Address of the PubSub service.
:type jid: :class:`aioxmpp.JID`
:param node: Name of the PubSub node to unsubscribe from.
:type node: :class:`str`
:param subscription_jid: The address to subscribe from the service.
:type subscription_jid: :class:`aioxmpp.JID`
:param subid: Unique ID of the subscription to remove.
:type subid: :class:`str`
:raises aioxmpp.errors.XMPPError: as returned by the service
By default, the unsubscribe request will be for the bare JID of the
client. It can be specified explicitly using the `subscription_jid`
argument.
If available, the `subid` should also be specified.
If an error occurs, the corresponding :class:`~.errors.XMPPError` is
raised.
"""
subscription_jid = subscription_jid or self.client.local_jid.bare()
iq = aioxmpp.stanza.IQ(to=jid, type_=aioxmpp.structs.IQType.SET)
iq.payload = pubsub_xso.Request(
pubsub_xso.Unsubscribe(subscription_jid, node=node, subid=subid)
)
yield from self.client.send(iq) | 0.003065 |
def save(self, inplace=True):
"""
Saves modification to the api server.
"""
modified = self._modified_data()
if bool(modified):
new_data = self.permissions.copy()
new_data.update(modified['permissions'])
data = {
'permissions': new_data
}
url = six.text_type(self.href)
self._api.patch(url=url, data=data, append_base=False)
else:
raise ResourceNotModified() | 0.003953 |
async def fetch_batch(self, request):
"""Fetches a specific batch from the validator, specified by id.
Request:
path:
- batch_id: The 128-character id of the batch to be fetched
Response:
data: A JSON object with the data from the fully expanded Batch
link: The link to this exact query
"""
error_traps = [error_handlers.BatchNotFoundTrap]
batch_id = request.match_info.get('batch_id', '')
self._validate_id(batch_id)
response = await self._query_validator(
Message.CLIENT_BATCH_GET_REQUEST,
client_batch_pb2.ClientBatchGetResponse,
client_batch_pb2.ClientBatchGetRequest(batch_id=batch_id),
error_traps)
return self._wrap_response(
request,
data=self._expand_batch(response['batch']),
metadata=self._get_metadata(request, response)) | 0.002114 |
def sample(self, batch_size, batch_idxs=None):
"""Return a randomized batch of experiences
# Argument
batch_size (int): Size of the all batch
batch_idxs (int): Indexes to extract
# Returns
A list of experiences randomly selected
"""
# It is not possible to tell whether the first state in the memory is terminal, because it
# would require access to the "terminal" flag associated to the previous state. As a result
# we will never return this first state (only using `self.terminals[0]` to know whether the
# second state is terminal).
# In addition we need enough entries to fill the desired window length.
assert self.nb_entries >= self.window_length + 2, 'not enough entries in the memory'
if batch_idxs is None:
# Draw random indexes such that we have enough entries before each index to fill the
# desired window length.
batch_idxs = sample_batch_indexes(
self.window_length, self.nb_entries - 1, size=batch_size)
batch_idxs = np.array(batch_idxs) + 1
assert np.min(batch_idxs) >= self.window_length + 1
assert np.max(batch_idxs) < self.nb_entries
assert len(batch_idxs) == batch_size
# Create experiences
experiences = []
for idx in batch_idxs:
terminal0 = self.terminals[idx - 2]
while terminal0:
# Skip this transition because the environment was reset here. Select a new, random
# transition and use this instead. This may cause the batch to contain the same
# transition twice.
idx = sample_batch_indexes(self.window_length + 1, self.nb_entries, size=1)[0]
terminal0 = self.terminals[idx - 2]
assert self.window_length + 1 <= idx < self.nb_entries
# This code is slightly complicated by the fact that subsequent observations might be
# from different episodes. We ensure that an experience never spans multiple episodes.
# This is probably not that important in practice but it seems cleaner.
state0 = [self.observations[idx - 1]]
for offset in range(0, self.window_length - 1):
current_idx = idx - 2 - offset
assert current_idx >= 1
current_terminal = self.terminals[current_idx - 1]
if current_terminal and not self.ignore_episode_boundaries:
# The previously handled observation was terminal, don't add the current one.
# Otherwise we would leak into a different episode.
break
state0.insert(0, self.observations[current_idx])
while len(state0) < self.window_length:
state0.insert(0, zeroed_observation(state0[0]))
action = self.actions[idx - 1]
reward = self.rewards[idx - 1]
terminal1 = self.terminals[idx - 1]
# Okay, now we need to create the follow-up state. This is state0 shifted on timestep
# to the right. Again, we need to be careful to not include an observation from the next
# episode if the last state is terminal.
state1 = [np.copy(x) for x in state0[1:]]
state1.append(self.observations[idx])
assert len(state0) == self.window_length
assert len(state1) == len(state0)
experiences.append(Experience(state0=state0, action=action, reward=reward,
state1=state1, terminal1=terminal1))
assert len(experiences) == batch_size
return experiences | 0.004548 |
def create_pool(self, name, method='ROUND_ROBIN'):
'''
Create a pool on the F5 load balancer
'''
lbmethods = self.bigIP.LocalLB.Pool.typefactory.create(
'LocalLB.LBMethod'
)
supported_method = [i[0] for i in lbmethods if (
i[0].split('_', 2)[-1] == method.upper()
)]
if supported_method and not self.check_pool(name):
try:
self.bigIP.LocalLB.Pool.create(pool_names=[name],
lb_methods=[supported_method],
members=[[]])
except Exception as e:
raise Exception(
'Unable to create `{0}` pool\n\n{1}'.format(name, e)
)
else:
raise Exception('Unsupported method')
return True | 0.002294 |
def __rename_path(self, source, target):
"""
Renames given source with given target name.
:param source: Source file.
:type source: unicode
:param target: Target file.
:type target: unicode
"""
if not foundations.common.path_exists(source):
return
parent_directory = os.path.dirname(source)
is_path_registered = self.__engine.file_system_events_manager.is_path_registered(parent_directory)
is_path_registered and self.__engine.file_system_events_manager.unregister_path(parent_directory)
os.rename(source, target)
is_path_registered and self.__engine.file_system_events_manager.register_path(parent_directory) | 0.006868 |
def _parse_transpile_args(circuits, backend,
basis_gates, coupling_map, backend_properties,
initial_layout, seed_transpiler, optimization_level,
pass_manager):
"""Resolve the various types of args allowed to the transpile() function through
duck typing, overriding args, etc. Refer to the transpile() docstring for details on
what types of inputs are allowed.
Here the args are resolved by converting them to standard instances, and prioritizing
them in case a transpile option is passed through multiple args (explicitly setting an
arg has more priority than the arg set by backend)
Returns:
list[TranspileConfig]: a transpile config for each circuit, which is a standardized
object that configures the transpiler and determines the pass manager to use.
"""
# Each arg could be single or a list. If list, it must be the same size as
# number of circuits. If single, duplicate to create a list of that size.
num_circuits = len(circuits)
basis_gates = _parse_basis_gates(basis_gates, backend, circuits)
coupling_map = _parse_coupling_map(coupling_map, backend, num_circuits)
backend_properties = _parse_backend_properties(backend_properties, backend, num_circuits)
initial_layout = _parse_initial_layout(initial_layout, circuits)
seed_transpiler = _parse_seed_transpiler(seed_transpiler, num_circuits)
optimization_level = _parse_optimization_level(optimization_level, num_circuits)
pass_manager = _parse_pass_manager(pass_manager, num_circuits)
transpile_configs = []
for args in zip(basis_gates, coupling_map, backend_properties, initial_layout,
seed_transpiler, optimization_level, pass_manager):
transpile_config = TranspileConfig(basis_gates=args[0],
coupling_map=args[1],
backend_properties=args[2],
initial_layout=args[3],
seed_transpiler=args[4],
optimization_level=args[5],
pass_manager=args[6])
transpile_configs.append(transpile_config)
return transpile_configs | 0.004241 |
def _set_ipv6_address(self, v, load=False):
"""
Setter method for ipv6_address, mapped from YANG variable /interface/fortygigabitethernet/ipv6/ipv6_config/address/ipv6_address (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_address is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_address() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("address",ipv6_address.ipv6_address, yang_name="ipv6-address", rest_name="ipv6-address", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='address', extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}), is_container='list', yang_name="ipv6-address", rest_name="ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipv6_address must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("address",ipv6_address.ipv6_address, yang_name="ipv6-address", rest_name="ipv6-address", parent=self, is_container='list', user_ordered=True, path_helper=self._path_helper, yang_keys='address', extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}), is_container='list', yang_name="ipv6-address", rest_name="ipv6-address", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set the IP address of an interface', u'cli-no-key-completion': None, u'cli-suppress-mode': None, u'cli-compact-syntax': None, u'cli-drop-node-name': None, u'cli-no-match-completion': None, u'callpoint': u'phy-intf-ipv6-addr-cp'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-config', defining_module='brocade-ipv6-config', yang_type='list', is_config=True)""",
})
self.__ipv6_address = t
if hasattr(self, '_set'):
self._set() | 0.003726 |
def memoize(obj):
"""
Memoize objects to trade memory for execution speed
Use a limited size cache to store the value, which takes into account
The calling args and kwargs
See https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
"""
    # An ordered mapping is needed so popitem(last=False) below can evict the oldest entry
    cache = obj.cache = collections.OrderedDict()
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
# only keep the most recent 100 entries
if len(cache) > 100:
cache.popitem(last=False)
return cache[key]
return memoizer | 0.001543 |
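Typical use of the decorator, assuming `functools` (and `collections`, which backs the ordered cache) are available in the defining module:

```python
import functools
import collections

@memoize
def slow_square(x):
    print("computing", x)
    return x * x

slow_square(3)  # prints "computing 3" and returns 9
slow_square(3)  # returns 9 straight from the cache, no recomputation
```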
def add_prefix(self, args):
""" Add a prefix.
Valid keys in the `args`-struct:
* `auth` [struct]
Authentication options passed to the :class:`AuthFactory`.
* `attr` [struct]
Attributes to set on the new prefix.
        * `args` [struct]
Arguments for addition of prefix, such as what pool or prefix
it should be allocated from.
Returns ID of created prefix.
"""
try:
res = self.nip.add_prefix(args.get('auth'), args.get('attr'), args.get('args'))
# mangle result
res = _mangle_prefix(res)
return res
except (AuthError, NipapError) as exc:
self.logger.debug(unicode(exc))
raise Fault(exc.error_code, unicode(exc)) | 0.003601 |
def entrance_angled(angle, method='Idelchik'):
r'''Returns loss coefficient for a sharp, angled entrance to a pipe
flush with the wall of a reservoir. First published in [2]_, it has been
recommended in [3]_ as well as in [1]_.
.. math::
K = 0.57 + 0.30\cos(\theta) + 0.20\cos(\theta)^2
.. figure:: fittings/entrance_mounted_at_an_angle.png
:scale: 30 %
        :alt: entrance mounted at an angle; after [1]_
Parameters
----------
angle : float
Angle of inclination (90° = straight, 0° = parallel to pipe wall),
[degrees]
method : str, optional
The method to use; only 'Idelchik' is supported
Returns
-------
K : float
Loss coefficient [-]
Notes
-----
Not reliable for angles under 20 degrees.
Loss coefficient is the same for an upward or downward angled inlet.
Examples
--------
>>> entrance_angled(30)
0.9798076211353315
References
----------
.. [1] Rennels, Donald C., and Hobart M. Hudson. Pipe Flow: A Practical
and Comprehensive Guide. 1st edition. Hoboken, N.J: Wiley, 2012.
.. [2] Idel’chik, I. E. Handbook of Hydraulic Resistance: Coefficients of
Local Resistance and of Friction (Spravochnik Po Gidravlicheskim
Soprotivleniyam, Koeffitsienty Mestnykh Soprotivlenii i Soprotivleniya
Treniya). National technical information Service, 1966.
.. [3] Blevins, Robert D. Applied Fluid Dynamics Handbook. New York, N.Y.:
Van Nostrand Reinhold Co., 1984.
'''
if method is None:
method = 'Idelchik'
if method == 'Idelchik':
cos_term = cos(radians(angle))
return 0.57 + cos_term*(0.2*cos_term + 0.3)
else:
raise ValueError('Specified method not recognized; methods are %s'
%(entrance_angled_methods)) | 0.003755 |
def pave_event_space(fn=pair):
"""
:return:
a pair producer that ensures the seeder and delegator share the same event space.
"""
global _event_space
event_space = next(_event_space)
@_ensure_seeders_list
def p(seeders, delegator_factory, *args, **kwargs):
return fn(seeders + [per_event_source_id(event_space)],
delegator_factory, *args, event_space=event_space, **kwargs)
return p | 0.004454 |
def _linearEOM(y,t,pot):
"""
NAME:
linearEOM
PURPOSE:
the one-dimensional equation-of-motion
INPUT:
y - current phase-space position
t - current time
pot - (list of) linearPotential instance(s)
OUTPUT:
dy/dt
HISTORY:
2010-07-13 - Bovy (NYU)
"""
return [y[1],_evaluatelinearForces(pot,y[0],t=t)] | 0.016 |
def _output_to_list(cmdoutput):
'''
Convert rabbitmqctl output to a list of strings (assuming whitespace-delimited output).
Ignores output lines that shouldn't be parsed, like warnings.
cmdoutput: string output of rabbitmqctl commands
'''
return [item for line in cmdoutput.splitlines() if _safe_output(line) for item in line.split()] | 0.00838 |
def get_identifiability_dataframe(self,singular_value=None,precondition=False):
"""get the parameter identifiability as a pandas dataframe
Parameters
----------
singular_value : int
the singular spectrum truncation point. Defaults to minimum of
non-zero-weighted observations and adjustable parameters
precondition : bool
flag to use the preconditioned hessian (xtqt + sigma_theta^-1).
Default is False
Returns
-------
pandas.DataFrame : pandas.DataFrame
A pandas dataframe of the V_1**2 Matrix with the
identifiability in the column labeled "ident"
"""
if singular_value is None:
singular_value = int(min(self.pst.nnz_obs, self.pst.npar_adj))
#v1_df = self.qhalfx.v[:, :singular_value].to_dataframe() ** 2
xtqx = self.xtqx
if precondition:
xtqx = xtqx + self.parcov.inv
#v1_df = self.xtqx.v[:, :singular_value].to_dataframe() ** 2
v1_df = xtqx.v[:, :singular_value].to_dataframe() ** 2
v1_df["ident"] = v1_df.sum(axis=1)
return v1_df | 0.005133 |
def save_list(lst, path):
"""
Save items from list to the file.
"""
with open(path, 'wb') as out:
lines = []
for item in lst:
if isinstance(item, (six.text_type, six.binary_type)):
lines.append(make_str(item))
else:
lines.append(make_str(json.dumps(item)))
out.write(b'\n'.join(lines) + b'\n') | 0.002564 |
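The helper leans on an external `make_str` to coerce each line to bytes before writing. A minimal stand-in (an assumption for illustration, not the project's own implementation) and a call might look like:

```python
import json
import six

def make_str(value, encoding='utf-8'):
    # Hypothetical stand-in: ensure bytes for the binary file handle.
    if isinstance(value, six.binary_type):
        return value
    return value.encode(encoding)

save_list(['foo', b'bar', {'n': 1}], '/tmp/items.txt')  # non-string items are JSON-encoded
```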
def bg_compensate(img, sigma, splinepoints, scale):
'''Reads file, subtracts background. Returns [compensated image, background].'''
from PIL import Image
import pylab
from matplotlib.image import pil_to_array
from centrosome.filter import canny
import matplotlib
img = Image.open(img)
if img.mode=='I;16':
# 16-bit image
# deal with the endianness explicitly... I'm not sure
# why PIL doesn't get this right.
imgdata = np.fromstring(img.tostring(),np.uint8)
imgdata.shape=(int(imgdata.shape[0]/2),2)
imgdata = imgdata.astype(np.uint16)
hi,lo = (0,1) if img.tag.prefix == 'MM' else (1,0)
imgdata = imgdata[:,hi]*256 + imgdata[:,lo]
img_size = list(img.size)
img_size.reverse()
new_img = imgdata.reshape(img_size)
# The magic # for maximum sample value is 281
if 281 in img.tag:
img = new_img.astype(np.float32) / img.tag[281][0]
elif np.max(new_img) < 4096:
img = new_img.astype(np.float32) / 4095.
else:
img = new_img.astype(np.float32) / 65535.
else:
img = pil_to_array(img)
pylab.subplot(1,3,1).imshow(img, cmap=matplotlib.cm.Greys_r)
pylab.show()
if len(img.shape)>2:
raise ValueError('Image must be grayscale')
## Create mask that will fix problem when image has black areas outside of well
edges = canny(img, np.ones(img.shape, bool), 2, .1, .3)
ci = np.cumsum(edges, 0)
cj = np.cumsum(edges, 1)
i,j = np.mgrid[0:img.shape[0], 0:img.shape[1]]
mask = ci > 0
mask = mask & (cj > 0)
mask[1:,:] &= (ci[0:-1,:] < ci[-1,j[0:-1,:]])
mask[:,1:] &= (cj[:,0:-1] < cj[i[:,0:-1],-1])
import time
t0 = time.clock()
bg = backgr(img, mask, MODE_AUTO, sigma, splinepoints=splinepoints, scale=scale)
print("Executed in %f sec" % (time.clock() - t0))
bg[~mask] = img[~mask]
pylab.subplot(1,3,2).imshow(img - bg, cmap=matplotlib.cm.Greys_r)
pylab.subplot(1,3,3).imshow(bg, cmap=matplotlib.cm.Greys_r)
pylab.show() | 0.015662 |
def sentence_matches(self, sentence_text):
"""Returns true iff the sentence contains this mention's upstream
and downstream participants, and if one of the stemmed verbs in
the sentence is the same as the stemmed action type."""
has_upstream = False
has_downstream = False
has_verb = False
# Get the first word of the action type and assume this is the verb
# (Ex. get depends for depends on)
actiontype_words = word_tokenize(self.mention.actiontype)
actiontype_verb_stemmed = stem(actiontype_words[0])
words = word_tokenize(sentence_text)
if self.string_matches_sans_whitespace(sentence_text.lower(),
self.mention.upstream.lower()):
has_upstream = True
if self.string_matches_sans_whitespace(sentence_text.lower(),
self.mention.downstream.lower()):
has_downstream = True
for word in words:
if actiontype_verb_stemmed == stem(word):
has_verb = True
return has_upstream and has_downstream and has_verb | 0.005445 |
def ul(
self,
text):
"""*convert plain-text to MMD unordered list*
**Key Arguments:**
- ``text`` -- the text to convert to MMD unordered list
**Return:**
- ``ul`` -- the MMD unordered list
**Usage:**
To convert text to a MMD unordered list:
.. code-block:: python
ul = md.ul(" This is a list item ")
print ul
# OUTPUT:
# * This is a list item
#
"""
m = self.reWS.match(text)
ul = []
for l in m.group(2).split("\n"):
prefix, text, suffix = self._snip_whitespace(l)
ul.append("%(prefix)s* %(text)s " % locals())
return ("\n").join(ul) + "\n\n" | 0.003745 |
def _find_parameter(self, name_list, create_missing=False, quiet=False):
"""
Tries to find and return the parameter of the specified name. The name
should be of the form
['branch1','branch2', 'parametername']
Setting create_missing=True means if it doesn't find a branch it
will create one.
Setting quiet=True will suppress error messages (for checking)
"""
# make a copy so this isn't destructive to the supplied list
s = list(name_list)
# if the length is zero, return the root widget
if len(s)==0: return self._widget
# the first name must be treated differently because it is
# the main widget, not a branch
r = self._clean_up_name(s.pop(0))
# search for the root name
result = self._widget.findItems(r, _g.QtCore.Qt.MatchCaseSensitive | _g.QtCore.Qt.MatchFixedString)
# if it pooped and we're not supposed to create it, quit
if len(result) == 0 and not create_missing:
if not quiet: self.print_message("ERROR: Could not find '"+r+"'")
return None
# otherwise use the first value
elif len(result): x = result[0].param
# otherwise, if there are more names in the list,
# create the branch and keep going
else:
x = _g.parametertree.Parameter.create(name=r, type='group', children=[])
self._widget.addParameters(x)
# loop over the remaining names, and use a different search method
for n in s:
# first clean up
n = self._clean_up_name(n)
# try to search for the name
try: x = x.param(n)
# name doesn't exist
except:
# if we're supposed to, create the new branch
if create_missing: x = x.addChild(_g.parametertree.Parameter.create(name=n, type='group', children=[]))
# otherwise poop out
else:
if not quiet: self.print_message("ERROR: Could not find '"+n+"' in '"+x.name()+"'")
return None
# return the last one we found / created.
return x | 0.006352 |
def rename(self, old_fieldname, new_fieldname):
"""
Renames a specific field, and preserves the underlying order.
"""
if old_fieldname not in self:
raise Exception("DataTable does not have field `%s`" %
old_fieldname)
if not isinstance(new_fieldname, basestring):
raise ValueError("DataTable fields must be strings, not `%s`" %
type(new_fieldname))
if old_fieldname == new_fieldname:
return
new_names = self.fields
location = new_names.index(old_fieldname)
del new_names[location]
new_names.insert(location, new_fieldname)
self.fields = new_names | 0.00274 |
def cross_state_value(state):
"""
Compute the state value of the cross solving search.
"""
centres, edges = state
value = 0
for edge in edges:
if "U" in edge:
if edge["U"] == centres["D"]["D"]:
value += 1
else:
value += 2
elif "D" in edge:
if edge["D"] != centres["D"]["D"]:
value += 3
else:
value += 1
edgeposes = {}
counts = {f: 0 for f in "LFRB"}
ngedges = []
for edge in edges:
if "U" in edge and edge["U"] == centres["D"]["D"]:
k = "".join(edge.facings.keys()).replace("U", "")
edgeposes[k] = edge[k]
counts[k] += 1
elif "D" in edge and edge["D"] == centres["D"]["D"]:
k = "".join(edge.facings.keys()).replace("D", "")
edgeposes[k] = edge[k]
counts[k] += 1
elif "U" in edge or "D" in edge:
ngedges.append(edge)
else:
for k, s in edge:
if s != centres["D"]["D"]:
edgeposes[k] = s
counts[k] += 1
break
for edge in ngedges:
idx = "LFRB".index(edge[centres["D"].colour])
for i in [-1, 1]:
if "LFRB"[(idx+1)%4] not in edgeposes:
k = "".join(edge.facings.keys()).replace("LFRB"[idx], "")
edgeposes["LFRB"[(idx+1)%4]] = edge[k]
counts["LFRB"[(idx+1)%4]] += 1
break
else:
k = "".join(edge.facings.keys()).replace("LFRB"[idx], "")
if counts["LFRB"[(idx-1)%4]] > counts["LFRB"[(idx+1)%4]]:
edgeposes["LFRB"[(idx-1)%4]] = edge[k]
else:
edgeposes["LFRB"[(idx+1)%4]] = edge[k]
relative_pos = {f: centres[f][f] for f in "LFRB"}
if len(edgeposes) == 4:
for i in range(4):
edgeposes["L"], edgeposes["F"], edgeposes["R"], edgeposes["B"] = \
edgeposes["F"], edgeposes["R"], edgeposes["B"], edgeposes["L"]
if edgeposes == relative_pos:
break
else:
value += 5
else:
value += 3
return value | 0.004461 |
def _add_command(parser, subparser_fn, handler, cell_required=False,
cell_prohibited=False):
""" Create and initialize a pipeline subcommand handler. """
sub_parser = subparser_fn(parser)
sub_parser.set_defaults(func=lambda args, cell: _dispatch_handler(
args, cell, sub_parser, handler, cell_required=cell_required,
cell_prohibited=cell_prohibited)) | 0.01039 |
def get_all_hits(self):
"""
Return all of a Requester's HITs
Despite what search_hits says, it does not return all hits, but
instead returns a page of hits. This method will pull the hits
from the server 100 at a time, but will yield the results
iteratively, so subsequent requests are made on demand.
"""
page_size = 100
search_rs = self.search_hits(page_size=page_size)
total_records = int(search_rs.TotalNumResults)
get_page_hits = lambda(page): self.search_hits(page_size=page_size, page_number=page)
page_nums = self._get_pages(page_size, total_records)
hit_sets = itertools.imap(get_page_hits, page_nums)
return itertools.chain.from_iterable(hit_sets) | 0.006427 |
def calculate_betweenness_centality(graph: BELGraph, number_samples: int = CENTRALITY_SAMPLES) -> Counter:
"""Calculate the betweenness centrality over nodes in the graph.
Tries to do it with a certain number of samples, but then tries a complete approach if it fails.
"""
try:
res = nx.betweenness_centrality(graph, k=number_samples)
except Exception:
res = nx.betweenness_centrality(graph)
return Counter(res) | 0.006637 |
def scan(self, cursor='0', match=None, count=10):
"""Emulate scan."""
def value_function():
return sorted(self.redis.keys()) # sorted list for consistent order
return self._common_scan(value_function, cursor=cursor, match=match, count=count) | 0.014388 |
def pretty_print(input_word, anagrams, by_length=False):
"""Prints the anagram results sorted by score to stdout.
Args:
input_word: the base word we searched on
anagrams: generator of (word, score) from anagrams_in_word
by_length: a boolean to declare printing by length instead of score
"""
scores = {}
if by_length:
noun = "tiles"
for word, score in anagrams:
try:
scores[len(word)].append("{0} ({1:d})".format(word, score))
except KeyError:
scores[len(word)] = ["{0} ({1:d})".format(word, score)]
else:
noun = "points"
for word, score in anagrams:
try:
scores[score].append(word)
except KeyError:
scores[score] = [word]
print("Anagrams for {0}{1}:".format(input_word, " (score)" * by_length))
if not valid_scrabble_word(input_word):
print("{0} is not possible in Scrabble.".format(input_word))
for key, value in sorted(scores.items(), reverse=True):
print("{0:d} {1}: {2}".format(key, noun, ", ".join(value))) | 0.00088 |
def is_suspicious( pe ):
"""
unusual locations of import tables
non recognized section names
presence of long ASCII strings
"""
relocations_overlap_entry_point = False
sequential_relocs = 0
# If relocation data is found and the entries go over the entry point, and also are very
# continuous or point outside section's boundaries => it might imply that an obfuscation
# trick is being used or the relocations are corrupt (maybe intentionally)
#
if hasattr(pe, 'DIRECTORY_ENTRY_BASERELOC'):
for base_reloc in pe.DIRECTORY_ENTRY_BASERELOC:
last_reloc_rva = None
for reloc in base_reloc.entries:
if reloc.rva <= pe.OPTIONAL_HEADER.AddressOfEntryPoint <= reloc.rva + 4:
relocations_overlap_entry_point = True
if last_reloc_rva is not None and last_reloc_rva <= reloc.rva <= last_reloc_rva + 4:
sequential_relocs += 1
last_reloc_rva = reloc.rva
# If import tables or strings exist (are pointed to) to within the header or in the area
# between the PE header and the first section that's supicious
#
# IMPLEMENT
warnings_while_parsing = False
    # If we have warnings, that's suspicious; some of those will be because
    # out-of-the-ordinary values are found in the PE header fields
# Things that are reported in warnings:
# (parsing problems, special section characteristics i.e. W & X, uncommon values of fields,
# unusual entrypoint, suspicious imports)
#
warnings = pe.get_warnings()
if warnings:
        warnings_while_parsing = True
# If there are few or none (should come with a standard "density" of strings/kilobytes of data) longer (>8)
# ascii sequences that might indicate packed data, (this is similar to the entropy test in some ways but
# might help to discard cases of legitimate installer or compressed data)
# If compressed data (high entropy) and is_driver => uuuuhhh, nasty
pass | 0.006893 |
def _uri_split(uri):
"""Splits up an URI or IRI."""
scheme, netloc, path, query, fragment = _safe_urlsplit(uri)
auth = None
port = None
if '@' in netloc:
auth, netloc = netloc.split('@', 1)
if netloc.startswith('['):
host, port_part = netloc[1:].split(']', 1)
if port_part.startswith(':'):
port = port_part[1:]
elif ':' in netloc:
host, port = netloc.split(':', 1)
else:
host = netloc
return scheme, auth, host, port, path, query, fragment | 0.001887 |
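`_safe_urlsplit` is presumably a thin wrapper around `urllib.parse.urlsplit`; under that assumption the helper decomposes URIs as follows:

```python
# Assuming _safe_urlsplit behaves like urllib.parse.urlsplit:
print(_uri_split('http://user:pw@[::1]:8080/idx?q=1#top'))
# ('http', 'user:pw', '::1', '8080', '/idx', 'q=1', 'top')

print(_uri_split('ftp://example.com/file.txt'))
# ('ftp', None, 'example.com', None, '/file.txt', '', '')
```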
def variantcall_batch_region(items):
"""CWL entry point: variant call a batch of samples in a block of regions.
"""
items = [utils.to_single_data(x) for x in items]
align_bams = [dd.get_align_bam(x) for x in items]
variantcaller = _get_batch_variantcaller(items)
region_blocks = list(set([tuple(x.get("region_block")) for x in items if "region_block" in x]))
assert len(region_blocks) == 1, region_blocks
region_block = region_blocks[0]
# Pre-called input variant files
if not variantcaller and all(d.get("vrn_file") for d in items):
return {"vrn_file_region": None, "region_block": region_block}
caller_fn = get_variantcallers()[variantcaller]
assoc_files = tz.get_in(("genome_resources", "variation"), items[0], {})
region = _region_to_coords(region_block[0])
chrom, start, end = region
region_str = "_".join(str(x) for x in region)
batch_name = _get_batch_name(items)
out_file = os.path.join(dd.get_work_dir(items[0]), variantcaller, chrom,
"%s-%s-block.vcf.gz" % (batch_name, region_str))
utils.safe_makedir(os.path.dirname(out_file))
with pshared.bedtools_tmpdir(items[0]):
if variantcaller in SUPPORT_MULTICORE:
call_file = caller_fn(align_bams, items, dd.get_ref_file(items[0]), assoc_files,
[_region_to_coords(r) for r in region_block], out_file)
else:
call_file = _run_variantcall_batch_multicore(items, region_block, out_file)
return {"vrn_file_region": call_file, "region_block": region_block} | 0.003137 |
def included(self, path, is_dir=False):
"""Check patterns in order, last match that includes or excludes `path` wins. Return `None` on undecided."""
inclusive = None
for pattern in self.patterns:
if pattern.is_dir == is_dir and pattern.matches(path):
inclusive = pattern.inclusive
#print('+++' if inclusive else '---', path, pattern)
return inclusive | 0.009547 |
def get_annotations(cls, __fn):
"""Get the annotations of a given callable."""
if hasattr(__fn, '__func__'):
__fn = __fn.__func__
if hasattr(__fn, '__notes__'):
return __fn.__notes__
raise AttributeError('{!r} does not have annotations'.format(__fn)) | 0.006536 |
def concentration(self, pM=False):
"""Return the concentration (in Moles) of the particles in the box.
"""
concentr = (self.num_particles / NA) / self.box.volume_L
if pM:
concentr *= 1e12
return concentr | 0.007843 |
def context(self):
"""
An execution context created using :mod:`executor.contexts`.
The value of :attr:`context` defaults to a
:class:`~executor.contexts.LocalContext` object with the following
characteristics:
- The working directory of the execution context is set to the
value of :attr:`directory`.
- The environment variable given by :data:`DIRECTORY_VARIABLE` is set
to the value of :attr:`directory`.
:raises: :exc:`.MissingPasswordStoreError` when :attr:`directory`
doesn't exist.
"""
# Make sure the directory exists.
self.ensure_directory_exists()
# Prepare the environment variables.
environment = {DIRECTORY_VARIABLE: self.directory}
try:
# Try to enable the GPG agent in headless sessions.
environment.update(get_gpg_variables())
except Exception:
# If we failed then let's at least make sure that the
# $GPG_TTY environment variable is set correctly.
environment.update(GPG_TTY=execute("tty", capture=True, check=False, tty=True, silent=True))
return LocalContext(directory=self.directory, environment=environment) | 0.002389 |
def _load_raw_data(self, resource_name):
"""Extract raw data from resource
:param resource_name:
"""
# Instantiating the resource again as a simple `Resource` ensures that
# ``data`` will be returned as bytes.
upcast_resource = datapackage.Resource(
self.__resources[resource_name].descriptor,
default_base_path=self.__base_path)
return upcast_resource.data | 0.004566 |
def fit(self, X, y=None):
"""Compute mixture of von Mises Fisher clustering.
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
"""
if self.normalize:
X = normalize(X)
self._check_force_weights()
random_state = check_random_state(self.random_state)
X = self._check_fit_data(X)
(
self.cluster_centers_,
self.labels_,
self.inertia_,
self.weights_,
self.concentrations_,
self.posterior_,
) = movMF(
X,
self.n_clusters,
posterior_type=self.posterior_type,
force_weights=self.force_weights,
n_init=self.n_init,
n_jobs=self.n_jobs,
max_iter=self.max_iter,
verbose=self.verbose,
init=self.init,
random_state=random_state,
tol=self.tol,
copy_x=self.copy_x,
)
return self | 0.001938 |
def remove_value(self, keys, value):
"""
Remove a value (data item) from this tree node and its children.
Also updates the tree node's cumulative child count.
"""
self.count -= 1
if not self.key:
keys = self.__value_reverse_mapping[value]
del self.__value_reverse_mapping[value]
if len(keys) == 0:
self.values.remove(value)
else:
key = keys[0]
index = bisect.bisect_left(self.children, TreeNode(key, reversed=self.reversed))
assert index != len(self.children) and self.children[index].key == key
self.children[index].remove_value(keys[1:], value)
if self.tree_node_updated:
self.tree_node_updated(self.children[index])
if self.children[index].count == 0:
del self.children[index]
if self.child_removed:
self.child_removed(self, index) | 0.004073 |
def bulk_add_units(unit_list, **kwargs):
"""
Save all the units contained in the passed list, with the name of their dimension.
"""
# for unit in unit_list:
# add_unit(unit, **kwargs)
added_units = []
for unit in unit_list:
added_units.append(add_unit(unit, **kwargs))
return JSONObject({"units": added_units}) | 0.005556 |
def create_policy_for_vhost(
self, vhost, name,
definition,
pattern=None,
priority=0,
apply_to='all'):
"""
Create a policy for a vhost.
:param vhost: The virtual host the policy is for
:type vhost: str
:param name: The name of the policy
:type name: str
:param definition: The definition of the policy. Required
:type definition: dict
        :param pattern: The pattern of resource names to apply the policy to
        :type pattern: str
        :param priority: The priority of the policy. Defaults to 0
        :type priority: int
:param apply_to: What resource type to apply the policy to.
Usually "exchanges", "queues", or "all". Defaults to "all"
:type apply_to: str
Example ::
# Makes all queues and exchanges on vhost "/" highly available
>>> api.create_policy_for_vhost(
... vhost="/",
... name="ha-all",
... definition={"ha-mode": "all"},
... pattern="",
... apply_to="all")
"""
data = {
"pattern": pattern,
"definition": definition,
"priority": priority,
"apply-to": apply_to
}
self._api_put(
'/api/policies/{0}/{1}'.format(
urllib.parse.quote_plus(vhost),
urllib.parse.quote_plus(name),
),
data=data,
) | 0.001317 |
def setup_failures(self, gremlins):
"""Add gremlins to environment"""
assert isinstance(gremlins, dict) and 'gremlins' in gremlins
for gremlin in gremlins['gremlins']:
self.setup_failure(**gremlin)
self.push_rules() | 0.007692 |
def add_directives(kb_app: kb,
sphinx_app: Sphinx,
sphinx_env: BuildEnvironment,
                   docnames: List[str],
):
""" For each resource type, register a new Sphinx directive """
for k, v in list(kb_app.config.resources.items()):
sphinx_app.add_directive(k, ResourceDirective) | 0.002793 |
def makeMrkvHist(self):
'''
Makes a history of macroeconomic Markov states, stored in the attribute
MrkvNow_hist. This version ensures that each state is reached a sufficient
number of times to have a valid sample for calcDynamics to produce a good
dynamic rule. It will sometimes cause act_T to be increased beyond its
initially specified level.
Parameters
----------
None
Returns
-------
None
'''
if hasattr(self,'loops_max'):
loops_max = self.loops_max
else: # Maximum number of loops; final act_T never exceeds act_T*loops_max
loops_max = 10
state_T_min = 50 # Choose minimum number of periods in each state for a valid Markov sequence
logit_scale = 0.2 # Scaling factor on logit choice shocks when jumping to a new state
# Values close to zero make the most underrepresented states very likely to visit, while
# large values of logit_scale make any state very likely to be jumped to.
# Reset act_T to the level actually specified by the user
if hasattr(self,'act_T_orig'):
act_T = self.act_T_orig
else: # Or store it for the first time
self.act_T_orig = self.act_T
act_T = self.act_T
# Find the long run distribution of Markov states
w, v = np.linalg.eig(np.transpose(self.MrkvArray))
idx = (np.abs(w-1.0)).argmin()
x = v[:,idx].astype(float)
LR_dstn = (x/np.sum(x))
# Initialize the Markov history and set up transitions
MrkvNow_hist = np.zeros(self.act_T_orig,dtype=int)
cutoffs = np.cumsum(self.MrkvArray,axis=1)
loops = 0
go = True
MrkvNow = self.MrkvNow_init
t = 0
StateCount = self.MrkvArray.shape[0]
# Add histories until each state has been visited at least state_T_min times
while go:
draws = drawUniform(N=self.act_T_orig,seed=loops)
for s in range(draws.size): # Add act_T_orig more periods
MrkvNow_hist[t] = MrkvNow
MrkvNow = np.searchsorted(cutoffs[MrkvNow,:],draws[s])
t += 1
# Calculate the empirical distribution
state_T = np.zeros(StateCount)
for i in range(StateCount):
state_T[i] = np.sum(MrkvNow_hist==i)
# Check whether each state has been visited state_T_min times
if np.all(state_T >= state_T_min):
go = False # If so, terminate the loop
continue
# Choose an underrepresented state to "jump" to
if np.any(state_T == 0): # If any states have *never* been visited, randomly choose one of those
never_visited = np.where(np.array(state_T == 0))[0]
MrkvNow = np.random.choice(never_visited)
else: # Otherwise, use logit choice probabilities to visit an underrepresented state
emp_dstn = state_T/act_T
ratios = LR_dstn/emp_dstn
ratios_adj = ratios - np.max(ratios)
ratios_exp = np.exp(ratios_adj/logit_scale)
ratios_sum = np.sum(ratios_exp)
jump_probs = ratios_exp/ratios_sum
cum_probs = np.cumsum(jump_probs)
MrkvNow = np.searchsorted(cum_probs,draws[-1])
loops += 1
# Make the Markov state history longer by act_T_orig periods
if loops >= loops_max:
go = False
print('makeMrkvHist reached maximum number of loops without generating a valid sequence!')
else:
MrkvNow_new = np.zeros(self.act_T_orig,dtype=int)
MrkvNow_hist = np.concatenate((MrkvNow_hist,MrkvNow_new))
act_T += self.act_T_orig
# Store the results as attributes of self
self.MrkvNow_hist = MrkvNow_hist
self.act_T = act_T | 0.009457 |
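The jump-to-an-underrepresented-state step near the end of the loop can be read in isolation: it is a softmax over how far each state's empirical visit share falls below its long-run probability. A standalone sketch of that computation, with made-up distributions for illustration:

import numpy as np

def logit_jump_probs(LR_dstn, emp_dstn, logit_scale=0.2):
    """Probability of jumping to each Markov state, favoring states whose
    empirical visit frequency falls short of their long-run probability."""
    ratios = LR_dstn / emp_dstn              # >1 means the state is underrepresented
    ratios_adj = ratios - np.max(ratios)     # shift for numerical stability
    ratios_exp = np.exp(ratios_adj / logit_scale)
    return ratios_exp / np.sum(ratios_exp)

# Example: state 2 has been visited far less often than its long-run share.
LR_dstn = np.array([0.5, 0.3, 0.2])
emp_dstn = np.array([0.55, 0.35, 0.10])
print(logit_jump_probs(LR_dstn, emp_dstn))  # probability mass concentrates on state 2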
def json_tuple(col, *fields):
"""Creates a new row for a json column according to the given field names.
:param col: string column in json format
:param fields: list of fields to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
return Column(jc) | 0.00597 |
def find_invalid_venues(all_items):
"""Find venues assigned slots that aren't on the allowed list
of days."""
venues = {}
for item in all_items:
valid = False
item_days = list(item.venue.days.all())
for slot in item.slots.all():
for day in item_days:
if day == slot.get_day():
valid = True
break
if not valid:
venues.setdefault(item.venue, [])
venues[item.venue].append(item)
return venues.items() | 0.001838 |
def backing_type_for(value):
"""Returns the DynamoDB backing type for a given python value's type
::
4 -> 'N'
['x', 3] -> 'L'
        {2, 4} -> 'NS'
"""
if isinstance(value, str):
vtype = "S"
elif isinstance(value, bytes):
vtype = "B"
# NOTE: numbers.Number check must come **AFTER** bool check since isinstance(True, numbers.Number)
elif isinstance(value, bool):
vtype = "BOOL"
elif isinstance(value, numbers.Number):
vtype = "N"
elif isinstance(value, dict):
vtype = "M"
elif isinstance(value, list):
vtype = "L"
elif isinstance(value, set):
if not value:
vtype = "SS" # doesn't matter, Set(x) should dump an empty set the same for all x
else:
inner = next(iter(value))
if isinstance(inner, str):
vtype = "SS"
elif isinstance(inner, bytes):
vtype = "BS"
elif isinstance(inner, numbers.Number):
vtype = "NS"
else:
raise ValueError(f"Unknown set type for inner value {inner!r}")
else:
raise ValueError(f"Can't dump unexpected type {type(value)!r} for value {value!r}")
return vtype | 0.004286 |
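Assuming `backing_type_for` above is in scope, a few illustrative calls covering its branches (note that a set of numbers maps to 'NS', since the inner-type check runs on one element of the set):

# Illustrative calls; these mirror the branches in backing_type_for().
assert backing_type_for("hello") == "S"       # string
assert backing_type_for(b"raw") == "B"        # bytes
assert backing_type_for(True) == "BOOL"       # bool is checked before Number
assert backing_type_for(4) == "N"             # number
assert backing_type_for({"a": 1}) == "M"      # mapping
assert backing_type_for(["x", 3]) == "L"      # list
assert backing_type_for({2, 4}) == "NS"       # set of numbers
assert backing_type_for({"x", "y"}) == "SS"   # set of strings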
def fromElement(cls, elem):
"""
Read properties from a MetaDataVersion element
:param lxml.etree._Element elem: Source etree Element
"""
self = cls()
self.oid = elem.get("OID")
self.name = elem.get("Name")
return self | 0.007042 |
def _write_model(self, specification, specification_set):
""" Write autogenerate specification file
"""
filename = "vspk/%s%s.cs" % (self._class_prefix, specification.entity_name)
override_content = self._extract_override_content(specification.entity_name)
superclass_name = "RestObject"
defaults = {}
section = specification.entity_name
if self.attrs_defaults.has_section(section):
for attribute in self.attrs_defaults.options(section):
defaults[attribute] = self.attrs_defaults.get(section, attribute)
self.write(destination=self.output_directory,
filename=filename,
template_name="model.cs.tpl",
specification=specification,
specification_set=specification_set,
version=self.api_version,
name=self._name,
class_prefix=self._class_prefix,
product_accronym=self._product_accronym,
override_content=override_content,
superclass_name=superclass_name,
header=self.header_content,
version_string=self._api_version_string,
package_name=self._package_name,
attribute_defaults=defaults)
return (filename, specification.entity_name) | 0.003561 |
def __save_plots_of_the_current_page(self, section, page, output_path):
"""This method saves plots in the appropriate section folder.
As a consequence two plots cannot have the same name within the same section."""
for key in self.sections[section].pages[page].elements.keys():
print(section, page, key) # debug print
buf_temp = self.sections[section].pages[page].elements[key].plot_object
            if buf_temp is not None:
                plot_path = os.path.join(output_path, "output", self.name, "site", section, key + ".svg")
                try:
                    my_file = open(plot_path, "wb")
                    my_file.write(buf_temp)
                    my_file.close()
                except Exception as e:
                    print(e)
                    print("In {section} and {page}, an element exists without a valid plot object.".format(section=section, page=page)) | 0.01996 |
def primers(self):
"""Setup and create threads for ePCR"""
# Create the threads for the ePCR analysis
for sample in self.metadata:
if sample.general.bestassemblyfile != 'NA':
threads = Thread(target=self.epcr, args=())
threads.setDaemon(True)
threads.start()
for sample in self.metadata:
if sample.general.bestassemblyfile != 'NA':
setattr(sample, self.analysistype, GenObject())
# Get the primers ready
try:
sample[self.analysistype].primers = glob(os.path.join(self.reffilepath,
self.analysistype,
sample.general.referencegenus,
'primers',
'*.txt'))[0]
# Find the name of the probe file
sample[self.analysistype].probes = glob(os.path.join(self.reffilepath,
self.analysistype,
sample.general.referencegenus,
'probes',
'*.fa'))[0]
# Create the BLAST database of the probes (if necessary)
self.makeblastdb(sample[self.analysistype].probes)
# Initialise a list to store the names of the targets
sample[self.analysistype].targets = list()
# Open the primer file, and read the names of the targets into a list
with open(sample[self.analysistype].primers, 'r') as primerfile:
for line in primerfile:
sample[self.analysistype].targets.append(line.split('\t')[0])
# Organisms without primer/probe files will fail. Populate metadata with 'NA' values
except IndexError:
sample[self.analysistype].primers = 'NA'
sample[self.analysistype].probes = 'NA'
# Only try to process organisms with primer files
if sample[self.analysistype].primers != 'NA':
# Make the output path
sample[self.analysistype].reportdir = os.path.join(sample.general.outputdirectory,
self.analysistype)
make_path(sample[self.analysistype].reportdir)
# Set the base name of the output file
outfile = sample[self.analysistype].reportdir + sample.name
# Set the hashing and mapping commands
sample.commands.famap = 'famap -b {}.famap {}.fasta'.format(outfile, sample.general.filenoext)
sample.commands.fahash = 'fahash -b {}.hash {}.famap'.format(outfile, outfile)
# re-PCR uses the subtyping primers list to search the contigs file using the following parameters
# -S {hash file} (Perform STS lookup using hash-file), -r + (Enable/disable reverse STS lookup)
# -m 10000 (Set variability for STS size for lookup),
# -n 1 (Set max allowed mismatches per primer for lookup)
# -g 0 (Set max allowed indels per primer for lookup),
# -G (Print alignments in comments), -o {output file}
sample.commands.epcr = 're-PCR -S {}.hash -r + -m 10000 -n 2 -g 0 -G -q -o {}.txt {}' \
.format(outfile, outfile, sample[self.analysistype].primers)
# Add the variables to the queue
self.epcrqueue.put((sample, outfile))
self.epcrqueue.join() | 0.005855 |
def check_resources(self, number):
'''Check <number> of URLs that have not been (recently) checked'''
if not current_app.config.get('LINKCHECKING_ENABLED'):
log.error('Link checking is disabled.')
return
base_pipeline = [
{'$match': {'resources': {'$gt': []}}},
{'$project': {'resources._id': True,
'resources.extras.check:date': True}},
{'$unwind': '$resources'},
]
# unchecked resources
pipeline = base_pipeline + [
{'$match': {'resources.extras.check:date': {'$eq': None}}},
{'$limit': number}
]
resources = list(Dataset.objects.aggregate(*pipeline))
# not recently checked resources
slots_left = number - len(resources)
if slots_left:
pipeline = base_pipeline + [
{'$match': {'resources.extras.check:date': {'$ne': None}}},
{'$sort': {'resources.extras.check:date': 1}},
{'$limit': slots_left}
]
resources += list(Dataset.objects.aggregate(*pipeline))
nb_resources = len(resources)
log.info('Checking %s resources...', nb_resources)
for idx, dataset_resource in enumerate(resources):
dataset_obj = Dataset.objects.get(id=dataset_resource['_id'])
resource_id = dataset_resource['resources']['_id']
rid = uuid.UUID(resource_id)
resource_obj = get_by(dataset_obj.resources, 'id', rid)
log.info('Checking resource %s (%s/%s)',
resource_id, idx + 1, nb_resources)
if resource_obj.need_check():
check_resource(resource_obj)
else:
log.info("--> Skipping this resource, cache is fresh enough.")
log.info('Done.') | 0.000587 |
def load_jws_from_request(req):
"""
    This function performs almost all of the bitjws authentication tasks.
If valid bitjws message and signature headers are found,
then the request will be assigned 'jws_header' and 'jws_payload' attributes.
:param req: The flask request to load the jwt claim set from.
"""
current_app.logger.info("loading request with headers: %s" % req.headers)
if (("content-type" in req.headers and
"application/jose" in req.headers['content-type']) or
("Content-Type" in req.headers and
"application/jose" in req.headers['Content-Type'])):
path = urlparse.urlsplit(req.url).path
for rule in current_app.url_map.iter_rules():
if path == rule.rule and req.method in rule.methods:
dedata = req.get_data().decode('utf8')
bp = current_app.bitjws.basepath
req.jws_header, req.jws_payload = \
bitjws.validate_deserialize(dedata, requrl=bp + rule.rule)
break | 0.001923 |
def mergecopy(src, dest):
"""
copy2, but only if the destination isn't up to date
"""
if os.path.exists(dest) and os.stat(dest).st_mtime >= os.stat(src).st_mtime:
return
copy2(src, dest) | 0.009346 |
def renderHTTP(self, context):
"""
Render C{self.resource} through a L{StylesheetRewritingRequestWrapper}.
"""
request = IRequest(context)
request = StylesheetRewritingRequestWrapper(
request, self.installedOfferingNames, self.rootURL)
context.remember(request, IRequest)
return self.resource.renderHTTP(context) | 0.005263 |
def set_mode(self, mode):
"""Configure how this console will react to the cursor writing past the
        end of the console.
This is for methods that use the virtual cursor, such as
:any:`print_str`.
Args:
mode (Text): The mode to set.
Possible settings are:
- 'error' - A TDLError will be raised once the cursor
reaches the end of the console. Everything up until
the error will still be drawn.
This is the default setting.
- 'scroll' - The console will scroll up as stuff is
written to the end.
You can restrict the region with :any:`tdl.Window` when
doing this.
        .. seealso:: :any:`write`, :any:`print_str`
"""
MODES = ['error', 'scroll']
if mode.lower() not in MODES:
raise TDLError('mode must be one of %s, got %s' % (MODES, repr(mode)))
self._scrollMode = mode.lower() | 0.00304 |
def netconf_state_schemas_schema_version(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
netconf_state = ET.SubElement(config, "netconf-state", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring")
schemas = ET.SubElement(netconf_state, "schemas")
schema = ET.SubElement(schemas, "schema")
identifier_key = ET.SubElement(schema, "identifier")
identifier_key.text = kwargs.pop('identifier')
format_key = ET.SubElement(schema, "format")
format_key.text = kwargs.pop('format')
version = ET.SubElement(schema, "version")
version.text = kwargs.pop('version')
callback = kwargs.pop('callback', self._callback)
return callback(config) | 0.003881 |
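The function above only assembles an XML tree and hands it to a callback. A standalone sketch of the same element nesting using only the standard library, with made-up identifier/format/version values, to show the document that results:

import xml.etree.ElementTree as ET

# Build the same <config>/<netconf-state>/<schemas>/<schema> nesting by hand.
config = ET.Element("config")
netconf_state = ET.SubElement(config, "netconf-state",
                              xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring")
schemas = ET.SubElement(netconf_state, "schemas")
schema = ET.SubElement(schemas, "schema")
ET.SubElement(schema, "identifier").text = "ietf-interfaces"   # example value
ET.SubElement(schema, "format").text = "yang"                  # example value
ET.SubElement(schema, "version").text = "2014-05-08"           # example value

print(ET.tostring(config, encoding="unicode"))
# <config><netconf-state xmlns="..."><schemas><schema>...</schema></schemas></netconf-state></config>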
def DomainTokensCreate(self, domain_id, amount):
"""
This method creates tokens that can be used by users who want to join the domain.
Tokens are automatically deleted after usage.
Only domain managers can create tokens.
"""
if self.__SenseApiCall__('/domains/{0}/tokens.json'.format(domain_id), 'POST', parameters = {"amount":amount}):
return True
else:
self.__error__ = "api call unsuccessful"
return False | 0.017176 |
def all_dimensions_names(self):
""" Returns all the dimensions names, including the names of sub_fields
and their corresponding packed fields
"""
return frozenset(self.array.dtype.names + tuple(self.sub_fields_dict.keys())) | 0.011765 |
def usages_list(location, **kwargs):
'''
.. versionadded:: 2019.2.0
List subscription network usage for a location.
:param location: The Azure location to query for network usage.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.usages_list westus
'''
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
result = __utils__['azurearm.paged_object_to_list'](netconn.usages.list(location))
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result | 0.003145 |
def _get_func(cls, source_ver, target_ver):
"""
Return exactly one function to convert from source to target
"""
matches = (
func for func in cls._upgrade_funcs
if func.source == source_ver and func.target == target_ver
)
try:
match, = matches
except ValueError:
raise ValueError(
f"No migration from {source_ver} to {target_ver}")
return match | 0.039894 |
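A standalone sketch of the matching pattern used above: registered upgrade functions carry `source` and `target` attributes, and unpacking a one-element generator (`match, = matches`) enforces that exactly one function fits. The decorator-style registration below is an assumption for illustration, not the class's actual API:

_upgrade_funcs = []

def migrates(source, target):
    """Register an upgrade function together with its source/target versions."""
    def decorator(func):
        func.source, func.target = source, target
        _upgrade_funcs.append(func)
        return func
    return decorator

@migrates(1, 2)
def upgrade_1_to_2(doc):
    return {**doc, "schema": 2}

def get_func(source_ver, target_ver):
    matches = (f for f in _upgrade_funcs
               if f.source == source_ver and f.target == target_ver)
    try:
        match, = matches  # raises ValueError unless exactly one function matches
    except ValueError:
        raise ValueError(f"No migration from {source_ver} to {target_ver}")
    return match

print(get_func(1, 2).__name__)  # -> upgrade_1_to_2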
def start_depth_socket(self, symbol, callback, depth=None):
"""Start a websocket for symbol market depth returning either a diff or a partial book
https://github.com/binance-exchange/binance-official-api-docs/blob/master/web-socket-streams.md#partial-book-depth-streams
:param symbol: required
:type symbol: str
:param callback: callback function to handle messages
:type callback: function
    :param depth: optional number of depth entries to return, default None. If passed, returns a partial book instead of a diff
:type depth: str
:returns: connection key string if successful, False otherwise
Partial Message Format
.. code-block:: python
{
"lastUpdateId": 160, # Last update ID
"bids": [ # Bids to be updated
[
"0.0024", # price level to be updated
"10", # quantity
[] # ignore
]
],
"asks": [ # Asks to be updated
[
"0.0026", # price level to be updated
"100", # quantity
[] # ignore
]
]
}
Diff Message Format
.. code-block:: python
{
"e": "depthUpdate", # Event type
"E": 123456789, # Event time
"s": "BNBBTC", # Symbol
"U": 157, # First update ID in event
"u": 160, # Final update ID in event
"b": [ # Bids to be updated
[
"0.0024", # price level to be updated
"10", # quantity
[] # ignore
]
],
"a": [ # Asks to be updated
[
"0.0026", # price level to be updated
"100", # quantity
[] # ignore
]
]
}
"""
socket_name = symbol.lower() + '@depth'
if depth and depth != '1':
socket_name = '{}{}'.format(socket_name, depth)
return self._start_socket(socket_name, callback) | 0.001596 |
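A sketch of a callback that consumes the diff message format documented above. Only the message shape from the docstring is assumed, so the example feeds the handler a hand-built dict rather than a live socket:

def handle_depth_message(msg):
    """Print price-level updates from a depthUpdate event."""
    if msg.get("e") != "depthUpdate":
        return
    for price, qty, *_ in msg["b"]:
        print(f"{msg['s']} bid {price} -> {qty}")
    for price, qty, *_ in msg["a"]:
        print(f"{msg['s']} ask {price} -> {qty}")

# Hand-built message mirroring the documented diff format.
sample = {
    "e": "depthUpdate", "E": 123456789, "s": "BNBBTC",
    "U": 157, "u": 160,
    "b": [["0.0024", "10", []]],
    "a": [["0.0026", "100", []]],
}
handle_depth_message(sample)
# BNBBTC bid 0.0024 -> 10
# BNBBTC ask 0.0026 -> 100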
def register_dde_task(self, *args, **kwargs):
"""Register a Dde task."""
kwargs["task_class"] = DdeTask
return self.register_task(*args, **kwargs) | 0.011765 |
def alpha(reliability_data=None, value_counts=None, value_domain=None, level_of_measurement='interval',
dtype=np.float64):
"""Compute Krippendorff's alpha.
See https://en.wikipedia.org/wiki/Krippendorff%27s_alpha for more information.
Parameters
----------
reliability_data : array_like, with shape (M, N)
Reliability data matrix which has the rate the i coder gave to the j unit, where M is the number of raters
and N is the unit count.
Missing rates are represented with `np.nan`.
If it's provided then `value_counts` must not be provided.
value_counts : ndarray, with shape (N, V)
Number of coders that assigned a certain value to a determined unit, where N is the number of units
and V is the value count.
If it's provided then `reliability_data` must not be provided.
value_domain : array_like, with shape (V,)
Possible values the units can take.
If the level of measurement is not nominal, it must be ordered.
If `reliability_data` is provided, then the default value is the ordered list of unique rates that appear.
Else, the default value is `list(range(V))`.
level_of_measurement : string or callable
        Stevens' level of measurement of the variable.
It must be one of 'nominal', 'ordinal', 'interval', 'ratio' or a callable.
dtype : data-type
Result and computation data-type.
Returns
-------
alpha : `dtype`
Scalar value of Krippendorff's alpha of type `dtype`.
Examples
--------
>>> reliability_data = [[np.nan, np.nan, np.nan, np.nan, np.nan, 3, 4, 1, 2, 1, 1, 3, 3, np.nan, 3],
... [1, np.nan, 2, 1, 3, 3, 4, 3, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
... [np.nan, np.nan, 2, 1, 3, 4, 4, np.nan, 2, 1, 1, 3, 3, np.nan, 4]]
>>> print(round(alpha(reliability_data=reliability_data, level_of_measurement='nominal'), 6))
0.691358
>>> print(round(alpha(reliability_data=reliability_data, level_of_measurement='interval'), 6))
0.810845
>>> value_counts = np.array([[1, 0, 0, 0],
... [0, 0, 0, 0],
... [0, 2, 0, 0],
... [2, 0, 0, 0],
... [0, 0, 2, 0],
... [0, 0, 2, 1],
... [0, 0, 0, 3],
... [1, 0, 1, 0],
... [0, 2, 0, 0],
... [2, 0, 0, 0],
... [2, 0, 0, 0],
... [0, 0, 2, 0],
... [0, 0, 2, 0],
... [0, 0, 0, 0],
... [0, 0, 1, 1]])
>>> print(round(alpha(value_counts=value_counts, level_of_measurement='nominal'), 6))
0.691358
>>> # The following examples were extracted from
>>> # https://www.statisticshowto.datasciencecentral.com/wp-content/uploads/2016/07/fulltext.pdf, page 8.
>>> reliability_data = [[1, 2, 3, 3, 2, 1, 4, 1, 2, np.nan, np.nan, np.nan],
... [1, 2, 3, 3, 2, 2, 4, 1, 2, 5, np.nan, 3.],
... [np.nan, 3, 3, 3, 2, 3, 4, 2, 2, 5, 1, np.nan],
... [1, 2, 3, 3, 2, 4, 4, 1, 2, 5, 1, np.nan]]
>>> print(round(alpha(reliability_data, level_of_measurement='ordinal'), 3))
0.815
>>> print(round(alpha(reliability_data, level_of_measurement='ratio'), 3))
0.797
"""
if (reliability_data is None) == (value_counts is None):
raise ValueError("Either reliability_data or value_counts must be provided, but not both.")
# Don't know if it's a list or numpy array. If it's the latter, the truth value is ambiguous. So, ask for None.
if value_counts is None:
if type(reliability_data) is not np.ndarray:
reliability_data = np.array(reliability_data)
value_domain = value_domain or np.unique(reliability_data[~np.isnan(reliability_data)])
value_counts = _reliability_data_to_value_counts(reliability_data, value_domain)
else: # elif reliability_data is None
if value_domain:
assert value_counts.shape[1] == len(value_domain), \
"The value domain should be equal to the number of columns of value_counts."
else:
value_domain = tuple(range(value_counts.shape[1]))
distance_metric = _distance_metric(level_of_measurement)
o = _coincidences(value_counts, value_domain, dtype=dtype)
n_v = np.sum(o, axis=0)
n = np.sum(n_v)
e = _random_coincidences(value_domain, n, n_v)
d = _distances(value_domain, distance_metric, n_v)
return 1 - np.sum(o * d) / np.sum(e * d) | 0.004364 |
def run_band_structure(self,
paths,
with_eigenvectors=False,
with_group_velocities=False,
is_band_connection=False,
path_connections=None,
labels=None,
is_legacy_plot=False):
"""Run phonon band structure calculation.
Parameters
----------
paths : List of array_like
Sets of qpoints that can be passed to phonopy.set_band_structure().
Numbers of qpoints can be different.
shape of each array_like : (qpoints, 3)
with_eigenvectors : bool, optional
Flag whether eigenvectors are calculated or not. Default is False.
with_group_velocities : bool, optional
Flag whether group velocities are calculated or not. Default is
False.
is_band_connection : bool, optional
Flag whether each band is connected or not. This is achieved by
            comparing similarity of eigenvectors of neighboring points. Sometimes
this fails. Default is False.
path_connections : List of bool, optional
This is only used in graphical plot of band structure and gives
whether each path is connected to the next path or not,
i.e., if False, there is a jump of q-points. Number of elements is
the same at that of paths. Default is None.
labels : List of str, optional
This is only used in graphical plot of band structure and gives
labels of end points of each path. The number of labels is equal
to (2 - np.array(path_connections)).sum().
is_legacy_plot: bool, optional
This makes the old style band structure plot. Default is False.
"""
if self._dynamical_matrix is None:
msg = ("Dynamical matrix has not yet built.")
raise RuntimeError(msg)
if with_group_velocities:
if self._group_velocity is None:
self._set_group_velocity()
group_velocity = self._group_velocity
else:
group_velocity = None
self._band_structure = BandStructure(
paths,
self._dynamical_matrix,
with_eigenvectors=with_eigenvectors,
is_band_connection=is_band_connection,
group_velocity=group_velocity,
path_connections=path_connections,
labels=labels,
is_legacy_plot=is_legacy_plot,
factor=self._factor) | 0.003419 |
def __get_inferred_data_res_2(v=None, calc=True):
"""
Use a list of values to calculate m/m/m/m. Resolution values or otherwise.
:param numpy array v: Values
:param bool calc: If false, we don't need calculations
:return dict: Results of calculation
"""
# Base: If something goes wrong, or if there are no values, then use "NaN" placeholders.
d = {
"hasMinValue": "nan", "hasMaxValue": "nan",
"hasMeanValue": "nan", "hasMedianValue": "nan",
}
try:
if calc:
_min = np.nanmin(v)
_max = np.nanmax(v)
_mean = np.nanmean(v)
_med = np.nanmedian(v)
if np.isnan(_min):
_min = "nan"
else:
_min = abs(_min)
            if np.isnan(_max):
                _max = "nan"
            else:
                _max = abs(_max)
            if np.isnan(_mean):
                _mean = "nan"
            else:
                _mean = abs(_mean)
            if np.isnan(_med):
                _med = "nan"
            else:
                _med = abs(_med)
d = {
"hasMinValue": _min,
"hasMaxValue": _max,
"hasMeanValue": _mean,
"hasMedianValue": _med
}
except Exception as e:
logger_inferred_data.error("get_inferred_data_res_2: {}".format(e))
return d | 0.001426 |
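The helper above reduces to NaN-aware summary statistics with absolute values applied. A standalone check of the underlying NumPy calls on a small sample:

import numpy as np

v = np.array([2.0, -3.0, np.nan, 5.0])
# The nan* variants ignore missing values instead of propagating NaN.
print(abs(np.nanmin(v)), abs(np.nanmax(v)), abs(np.nanmean(v)), abs(np.nanmedian(v)))
# -> 3.0 5.0 1.3333333333333333 2.0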
def _get_cache_dates(self):
"""
        Get a list of dates (:py:class:`datetime.datetime`) present in cache,
beginning with the longest contiguous set of dates that isn't missing
more than one date in series.
:return: list of datetime objects for contiguous dates in cache
:rtype: ``list``
"""
all_dates = self.cache.get_dates_for_project(self.project_name)
dates = []
last_date = None
        for val in sorted(all_dates):
            if last_date is not None and val - last_date > timedelta(hours=48):
                # reset dates to start from here
                logger.warning("Last cache date was %s, current date is %s; "
                               "delta is too large. Starting cache date series "
                               "at current date.", last_date, val)
                dates = []
            last_date = val
            dates.append(val)
# find the first download record, and only look at dates after that
for idx, cache_date in enumerate(dates):
data = self._cache_get(cache_date)
if not self._is_empty_cache_record(data):
logger.debug("First cache date with data: %s", cache_date)
return dates[idx:]
return dates | 0.002208 |
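The contiguity rule in `_get_cache_dates` (restart the running series whenever two consecutive cached dates are more than 48 hours apart) can be shown on its own. A minimal sketch assuming a plain sorted list of datetimes rather than the cache object:

from datetime import datetime, timedelta

def contiguous_tail(all_dates, max_gap=timedelta(hours=48)):
    """Return the trailing run of dates with no gap larger than max_gap."""
    dates = []
    last_date = None
    for val in sorted(all_dates):
        if last_date is not None and val - last_date > max_gap:
            dates = []  # gap too large: restart the series here
        last_date = val
        dates.append(val)
    return dates

cached = [datetime(2024, 1, 1), datetime(2024, 1, 2),
          datetime(2024, 1, 10), datetime(2024, 1, 11)]
print(contiguous_tail(cached))  # only the run starting 2024-01-10 survives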