text (string, length 78 to 104k) | score (float64, 0 to 0.18)
---|---
def set_feature(dev, feature, recipient = None):
r"""Set/enable a specific feature.
dev is the Device object to which the request will be
sent to.
feature is the feature you want to enable.
The recipient can be None (on which the status will be queried
from the device), an Interface or Endpoint descriptors.
"""
bmRequestType, wIndex = _parse_recipient(recipient, util.CTRL_OUT)
dev.ctrl_transfer(bmRequestType = bmRequestType,
bRequest = 0x03,
wIndex = wIndex,
wValue = feature) | 0.018803 |
def load_waypoints(self, filename):
'''load waypoints from a file'''
self.wploader.target_system = self.target_system
self.wploader.target_component = self.target_component
try:
self.wploader.load(filename)
except Exception as msg:
print("Unable to load %s - %s" % (filename, msg))
return
print("Loaded %u waypoints from %s" % (self.wploader.count(), filename))
self.send_all_waypoints() | 0.006263 |
def create_floating_ip(kwargs=None, call=None):
'''
Create a new floating IP
.. versionadded:: 2016.3.0
CLI Examples:
.. code-block:: bash
salt-cloud -f create_floating_ip my-digitalocean-config region='NYC2'
salt-cloud -f create_floating_ip my-digitalocean-config droplet_id='1234567'
'''
if call != 'function':
log.error(
'The create_floating_ip function must be called with -f or --function.'
)
return False
if not kwargs:
kwargs = {}
if 'droplet_id' in kwargs:
result = query(method='floating_ips',
args={'droplet_id': kwargs['droplet_id']},
http_method='post')
return result
elif 'region' in kwargs:
result = query(method='floating_ips',
args={'region': kwargs['region']},
http_method='post')
return result
else:
log.error('A droplet_id or region is required.')
return False | 0.006667 |
def get_arg_value(state, host, arg):
'''
Runs string arguments through the jinja2 templating system with a state and
host. Used to avoid string formatting in deploy operations which result in
one operation per host/variable. By parsing the commands after we generate
the ``op_hash``, multiple command variations can fall under one op.
'''
if isinstance(arg, six.string_types):
data = {
'host': host,
'inventory': state.inventory,
}
try:
return get_template(arg, is_string=True).render(data)
except (TemplateSyntaxError, UndefinedError) as e:
raise PyinfraError('Error in template string: {0}'.format(e))
elif isinstance(arg, list):
return [get_arg_value(state, host, value) for value in arg]
elif isinstance(arg, tuple):
return tuple(get_arg_value(state, host, value) for value in arg)
elif isinstance(arg, dict):
return {
key: get_arg_value(state, host, value)
for key, value in six.iteritems(arg)
}
return arg | 0.000912 |
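A minimal illustration of the string branch above, using a plain jinja2 Template as a stand-in for pyinfra's get_template helper and a hypothetical host object with a name attribute:

from jinja2 import Template

class FakeHost:  # hypothetical stand-in for a pyinfra host
    name = 'web-01'

# The string argument is rendered per host, so one op hash can cover many hosts.
rendered = Template('/etc/{{ host.name }}.conf').render({'host': FakeHost()})
assert rendered == '/etc/web-01.conf'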
def _imputeMissing(X, center=True, unit=True, betaNotUnitVariance=False, betaA=1.0, betaB=1.0):
'''
fill in missing values in the SNP matrix by the mean value
optionally center the data and unit-variance it
Args:
X: scipy.array of SNP values. If dtype=='int8' the missing values are -9,
otherwise the missing values are scipy.nan
center: Boolean indicator if data should be mean centered
Not supported in C-based parser
unit: Boolean indicator if data should be normalized to have unit variance
Not supported in C-based parser
betaNotUnitVariance: use Beta(betaA,betaB) standardization instead of unit variance
(only with C-based parser) (default: False)
betaA: shape parameter for Beta(betaA,betaB) standardization (only with C-based parser)
betaB: scale parameter for Beta(betaA,betaB) standardization (only with C-based parser)
Returns:
X: scipy.array of standardized SNPs with scipy.float64 values
'''
typeX=X.dtype
if typeX!=SP.int8:
iNanX = X!=X
else:
iNanX = X==-9
if iNanX.any() or betaNotUnitVariance:
if cparser:
print("using C-based imputer")
if X.flags["C_CONTIGUOUS"] or typeX!=SP.float32:
X = SP.array(X, order="F", dtype=SP.float32)
if typeX==SP.int8:
X[iNanX]=SP.nan
parser.standardize(X,betaNotUnitVariance=betaNotUnitVariance,betaA=betaA,betaB=betaB)
X=SP.array(X,dtype=SP.float64)
else:
parser.standardize(X,betaNotUnitVariance=betaNotUnitVariance,betaA=betaA,betaB=betaB)
X=SP.array(X,dtype=SP.float64)
else:
if betaNotUnitVariance:
raise NotImplementedError("Beta(betaA,betaB) standardization only in C-based parser, but not found")
nObsX = (~iNanX).sum(0)
if typeX!=SP.float64:
X=SP.array(X,dtype=SP.float64)
X[iNanX] = 0.0
sumX = (X).sum(0)
meanX = sumX/nObsX
if center:
X-=meanX
X[iNanX] = 0.0
X_=X
else:
mean=SP.tile(meanX,(X.shape[0],1))
X[iNanX]=mean[iNanX]
X_=X-mean
if unit:
stdX = SP.sqrt((X_*X_).sum(0)/nObsX)
stdX[stdX==0.0]=1.0
X/=stdX
else:
if X.dtype!=SP.float64:
X=SP.array(X,dtype=SP.float64)
if center:
X-= X.mean(axis=0)
if unit:
stdX= X.std(axis=0)
stdX[stdX==0.0]=1.0
X/=stdX
return X | 0.017027 |
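A numpy-only sketch of what the fallback (non-C-parser) branch does, assuming a small float matrix with NaN markers: impute each missing value with its column mean, then mean-center and scale to unit variance.

import numpy as np

X = np.array([[0., 1., np.nan],
              [2., np.nan, 1.],
              [1., 1., 0.]])
col_mean = np.nanmean(X, axis=0)   # per-SNP mean ignoring missing values
rows, cols = np.where(np.isnan(X))
X[rows, cols] = col_mean[cols]     # mean imputation
X -= X.mean(axis=0)                # center
std = X.std(axis=0)
std[std == 0.0] = 1.0              # avoid division by zero
X /= std                           # unit variance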
def add_operation(self, operation_type, operation, mode=None):
"""Add an operation to the version
:param mode: Name of the mode in which the operation is executed
:type mode: str
:param operation_type: one of 'pre', 'post'
:type operation_type: str
:param operation: the operation to add
:type operation: :class:`marabunta.model.Operation`
"""
version_mode = self._get_version_mode(mode=mode)
if operation_type == 'pre':
version_mode.add_pre(operation)
elif operation_type == 'post':
version_mode.add_post(operation)
else:
raise ConfigurationError(
u"Type of operation must be 'pre' or 'post', got %s" %
(operation_type,)
) | 0.0025 |
def _convert_reftype_to_jaeger_reftype(ref):
"""Convert opencensus reference types to jaeger reference types."""
if ref == link_module.Type.CHILD_LINKED_SPAN:
return jaeger.SpanRefType.CHILD_OF
if ref == link_module.Type.PARENT_LINKED_SPAN:
return jaeger.SpanRefType.FOLLOWS_FROM
return None | 0.003096 |
def chol(A):
"""
Calculate the lower triangular matrix of the Cholesky decomposition of
a symmetric, positive-definite matrix.
"""
A = np.array(A)
assert A.shape[0] == A.shape[1], "Input matrix must be square"
L = [[0.0] * len(A) for _ in range(len(A))]
for i in range(len(A)):
for j in range(i + 1):
s = sum(L[i][k] * L[j][k] for k in range(j))
L[i][j] = (
(A[i][i] - s) ** 0.5 if (i == j) else (1.0 / L[j][j] * (A[i][j] - s))
)
return np.array(L) | 0.003559 |
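A quick check of chol on a small symmetric positive-definite matrix (assumes numpy is imported as np, as in the function body):

A = [[4.0, 2.0],
     [2.0, 3.0]]
L = chol(A)                     # [[2.0, 0.0], [1.0, 1.4142...]]
assert np.allclose(L @ L.T, A)  # lower-triangular factor reproduces A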
def apns_send_message(registration_id, alert, application_id=None, certfile=None, **kwargs):
"""
Sends an APNS notification to a single registration_id.
This will send the notification as form data.
If sending multiple notifications, it is more efficient to use
apns_send_bulk_message()
    Note that if set, alert should always be a string. If it is not set,
    it won't be included in the notification. You will need to pass None
    to this for silent notifications.
"""
try:
_apns_send(
registration_id, alert, application_id=application_id,
certfile=certfile, **kwargs
)
except apns2_errors.APNsException as apns2_exception:
if isinstance(apns2_exception, apns2_errors.Unregistered):
device = models.APNSDevice.objects.get(registration_id=registration_id)
device.active = False
device.save()
raise APNSServerError(status=apns2_exception.__class__.__name__) | 0.024831 |
def previous_page(self, max_=None):
"""
Return a query set which requests the page before this response.
:param max_: Maximum number of items to return.
:type max_: :class:`int` or :data:`None`
:rtype: :class:`ResultSetMetadata`
:return: A new request set up to request the previous page.
Must be called on a result set which has :attr:`first` set.
"""
result = type(self)()
result.before = Before(self.first.value)
result.max_ = max_
return result | 0.003656 |
def pkcs12_kdf(hash_algorithm, password, salt, iterations, key_length, id_):
"""
KDF from RFC7292 appendix b.2 - https://tools.ietf.org/html/rfc7292#page-19
:param hash_algorithm:
The string name of the hash algorithm to use: "md5", "sha1", "sha224",
"sha256", "sha384", "sha512"
:param password:
        A byte string of the password to use as an input to the KDF
:param salt:
A cryptographic random byte string
:param iterations:
        The number of iterations to use when deriving the key
:param key_length:
The length of the desired key in bytes
:param id_:
The ID of the usage - 1 for key, 2 for iv, 3 for mac
:return:
The derived key as a byte string
"""
if not isinstance(password, byte_cls):
raise TypeError(pretty_message(
'''
password must be a byte string, not %s
''',
type_name(password)
))
if not isinstance(salt, byte_cls):
raise TypeError(pretty_message(
'''
salt must be a byte string, not %s
''',
type_name(salt)
))
if not isinstance(iterations, int_types):
raise TypeError(pretty_message(
'''
iterations must be an integer, not %s
''',
type_name(iterations)
))
if iterations < 1:
raise ValueError(pretty_message(
'''
iterations must be greater than 0 - is %s
''',
repr(iterations)
))
if not isinstance(key_length, int_types):
raise TypeError(pretty_message(
'''
key_length must be an integer, not %s
''',
type_name(key_length)
))
if key_length < 1:
raise ValueError(pretty_message(
'''
key_length must be greater than 0 - is %s
''',
repr(key_length)
))
if hash_algorithm not in set(['md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512']):
raise ValueError(pretty_message(
'''
hash_algorithm must be one of "md5", "sha1", "sha224", "sha256",
"sha384", "sha512", not %s
''',
repr(hash_algorithm)
))
if id_ not in set([1, 2, 3]):
raise ValueError(pretty_message(
'''
id_ must be one of 1, 2, 3, not %s
''',
repr(id_)
))
utf16_password = password.decode('utf-8').encode('utf-16be') + b'\x00\x00'
algo = getattr(hashlib, hash_algorithm)
# u and v values are bytes (not bits as in the RFC)
u = {
'md5': 16,
'sha1': 20,
'sha224': 28,
'sha256': 32,
'sha384': 48,
'sha512': 64
}[hash_algorithm]
if hash_algorithm in ['sha384', 'sha512']:
v = 128
else:
v = 64
# Step 1
d = chr_cls(id_) * v
# Step 2
s = b''
if salt != b'':
s_len = v * int(math.ceil(float(len(salt)) / v))
while len(s) < s_len:
s += salt
s = s[0:s_len]
# Step 3
p = b''
if utf16_password != b'':
p_len = v * int(math.ceil(float(len(utf16_password)) / v))
while len(p) < p_len:
p += utf16_password
p = p[0:p_len]
# Step 4
i = s + p
# Step 5
c = int(math.ceil(float(key_length) / u))
a = b'\x00' * (c * u)
for num in range(1, c + 1):
# Step 6A
a2 = algo(d + i).digest()
for _ in range(2, iterations + 1):
a2 = algo(a2).digest()
if num < c:
# Step 6B
b = b''
while len(b) < v:
b += a2
b = int_from_bytes(b[0:v]) + 1
# Step 6C
for num2 in range(0, len(i) // v):
start = num2 * v
end = (num2 + 1) * v
i_num2 = i[start:end]
i_num2 = int_to_bytes(int_from_bytes(i_num2) + b)
# Ensure the new slice is the right size
i_num2_l = len(i_num2)
if i_num2_l > v:
i_num2 = i_num2[i_num2_l - v:]
i = i[0:start] + i_num2 + i[end:]
        # Step 7 (one piece at a time)
begin = (num - 1) * u
to_copy = min(key_length, u)
a = a[0:begin] + a2[0:to_copy] + a[begin + to_copy:]
return a[0:key_length] | 0.000448 |
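A hedged usage sketch: deriving a 24-byte encryption key (id_=1) with SHA-1 and 2048 iterations; in practice the salt and iteration count come from the PKCS#12 structure being decoded.

import os

salt = os.urandom(8)  # illustrative only; normally read from the PKCS#12 file
key = pkcs12_kdf('sha1', b'password', salt, 2048, 24, 1)
assert len(key) == 24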
def availableBranches(self):
''' return a list of GithubComponentVersion objects for the tip of each branch
'''
return [
GithubComponentVersion(
'', b[0], b[1], self.name, cache_key=None
) for b in _getBranchHeads(self.repo).items()
] | 0.009804 |
def join(self):
"""Note that the Executor must be close()'d elsewhere,
or join() will never return.
"""
self.inputfeeder_thread.join()
self.pool.join()
self.resulttracker_thread.join()
self.failuretracker_thread.join() | 0.038793 |
def peer_store(key, value, relation_name='cluster'):
"""Store the key/value pair on the named peer relation `relation_name`."""
cluster_rels = relation_ids(relation_name)
if len(cluster_rels) > 0:
cluster_rid = cluster_rels[0]
relation_set(relation_id=cluster_rid,
relation_settings={key: value})
else:
raise ValueError('Unable to detect '
'peer relation {}'.format(relation_name)) | 0.002141 |
def find_neighbor_sites(sites, am, flatten=True, include_input=False,
logic='or'):
r"""
Given a symmetric adjacency matrix, finds all sites that are connected
to the input sites.
Parameters
----------
am : scipy.sparse matrix
The adjacency matrix of the network. Must be symmetrical such that if
sites *i* and *j* are connected, the matrix contains non-zero values
at locations (i, j) and (j, i).
flatten : boolean
If ``True`` (default) the returned result is a compressed array of all
neighbors, or a list of lists with each sub-list containing the
neighbors for each input site. Note that an *unflattened* list might
be slow to generate since it is a Python ``list`` rather than a Numpy
array.
include_input : boolean
If ``False`` (default) the input sites will be removed from the result.
logic : string
Specifies logic to filter the resulting list. Options are:
**'or'** : (default) All neighbors of the input sites. This is also
known as the 'union' in set theory or 'any' in boolean logic. Both
keywords are accepted and treated as 'or'.
**'xor'** : Only neighbors of one and only one input site. This is
useful for finding the sites that are not shared by any of the input
sites. 'exclusive_or' is also accepted.
**'xnor'** : Neighbors that are shared by two or more input sites. This
is equivalent to finding all neighbors with 'or', minus those found
with 'xor', and is useful for finding neighbors that the inputs have
in common. 'nxor' is also accepted.
**'and'** : Only neighbors shared by all input sites. This is also
    known as 'intersection' in set theory and (sometimes) as 'all' in
boolean logic. Both keywords are accepted and treated as 'and'.
Returns
-------
An array containing the neighboring sites filtered by the given logic. If
``flatten`` is ``False`` then the result is a list of lists containing the
neighbors of each input site.
See Also
--------
find_complement
Notes
-----
The ``logic`` options are applied to neighboring sites only, thus it is not
possible to obtain sites that are part of the global set but not neighbors.
    This is because (a) the list of global sites might be very large, and (b) it
is not possible to return a list of neighbors for each input site if global
sites are considered.
"""
if am.format != 'lil':
am = am.tolil(copy=False)
n_sites = am.shape[0]
rows = [am.rows[i] for i in sp.array(sites, ndmin=1)]
if len(rows) == 0:
return []
neighbors = sp.hstack(rows).astype(sp.int64) # Flatten list to apply logic
if logic in ['or', 'union', 'any']:
neighbors = sp.unique(neighbors)
elif logic in ['xor', 'exclusive_or']:
neighbors = sp.unique(sp.where(sp.bincount(neighbors) == 1)[0])
elif logic in ['xnor', 'nxor']:
neighbors = sp.unique(sp.where(sp.bincount(neighbors) > 1)[0])
elif logic in ['and', 'all', 'intersection']:
neighbors = set(neighbors)
[neighbors.intersection_update(i) for i in rows]
neighbors = sp.array(list(neighbors), dtype=sp.int64, ndmin=1)
else:
raise Exception('Specified logic is not implemented')
# Deal with removing inputs or not
mask = sp.zeros(shape=n_sites, dtype=bool)
mask[neighbors] = True
if not include_input:
mask[sites] = False
# Finally flatten or not
if flatten:
neighbors = sp.where(mask)[0]
else:
if (neighbors.size > 0):
for i in range(len(rows)):
vals = sp.array(rows[i], dtype=sp.int64)
rows[i] = vals[mask[vals]]
neighbors = rows
else:
neighbors = [sp.array([], dtype=int) for i in range(len(sites))]
return neighbors | 0.000251 |
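A small sketch of the logic options, assuming a 4-site chain 0-1-2-3 stored as a symmetric scipy.sparse adjacency matrix:

import scipy.sparse as sprs

am = sprs.lil_matrix((4, 4))
for i, j in [(0, 1), (1, 2), (2, 3)]:   # symmetric connections of a chain
    am[i, j] = 1
    am[j, i] = 1

find_neighbor_sites(sites=[1], am=am)                  # -> [0, 2]
find_neighbor_sites(sites=[0, 2], am=am, logic='and')  # -> [1], only shared neighbor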
def set_routing(app, view_data):
"""
apply the routing configuration you've described
example:
view_data = [
("/", "app.IndexController", "index"),
]
1. "/" is receive request path
2. "app.IndexController" is to process the received request controller class path
3. "index" string To generate a URL that refers to the application
"""
routing_modules = convert_routing_module(view_data)
for module in routing_modules:
view = import_string(module.import_path)
app.add_url_rule(module.url, view_func=view.as_view(module.endpoint)) | 0.00321 |
def generate_project(self):
"""
Generate the whole project. Returns True if at least one
file has been generated, False otherwise."""
# checks needed properties
if not self.name or not self.destdir or \
not os.path.isdir(self.destdir):
raise ValueError("Empty or invalid property values: run with 'help' command")
_log("Generating project '%s'" % self.name)
_log("Destination directory is: '%s'" % self.destdir)
top = os.path.join(self.destdir, self.name)
src = os.path.join(top, self.src_name)
resources = os.path.join(top, self.res_name)
utils = os.path.join(src, "utils")
if self.complex:
models = os.path.join(src, "models")
ctrls = os.path.join(src, "ctrls")
views = os.path.join(src, "views")
else: models = ctrls = views = src
res = self.__generate_tree(top, src, resources, models, ctrls, views, utils)
res = self.__generate_classes(models, ctrls, views) or res
res = self.__mksrc(os.path.join(utils, "globals.py"), templates.glob) or res
if self.complex: self.templ.update({'model_import' : "from models.application import ApplModel",
'ctrl_import' : "from ctrls.application import ApplCtrl",
'view_import' : "from views.application import ApplView"})
else: self.templ.update({'model_import' : "from ApplModel import ApplModel",
'ctrl_import' : "from ApplCtrl import ApplCtrl",
'view_import' : "from ApplView import ApplView"})
res = self.__mksrc(os.path.join(top, "%s.py" % self.name), templates.main) or res
# builder file
if self.builder:
res = self.__generate_builder(resources) or res
if self.dist_gtkmvc3: res = self.__copy_framework(os.path.join(resources, "external")) or res
if not res: _log("No actions were taken")
else: _log("Done")
return res | 0.012369 |
def _deserialize(self, value, attr, data, **kwargs):
"""Deserialize an ISO8601-formatted time to a :class:`datetime.time` object."""
if not value: # falsy values are invalid
self.fail('invalid')
try:
return utils.from_iso_time(value)
except (AttributeError, TypeError, ValueError):
self.fail('invalid') | 0.008086 |
def deletecols(X, cols):
"""
Delete columns from a numpy ndarry or recarray.
Can take a string giving a column name or comma-separated list of column
names, or a list of string column names.
Implemented by the tabarray method
:func:`tabular.tab.tabarray.deletecols`.
**Parameters**
**X** : numpy recarray or ndarray with structured dtype
The numpy array from which to delete columns.
**cols** : string or list of strings
Name or list of names of columns in `X`. This can be
a string giving a column name or comma-separated list of
column names, or a list of string column names.
**Returns**
**out** : numpy ndarray with structured dtype
New numpy ndarray with structured dtype
given by `X`, excluding the columns named in `cols`.
"""
if isinstance(cols, str):
cols = cols.split(',')
retain = [n for n in X.dtype.names if n not in cols]
if len(retain) > 0:
return X[retain]
else:
return None | 0.003509 |
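A short example with a structured numpy array, dropping two columns by a comma-separated name string:

import numpy as np

X = np.array([(1, 2.0, 'a'), (3, 4.0, 'b')],
             dtype=[('id', int), ('val', float), ('tag', 'U1')])
Y = deletecols(X, 'val,tag')
assert Y.dtype.names == ('id',)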
def kdf(size, password, salt,
opslimit=OPSLIMIT_SENSITIVE,
memlimit=MEMLIMIT_SENSITIVE,
encoder=nacl.encoding.RawEncoder):
"""
Derive a ``size`` bytes long key from a caller-supplied
``password`` and ``salt`` pair using the argon2i
memory-hard construct.
the enclosing module provides the constants
- :py:const:`.OPSLIMIT_INTERACTIVE`
- :py:const:`.MEMLIMIT_INTERACTIVE`
- :py:const:`.OPSLIMIT_MODERATE`
- :py:const:`.MEMLIMIT_MODERATE`
- :py:const:`.OPSLIMIT_SENSITIVE`
- :py:const:`.MEMLIMIT_SENSITIVE`
as a guidance for correct settings.
:param size: derived key size, must be between
:py:const:`.BYTES_MIN` and
:py:const:`.BYTES_MAX`
:type size: int
:param password: password used to seed the key derivation procedure;
                     its length must be between
:py:const:`.PASSWD_MIN` and
:py:const:`.PASSWD_MAX`
:type password: bytes
:param salt: **RANDOM** salt used in the key derivation procedure;
its length must be exactly :py:const:`.SALTBYTES`
:type salt: bytes
:param opslimit: the time component (operation count)
of the key derivation procedure's computational cost;
it must be between
:py:const:`.OPSLIMIT_MIN` and
:py:const:`.OPSLIMIT_MAX`
:type opslimit: int
:param memlimit: the memory occupation component
of the key derivation procedure's computational cost;
it must be between
:py:const:`.MEMLIMIT_MIN` and
:py:const:`.MEMLIMIT_MAX`
:type memlimit: int
:rtype: bytes
.. versionadded:: 1.2
"""
return encoder.encode(
nacl.bindings.crypto_pwhash_alg(size, password, salt,
opslimit, memlimit,
ALG)
) | 0.000492 |
def _path_to_module(path):
"""Translates paths to *.py? files into module paths.
>>> _path_to_module("rapport/bar.py")
'rapport.bar'
>>> _path_to_module("/usr/lib/rapport/bar.py")
'rapport.bar'
"""
    # Split off preceding path elements:
path = "rapport" + path.split("rapport")[1]
    # Split off the extension and replace os.sep with dots:
path = path.replace(os.sep, ".").rsplit(".", 1)[0]
return path | 0.002222 |
def scons_copytree(src, dst, symlinks=False):
"""Recursively copy a directory tree using copy2().
The destination directory must not already exist.
    If exception(s) occur, a CopytreeError is raised with a list of reasons.
If the optional symlinks flag is true, symbolic links in the
source tree result in symbolic links in the destination tree; if
it is false, the contents of the files pointed to by symbolic
links are copied.
XXX Consider this example code rather than the ultimate tool.
"""
names = os.listdir(src)
# [email protected] fix: check for dir before making dirs.
if not os.path.exists(dst):
os.makedirs(dst)
errors = []
for name in names:
srcname = os.path.join(src, name)
dstname = os.path.join(dst, name)
try:
if symlinks and os.path.islink(srcname):
linkto = os.readlink(srcname)
os.symlink(linkto, dstname)
elif os.path.isdir(srcname):
scons_copytree(srcname, dstname, symlinks)
else:
shutil.copy2(srcname, dstname)
# XXX What about devices, sockets etc.?
except (IOError, os.error) as why:
errors.append((srcname, dstname, str(why)))
# catch the CopytreeError from the recursive copytree so that we can
# continue with other files
except CopytreeError as err:
errors.extend(err.args[0])
try:
shutil.copystat(src, dst)
except SCons.Util.WinError:
# can't copy file access times on Windows
pass
except OSError as why:
errors.extend((src, dst, str(why)))
if errors:
raise CopytreeError(errors) | 0.00058 |
def fatal(*args, **kwargs):
"""Log an error message and exit.
    The following arguments are keyword-only.
:param exitcode: Optional exit code to use
:param cause: Optional Invoke's Result object, i.e.
result of a subprocess invocation
"""
# determine the exitcode to return to the operating system
exitcode = None
if 'exitcode' in kwargs:
exitcode = kwargs.pop('exitcode')
if 'cause' in kwargs:
cause = kwargs.pop('cause')
if not isinstance(cause, Result):
raise TypeError(
"invalid cause of fatal error: expected %r, got %r" % (
Result, type(cause)))
exitcode = exitcode or cause.return_code
logging.error(*args, **kwargs)
raise Exit(exitcode or -1) | 0.001267 |
def stop(self, key):
"""
Stop a concurrent operation.
This gets the concurrency limiter for the given key (creating it if
necessary) and stops a concurrent operation on it. If the concurrency
limiter is empty, it is deleted.
"""
self._get_limiter(key).stop()
self._cleanup_limiter(key) | 0.005714 |
def process_formdata(self, valuelist):
"""
Process data received over the wire from a form.
This will be called during form construction with data supplied
through the `formdata` argument.
        Converts the primary key into an ORM object for server-side processing.
:param valuelist: A list of strings to process.
"""
if valuelist:
if self.is_related:
self.data = self.datamodel.get_related_interface(self.col_name).get(
valuelist[0]
)
else:
self.data = self.datamodel.get(valuelist[0]) | 0.004839 |
def _get_package_data():
"""Iterate over the `init` dir for directories and returns
all files within them.
Only files within `binaries` and `templates` will be added.
"""
from os import listdir as ls
from os.path import join as jn
x = 'init'
b = jn('serv', x)
dr = ['binaries', 'templates']
return [jn(x, d, f) for d in ls(b) if d in dr for f in ls(jn(b, d))] | 0.002494 |
def ParseOptions(self, options):
"""Parses tool specific options.
Args:
options (argparse.Namespace): command line arguments.
Raises:
BadConfigOption: if the options are invalid.
"""
# The extraction options are dependent on the data location.
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=['data_location'])
self._ReadParserPresetsFromFile()
# The output modules options are dependent on the preferred language
# and preferred time zone options.
self._ParseTimezoneOption(options)
argument_helper_names = [
'artifact_definitions', 'hashers', 'language', 'parsers']
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=argument_helper_names)
self.list_hashers = self._hasher_names_string == 'list'
self.list_language_identifiers = self._preferred_language == 'list'
self.list_parsers_and_plugins = self._parser_filter_expression == 'list'
# Check the list options first otherwise required options will raise.
if (self.list_hashers or self.list_language_identifiers or
self.list_parsers_and_plugins or self.list_timezones):
return
# Check output modules after the other listable options, otherwise
# it could raise with "requires an output file".
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=['output_modules'])
self.list_output_modules = self._output_format == 'list'
if self.list_output_modules:
return
self._ParseInformationalOptions(options)
argument_helper_names = ['extraction', 'status_view']
helpers_manager.ArgumentHelperManager.ParseOptions(
options, self, names=argument_helper_names)
self._ParseLogFileOptions(options)
self._ParseStorageMediaOptions(options)
self._ParsePerformanceOptions(options)
self._ParseProcessingOptions(options)
self._storage_file_path = getattr(options, 'storage_file', None)
if not self._storage_file_path:
self._storage_file_path = self._GenerateStorageFileName()
self._output_filename = getattr(options, 'write', None)
if not self._output_filename:
raise errors.BadConfigOption((
'Output format: {0:s} requires an output file '
'(-w OUTPUT_FILE)').format(self._output_format))
if os.path.exists(self._output_filename):
raise errors.BadConfigOption(
'Output file already exists: {0:s}.'.format(self._output_filename))
self._EnforceProcessMemoryLimit(self._process_memory_limit)
self._output_module = self._CreateOutputModule(options) | 0.002662 |
def _from_dict(cls, _dict):
"""Initialize a DocStructure object from a json dictionary."""
args = {}
if 'section_titles' in _dict:
args['section_titles'] = [
SectionTitles._from_dict(x)
for x in (_dict.get('section_titles'))
]
if 'leading_sentences' in _dict:
args['leading_sentences'] = [
LeadingSentence._from_dict(x)
for x in (_dict.get('leading_sentences'))
]
return cls(**args) | 0.003745 |
def _save_action(extra_context=None):
'''
    Save list of revisions for the active Conda environment.
.. versionchanged:: 0.18
Compress action revision files using ``bz2`` to save disk space.
Parameters
----------
extra_context : dict, optional
Extra content to store in stored action revision.
Returns
-------
path_helpers.path, dict
Path to which action was written and action object, including list of
revisions for active Conda environment.
'''
# Get list of revisions to Conda environment since creation.
revisions_js = ch.conda_exec('list', '--revisions', '--json',
verbose=False)
revisions = json.loads(revisions_js)
# Save list of revisions to `/etc/microdrop/plugins/actions/rev<rev>.json`
# See [wheeler-microfluidics/microdrop#200][i200].
#
# [i200]: https://github.com/wheeler-microfluidics/microdrop/issues/200
action = extra_context.copy() if extra_context else {}
action['revisions'] = revisions
action_path = (MICRODROP_CONDA_ACTIONS
.joinpath('rev{}.json.bz2'.format(revisions[-1]['rev'])))
action_path.parent.makedirs_p()
# Compress action file using bz2 to save disk space.
with bz2.BZ2File(action_path, mode='w') as output:
json.dump(action, output, indent=2)
return action_path, action | 0.000714 |
def get_ipython_module_path(module_str):
"""Find the path to an IPython module in this version of IPython.
This will always find the version of the module that is in this importable
IPython package. This will always return the path to the ``.py``
version of the module.
"""
if module_str == 'IPython':
return os.path.join(get_ipython_package_dir(), '__init__.py')
mod = import_item(module_str)
the_path = mod.__file__.replace('.pyc', '.py')
the_path = the_path.replace('.pyo', '.py')
return py3compat.cast_unicode(the_path, fs_encoding) | 0.001709 |
def point_scalar(mesh, name):
""" Returns point scalars of a vtk object """
vtkarr = mesh.GetPointData().GetArray(name)
if vtkarr:
if isinstance(vtkarr, vtk.vtkBitArray):
vtkarr = vtk_bit_array_to_char(vtkarr)
return vtk_to_numpy(vtkarr) | 0.00361 |
def parse(self, elt, ps, **kw):
        '''Attempt to parse sequentially. There is no way to know ahead of time
        what this instance represents. It must be a simple type, so it cannot
        have attributes or children; so this isn't too bad.
        '''
self.setMemberTypeCodes()
(nsuri,typeName) = self.checkname(elt, ps)
#if (nsuri,typeName) not in self.memberTypes:
# raise EvaluateException(
# 'Union Type mismatch got (%s,%s) not in %s' % \
# (nsuri, typeName, self.memberTypes), ps.Backtrace(elt))
for indx in range(len(self.memberTypeCodes)):
typecode = self.memberTypeCodes[indx]
try:
pyobj = typecode.parse(elt, ps)
except ParseException, ex:
continue
except Exception, ex:
continue
if indx > 0:
self.memberTypeCodes.remove(typecode)
self.memberTypeCodes.insert(0, typecode)
break
else:
raise
return pyobj | 0.003714 |
def _expand_var(self, in_string, available_variables):
"""Expand variable to its corresponding value in_string
:param string variable: variable name
:param value: value to replace with
:param string in_string: the string to replace in
"""
instances = self._get_instances(in_string)
for instance in instances:
for name, value in available_variables.items():
variable_string = self._get_variable_string(name)
if instance == variable_string:
in_string = in_string.replace(variable_string, value)
return in_string | 0.003135 |
def unlock(self, session=None):
"""Unlock a previously locked server.
:Parameters:
- `session` (optional): a
:class:`~pymongo.client_session.ClientSession`.
.. versionchanged:: 3.6
Added ``session`` parameter.
"""
cmd = SON([("fsyncUnlock", 1)])
with self._socket_for_writes(session) as sock_info:
if sock_info.max_wire_version >= 4:
try:
with self._tmp_session(session) as s:
sock_info.command(
"admin", cmd, session=s, client=self)
except OperationFailure as exc:
# Ignore "DB not locked" to replicate old behavior
if exc.code != 125:
raise
else:
message._first_batch(sock_info, "admin", "$cmd.sys.unlock",
{}, -1, True, self.codec_options,
ReadPreference.PRIMARY, cmd,
self._event_listeners) | 0.001825 |
def full_author_notes(soup, fntype_filter=None):
"""
Find the fn tags included in author-notes
"""
notes = []
author_notes_section = raw_parser.author_notes(soup)
if author_notes_section:
fn_nodes = raw_parser.fn(author_notes_section)
notes = footnotes(fn_nodes, fntype_filter)
return notes | 0.002976 |
def evaluate(self, reference_event_list, estimated_event_list, evaluated_length_seconds=None):
"""Evaluate file pair (reference and estimated)
Parameters
----------
reference_event_list : list of dict or dcase_util.containers.MetaDataContainer
Reference event list.
estimated_event_list : list of dict or dcase_util.containers.MetaDataContainer
Estimated event list.
evaluated_length_seconds : float, optional
Evaluated length. If none given, maximum offset is used.
Default value None
Returns
-------
self
"""
# Make sure input is dcase_util.containers.MetaDataContainer
if not isinstance(reference_event_list, dcase_util.containers.MetaDataContainer):
reference_event_list = dcase_util.containers.MetaDataContainer(reference_event_list)
if not isinstance(estimated_event_list, dcase_util.containers.MetaDataContainer):
estimated_event_list = dcase_util.containers.MetaDataContainer(estimated_event_list)
# Check that input event list have event only from one file
reference_files = reference_event_list.unique_files
if len(reference_files) > 1:
raise ValueError(
"reference_event_list contains events from multiple files. Evaluate only file by file."
)
estimated_files = estimated_event_list.unique_files
if len(estimated_files) > 1:
raise ValueError(
"estimated_event_list contains events from multiple files. Evaluate only file by file."
)
# Evaluate only valid events
valid_reference_event_list = dcase_util.containers.MetaDataContainer()
for item in reference_event_list:
if 'event_onset' in item and 'event_offset' in item and 'event_label' in item:
valid_reference_event_list.append(item)
elif 'onset' in item and 'offset' in item and 'event_label' in item:
valid_reference_event_list.append(item)
reference_event_list = valid_reference_event_list
valid_estimated_event_list = dcase_util.containers.MetaDataContainer()
for item in estimated_event_list:
if 'event_onset' in item and 'event_offset' in item and 'event_label' in item:
valid_estimated_event_list.append(item)
elif 'onset' in item and 'offset' in item and 'event_label' in item:
valid_estimated_event_list.append(item)
estimated_event_list = valid_estimated_event_list
# Convert event list into frame-based representation
reference_event_roll = util.event_list_to_event_roll(
source_event_list=reference_event_list,
event_label_list=self.event_label_list,
time_resolution=self.time_resolution
)
estimated_event_roll = util.event_list_to_event_roll(
source_event_list=estimated_event_list,
event_label_list=self.event_label_list,
time_resolution=self.time_resolution
)
if evaluated_length_seconds is None:
evaluated_length_seconds = max(reference_event_list.max_offset, estimated_event_list.max_offset)
evaluated_length_segments = int(math.ceil(evaluated_length_seconds * 1 / float(self.time_resolution)))
else:
evaluated_length_segments = int(math.ceil(evaluated_length_seconds * 1 / float(self.time_resolution)))
self.evaluated_length_seconds += evaluated_length_seconds
self.evaluated_files += 1
reference_event_roll, estimated_event_roll = util.match_event_roll_lengths(
reference_event_roll,
estimated_event_roll,
evaluated_length_segments
)
# Compute segment-based overall metrics
for segment_id in range(0, reference_event_roll.shape[0]):
annotated_segment = reference_event_roll[segment_id, :]
system_segment = estimated_event_roll[segment_id, :]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
S = min(Nref, Nsys) - Ntp
D = max(0, Nref - Nsys)
I = max(0, Nsys - Nref)
self.overall['Ntp'] += Ntp
self.overall['Ntn'] += Ntn
self.overall['Nfp'] += Nfp
self.overall['Nfn'] += Nfn
self.overall['Nref'] += Nref
self.overall['Nsys'] += Nsys
self.overall['S'] += S
self.overall['D'] += D
self.overall['I'] += I
# Compute segment-based class-wise metrics
for class_id, class_label in enumerate(self.event_label_list):
annotated_segment = reference_event_roll[:, class_id]
system_segment = estimated_event_roll[:, class_id]
Ntp = sum(system_segment + annotated_segment > 1)
Ntn = sum(system_segment + annotated_segment == 0)
Nfp = sum(system_segment - annotated_segment > 0)
Nfn = sum(annotated_segment - system_segment > 0)
Nref = sum(annotated_segment)
Nsys = sum(system_segment)
self.class_wise[class_label]['Ntp'] += Ntp
self.class_wise[class_label]['Ntn'] += Ntn
self.class_wise[class_label]['Nfp'] += Nfp
self.class_wise[class_label]['Nfn'] += Nfn
self.class_wise[class_label]['Nref'] += Nref
self.class_wise[class_label]['Nsys'] += Nsys
return self | 0.003443 |
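A worked numeric illustration of the per-segment counts computed above, for one segment with three event classes (reference active for classes 0 and 2, system active for classes 0 and 1):

import numpy as np

annotated_segment = np.array([1, 0, 1])
system_segment = np.array([1, 1, 0])
Ntp = sum(system_segment + annotated_segment > 1)   # 1 (class 0 correct)
Nfp = sum(system_segment - annotated_segment > 0)   # 1 (class 1 inserted)
Nfn = sum(annotated_segment - system_segment > 0)   # 1 (class 2 missed)
S = min(sum(annotated_segment), sum(system_segment)) - Ntp  # 1 substitution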
def _get_datapoints(self, params):
"""
Will make a direct REST call with the given json body payload to
get datapoints.
"""
url = self.query_uri + '/v1/datapoints'
return self.service._get(url, params=params) | 0.007813 |
def sub(self, *segments):
"Get a sub-configuration."
d = self
for segment in segments:
try:
d = d[segment]
except KeyError:
return ConfigDict({})
return d | 0.008264 |
def run(self, **kwargs):
""" Start the tornado server, run forever"""
try:
loop = IOLoop()
app = self.make_app()
app.listen(self.port)
loop.start()
except socket.error as serr:
# Re raise the socket error if not "[Errno 98] Address already in use"
if serr.errno != errno.EADDRINUSE:
raise serr
else:
                logger.warning('The webserver port {} is already in use. Maybe the HttpRobotServer is already running or another program is using this port.'.format(self.port))
def HTML_results(resultsFile):
"""generates HTML report of active folders/days."""
foldersByDay=loadResults(resultsFile)
# optionally skip dates before a certain date
# for day in sorted(list(foldersByDay.keys())):
# if time.strptime(day,"%Y-%m-%d")<time.strptime("2016-05-01","%Y-%m-%d"):
# del foldersByDay[day]
# Create a header
html="<div class='heading'>Active Folder Report (updated TIMESTAMP)</div>"
html+="<li>When a file is created (or modified) its parent folder is marked active for that day."
html+="<li>This page reports all folders which were active in the last several years. "
html+="<li>A single folder can be active for more than one date."
html=html.replace("TIMESTAMP",(time.strftime('%Y-%m-%d', time.localtime())))
html+="<br>"*5
# create menu at the top of the page
html+="<div class='heading'>Active Folder Dates</div>"
html+="<code>"
lastMonth=""
lastYear=""
for day in sorted(list(foldersByDay.keys())):
month=day[:7]
year=day[:4]
if year!=lastYear:
html+="<br><br><b style='font-size: 200%%;'>%s</b> "%year
lastYear=year
if month!=lastMonth:
html+="<br><b>%s:</b> "%month
lastMonth=month
html+="<a href='#%s'>%s</a>, "%(day,day[8:])
html+="<br>"*5
html=html.replace(", <br>","<br>")
html+="</code>"
# create the full list of folders organized by active date
html+="<div class='heading'>Active Folders</div>"
for day in sorted(list(foldersByDay.keys())):
dt=datetime.datetime.strptime(day, "%Y-%m-%d" )
classPrefix="weekday"
if int(dt.weekday())>4:
classPrefix="weekend"
html+="<a name='%s' href='#%s' style='color: black;'>"%(day,day)
title="%s (%s)"%(day,DAYSOFWEEK[dt.weekday()])
html+="<div class='%s_datecode'>%s</div></a>"%(classPrefix,title)
html+="<div class='%s_folders'>"%(classPrefix)
# define folders to skip
for folder in foldersByDay[day]:
if "\\References\\" in folder:
continue
if "\\MIP\\" in folder:
continue
if "LineScan-" and "\\analysis\\" in folder:
continue
if "trakem2" in folder:
continue
if "SWHlab-" in folder:
continue
if "\\swhlab" in folder:
continue
html+="%s<br>"%folder
html+="</div>"
fnameSave=resultsFile+".html"
html=html.replace("D:\\X_Drive\\","X:\\")
with open(fnameSave,'w') as f:
f.write(HTML_TEMPLATE.replace("<body>","<body>"+html))
print("saved",fnameSave) | 0.021856 |
def evaluate_expression(expression, vars):
    '''evaluate an expression'''
try:
v = eval(expression, globals(), vars)
except NameError:
return None
except ZeroDivisionError:
return None
return v | 0.004219 |
def to_xml(self, root):
'''
        Returns a DOM element containing the XML representation of the
ExtensibleXMLiElement
@param root:Element Root XML element.
@return: Element
'''
if not len(self.__custom_elements):
return
for uri, tags in self.__custom_elements.items():
prefix, url = uri.split(":", 1)
for name, value in tags.items():
self.__createElementNS(root, url, prefix + ":" + name, value)
return root | 0.003795 |
def add_prefix(self):
""" Add prefix according to the specification.
The following keys can be used:
vrf ID of VRF to place the prefix in
prefix the prefix to add if already known
family address family (4 or 6)
description A short description
expires Expiry time of assignment
comment Longer comment
node Hostname of node
type Type of prefix; reservation, assignment, host
status Status of prefix; assigned, reserved, quarantine
pool ID of pool
country Country where the prefix is used
order_id Order identifier
customer_id Customer identifier
vlan VLAN ID
alarm_priority Alarm priority of prefix
monitor If the prefix should be monitored or not
from-prefix A prefix the prefix is to be allocated from
from-pool A pool (ID) the prefix is to be allocated from
prefix_length Prefix length of allocated prefix
"""
p = Prefix()
# Sanitize input parameters
if 'vrf' in request.json:
try:
if request.json['vrf'] is None or len(unicode(request.json['vrf'])) == 0:
p.vrf = None
else:
p.vrf = VRF.get(int(request.json['vrf']))
except ValueError:
return json.dumps({'error': 1, 'message': "Invalid VRF ID '%s'" % request.json['vrf']})
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
if 'description' in request.json:
p.description = validate_string(request.json, 'description')
if 'expires' in request.json:
p.expires = validate_string(request.json, 'expires')
if 'comment' in request.json:
p.comment = validate_string(request.json, 'comment')
if 'node' in request.json:
p.node = validate_string(request.json, 'node')
if 'status' in request.json:
p.status = validate_string(request.json, 'status')
if 'type' in request.json:
p.type = validate_string(request.json, 'type')
if 'pool' in request.json:
if request.json['pool'] is not None:
try:
p.pool = Pool.get(int(request.json['pool']))
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
if 'country' in request.json:
p.country = validate_string(request.json, 'country')
if 'order_id' in request.json:
p.order_id = validate_string(request.json, 'order_id')
if 'customer_id' in request.json:
p.customer_id = validate_string(request.json, 'customer_id')
if 'alarm_priority' in request.json:
p.alarm_priority = validate_string(request.json, 'alarm_priority')
if 'monitor' in request.json:
p.monitor = request.json['monitor']
if 'vlan' in request.json:
p.vlan = request.json['vlan']
if 'tags' in request.json:
p.tags = request.json['tags']
if 'avps' in request.json:
p.avps = request.json['avps']
# arguments
args = {}
if 'from_prefix' in request.json:
args['from-prefix'] = request.json['from_prefix']
if 'from_pool' in request.json:
try:
args['from-pool'] = Pool.get(int(request.json['from_pool']))
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
if 'family' in request.json:
args['family'] = request.json['family']
if 'prefix_length' in request.json:
args['prefix_length'] = request.json['prefix_length']
# manual allocation?
if args == {}:
if 'prefix' in request.json:
p.prefix = request.json['prefix']
try:
p.save(args)
except NipapError, e:
return json.dumps({'error': 1, 'message': e.args, 'type': type(e).__name__})
return json.dumps(p, cls=NipapJSONEncoder) | 0.001805 |
def load(path):
'''
Load specified fault manager module
path: string
path of fault manager module
CLI Example:
.. code-block:: bash
salt '*' fmadm.load /module/path
'''
ret = {}
fmadm = _check_fmadm()
cmd = '{cmd} load {path}'.format(
cmd=fmadm,
path=path
)
res = __salt__['cmd.run_all'](cmd)
retcode = res['retcode']
result = {}
if retcode != 0:
result['Error'] = res['stderr']
else:
result = True
return result | 0.00189 |
def geturi(self):
"""Return the re-combined version of the original URI reference as a
string.
"""
scheme, authority, path, query, fragment = self
# RFC 3986 5.3. Component Recomposition
result = []
if scheme is not None:
result.extend([scheme, self.COLON])
if authority is not None:
result.extend([self.SLASH, self.SLASH, authority])
result.append(path)
if query is not None:
result.extend([self.QUEST, query])
if fragment is not None:
result.extend([self.HASH, fragment])
return self.EMPTY.join(result) | 0.003077 |
def set_parallel(self, width, name=None):
"""
Set this source stream to be split into multiple channels
as the start of a parallel region.
Calling ``set_parallel`` on a stream created by
:py:meth:`~Topology.source` results in the stream
having `width` channels, each created by its own instance
of the callable::
s = topo.source(S())
s.set_parallel(3)
f = s.filter(F())
e = f.end_parallel()
Each channel has independent instances of ``S`` and ``F``. Tuples
created by the instance of ``S`` in channel 0 are passed to the
instance of ``F`` in channel 0, and so on for channels 1 and 2.
Callable transforms instances within the channel can use
the runtime functions
:py:func:`~streamsx.ec.channel`,
:py:func:`~streamsx.ec.local_channel`,
:py:func:`~streamsx.ec.max_channels` &
:py:func:`~streamsx.ec.local_max_channels`
to adapt to being invoked in parallel. For example a
source callable can use its channel number to determine
which partition to read from in a partitioned external system.
Calling ``set_parallel`` on a stream created by
:py:meth:`~Topology.subscribe` results in the stream
having `width` channels. Subscribe ensures that the
stream will contain all published tuples matching the
topic subscription and type. A published tuple will appear
on one of the channels though the specific channel is not known
in advance.
A parallel region is terminated by :py:meth:`end_parallel`
or :py:meth:`for_each`.
The number of channels is set by `width` which may be an `int` greater
than zero or a submission parameter created by
:py:meth:`Topology.create_submission_parameter`.
With IBM Streams 4.3 or later the number of channels can be
dynamically changed at runtime.
Parallel regions are started on non-source streams using
:py:meth:`parallel`.
Args:
width: The degree of parallelism for the parallel region.
name(str): Name of the parallel region. Defaults to the name of this stream.
Returns:
Stream: Returns this stream.
.. seealso:: :py:meth:`parallel`, :py:meth:`end_parallel`
.. versionadded:: 1.9
.. versionchanged:: 1.11 `name` parameter added.
"""
self.oport.operator.config['parallel'] = True
self.oport.operator.config['width'] = streamsx.topology.graph._as_spl_json(width, int)
if name:
name = self.topology.graph._requested_name(str(name), action='set_parallel')
self.oport.operator.config['regionName'] = name
return self | 0.002481 |
def pick_target_decoy(tfeature, dfeature):
"""Feed it with a target and decoy score and the protein/gene/id names,
and this will return target/decoy type, the winning ID and the score"""
tscore, dscore = get_score(tfeature), get_score(dfeature)
if tscore == dscore:
# same score or both False
return False
elif False in [tscore, dscore]:
# return the non-False feature
return [v for k, v in {tscore: tfeature, dscore: dfeature}.items()
if k is not False][0]
elif tscore > dscore:
return tfeature
elif tscore < dscore:
return dfeature
else:
# in case uncaught edgecase occurs
print('WARNING, target score {} and decoy score {} could not be '
'compared'.format(tscore, dscore))
return False | 0.001217 |
async def select_page(self, info: SQLQueryInfo, size=1, page=1) -> Tuple[Tuple[DataRecord, ...], int]:
"""
Select from database
:param info:
:param size: -1 means infinite
:param page:
        :return: records, count
"""
raise NotImplementedError() | 0.010336 |
def _set_red_profile_ecn(self, v, load=False):
"""
Setter method for red_profile_ecn, mapped from YANG variable /qos/ecn/red_profile/red_profile_ecn (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_red_profile_ecn is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_red_profile_ecn() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=red_profile_ecn.red_profile_ecn, is_container='container', presence=False, yang_name="red-profile-ecn", rest_name="ecn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure RED Profile ECN', u'cli-sequence-commands': None, u'alt-name': u'ecn', u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """red_profile_ecn must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=red_profile_ecn.red_profile_ecn, is_container='container', presence=False, yang_name="red-profile-ecn", rest_name="ecn", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure RED Profile ECN', u'cli-sequence-commands': None, u'alt-name': u'ecn', u'cli-incomplete-command': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-qos', defining_module='brocade-qos', yang_type='container', is_config=True)""",
})
self.__red_profile_ecn = t
if hasattr(self, '_set'):
self._set() | 0.005319 |
def leastsqbound(func, x0, args=(), bounds=None, Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=0.0, factor=100, diag=None):
"""
Bounded minimization of the sum of squares of a set of equations.
::
x = arg min(sum(func(y)**2,axis=0))
y
Parameters
----------
func : callable
should take at least one (possibly length N vector) argument and
returns M floating point numbers.
x0 : ndarray
The starting estimate for the minimization.
args : tuple
Any extra arguments to func are placed in this tuple.
bounds : list
``(min, max)`` pairs for each element in ``x``, defining
the bounds on that parameter. Use None for one of ``min`` or
``max`` when there is no bound in that direction.
Dfun : callable
A function or method to compute the Jacobian of func with derivatives
across the rows. If this is None, the Jacobian will be estimated.
full_output : bool
non-zero to return all optional outputs.
col_deriv : bool
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float
Relative error desired in the sum of squares.
xtol : float
Relative error desired in the approximate solution.
gtol : float
Orthogonality desired between the function vector and the columns of
the Jacobian.
maxfev : int
The maximum number of calls to the function. If zero, then 100*(N+1) is
the maximum where N is the number of elements in x0.
epsfcn : float
A suitable step length for the forward-difference approximation of the
Jacobian (for Dfun=None). If epsfcn is less than the machine precision,
it is assumed that the relative errors in the functions are of the
order of the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence
N positive entries that serve as a scale factors for the variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for an unsuccessful
call).
cov_x : ndarray
Uses the fjac and ipvt optional outputs to construct an
estimate of the jacobian around the solution. ``None`` if a
singular matrix encountered (indicates very flat curvature in
some direction). This matrix must be multiplied by the
residual standard deviation to get the covariance of the
parameter estimates -- see curve_fit.
infodict : dict
        a dictionary of optional outputs with the keys::
- 'nfev' : the number of function calls
- 'fvec' : the function evaluated at the output
- 'fjac' : A permutation of the R matrix of a QR
factorization of the final approximate
Jacobian matrix, stored column wise.
Together with ipvt, the covariance of the
estimate can be approximated.
- 'ipvt' : an integer array of length N which defines
a permutation matrix, p, such that
fjac*p = q*r, where r is upper triangular
with diagonal elements of nonincreasing
magnitude. Column j of p is column ipvt(j)
of the identity matrix.
- 'qtf' : the vector (transpose(q) * fvec).
mesg : str
A string message giving information about the cause of failure.
ier : int
An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
found. Otherwise, the solution was not found. In either case, the
optional output variable 'mesg' gives more information.
Notes
-----
"leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
cov_x is a Jacobian approximation to the Hessian of the least squares
objective function.
This approximation assumes that the objective function is based on the
difference between some observed target data (ydata) and a (non-linear)
function of the parameters `f(xdata, params)` ::
func(params) = ydata - f(xdata, params)
so that the objective function is ::
min sum((ydata - f(xdata, params))**2, axis=0)
params
    Constraints on the parameters are enforced using an internal parameter list
    with appropriate transformations such that these internal parameters can be
    optimized without constraints. The transformations between a given internal
    parameter, p_i, and an external parameter, p_e, are as follows:
With ``min`` and ``max`` bounds defined ::
p_i = arcsin((2 * (p_e - min) / (max - min)) - 1.)
p_e = min + ((max - min) / 2.) * (sin(p_i) + 1.)
With only ``max`` defined ::
p_i = sqrt((p_e - max + 1.)**2 - 1.)
p_e = max + 1. - sqrt(p_i**2 + 1.)
With only ``min`` defined ::
p_i = sqrt((p_e - min + 1.)**2 - 1.)
p_e = min - 1. + sqrt(p_i**2 + 1.)
    These transformations are used in the MINUIT package, and are described in
detail in the section 1.3.1 of the MINUIT User's Guide.
To Do
-----
Currently the ``factor`` and ``diag`` parameters scale the
internal parameter list, but should scale the external parameter list.
    The `qtf` vector in the infodict dictionary reflects the internal parameter
    list; it should be corrected to reflect the external parameter list.
References
----------
* F. James and M. Winkler. MINUIT User's Guide, July 16, 2004.
"""
# use leastsq if no bounds are present
if bounds is None:
return leastsq(func, x0, args, Dfun, full_output, col_deriv,
ftol, xtol, gtol, maxfev, epsfcn, factor, diag)
# create function which convert between internal and external parameters
i2e = _internal2external_func(bounds)
e2i = _external2internal_func(bounds)
x0 = array(x0, ndmin=1)
i0 = e2i(x0)
n = len(x0)
if len(bounds) != n:
raise ValueError('length of x0 != length of bounds')
if type(args) != type(()):
args = (args,)
m = _check_func('leastsq', 'func', func, x0, args, n)[0]
if n > m:
raise TypeError('Improper input: N=%s must not exceed M=%s' % (n,m))
# define a wrapped func which accept internal parameters, converts them
# to external parameters and calls func
def wfunc(x, *args): return func(i2e(x), *args)
if Dfun is None:
if (maxfev == 0):
maxfev = 200*(n + 1)
retval = _minpack._lmdif(wfunc, i0, args, full_output, ftol, xtol,
gtol, maxfev, epsfcn, factor, diag)
else:
if col_deriv:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n,m))
else:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m,n))
if (maxfev == 0):
maxfev = 100*(n + 1)
def wDfun(x, *args): return Dfun(i2e(x), *args) # wrapped Dfun
retval = _minpack._lmder(func, wDfun, i0, args, full_output,
col_deriv, ftol, xtol, gtol, maxfev, factor, diag)
errors = {0:["Improper input parameters.", TypeError],
1:["Both actual and predicted relative reductions "
"in the sum of squares\n are at most %f" % ftol, None],
2:["The relative error between two consecutive "
"iterates is at most %f" % xtol, None],
3:["Both actual and predicted relative reductions in "
"the sum of squares\n are at most %f and the "
"relative error between two consecutive "
"iterates is at \n most %f" % (ftol,xtol), None],
4:["The cosine of the angle between func(x) and any "
"column of the\n Jacobian is at most %f in "
"absolute value" % gtol, None],
5:["Number of calls to function has reached "
"maxfev = %d." % maxfev, ValueError],
6:["ftol=%f is too small, no further reduction "
"in the sum of squares\n is possible.""" % ftol, ValueError],
7:["xtol=%f is too small, no further improvement in "
"the approximate\n solution is possible." % xtol, ValueError],
8:["gtol=%f is too small, func(x) is orthogonal to the "
"columns of\n the Jacobian to machine "
"precision." % gtol, ValueError],
'unknown':["Unknown error.", TypeError]}
info = retval[-1] # The FORTRAN return value
if (info not in [1,2,3,4] and not full_output):
if info in [5,6,7,8]:
warnings.warn(errors[info][0], RuntimeWarning)
else:
try:
raise errors[info][1](errors[info][0])
except KeyError:
raise errors['unknown'][1](errors['unknown'][0])
mesg = errors[info][0]
x = i2e(retval[0]) # internal params to external params
if full_output:
# convert fjac from internal params to external
grad = _internal2external_grad(retval[0], bounds)
retval[1]['fjac'] = (retval[1]['fjac'].T / take(grad,
retval[1]['ipvt'] - 1)).T
cov_x = None
if info in [1,2,3,4]:
from numpy.dual import inv
from numpy.linalg import LinAlgError
perm = take(eye(n),retval[1]['ipvt']-1,0)
r = triu(transpose(retval[1]['fjac'])[:n,:])
R = dot(r, perm)
try:
cov_x = inv(dot(transpose(R),R))
except LinAlgError:
pass
return (x, cov_x) + retval[1:-1] + (mesg, info)
else:
return (x, info) | 0.004994 |
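A minimal sketch of the min/max-bounded parameter transform quoted in the docstring (not the module's actual _internal2external_func/_external2internal_func helpers):

import numpy as np

def to_internal(p_e, lo, hi):
    # external -> internal: p_i = arcsin((2 * (p_e - min) / (max - min)) - 1)
    return np.arcsin((2.0 * (p_e - lo) / (hi - lo)) - 1.0)

def to_external(p_i, lo, hi):
    # internal -> external: p_e = min + ((max - min) / 2) * (sin(p_i) + 1)
    return lo + ((hi - lo) / 2.0) * (np.sin(p_i) + 1.0)

p_i = to_internal(0.25, 0.0, 1.0)
assert np.isclose(to_external(p_i, 0.0, 1.0), 0.25)  # round-trips; stays in [0, 1]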
def get_cell_lines(self, column_idx):
        '''
        :returns:
            the lines of the cell specified by column_idx, or an empty list if the column does not exist
        '''
return [] if column_idx >= len(self.columns) else self.columns[column_idx].get_cell_lines() | 0.013699 |
def _backwards_search(self, start_node, split_name, max_depth=float('inf'), shortcuts=True):
""" Performs a backwards search from the terminal node back to the start node
:param start_node:
The node from where search starts, or here better way where backwards search should
end.
:param split_name:
List of names
:param max_depth:
Maximum search depth where to look for
:param shortcuts:
If shortcuts are allowed
"""
result_list = [] # Result list of all found items
full_name_set = set() # Set containing full names of all found items to avoid finding items
# twice due to links
colon_name = '.'.join(split_name)
key = split_name[-1]
candidate_dict = self._get_candidate_dict(key, None, use_upper_bound=False)
parent_full_name = start_node.v_full_name
split_length = len(split_name)
for candidate_name in candidate_dict:
# Check if candidate startswith the parent's name
candidate = candidate_dict[candidate_name]
if key != candidate.v_name or candidate.v_full_name in full_name_set:
                # If this is not the case we have a link, which we need to skip
continue
if candidate_name.startswith(parent_full_name):
if parent_full_name != '':
reduced_candidate_name = candidate_name[len(parent_full_name) + 1:]
else:
reduced_candidate_name = candidate_name
candidate_split_name = reduced_candidate_name.split('.')
if len(candidate_split_name) > max_depth:
break
if len(split_name) == 1 or reduced_candidate_name.endswith(colon_name):
result_list.append(candidate)
full_name_set.add(candidate.v_full_name)
elif shortcuts:
candidate_set = set(candidate_split_name)
climbing = True
for name in split_name:
if name not in candidate_set:
climbing = False
break
if climbing:
count = 0
candidate_length = len(candidate_split_name)
for idx in range(candidate_length):
if idx + split_length - count > candidate_length:
break
if split_name[count] == candidate_split_name[idx]:
count += 1
if count == len(split_name):
result_list.append(candidate)
full_name_set.add(candidate.v_full_name)
break
return result_list | 0.00404 |
def _sysfs_parse(path, base_attr=None, stats=False, config=False, internals=False, options=False):
'''
Helper function for parsing BCache's SysFS interface
'''
result = {}
# ---------------- Parse through the interfaces list ----------------
intfs = __salt__['sysfs.interfaces'](path)
# Actions, we ignore
del intfs['w']
# -------- Sorting hat --------
binkeys = []
if internals:
binkeys.extend(['inter_ro', 'inter_rw'])
if config:
binkeys.append('config')
if stats:
binkeys.append('stats')
bintf = {}
for key in binkeys:
bintf[key] = []
for intf in intfs['r']:
if intf.startswith('internal'):
key = 'inter_ro'
elif 'stats' in intf:
key = 'stats'
else:
# What to do with these???
# I'll utilize 'inter_ro' as 'misc' as well
key = 'inter_ro'
if key in bintf:
bintf[key].append(intf)
for intf in intfs['rw']:
if intf.startswith('internal'):
key = 'inter_rw'
else:
key = 'config'
if key in bintf:
bintf[key].append(intf)
if base_attr is not None:
for intf in bintf:
bintf[intf] = [sintf for sintf in bintf[intf] if sintf not in base_attr]
bintf['base'] = base_attr
mods = {
'stats': ['internal/bset_tree_stats', 'writeback_rate_debug', 'metadata_written', 'nbuckets', 'written',
'average_key_size', 'btree_cache_size'],
}
for modt, modlist in mods.items():
found = []
if modt not in bintf:
continue
for mod in modlist:
for intflist in bintf.values():
if mod in intflist:
found.append(mod)
intflist.remove(mod)
bintf[modt] += found
# -------- Fetch SysFS vals --------
bintflist = [intf for iflist in bintf.values() for intf in iflist]
result.update(__salt__['sysfs.read'](bintflist, path))
# -------- Parse through well known string lists --------
for strlist in ('writeback_rate_debug', 'internal/bset_tree_stats', 'priority_stats'):
if strlist in result:
listres = {}
for line in result[strlist].split('\n'):
key, val = line.split(':', 1)
val = val.strip()
try:
val = int(val)
except Exception:
try:
val = float(val)
except Exception:
pass
listres[key.strip()] = val
result[strlist] = listres
# -------- Parse through selection lists --------
if not options:
for sellist in ('cache_mode', 'cache_replacement_policy', 'errors'):
if sellist in result:
result[sellist] = re.search(r'\[(.+)\]', result[sellist]).groups()[0]
# -------- Parse through well known bools --------
for boolkey in ('running', 'writeback_running', 'congested'):
if boolkey in result:
result[boolkey] = bool(result[boolkey])
# -------- Recategorize results --------
bresult = {}
for iftype, intflist in bintf.items():
ifres = {}
for intf in intflist:
if intf in result:
ifres[intf] = result.pop(intf)
if ifres:
bresult[iftype] = ifres
return bresult | 0.001722 |
def critical(self, msg, *args, **kwargs):
"""Log 'msg % args' with the critical severity level"""
self._log("CRITICAL", msg, args, kwargs) | 0.012987 |
def community_topic_posts(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/posts#list-posts"
api_path = "/api/v2/community/topics/{id}/posts.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) | 0.010791 |
def deepfool_attack(sess,
x,
predictions,
logits,
grads,
sample,
nb_candidate,
overshoot,
max_iter,
clip_min,
clip_max,
feed=None):
"""
TensorFlow implementation of DeepFool.
Paper link: see https://arxiv.org/pdf/1511.04599.pdf
:param sess: TF session
:param x: The input placeholder
:param predictions: The model's sorted symbolic output of logits, only the
top nb_candidate classes are contained
:param logits: The model's unnormalized output tensor (the input to
the softmax layer)
  :param grads: Symbolic gradients of the top nb_candidate classes, produced
                from gradient_graph
:param sample: Numpy array with sample input
:param nb_candidate: The number of classes to test against, i.e.,
deepfool only consider nb_candidate classes when
attacking(thus accelerate speed). The nb_candidate
classes are chosen according to the prediction
confidence during implementation.
:param overshoot: A termination criterion to prevent vanishing updates
:param max_iter: Maximum number of iteration for DeepFool
:param clip_min: Minimum value for components of the example returned
:param clip_max: Maximum value for components of the example returned
:return: Adversarial examples
"""
adv_x = copy.copy(sample)
# Initialize the loop variables
iteration = 0
current = utils_tf.model_argmax(sess, x, logits, adv_x, feed=feed)
if current.shape == ():
current = np.array([current])
w = np.squeeze(np.zeros(sample.shape[1:])) # same shape as original image
r_tot = np.zeros(sample.shape)
original = current # use original label as the reference
_logger.debug(
"Starting DeepFool attack up to %s iterations", max_iter)
# Repeat this main loop until we have achieved misclassification
while (np.any(current == original) and iteration < max_iter):
if iteration % 5 == 0 and iteration > 0:
_logger.info("Attack result at iteration %s is %s", iteration, current)
gradients = sess.run(grads, feed_dict={x: adv_x})
predictions_val = sess.run(predictions, feed_dict={x: adv_x})
for idx in range(sample.shape[0]):
pert = np.inf
if current[idx] != original[idx]:
continue
for k in range(1, nb_candidate):
w_k = gradients[idx, k, ...] - gradients[idx, 0, ...]
f_k = predictions_val[idx, k] - predictions_val[idx, 0]
# adding value 0.00001 to prevent f_k = 0
pert_k = (abs(f_k) + 0.00001) / np.linalg.norm(w_k.flatten())
if pert_k < pert:
pert = pert_k
w = w_k
r_i = pert * w / np.linalg.norm(w)
r_tot[idx, ...] = r_tot[idx, ...] + r_i
adv_x = np.clip(r_tot + sample, clip_min, clip_max)
current = utils_tf.model_argmax(sess, x, logits, adv_x, feed=feed)
if current.shape == ():
current = np.array([current])
# Update loop variables
iteration = iteration + 1
# need more revision, including info like how many succeed
_logger.info("Attack result at iteration %s is %s", iteration, current)
_logger.info("%s out of %s become adversarial examples at iteration %s",
sum(current != original),
sample.shape[0],
iteration)
# need to clip this image into the given range
adv_x = np.clip((1 + overshoot) * r_tot + sample, clip_min, clip_max)
return adv_x | 0.007657 |
def _initialize_tableaux_ig(X, Y, tableaux, bases):
"""
Given sequences `X` and `Y` of ndarrays, initialize the tableau and
basis arrays in place for the "geometric" imitation game as defined
in McLennan and Tourky (2006), to be passed to `_lemke_howson_tbl`.
Parameters
----------
X, Y : ndarray(float)
Arrays of the same shape (m, n).
tableaux : tuple(ndarray(float, ndim=2))
Tuple of two arrays to be used to store the tableaux, of shape
(2m, 2m). Modified in place.
bases : tuple(ndarray(int, ndim=1))
Tuple of two arrays to be used to store the bases, of shape
(m,). Modified in place.
Returns
-------
tableaux : tuple(ndarray(float, ndim=2))
View to `tableaux`.
bases : tuple(ndarray(int, ndim=1))
View to `bases`.
"""
m = X.shape[0]
min_ = np.zeros(m)
# Mover
for i in range(m):
for j in range(2*m):
if j == i or j == i + m:
tableaux[0][i, j] = 1
else:
tableaux[0][i, j] = 0
# Right hand side
tableaux[0][i, 2*m] = 1
# Imitator
for i in range(m):
# Slack variables
for j in range(m):
if j == i:
tableaux[1][i, j] = 1
else:
tableaux[1][i, j] = 0
# Payoff variables
for j in range(m):
d = X[i] - Y[j]
tableaux[1][i, m+j] = _square_sum(d) * (-1)
if tableaux[1][i, m+j] < min_[j]:
min_[j] = tableaux[1][i, m+j]
# Right hand side
tableaux[1][i, 2*m] = 1
# Shift the payoff values
for i in range(m):
for j in range(m):
tableaux[1][i, m+j] -= min_[j]
tableaux[1][i, m+j] += 1
for pl, start in enumerate([m, 0]):
for i in range(m):
bases[pl][i] = start + i
return tableaux, bases | 0.000519 |
def parse_bitcode(bitcode, context=None):
"""
Create Module from a LLVM *bitcode* (a bytes object).
"""
if context is None:
context = get_global_context()
buf = c_char_p(bitcode)
bufsize = len(bitcode)
with ffi.OutputString() as errmsg:
mod = ModuleRef(ffi.lib.LLVMPY_ParseBitcode(
context, buf, bufsize, errmsg), context)
if errmsg:
mod.close()
raise RuntimeError(
"LLVM bitcode parsing error\n{0}".format(errmsg))
return mod | 0.001873 |
def load_wav(path, mono=True):
"""Loads a .wav file as a numpy array using ``scipy.io.wavfile``.
Parameters
----------
path : str
Path to a .wav file
mono : bool
If the provided .wav has more than one channel, it will be
converted to mono if ``mono=True``. (Default value = True)
Returns
-------
audio_data : np.ndarray
Array of audio samples, normalized to the range [-1., 1.]
fs : int
Sampling rate of the audio data
"""
fs, audio_data = scipy.io.wavfile.read(path)
# Make float in range [-1, 1]
if audio_data.dtype == 'int8':
audio_data = audio_data/float(2**8)
elif audio_data.dtype == 'int16':
audio_data = audio_data/float(2**16)
elif audio_data.dtype == 'int32':
audio_data = audio_data/float(2**24)
else:
raise ValueError('Got unexpected .wav data type '
'{}'.format(audio_data.dtype))
# Optionally convert to mono
if mono and audio_data.ndim != 1:
audio_data = audio_data.mean(axis=1)
return audio_data, fs | 0.000909 |
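A minimal usage sketch, assuming load_wav above is importable; the file name is hypothetical.
audio, fs = load_wav('example_speech.wav', mono=True)
print(audio.shape, fs)  # mono samples in [-1, 1] and the sampling rate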
def set_cookie(cookies, key, value='', max_age=None, expires=None, path='/',
domain=None, secure=False, httponly=False):
'''Set a cookie key into the cookies dictionary *cookies*.'''
cookies[key] = value
if expires is not None:
if isinstance(expires, datetime):
now = (expires.now(expires.tzinfo) if expires.tzinfo else
expires.utcnow())
delta = expires - now
# Add one second so the date matches exactly (a fraction of
# time gets lost between converting to a timedelta and
# then the date string).
delta = delta + timedelta(seconds=1)
# Just set max_age - the max_age logic will set expires.
expires = None
max_age = max(0, delta.days * 86400 + delta.seconds)
else:
cookies[key]['expires'] = expires
if max_age is not None:
cookies[key]['max-age'] = max_age
# IE requires expires, so set it if hasn't been already.
if not expires:
cookies[key]['expires'] = http_date(time.time() + max_age)
if path is not None:
cookies[key]['path'] = path
if domain is not None:
cookies[key]['domain'] = domain
if secure:
cookies[key]['secure'] = True
if httponly:
cookies[key]['httponly'] = True | 0.00074 |
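A minimal usage sketch, assuming set_cookie above is importable; it fills a standard-library cookie container.
from http.cookies import SimpleCookie

cookies = SimpleCookie()
set_cookie(cookies, 'sessionid', 'abc123', max_age=3600,
           secure=True, httponly=True)
print(cookies.output())  # Set-Cookie header lines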
def gen_triplets_master(wv_master, geometry=None, debugplot=0):
"""Compute information associated to triplets in master table.
Determine all the possible triplets that can be generated from the
array `wv_master`. In addition, the relative position of the
central line of each triplet is also computed.
Parameters
----------
wv_master : 1d numpy array, float
Array with wavelengths corresponding to the master table
(Angstroms).
geometry : tuple (4 integers) or None
x, y, dx, dy values employed to set the window geometry.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
ntriplets_master : int
Number of triplets built from master table.
ratios_master_sorted : 1d numpy array, float
Array with values of the relative position of the central line
of each triplet, sorted in ascending order.
triplets_master_sorted_list : list of tuples
List with tuples of three numbers, corresponding to the three
line indices in the master table. The list is sorted to be in
correspondence with `ratios_master_sorted`.
"""
nlines_master = wv_master.size
# Check that the wavelengths in the master table are sorted
wv_previous = wv_master[0]
for i in range(1, nlines_master):
if wv_previous >= wv_master[i]:
raise ValueError('Wavelengths:\n--> ' +
str(wv_previous) + '\n--> ' + str(wv_master[i]) +
'\nin master table are duplicated or not sorted')
wv_previous = wv_master[i]
# Generate all the possible triplets with the numbers of the lines
# in the master table. Each triplet is defined as a tuple of three
# numbers corresponding to the three line indices in the master
# table. The collection of tuples is stored in an ordinary python
# list.
iter_comb_triplets = itertools.combinations(range(nlines_master), 3)
triplets_master_list = [val for val in iter_comb_triplets]
# Verify that the number of triplets coincides with the expected
# value.
ntriplets_master = len(triplets_master_list)
if ntriplets_master == comb(nlines_master, 3, exact=True):
if abs(debugplot) >= 10:
print('>>> Total number of lines in master table:',
nlines_master)
print('>>> Number of triplets in master table...:',
ntriplets_master)
else:
raise ValueError('Invalid number of combinations')
# For each triplet, compute the relative position of the central
# line.
ratios_master = np.zeros(ntriplets_master)
for index, value in enumerate(triplets_master_list):
i1, i2, i3 = value
delta1 = wv_master[i2] - wv_master[i1]
delta2 = wv_master[i3] - wv_master[i1]
ratios_master[index] = delta1 / delta2
# Compute the array of indices that index the above ratios in
# sorted order.
isort_ratios_master = np.argsort(ratios_master)
# Simultaneous sort of position ratios and triplets.
ratios_master_sorted = ratios_master[isort_ratios_master]
triplets_master_sorted_list = [triplets_master_list[i]
for i in isort_ratios_master]
if abs(debugplot) in [21, 22]:
# compute and plot histogram with position ratios
bins_in = np.linspace(0.0, 1.0, 41)
hist, bins_out = np.histogram(ratios_master, bins=bins_in)
#
from numina.array.display.matplotlib_qt import plt
fig = plt.figure()
ax = fig.add_subplot(111)
width_hist = 0.8*(bins_out[1]-bins_out[0])
center = (bins_out[:-1]+bins_out[1:])/2
ax.bar(center, hist, align='center', width=width_hist)
ax.set_xlabel('distance ratio in each triplet')
ax.set_ylabel('Number of triplets')
ax.set_title("Number of lines/triplets: " +
str(nlines_master) + "/" + str(ntriplets_master))
# set window geometry
set_window_geometry(geometry)
pause_debugplot(debugplot, pltshow=True, tight_layout=True)
return ntriplets_master, ratios_master_sorted, triplets_master_sorted_list | 0.00069 |
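A minimal usage sketch, assuming gen_triplets_master above is importable together with numpy; the wavelengths are illustrative.
import numpy as np

wv_master = np.array([4000.0, 4100.0, 4250.0, 4400.0, 4700.0])
ntriplets, ratios_sorted, triplets_sorted = gen_triplets_master(wv_master)
print(ntriplets)  # C(5, 3) = 10 triplets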
def send_wp_requests(self, wps=None):
'''send some more WP requests'''
if wps is None:
wps = self.missing_wps_to_request()
tnow = time.time()
for seq in wps:
#print("REQUESTING %u/%u (%u)" % (seq, self.wploader.expected_count, i))
self.wp_requested[seq] = tnow
self.master.waypoint_request_send(seq) | 0.010554 |
def x(self, x):
"""Project x as y"""
if x is None:
return None
if self._force_vertical:
return super(HorizontalView, self).x(x)
return super(HorizontalView, self).y(x) | 0.008969 |
def container_freeze(name, remote_addr=None,
cert=None, key=None, verify_cert=True):
'''
Freeze a container
name :
Name of the container to freeze
remote_addr :
        A URL to a remote server. You also have to give cert and key if
        you provide remote_addr and it is a TCP address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
        Whether to verify the cert. This is True by default,
        but in most cases you want to turn it off, as LXD
        normally uses self-signed certificates.
'''
container = container_get(
name, remote_addr, cert, key, verify_cert, _raw=True
)
container.freeze(wait=True)
return _pylxd_model_to_dict(container) | 0.000995 |
def extracting(self, *names, **kwargs):
"""Asserts that val is collection, then extracts the named properties or named zero-arg methods into a list (or list of tuples if multiple names are given)."""
if not isinstance(self.val, Iterable):
raise TypeError('val is not iterable')
if isinstance(self.val, str_types):
raise TypeError('val must not be string')
if len(names) == 0:
raise ValueError('one or more name args must be given')
def _extract(x, name):
if self._check_dict_like(x, check_values=False, return_as_bool=True):
if name in x:
return x[name]
else:
raise ValueError('item keys %s did not contain key <%s>' % (list(x.keys()), name))
elif isinstance(x, Iterable):
self._check_iterable(x, name='item')
return x[name]
elif hasattr(x, name):
attr = getattr(x, name)
if callable(attr):
try:
return attr()
except TypeError:
raise ValueError('val method <%s()> exists, but is not zero-arg method' % name)
else:
return attr
else:
raise ValueError('val does not have property or zero-arg method <%s>' % name)
def _filter(x):
if 'filter' in kwargs:
if isinstance(kwargs['filter'], str_types):
return bool(_extract(x, kwargs['filter']))
elif self._check_dict_like(kwargs['filter'], check_values=False, return_as_bool=True):
for k in kwargs['filter']:
if isinstance(k, str_types):
if _extract(x, k) != kwargs['filter'][k]:
return False
return True
elif callable(kwargs['filter']):
return kwargs['filter'](x)
return False
return True
def _sort(x):
if 'sort' in kwargs:
if isinstance(kwargs['sort'], str_types):
return _extract(x, kwargs['sort'])
elif isinstance(kwargs['sort'], Iterable):
items = []
for k in kwargs['sort']:
if isinstance(k, str_types):
items.append(_extract(x, k))
return tuple(items)
elif callable(kwargs['sort']):
return kwargs['sort'](x)
return 0
extracted = []
for i in sorted(self.val, key=lambda x: _sort(x)):
if _filter(i):
items = [_extract(i, name) for name in names]
extracted.append(tuple(items) if len(items) > 1 else items[0])
return AssertionBuilder(extracted, self.description, self.kind) | 0.002683 |
def delete_raw(self):
"""Delete the current entity.
        Make an HTTP DELETE call to ``self.path(which='self')``. Return the response.
:return: A ``requests.response`` object.
"""
return client.delete(
self.path(which='self'),
**self._server_config.get_client_kwargs()
) | 0.005988 |
def classes(equivalences):
"""Compute mapping from element to list of equivalent elements.
`equivalences` is an iterable of (x, y) tuples representing
equivalences x ~ y.
Returns an OrderedDict mapping each x to the list of elements
equivalent to x.
"""
node = OrderedDict()
def N(x):
if x in node:
return node[x]
n = node[x] = Node(x)
return n
for x, y in equivalences:
union(N(x), N(y))
eqclass = OrderedDict()
for x, n in node.iteritems():
x_ = find(n).element
if x_ not in eqclass:
eqclass[x_] = []
eqclass[x_].append(x)
eqclass[x] = eqclass[x_]
return eqclass | 0.002849 |
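A minimal usage sketch, assuming the union-find helpers (Node, union, find) used by classes above are available; the pairs are illustrative.
eq = classes([(1, 2), (2, 3), (4, 5)])
print(eq[1])  # [1, 2, 3] -- all elements equivalent to 1
print(eq[4])  # [4, 5]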
def reconnect(self):
"""
Invoked by the IOLoop timer if the connection is
closed. See the on_connection_closed method.
"""
# This is the old connection IOLoop instance, stop its ioloop
self._connection.ioloop.stop()
if not self._closing:
# Create a new connection
self._connection = self.connect()
# There is now a new connection, needs a new ioloop to run
self._connection.ioloop.start() | 0.004057 |
def get_anchor_point(self, anchor_name):
"""Return an anchor point of the node, if it exists."""
if anchor_name in self._possible_anchors:
return TikZNodeAnchor(self.handle, anchor_name)
else:
try:
anchor = int(anchor_name.split('_')[1])
except:
anchor = None
if anchor is not None:
return TikZNodeAnchor(self.handle, str(anchor))
raise ValueError('Invalid anchor name: "{}"'.format(anchor_name)) | 0.005693 |
def find_files(dir_or_filelist, pattern='*'):
"""
If given a path to a directory, finds files recursively,
e.g. all *.txt files in a given directory (or its subdirectories).
If given a list of files, yields all of the files that match the given
pattern.
adapted from: http://stackoverflow.com/a/2186673
"""
import os
import fnmatch
if isinstance(dir_or_filelist, str):
directory = dir_or_filelist
abspath = os.path.abspath(os.path.expanduser(directory))
for root, dirs, files in os.walk(abspath):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
else:
filelist = dir_or_filelist
for filepath in filelist:
if fnmatch.fnmatch(filepath, pattern):
yield filepath | 0.0011 |
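A minimal usage sketch, assuming find_files above is importable; the paths are illustrative.
# Recursively find text files under a directory
for path in find_files('~/projects/notes', pattern='*.txt'):
    print(path)

# Or filter an explicit list of file paths
for path in find_files(['a.txt', 'b.md', 'c.txt'], pattern='*.txt'):
    print(path)  # a.txt, c.txt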
def download_page(url, data=None):
'''Returns the response for the given url. The optional data argument is
passed directly to urlopen.'''
conn = urllib2.urlopen(url, data)
resp = conn.read()
conn.close()
return resp | 0.004167 |
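A minimal usage sketch (Python 2, matching the urllib2 call above); the URL is illustrative.
html = download_page('http://example.com/')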
def create_random(magf, magf_params, errf, errf_params,
timef=np.linspace, timef_params=None, size=DEFAULT_SIZE,
id=None, ds_name=DS_NAME, description=DESCRIPTION,
bands=BANDS, metadata=METADATA):
"""Generate a data with any given random function.
Parameters
----------
magf : callable
Function to generate the magnitudes.
magf_params : dict-like
Parameters to feed the `magf` function.
errf : callable
Function to generate the magnitudes.
errf_params : dict-like
Parameters to feed the `errf` function.
timef : callable, (default=numpy.linspace)
Function to generate the times.
timef_params : dict-like or None, (default={"start": 0., "stop": 1.})
Parameters to feed the `timef` callable.
size : int (default=10000)
        Number of observations of the light curves
id : object (default=None)
Id of the created data.
ds_name : str (default="feets-synthetic")
Name of the dataset
description : str (default="Lightcurve created with random numbers")
Description of the data
bands : tuple of strings (default=("B", "V"))
The bands to be created
metadata : dict-like or None (default=None)
The metadata of the created data
Returns
-------
data
        A Data object with random lightcurves.
Examples
--------
.. code-block:: pycon
>>> from numpy import random
>>> create_random(
... magf=random.normal, magf_params={"loc": 0, "scale": 1},
... errf=random.normal, errf_params={"loc": 0, "scale": 0.008})
Data(id=None, ds_name='feets-synthetic', bands=('B', 'V'))
"""
timef_params = (
{"start": 0., "stop": 1.}
if timef_params is None else
timef_params.copy())
timef_params.update(num=size)
magf_params = magf_params.copy()
magf_params.update(size=size)
errf_params = errf_params.copy()
errf_params.update(size=size)
data = {}
for band in bands:
data[band] = {
"time": timef(**timef_params),
"magnitude": magf(**magf_params),
"error": errf(**errf_params)}
return Data(
id=id, ds_name=ds_name, description=description,
bands=bands, metadata=metadata, data=data) | 0.000423 |
def download(date_array, tag, sat_id, data_path, user=None, password=None):
"""Routine to download Kp index data
Parameters
-----------
tag : (string or NoneType)
Denotes type of file to load. Accepted types are '1min' and '5min'.
(default=None)
sat_id : (string or NoneType)
Specifies the satellite ID for a constellation. Not used.
(default=None)
data_path : (string or NoneType)
Path to data directory. If None is specified, the value previously
set in Instrument.files.data_path is used. (default=None)
Returns
--------
Void : (NoneType)
data downloaded to disk, if available.
Notes
-----
Called by pysat. Not intended for direct use by user.
"""
import ftplib
from ftplib import FTP
import sys
ftp = FTP('ftp.gfz-potsdam.de') # connect to host, default port
ftp.login() # user anonymous, passwd anonymous@
ftp.cwd('/pub/home/obs/kp-ap/tab')
for date in date_array:
fname = 'kp{year:02d}{month:02d}.tab'
fname = fname.format(year=(date.year - date.year//100*100), month=date.month)
local_fname = fname
saved_fname = os.path.join(data_path,local_fname)
try:
print('Downloading file for '+date.strftime('%D'))
sys.stdout.flush()
ftp.retrbinary('RETR '+fname, open(saved_fname,'wb').write)
except ftplib.error_perm as exception:
# if exception[0][0:3] != '550':
if str(exception.args[0]).split(" ", 1)[0] != '550':
raise
else:
os.remove(saved_fname)
print('File not available for '+date.strftime('%D'))
ftp.close()
return | 0.003955 |
def set(key, value={}, reset=False, init=False):
"""
Set data
    :param key: A unique key to set, best to use __name__
:param value: dict - the value to save
:param reset: bool - If true, it will reset the value to the current one.
if False, it will just update the stored value with the current
one
    :param init: bool - If True, it will create the entry if it doesn't exist;
            next time invoked, it will not save anything
:return:
"""
if not isinstance(value, dict):
raise ValueError("App Data value must be a dict")
k = AppData.get_by_key(key, True)
if not k:
AppData.create(key=make_key(key), value=value)
else:
if init is False:
if reset is False:
nv = copy.deepcopy(value)
value = copy.deepcopy(k.value)
value.update(nv)
k.update(value=value) | 0.004283 |
def _run(self):
"""
This is the logic to run it all.
Heavily influenced by this post: http://nickdesaulniers.github.io/blog/2015/05/25/interpreter-compiler-jit/
:return:
"""
i = 0
try:
while i < len(self.program):
if self.program[i] == ">":
self._increment_pointer()
elif self.program[i] == "<":
self._decrement_pointer()
elif self.program[i] == "+":
self._increment_current_byte()
elif self.program[i] == "-":
self._decrement_current_byte()
elif self.program[i] == ".":
self._output_current_byte()
elif self.program[i] == ",":
self._read_byte()
elif self.program[i] == "[":
"""
if the byte at the data pointer is zero, then instead of moving the instruction pointer forward to
the next command, jump it forward to the command after the matching ] command
- Wikipedia
"""
if self.tape[self.pointer] is None or self.tape[self.pointer] == 0:
loop = 1
while loop > 0:
i += 1
current_instruction = self.program[i]
if current_instruction == "]":
loop -= 1
elif current_instruction == "[":
loop += 1
elif self.program[i] == "]":
"""
if the byte at the data pointer is nonzero, then instead of moving the instruction pointer
forward to the next command, jump it back to the command after the matching [ command.
- Wikipedia
"""
if self.tape[self.pointer] is not None and self.tape[self.pointer] > 0:
loop = 1
while loop > 0:
i -= 1
current_instruction = self.program[i]
if current_instruction == "[":
loop -= 1
elif current_instruction == "]":
loop += 1
i += 1
except PointerOutOfProgramRange as e:
print e.message
sys.exit(1)
except IndexError as e:
print "The program went out of bounds of its instructions"
sys.exit(1) | 0.003336 |
def process_tokens(self, tokens):
"""process tokens from the current module to search for module/block
level options
"""
control_pragmas = {"disable", "enable"}
for (tok_type, content, start, _, _) in tokens:
if tok_type != tokenize.COMMENT:
continue
match = OPTION_RGX.search(content)
if match is None:
continue
first_group = match.group(1)
if (
first_group.strip() == "disable-all"
or first_group.strip() == "skip-file"
):
if first_group.strip() == "disable-all":
self.add_message(
"deprecated-pragma",
line=start[0],
args=("disable-all", "skip-file"),
)
self.add_message("file-ignored", line=start[0])
self._ignore_file = True
return
try:
opt, value = first_group.split("=", 1)
except ValueError:
self.add_message(
"bad-inline-option", args=first_group.strip(), line=start[0]
)
continue
opt = opt.strip()
if opt in self._options_methods or opt in self._bw_options_methods:
try:
meth = self._options_methods[opt]
except KeyError:
meth = self._bw_options_methods[opt]
# found a "(dis|en)able-msg" pragma deprecated suppression
self.add_message(
"deprecated-pragma",
line=start[0],
args=(opt, opt.replace("-msg", "")),
)
for msgid in utils._splitstrip(value):
# Add the line where a control pragma was encountered.
if opt in control_pragmas:
self._pragma_lineno[msgid] = start[0]
try:
if (opt, msgid) == ("disable", "all"):
self.add_message(
"deprecated-pragma",
line=start[0],
args=("disable=all", "skip-file"),
)
self.add_message("file-ignored", line=start[0])
self._ignore_file = True
return
meth(msgid, "module", start[0])
except exceptions.UnknownMessageError:
self.add_message("bad-option-value", args=msgid, line=start[0])
else:
self.add_message("unrecognized-inline-option", args=opt, line=start[0]) | 0.001753 |
def main():
"""Run newer stuffs."""
logging.basicConfig(format=LOGGING_FORMAT)
log = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
add_debug(parser)
add_app(parser)
add_env(parser)
add_region(parser)
add_properties(parser)
parser.add_argument("--elb-subnet", help="Subnetnet type, e.g. external, internal", required=True)
args = parser.parse_args()
logging.getLogger(__package__.split('.')[0]).setLevel(args.debug)
log.debug('Parsed arguments: %s', args)
spinnakerapps = SpinnakerDns(
app=args.app, env=args.env, region=args.region, prop_path=args.properties, elb_subnet=args.elb_subnet)
spinnakerapps.create_elb_dns() | 0.004243 |
def get_regions_and_coremasks(self):
"""Generate a set of ordered paired region and core mask representations.
.. note::
The region and core masks are ordered such that ``(region << 32) |
core_mask`` is monotonically increasing. Consequently region and
core masks generated by this method can be used with SCAMP's
Flood-Fill Core Select (FFSC) method.
Yields
------
(region, core mask)
Pair of integers which represent a region of a SpiNNaker machine
and a core mask of selected cores within that region.
"""
region_code = ((self.base_x << 24) | (self.base_y << 16) |
(self.level << 16))
# Generate core masks for any regions which are selected at this level
# Create a mapping from subregion mask to core numbers
subregions_cores = collections.defaultdict(lambda: 0x0)
for core, subregions in enumerate(self.locally_selected):
if subregions: # If any subregions are selected on this level
subregions_cores[subregions] |= 1 << core
# Order the locally selected items and then yield them
for (subregions, coremask) in sorted(subregions_cores.items()):
yield (region_code | subregions), coremask
if self.level < 3:
# Iterate through the subregions and recurse, we iterate through in
# the order which ensures that anything we yield is in increasing
# order.
for i in (4*x + y for y in range(4) for x in range(4)):
subregion = self.subregions[i]
if subregion is not None:
for (region, coremask) in \
subregion.get_regions_and_coremasks():
yield (region, coremask) | 0.001609 |
def Page_deleteCookie(self, cookieName, url):
"""
Function path: Page.deleteCookie
Domain: Page
Method name: deleteCookie
WARNING: This function is marked 'Experimental'!
Parameters:
Required arguments:
'cookieName' (type: string) -> Name of the cookie to remove.
'url' (type: string) -> URL to match cooke domain and path.
No return value.
Description: Deletes browser cookie with given name, domain and path.
"""
assert isinstance(cookieName, (str,)
), "Argument 'cookieName' must be of type '['str']'. Received type: '%s'" % type(
cookieName)
assert isinstance(url, (str,)
), "Argument 'url' must be of type '['str']'. Received type: '%s'" % type(
url)
subdom_funcs = self.synchronous_command('Page.deleteCookie', cookieName=
cookieName, url=url)
return subdom_funcs | 0.046838 |
def t_to_min(x):
"""
Convert XML 'xs: duration type' to decimal minutes, e.g.:
t_to_min('PT1H2M30S') == 62.5
"""
g = re.match('PT(?:(.*)H)?(?:(.*)M)?(?:(.*)S)?', x).groups()
return sum(0 if g[i] is None else float(g[i]) * 60. ** (1 - i)
for i in range(3)) | 0.003401 |
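A few illustrative calls, assuming t_to_min above is importable.
t_to_min('PT1H2M30S')  # 62.5
t_to_min('PT90M')      # 90.0
t_to_min('PT45S')      # 0.75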
def partition_pairs(neurites, neurite_type=NeuriteType.all):
'''Partition pairs at bifurcation points of a collection of neurites.
    Partition pair is defined as the number of bifurcations at the two
daughters of the bifurcating section'''
return map(_bifurcationfunc.partition_pair,
iter_sections(neurites,
iterator_type=Tree.ibifurcation_point,
neurite_filter=is_type(neurite_type))) | 0.002119 |
def _horizontal_segment(old_offs, new_offs, spacing, diameter):
'''Vertices of a horizontal rectangle
'''
return np.array(((old_offs[0], old_offs[1] + spacing[1]),
(new_offs[0], old_offs[1] + spacing[1]),
(new_offs[0], old_offs[1] + spacing[1] - diameter),
(old_offs[0], old_offs[1] + spacing[1] - diameter))) | 0.002604 |
def mousePressEvent(self, event):
"""Begins edit on cell clicked, if allowed, and passes event to super class"""
index = self.indexAt(event.pos())
if index.isValid():
self.selectRow(index.row())
# selecting the row sets the current index to 0,0 for tab
# order to work correctly, we must set the current index
self.setCurrentIndex(index)
self.parameterChanged.emit(self.model().selection(index))
self.edit(index, QtGui.QAbstractItemView.DoubleClicked, event)
super(AutoParameterTableView, self).mousePressEvent(event) | 0.004831 |
def search(self, string, default=None):
"""Use re.search to find the result
:rtype: list"""
default = default if default else []
result = [item[1] for item in self.container if item[0].search(string)]
if self.ensure_mapping:
assert len(result) < 2, "%s matches more than one pattern: %s" % (
string,
result,
)
return result if result else default | 0.004435 |
def validate(self, value):
"""Validate field value."""
if self.required and value is None:
raise ValidationError("field is required")
if value is not None and self.choices is not None:
choices = [choice for choice, _ in self.choices]
if value not in choices:
raise ValidationError("field must be one of: {}".format(
", ".join(choices),
)) | 0.004435 |
def delete_port_postcommit(self, context):
"""Delete port non-database commit event."""
if self._is_supported_deviceowner(context.current):
vlan_segment, vxlan_segment = self._get_segments(
context.top_bound_segment,
context.bottom_bound_segment)
vni = self._port_action_vxlan(context.current, vxlan_segment,
self._delete_nve_member) if vxlan_segment else 0
self._port_action_vlan(context.current, vlan_segment,
self._delete_switch_entry, vni) | 0.00458 |
def get_filter_form(self, **kwargs):
"""
If there is a filter_form, initializes that
form with the contents of request.GET and
returns it.
"""
form = None
if self.filter_form:
form = self.filter_form(self.request.GET)
elif self.model and hasattr(self.model._meta, '_is_view'):
form = VersionFilterForm(self.request.GET)
return form | 0.004673 |
def _get_paths():
"""Generate paths to test data. Done in a function to protect namespace a bit."""
import os
base_path = os.path.dirname(os.path.abspath(__file__))
test_data_dir = os.path.join(base_path, 'tests', 'data', 'Plate01')
test_data_file = os.path.join(test_data_dir, 'RFP_Well_A3.fcs')
return test_data_dir, test_data_file | 0.005602 |
def all_points_core_distance(distance_matrix, d=2.0):
"""
Compute the all-points-core-distance for all the points of a cluster.
Parameters
----------
distance_matrix : array (cluster_size, cluster_size)
The pairwise distance matrix between points in the cluster.
d : integer
The dimension of the data set, which is used in the computation
of the all-point-core-distance as per the paper.
Returns
-------
core_distances : array (cluster_size,)
The all-points-core-distance of each point in the cluster
References
----------
Moulavi, D., Jaskowiak, P.A., Campello, R.J., Zimek, A. and Sander, J.,
2014. Density-Based Clustering Validation. In SDM (pp. 839-847).
"""
distance_matrix[distance_matrix != 0] = (1.0 / distance_matrix[
distance_matrix != 0]) ** d
result = distance_matrix.sum(axis=1)
result /= distance_matrix.shape[0] - 1
result **= (-1.0 / d)
return result | 0.001012 |
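A minimal usage sketch with a toy three-point cluster, assuming the function above is importable together with numpy; note the input matrix is modified in place, hence the copy.
import numpy as np

D = np.array([[0.0, 1.0, 2.0],
              [1.0, 0.0, 1.0],
              [2.0, 1.0, 0.0]])
core_distances = all_points_core_distance(D.copy(), d=2.0)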
def _octet_bits(o):
"""
Get the bits of an octet.
:param o: The octets.
:return: The bits as a list in LSB-to-MSB order.
:rtype: list
"""
if not isinstance(o, integer_types):
raise TypeError("o should be an int")
if not (0 <= o <= 255):
raise ValueError("o should be between 0 and 255 inclusive")
bits = [0] * 8
for i in range(8):
if 1 == o & 1:
bits[i] = 1
o = o >> 1
return bits | 0.002132 |
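A few illustrative calls, assuming _octet_bits above is importable (bits are returned LSB first).
_octet_bits(0)    # [0, 0, 0, 0, 0, 0, 0, 0]
_octet_bits(5)    # [1, 0, 1, 0, 0, 0, 0, 0]
_octet_bits(255)  # [1, 1, 1, 1, 1, 1, 1, 1]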
def validate_day(year, month, day):
"""Validate day."""
max_days = LONG_MONTH
if month == FEB:
max_days = FEB_LEAP_MONTH if ((year % 4 == 0) and (year % 100 != 0)) or (year % 400 == 0) else FEB_MONTH
elif month in MONTHS_30:
max_days = SHORT_MONTH
return 1 <= day <= max_days | 0.008824 |
def cube2map(data_cube, layout):
r"""Cube to Map
This method transforms the input data from a 3D cube to a 2D map with a
specified layout
Parameters
----------
data_cube : np.ndarray
Input data cube, 3D array of 2D images
Layout : tuple
2D layout of 2D images
Returns
-------
np.ndarray 2D map
Raises
------
ValueError
For invalid data dimensions
ValueError
For invalid layout
Examples
--------
>>> from modopt.base.transform import cube2map
>>> a = np.arange(16).reshape((4, 2, 2))
>>> cube2map(a, (2, 2))
array([[ 0, 1, 4, 5],
[ 2, 3, 6, 7],
[ 8, 9, 12, 13],
[10, 11, 14, 15]])
"""
if data_cube.ndim != 3:
raise ValueError('The input data must have 3 dimensions.')
if data_cube.shape[0] != np.prod(layout):
raise ValueError('The desired layout must match the number of input '
'data layers.')
return np.vstack([np.hstack(data_cube[slice(layout[1] * i, layout[1] *
(i + 1))]) for i in range(layout[0])]) | 0.000874 |
def get_email(self, token):
"""Fetches email address from email API endpoint"""
resp = requests.get(self.emails_url,
params={'access_token': token.token})
emails = resp.json().get('values', [])
email = ''
try:
email = emails[0].get('email')
primary_emails = [e for e in emails if e.get('is_primary', False)]
email = primary_emails[0].get('email')
except (IndexError, TypeError, KeyError):
return ''
finally:
return email | 0.003546 |
def availability_zone_list(request):
"""Utility method to retrieve a list of availability zones."""
try:
return api.nova.availability_zone_list(request)
except Exception:
exceptions.handle(request,
_('Unable to retrieve Nova availability zones.'))
return [] | 0.003135 |
def get_complete_slug(self, language=None, hideroot=True):
"""Return the complete slug of this page by concatenating
all parent's slugs.
:param language: the wanted slug language."""
if not language:
language = settings.PAGE_DEFAULT_LANGUAGE
if self._complete_slug and language in self._complete_slug:
return self._complete_slug[language]
self._complete_slug = cache.get(self.PAGE_URL_KEY % (self.id))
if self._complete_slug is None:
self._complete_slug = {}
elif language in self._complete_slug:
return self._complete_slug[language]
if hideroot and settings.PAGE_HIDE_ROOT_SLUG and self.is_first_root():
url = u''
else:
url = u'%s' % self.slug(language)
for ancestor in self.get_ancestors(ascending=True):
url = ancestor.slug(language) + u'/' + url
self._complete_slug[language] = url
cache.set(self.PAGE_URL_KEY % (self.id), self._complete_slug)
return url | 0.001889 |
def _registrant_publication(reg_pub, rules):
""" Separate the registration from the publication in a given
string.
:param reg_pub: A string of digits representing a registration
and publication.
:param rules: A list of RegistrantRules which designate where
to separate the values in the string.
:returns: A (registrant, publication) tuple of strings.
"""
for rule in rules:
if rule.min <= reg_pub <= rule.max:
reg_len = rule.registrant_length
break
else:
raise Exception('Registrant/Publication not found in registrant '
'rule list.')
registrant, publication = reg_pub[:reg_len], reg_pub[reg_len:]
return registrant, publication | 0.002451 |
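A minimal usage sketch; the RegistrantRule objects are sketched here as namedtuples purely for illustration, the real rule type may differ.
from collections import namedtuple

RegistrantRule = namedtuple('RegistrantRule', 'min max registrant_length')
rules = [RegistrantRule('0000000', '1999999', 2),
         RegistrantRule('2000000', '9999999', 3)]
registrant, publication = _registrant_publication('1234567', rules)
# registrant == '12', publication == '34567'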
def from_str(cls, timestr, shaked=False):
"""Use `dateutil` module to parse the give string
:param basestring timestr: string representing a date to parse
:param bool shaked: whether the input parameter been already
cleaned or not.
"""
orig = timestr
if not shaked:
timestr = cls.fix_timezone_separator(timestr)
try:
date = parser.parse(timestr)
except ValueError:
if not shaked:
shaked = False
for shaker in [
cls.fix_mispelled_day,
cls.remove_parenthesis_around_tz,
cls.remove_quotes_around_tz]:
new_timestr = shaker(timestr)
if new_timestr is not None:
timestr = new_timestr
shaked = True
if shaked:
try:
return cls.from_str(timestr, shaked=True)
except ValueError:
# raise ValueError below with proper message
pass
msg = u"Unknown string format: {!r}".format(orig)
raise ValueError(msg), None, sys.exc_info()[2]
else:
try:
return cls.from_datetime(date)
except ValueError:
new_str = cls.remove_timezone(orig)
if new_str is not None:
return cls.from_str(new_str)
else:
raise | 0.001277 |
def install_auth_basic_user_file(self, site=None):
"""
Installs users for basic httpd auth.
"""
r = self.local_renderer
hostname = self.current_hostname
target_sites = self.genv.available_sites_by_host.get(hostname, None)
for _site, site_data in self.iter_sites(site=site, setter=self.set_site_specifics):
if self.verbose:
print('~'*80, file=sys.stderr)
print('Site:', _site, file=sys.stderr)
print('env.apache_auth_basic:', r.env.auth_basic, file=sys.stderr)
# Only load site configurations that are allowed for this host.
if target_sites is not None:
assert isinstance(target_sites, (tuple, list))
if _site not in target_sites:
continue
if not r.env.auth_basic:
continue
assert r.env.auth_basic_users, 'No apache auth users specified.'
for username, password in r.env.auth_basic_users:
r.env.auth_basic_username = username
r.env.auth_basic_password = password
r.env.apache_site = _site
r.env.fn = r.format(r.env.auth_basic_authuserfile)
if self.files.exists(r.env.fn):
r.sudo('htpasswd -b {fn} {auth_basic_username} {auth_basic_password}')
else:
r.sudo('htpasswd -b -c {fn} {auth_basic_username} {auth_basic_password}') | 0.003981 |
def _download_urls(url_list, storage_folder, overwrite_existing,
meta_handler, access_cookie=None):
""" Save url from url_list to storage_folder
Parameters
----------
url_list: list of str
Valid url to download
storage_folder: str, valid path
Location to store the download, folder will be created if
not existing. If the file is already present in the folder,
the download depends on the setting in 'overwrite_existing'.
overwrite_existing: boolean, optional
If False, skip download of file already existing in
the storage folder (default). Set to True to replace
files.
meta_handler: instance of MRIOMetaData
Returns
-------
The meta_handler is passed back
"""
for url in url_list:
filename = os.path.basename(url)
if not overwrite_existing and filename in os.listdir(storage_folder):
continue
storage_file = os.path.join(storage_folder, filename)
# Using requests here - tried with aiohttp but was actually slower
# Also don’t use shutil.copyfileobj - corrupts zips from Eora
req = requests.post(url, stream=True, cookies=access_cookie)
with open(storage_file, 'wb') as lf:
for chunk in req.iter_content(1024*5):
lf.write(chunk)
meta_handler._add_fileio('Downloaded {} to {}'.format(url, filename))
meta_handler.save()
return meta_handler | 0.000671 |
def _search_show_id(self, series, year=None):
"""Search the show id from the `series` and `year`.
:param str series: series of the episode.
:param year: year of the series, if any.
:type year: int
:return: the show id, if found.
:rtype: int
"""
# addic7ed doesn't support search with quotes
series = series.replace('\'', ' ')
# build the params
series_year = '%s %d' % (series, year) if year is not None else series
params = {'search': series_year, 'Submit': 'Search'}
# make the search
logger.info('Searching show ids with %r', params)
r = self.session.get(self.server_url + 'search.php', params=params, timeout=10)
r.raise_for_status()
if r.status_code == 304:
raise TooManyRequests()
soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser'])
# get the suggestion
suggestion = soup.select('span.titulo > a[href^="/show/"]')
if not suggestion:
logger.warning('Show id not found: no suggestion')
return None
if not sanitize(suggestion[0].i.text.replace('\'', ' ')) == sanitize(series_year):
logger.warning('Show id not found: suggestion does not match')
return None
show_id = int(suggestion[0]['href'][6:])
logger.debug('Found show id %d', show_id)
return show_id | 0.002789 |