text (string, lengths 78–104k) | score (float64, 0–0.18)
---|---|
def get_file_language(filename, text=None):
"""Get file language from filename"""
ext = osp.splitext(filename)[1]
if ext.startswith('.'):
        ext = ext[1:]  # strip the leading dot from the extension
language = ext
if not ext:
if text is None:
text, _enc = encoding.read(filename)
for line in text.splitlines():
if not line.strip():
continue
if line.startswith('#!'):
shebang = line[2:]
if 'python' in shebang:
language = 'python'
else:
break
return language | 0.007825 |
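A minimal, self-contained sketch of the same extension-then-shebang detection; detect_language is a hypothetical stand-in written for illustration, not the function above.
import os.path as osp

def detect_language(filename, text):
    # Prefer the file extension; for extensionless files, scan for a python shebang.
    ext = osp.splitext(filename)[1].lstrip('.')
    if ext:
        return ext
    for line in text.splitlines():
        if not line.strip():
            continue                      # skip leading blank lines
        if line.startswith('#!') and 'python' in line[2:]:
            return 'python'
        break                             # first non-blank line decides
    return ''

print(detect_language('run', '#!/usr/bin/env python\nprint("hi")'))   # python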
def transform(self, X, y=None, sample_weight=None):
'''
Transforms the time series data with linear direct value interpolation
If y is a time series and passed, it will be transformed as well
The time dimension is removed from the data
Parameters
----------
X : array-like, shape [n_series, ...]
Time series data and (optionally) contextual data
y : array-like shape [n_series], default = None
target vector
sample_weight : array-like shape [n_series], default = None
sample weights
Returns
-------
X_new : array-like, shape [n_series, ]
transformed time series data
y_new : array-like, shape [n_series]
expanded target vector
sample_weight_new : array-like or None
None is returned if target is changed. Otherwise it is returned unchanged.
'''
check_ts_data(X, y)
xt, xc = get_ts_data_parts(X)
yt = y
swt = sample_weight
# number of data channels
d = xt[0][0].shape[0] - 2
# number of series
N = len(xt)
# retrieve the unique identifiers
s = np.unique(xt[0][:, 1])
x_new = []
t_lin = []
# transform x
for i in np.arange(N):
# splits series into a list for each variable
xs = [xt[i][xt[i][:, 1] == s[j]] for j in np.arange(len(s))]
            # find the latest first-sample time and the earliest last-sample time across identifiers
t_min = np.max([np.min(xs[j][:, 0]) for j in np.arange(len(s))])
t_max = np.min([np.max(xs[j][:, 0]) for j in np.arange(len(s))])
            # Generate a regular series of timestamps from t_min to t_max at intervals of sample_period
t_lin.append(np.arange(t_min, t_max, self.sample_period))
# Interpolate for the new regular sample times
if d == 1:
x_new.append(
np.column_stack(
[self._interp(t_lin[i], xs[j][:, 0], xs[j][:, 2], kind=self.kind)
for j in np.arange(len(s))]))
elif d > 1:
xd = []
for j in np.arange(len(s)):
# stack the columns of each variable by dimension d after interpolation to new regular sample times
temp = np.column_stack(
[(self._interp(t_lin[i], xs[j][:, 0], xs[j][:, k], kind=self.kind))
for k in np.arange(2, 2 + d)])
xd.append(temp)
# column stack each of the sensors s -- resulting in s*d columns
x_new.append(np.column_stack(xd))
# transform y
if yt is not None and len(np.atleast_1d(yt[0])) > 1:
# y is a time series
swt = None
if self.categorical_target is True:
yt = [self._interp(t_lin[i], xt[i][:, 0], yt[i], kind='nearest') for i in
np.arange(N)]
else:
yt = [self._interp(t_lin[i], xt[i][:, 0], yt[i], kind=self.kind) for i in
np.arange(N)]
else:
# y is static - leave y alone
pass
if xc is not None:
x_new = TS_Data(x_new, xc)
return x_new, yt, swt | 0.003252 |
def filter_errors(errors, select=None, ignore=None, **params):
"""Filter errors by select and ignore options.
    :yield: errors matching a ``select`` prefix, or matching no ``ignore`` prefix
"""
select = select or []
ignore = ignore or []
for e in errors:
for s in select:
if e.number.startswith(s):
yield e
break
else:
for s in ignore:
if e.number.startswith(s):
break
else:
yield e | 0.002088 |
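A small usage sketch of the select/ignore precedence, with a hypothetical namedtuple standing in for the real error objects and the generator above assumed to be in scope.
from collections import namedtuple

Error = namedtuple('Error', 'number text')   # hypothetical stand-in for the real error type
errors = [Error('E501', 'line too long'),
          Error('W605', 'invalid escape'),
          Error('E302', 'expected 2 blank lines')]
# E501 passes via select, W605 is dropped by ignore, E302 passes because nothing ignores it.
print([e.number for e in filter_errors(errors, select=['E5'], ignore=['W'])])   # ['E501', 'E302']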
def set(self, x, y):
"""Set a pixel of the :class:`Canvas` object.
:param x: x coordinate of the pixel
:param y: y coordinate of the pixel
"""
x = normalize(x)
y = normalize(y)
col, row = get_pos(x, y)
if type(self.chars[row][col]) != int:
return
self.chars[row][col] |= pixel_map[y % 4][x % 2] | 0.005249 |
def find_bookmark_file ():
"""Return the bookmark file of the Opera profile.
Returns absolute filename if found, or empty string if no bookmark file
could be found.
"""
try:
dirname = get_profile_dir()
if os.path.isdir(dirname):
for name in OperaBookmarkFiles:
fname = os.path.join(dirname, name)
if os.path.isfile(fname):
return fname
except Exception:
pass
return u"" | 0.004124 |
def arg_types(parsed: Parsed, errors: Errors) -> Tuple[Parsed, Errors]:
"""Add argument types to parsed function data structure
Args:
parsed: function and arg locations in BEL string
errors: error messages
Returns:
        (parsed, errors): parsed structure with argument types added, plus error messages
"""
func_pattern = re.compile(r"\s*[a-zA-Z]+\(")
nsarg_pattern = re.compile(r"^\s*([A-Z]+):(.*?)\s*$")
for span in parsed:
if parsed[span]["type"] != "Function" or "parens_span" not in parsed[span]:
continue
for i, arg in enumerate(parsed[span]["args"]):
nsarg_matches = nsarg_pattern.match(arg["arg"])
if func_pattern.match(arg["arg"]):
parsed[span]["args"][i].update({"type": "Function"})
elif nsarg_matches:
(start, end) = arg["span"]
ns = nsarg_matches.group(1)
ns_val = nsarg_matches.group(2)
ns_span = nsarg_matches.span(1)
ns_span = (ns_span[0] + start, ns_span[1] + start - 1)
ns_val_span = nsarg_matches.span(2)
ns_val_span = (ns_val_span[0] + start, ns_val_span[1] + start - 1)
parsed[span]["args"][i].update(
{
"type": "NSArg",
"ns": ns,
"ns_span": ns_span,
"ns_val": ns_val,
"ns_val_span": ns_val_span,
}
)
else:
parsed[span]["args"][i].update({"type": "StrArg"})
return parsed, errors | 0.001809 |
def get_artifact(suppress_status=False, nexus_url=sample_nexus_url, timeout_sec=600, overwrite=True,
username=None, password=None, **kwargs):
"""Retrieves an artifact from Nexus
:param suppress_status: (bool) Set to True to suppress printing download status
:param nexus_url: (str) URL of the Nexus Server
:param timeout_sec: (int) Number of seconds to wait before
timing out the artifact retrieval.
:param overwrite: (bool) True overwrites the file on the local system if it exists,
        False will log an INFO message and exit if the file already exists
:param username: (str) username for basic auth
:param password: (str) password for basic auth
:param kwargs:
group_id: (str) The artifact's Group ID in Nexus
artifact_id: (str) The artifact's Artifact ID in Nexus
packaging: (str) The artifact's packaging (e.g. war, zip)
version: (str) Version of the artifact to retrieve (e.g.
LATEST, 4.8.4, 4.9.0-SNAPSHOT)
destination_dir: (str) Full path to the destination directory
classifier: (str) The artifact's classifier (e.g. bin)
:return: None
:raises: TypeError, ValueError, OSError, RuntimeError
"""
log = logging.getLogger(mod_logger + '.get_artifact')
required_args = ['group_id', 'artifact_id', 'packaging', 'version', 'destination_dir']
if not isinstance(overwrite, bool):
        msg = 'overwrite arg must be a bool, found: {t}'.format(t=overwrite.__class__.__name__)
log.error(msg)
raise TypeError(msg)
if not isinstance(nexus_url, basestring):
msg = 'nexus_url arg must be a string, found: {t}'.format(t=nexus_url.__class__.__name__)
log.error(msg)
raise TypeError(msg)
log.debug('Using Nexus Server URL: {u}'.format(u=nexus_url))
# Ensure the required args are supplied, and that they are all strings
for required_arg in required_args:
try:
assert required_arg in kwargs
except AssertionError:
_, ex, trace = sys.exc_info()
            msg = 'A required arg was not supplied. Required args are: group_id, artifact_id, packaging, ' \
                  'version and destination_dir\n{e}'.format(e=str(ex))
log.error(msg)
raise ValueError(msg)
if not isinstance(kwargs[required_arg], basestring):
msg = 'Arg {a} should be a string'.format(a=required_arg)
log.error(msg)
raise TypeError(msg)
# Set variables to be used in the REST call
group_id = kwargs['group_id']
artifact_id = kwargs['artifact_id']
version = kwargs['version']
packaging = kwargs['packaging']
destination_dir = kwargs['destination_dir']
# Ensure the destination directory exists
if not os.path.isdir(destination_dir):
log.debug('Specified destination_dir not found on file system, creating: {d}'.format(d=destination_dir))
try:
mkdir_p(destination_dir)
except CommandError:
_, ex, trace = sys.exc_info()
msg = 'Unable to create destination directory: {d}\n{e}'.format(d=destination_dir, e=str(ex))
raise OSError(msg)
# Set the classifier if it was provided
classifier = None
if 'classifier' in kwargs:
if isinstance(kwargs['classifier'], basestring):
classifier = kwargs['classifier']
log.debug('Using classifier: {c}'.format(c=classifier))
else:
log.warn('Arg classifier provided but it was not an instance of basestring')
# Set the repo if it was provided
repo = None
if 'repo' in kwargs:
if isinstance(kwargs['repo'], basestring):
repo = kwargs['repo']
log.debug('Using repo: {r}'.format(r=repo))
# Determine the repo based on the version
if repo is None:
repo_test = version.lower().strip()
log.debug('Checking if the version {v} is a release or snapshot...'.format(v=repo_test))
# Determine the repo based on the version
if ('snapshot' in repo_test) or (repo_test == 'latest'):
repo = 'snapshots'
else:
repo = 'releases'
log.info('Based on the version {v}, determined repo: {r}'.format(v=version, r=repo))
# Construct the parameter string
params = 'g=' + group_id + '&a=' + artifact_id + '&v=' + version + '&r=' + repo + '&p=' + packaging
# Add the classifier if it was provided
if classifier is not None:
params = params + '&c=' + classifier
# Determine the auth based on username and password
basic_auth = None
if (username is not None) and (password is not None):
log.info('Using the provided username/password for basic authentication...')
basic_auth = HTTPBasicAuth(username, password)
# Build the query URL
query_url = nexus_url + '?' + params
# Set up for download attempts
retry_sec = 5
max_retries = 6
try_num = 1
download_success = False
dl_err = None
failed_attempt = False
# Start the retry loop
while try_num <= max_retries:
# Break the loop if the download was successful
if download_success:
break
log.info('Attempting to query Nexus for the Artifact using URL: {u}'.format(u=query_url))
try:
nexus_response = query_nexus(query_url=query_url, timeout_sec=timeout_sec, basic_auth=basic_auth)
except RuntimeError:
_, ex, trace = sys.exc_info()
msg = '{n}: There was a problem querying Nexus URL: {u}\n{e}'.format(
n=ex.__class__.__name__, u=query_url, e=str(ex))
log.error(msg)
raise RuntimeError, msg, trace
# Attempt to get the content-length
file_size = 0
try:
file_size = int(nexus_response.headers['Content-Length'])
except(KeyError, ValueError):
log.debug('Could not get Content-Length, suppressing download status...')
suppress_status = True
else:
log.info('Artifact file size: {s}'.format(s=file_size))
# Determine the full download file path
file_name = nexus_response.url.split('/')[-1]
download_file = os.path.join(destination_dir, file_name)
# Attempt to download the content from the response
log.info('Attempting to download content of size {s} from Nexus to file: {d}'.format(
s=file_size, d=download_file))
    # Remove the existing file if it exists and overwrite is set, or return if the file exists,
    # overwrite is not set, and there was no previously failed download attempt
if os.path.isfile(download_file) and overwrite:
log.debug('File already exists, removing: {d}'.format(d=download_file))
os.remove(download_file)
elif os.path.isfile(download_file) and not overwrite and not failed_attempt:
log.info('File already downloaded, and overwrite is set to False. The Artifact will '
'not be retrieved from Nexus: {f}. To overwrite the existing downloaded file, '
'set overwrite=True'.format(f=download_file))
return
# Attempt to download content
log.debug('Attempt # {n} of {m} to download content from the Nexus response'.format(n=try_num, m=max_retries))
chunk_size = 1024
file_size_dl = 0
try:
with open(download_file, 'wb') as f:
for chunk in nexus_response.iter_content(chunk_size=chunk_size):
if chunk:
f.write(chunk)
file_size_dl += len(chunk)
status = r"%10d [%3.2f%%]" % (file_size_dl, file_size_dl * 100. / file_size)
status += chr(8)*(len(status)+1)
if not suppress_status:
print(status),
except(requests.exceptions.ConnectionError, requests.exceptions.RequestException, OSError):
_, ex, trace = sys.exc_info()
dl_err = '{n}: There was an error reading content from the Nexus response. Downloaded ' \
                     'size: {s}.\n{e}'.format(n=ex.__class__.__name__, s=file_size_dl, e=str(ex))
failed_attempt = True
log.warn(dl_err)
if try_num < max_retries:
log.info('Retrying download in {t} sec...'.format(t=retry_sec))
time.sleep(retry_sec)
else:
log.info('File download of size {s} completed without error: {f}'.format(s=file_size_dl, f=download_file))
failed_attempt = False
download_success = True
try_num += 1
# Raise an exception if the download did not complete successfully
if not download_success:
msg = 'Unable to download file content from Nexus after {n} attempts'.format(n=max_retries)
if dl_err:
msg += '\n{m}'.format(m=dl_err)
log.error(msg)
raise RuntimeError(msg) | 0.003747 |
def lp(**kwargs):
"""
Create parameters for a new line profile dataset.
Generally, this will be used as an input to the kind argument in
:meth:`phoebe.frontend.bundle.Bundle.add_dataset`
:parameter **kwargs: defaults for the values of any of the parameters
:return: a :class:`phoebe.parameters.parameters.ParameterSet` of all newly
created :class:`phoebe.parameters.parameters.Parameter`s
"""
obs_params = []
#obs_params += [FloatParameter(qualifier='statweight', value = kwargs.get('statweight', 1.0), default_unit=u.dimensionless_unscaled, description='Statistical weight in overall fitting')]
syn_params, constraints = lp_syn(syn=False, **kwargs)
obs_params += syn_params.to_list()
#obs_params += rv_dep(**kwargs).to_list()
return ParameterSet(obs_params), constraints | 0.004785 |
def rotation_at_time(t, timestamps, rotation_sequence):
"""Get the gyro rotation at time t using SLERP.
Parameters
-----------
t : float
The query timestamp.
timestamps : array_like float
List of all timestamps
rotation_sequence : (4, N) ndarray
Rotation sequence as unit quaternions with scalar part as first element.
Returns
-----------
q : (4,) ndarray
Unit quaternion representing the rotation at time t.
"""
idx = np.flatnonzero(timestamps >= (t - 0.0001))[0]
t0 = timestamps[idx - 1]
t1 = timestamps[idx]
tau = (t - t0) / (t1 - t0)
q1 = rotation_sequence[:, idx - 1]
q2 = rotation_sequence[:, idx]
q = rotations.slerp(q1, q2, tau)
return q | 0.006757 |
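A self-contained NumPy sketch of the index and interpolation-factor computation above, with a hypothetical slerp standing in for rotations.slerp.
import numpy as np

def slerp(q1, q2, tau):
    # Spherical linear interpolation between unit quaternions (no shortest-path sign flip).
    dot = np.clip(np.dot(q1, q2), -1.0, 1.0)
    theta = np.arccos(dot)
    if np.isclose(theta, 0.0):
        return q1
    return (np.sin((1 - tau) * theta) * q1 + np.sin(tau * theta) * q2) / np.sin(theta)

timestamps = np.array([0.0, 0.1, 0.2])
t = 0.15                                                  # query falls between samples 1 and 2
idx = np.flatnonzero(timestamps >= (t - 0.0001))[0]
tau = (t - timestamps[idx - 1]) / (timestamps[idx] - timestamps[idx - 1])
q = slerp(np.array([1.0, 0, 0, 0]), np.array([0.0, 1, 0, 0]), tau)
print(idx, tau, np.round(q, 4))                           # 2 0.5 [0.7071 0.7071 0. 0.]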
def _instructions_changed(self, change):
"""Call when there is a change in the instructions."""
if change.adds():
for index, instruction in change.items():
if isinstance(instruction, dict):
in_row = self._parser.instruction_in_row(self, instruction)
self.instructions[index] = in_row
else:
instruction.transfer_to_row(self) | 0.004515 |
def DeserializeTX(buffer):
"""
Deserialize the stream into a Transaction object.
Args:
buffer (BytesIO): stream to deserialize the Transaction from.
Returns:
neo.Core.TX.Transaction:
"""
mstream = MemoryStream(buffer)
reader = BinaryReader(mstream)
tx = Transaction.DeserializeFrom(reader)
return tx | 0.005013 |
def shift(self, top=None, right=None, bottom=None, left=None):
"""
Shift the bounding box from one or more image sides, i.e. move it on the x/y-axis.
Parameters
----------
top : None or int, optional
Amount of pixels by which to shift the bounding box from the top.
right : None or int, optional
Amount of pixels by which to shift the bounding box from the right.
bottom : None or int, optional
Amount of pixels by which to shift the bounding box from the bottom.
left : None or int, optional
Amount of pixels by which to shift the bounding box from the left.
Returns
-------
result : imgaug.BoundingBox
Shifted bounding box.
"""
top = top if top is not None else 0
right = right if right is not None else 0
bottom = bottom if bottom is not None else 0
left = left if left is not None else 0
return self.copy(
x1=self.x1+left-right,
x2=self.x2+left-right,
y1=self.y1+top-bottom,
y2=self.y2+top-bottom
) | 0.003448 |
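The shift is plain coordinate arithmetic; a self-contained sketch with a hypothetical minimal box class in place of imgaug's BoundingBox.
from dataclasses import dataclass

@dataclass
class Box:   # hypothetical stand-in for imgaug.BoundingBox
    x1: float
    y1: float
    x2: float
    y2: float

    def shift(self, top=0, right=0, bottom=0, left=0):
        # Shifting "from the left" pushes the box right; "from the top" pushes it down.
        return Box(self.x1 + left - right, self.y1 + top - bottom,
                   self.x2 + left - right, self.y2 + top - bottom)

print(Box(10, 10, 20, 20).shift(left=5, top=2))   # Box(x1=15, y1=12, x2=25, y2=22)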
def floating_ip_disassociate(self, server_name, floating_ip):
'''
Disassociate a floating IP from server
.. versionadded:: 2016.3.0
'''
nt_ks = self.compute_conn
server_ = self.server_by_name(server_name)
server = nt_ks.servers.get(server_.__dict__['id'])
server.remove_floating_ip(floating_ip)
return self.floating_ip_list()[floating_ip] | 0.004866 |
def ask(self, number=None, xmean=None, sigma_fac=1,
gradf=None, args=()):
"""get new candidate solutions, sampled from a multi-variate
normal distribution and transformed to f-representation
(phenotype) to be evaluated.
Arguments
---------
`number`
number of returned solutions, by default the
population size ``popsize`` (AKA ``lambda``).
`xmean`
            distribution mean, in phenotype representation
`sigma_fac`
multiplier for internal sample width (standard
deviation)
`gradf`
gradient, ``len(gradf(x)) == len(x)``, if
``gradf is not None`` the third solution in the
returned list is "sampled" in supposedly Newton
direction ``dot(C, gradf(xmean, *args))``.
`args`
additional arguments passed to gradf
Return
------
A list of N-dimensional candidate solutions to be evaluated
Example
-------
>>> import cma
>>> es = cma.CMAEvolutionStrategy([0,0,0,0], 0.3)
>>> while not es.stop() and es.best.f > 1e-6: # my_desired_target_f_value
... X = es.ask() # get list of new solutions
... fit = [cma.fcts.rosen(x) for x in X] # call function rosen with each solution
... es.tell(X, fit) # feed values
:See: `ask_and_eval`, `ask_geno`, `tell`
"""
pop_geno = self.ask_geno(number, xmean, sigma_fac)
# N,lambda=20,200: overall CPU 7s vs 5s == 40% overhead, even without bounds!
# new data: 11.5s vs 9.5s == 20%
# TODO: check here, whether this is necessary?
# return [self.gp.pheno(x, copy=False, into_bounds=self.boundary_handler.repair) for x in pop] # probably fine
# return [Solution(self.gp.pheno(x, copy=False), copy=False) for x in pop] # here comes the memory leak, now solved
pop_pheno = [self.gp.pheno(x, copy=True, into_bounds=self.boundary_handler.repair) for x in pop_geno]
if gradf is not None:
# see Hansen (2011), Injecting external solutions into CMA-ES
if not self.gp.islinear:
_print_warning("""
using the gradient (option ``gradf``) with a non-linear
coordinate-wise transformation (option ``transformation``)
has never been tested.""")
# TODO: check this out
def grad_numerical_of_coordinate_map(x, map, epsilon=None):
"""map is a coordinate-wise independent map, return
the estimated diagonal of the Jacobian.
"""
eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon
return (map(x + eps) - map(x - eps)) / (2 * eps)
def grad_numerical_sym(x, func, epsilon=None):
"""return symmetric numerical gradient of func : R^n -> R.
"""
eps = 1e-8 * (1 + abs(x)) if epsilon is None else epsilon
grad = np.zeros(len(x))
ei = np.zeros(len(x)) # float is 1.6 times faster than int
for i in rglen(x):
ei[i] = eps[i]
grad[i] = (func(x + ei) - func(x - ei)) / (2*eps[i])
ei[i] = 0
return grad
try:
if self.last_iteration_with_gradient == self.countiter:
_print_warning('gradient is used several times in ' +
'this iteration', iteration=self.countiter)
self.last_iteration_with_gradient = self.countiter
except AttributeError:
pass
index_for_gradient = min((2, len(pop_pheno)-1))
xmean = self.mean if xmean is None else xmean
xpheno = self.gp.pheno(xmean, copy=True,
into_bounds=self.boundary_handler.repair)
grad_at_mean = gradf(xpheno, *args)
# lift gradient into geno-space
if not self.gp.isidentity or (self.boundary_handler is not None
and self.boundary_handler.has_bounds()):
boundary_repair = None
gradpen = 0
if isinstance(self.boundary_handler, BoundTransform):
boundary_repair = self.boundary_handler.repair
elif isinstance(self.boundary_handler, BoundPenalty):
fpenalty = lambda x: self.boundary_handler.__call__(
x, SolutionDict({tuple(x): {'geno': x}}), self.gp)
gradpen = grad_numerical_sym(
xmean, fpenalty)
elif self.boundary_handler is None or \
isinstance(self.boundary_handler, BoundNone):
pass
else:
raise NotImplementedError(
"unknown boundary handling method" +
str(self.boundary_handler) +
" when using gradf")
gradgp = grad_numerical_of_coordinate_map(
xmean,
lambda x: self.gp.pheno(x, copy=True,
into_bounds=boundary_repair))
grad_at_mean = grad_at_mean * gradgp + gradpen
            # TODO: frozen variables break the code (e.g. at grad of map)
if len(grad_at_mean) != self.N and self.opts['fixed_variables']:
NotImplementedError("""
gradient with fixed variables is not yet implemented""")
v = self.D * dot(self.B.T, self.sigma_vec * grad_at_mean)
# newton_direction = sv * B * D * D * B^T * sv * gradient = sv * B * D * v
# v = D^-1 * B^T * sv^-1 * newton_direction = D * B^T * sv * gradient
q = sum(v**2)
if q:
# Newton direction
pop_geno[index_for_gradient] = xmean - self.sigma \
* (self.N / q)**0.5 \
* (self.sigma_vec * dot(self.B, self.D * v))
else:
pop_geno[index_for_gradient] = xmean
_print_warning('gradient zero observed',
iteration=self.countiter)
pop_pheno[index_for_gradient] = self.gp.pheno(
pop_geno[index_for_gradient], copy=True,
into_bounds=self.boundary_handler.repair)
# insert solutions, this could also (better?) be done in self.gp.pheno
for i in rglen((pop_geno)):
self.sent_solutions.insert(pop_pheno[i], geno=pop_geno[i], iteration=self.countiter)
return pop_pheno | 0.002797 |
def validate_repo_url(self, repo: str):
""" Validates repo URL - if it's a valid git URL and if Arca can handle that type of repo URL
:raise ValueError: If the URL is not valid
"""
# that should match valid git repos
        if not isinstance(repo, str) or not re.match(r"^(https?|file)://[\w._\-/~]*(\.git)?/?$", repo):
raise ValueError(f"{repo} is not a valid http[s] or file:// git repository.") | 0.011312 |
def on_message(self, ws, reply, *args):
""" This method is called by the websocket connection on every
message that is received. If we receive a ``notice``, we
hand over post-processing and signalling of events to
``process_notice``.
"""
log.debug("Received message: %s" % str(reply))
data = {}
try:
data = json.loads(reply, strict=False)
except ValueError:
raise ValueError("API node returned invalid format. Expected JSON!")
if data.get("method") == "notice":
id = data["params"][0]
if id >= len(self.__events__):
log.critical("Received an id that is out of range\n\n" + str(data))
return
# This is a "general" object change notification
if id == self.__events__.index("on_object"):
# Let's see if a specific object has changed
for notice in data["params"][1]:
try:
if "id" in notice:
self.process_notice(notice)
else:
for obj in notice:
if "id" in obj:
self.process_notice(obj)
except Exception as e:
log.critical(
"Error in process_notice: {}\n\n{}".format(
                                    str(e), traceback.format_exc()
)
)
else:
try:
callbackname = self.__events__[id]
log.info("Patching through to call %s" % callbackname)
[getattr(self.events, callbackname)(x) for x in data["params"][1]]
except Exception as e:
log.critical(
"Error in {}: {}\n\n{}".format(
callbackname, str(e), traceback.format_exc()
)
) | 0.002405 |
def get_likes(self, likable_type, likable_id):
"""
likable_type: 'Comment', 'Press', 'Review', 'StartupRole', 'StatusUpdate'
likable_id: id of the object that the likes of it you are interested
"""
return _get_request(_LIKES.format(c_api=_C_API_BEGINNING,
api=_API_VERSION,
lt=likable_type,
li=likable_id,
at=self.access_token)) | 0.009398 |
def lookup_class(fully_qualified_name):
"""
Given its fully qualified name, finds the desired class and imports it.
Returns the Class object if found.
"""
module_name, class_name = str(fully_qualified_name).rsplit(".", 1)
module = __import__(module_name, globals(), locals(), [class_name], 0)
Class = getattr(module, class_name)
if not inspect.isclass(Class):
raise TypeError(
"%s is not of type class: %s" % (class_name, type(Class)))
return Class | 0.001976 |
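A usage sketch, assuming lookup_class above (and the inspect import its module relies on) is in scope.
cls = lookup_class('collections.OrderedDict')
print(cls().__class__.__name__)   # OrderedDict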
def Network_setCacheDisabled(self, cacheDisabled):
"""
Function path: Network.setCacheDisabled
Domain: Network
Method name: setCacheDisabled
Parameters:
Required arguments:
'cacheDisabled' (type: boolean) -> Cache disabled state.
No return value.
Description: Toggles ignoring cache for each request. If <code>true</code>, cache will not be used.
"""
assert isinstance(cacheDisabled, (bool,)
), "Argument 'cacheDisabled' must be of type '['bool']'. Received type: '%s'" % type(
cacheDisabled)
subdom_funcs = self.synchronous_command('Network.setCacheDisabled',
cacheDisabled=cacheDisabled)
return subdom_funcs | 0.043348 |
def get_child_value(parent, name, allow_missing=0):
""" return the value of the child element with name in the parent Element """
if not parent.hasElement(name):
if allow_missing:
return np.nan
else:
raise Exception('failed to find child element %s in parent' % name)
else:
return XmlHelper.as_value(parent.getElement(name)) | 0.009615 |
def evaluate(dataloader):
"""Evaluate network on the specified dataset"""
total_L = 0.0
total_sample_num = 0
total_correct_num = 0
start_log_interval_time = time.time()
print('Begin Testing...')
for i, ((data, valid_length), label) in enumerate(dataloader):
data = mx.nd.transpose(data.as_in_context(context))
valid_length = valid_length.as_in_context(context).astype(np.float32)
label = label.as_in_context(context)
output = net(data, valid_length)
L = loss(output, label)
pred = (output > 0.5).reshape((-1,))
total_L += L.sum().asscalar()
total_sample_num += label.shape[0]
total_correct_num += (pred == label).sum().asscalar()
if (i + 1) % args.log_interval == 0:
print('[Batch {}/{}] elapsed {:.2f} s'.format(
i + 1, len(dataloader), time.time() - start_log_interval_time))
start_log_interval_time = time.time()
avg_L = total_L / float(total_sample_num)
acc = total_correct_num / float(total_sample_num)
return avg_L, acc | 0.000922 |
def explain_tabular(self, trainset, labels, instance, num_features=5, kernel_width=3):
"""Explain categorical and numeric features for a prediction.
        It analyzes the prediction with LIME and returns a report of the most impactful tabular
features contributing to certain labels.
Args:
trainset: a DataFrame representing the training features that LIME can use to decide
value distributions.
labels: a list of labels to explain.
instance: the prediction instance. It needs to conform to model's input. Can be a csv
line string, or a dict.
num_features: maximum number of features to show.
kernel_width: Passed to LIME LimeTabularExplainer directly.
Returns:
A LIME's lime.explanation.Explanation.
"""
from lime.lime_tabular import LimeTabularExplainer
if isinstance(instance, six.string_types):
instance = next(csv.DictReader([instance], fieldnames=self._headers))
categories = self._get_unique_categories(trainset)
np_trainset = self._preprocess_data_for_tabular_explain(trainset, categories)
predict_fn = self._make_tabular_predict_fn(labels, instance, categories)
prediction_df = pd.DataFrame([instance])
prediction_instance = self._preprocess_data_for_tabular_explain(prediction_df, categories)
explainer = LimeTabularExplainer(
np_trainset,
feature_names=(self._categorical_columns + self._numeric_columns),
class_names=labels,
categorical_features=range(len(categories)),
categorical_names={i: v for i, v in enumerate(categories)},
kernel_width=kernel_width)
exp = explainer.explain_instance(
prediction_instance[0],
predict_fn,
num_features=num_features,
labels=range(len(labels)))
return exp | 0.005128 |
def frombits(cls, bits='0'):
"""Create a set from binary string."""
if len(bits) > cls._len:
raise ValueError('too many bits %r' % (bits,))
return cls.fromint(bits[::-1], 2) | 0.009569 |
def _req(self, path, method='get', json=True, assert_status=200, **kw):
"""Make a request to the API of an cdstar instance.
:param path: HTTP path.
:param method: HTTP method.
:param json: Flag signalling whether the response should be treated as JSON.
:param assert_status: Expected HTTP response status of a successful request.
:param kw: Additional keyword parameters will be handed through to the \
appropriate function of the requests library.
:return: The return value of the function of the requests library or a decoded \
JSON object/array.
"""
method = getattr(self.session, method.lower())
res = method(self.url(path), **kw)
status_code = res.status_code
if json:
try:
res = res.json()
except ValueError:
log.error(res.text[:1000])
raise
if assert_status:
if not isinstance(assert_status, (list, tuple)):
assert_status = [assert_status]
if status_code not in assert_status:
log.error(
'got HTTP %s, expected HTTP %s' % (status_code, assert_status))
log.error(res.text[:1000] if hasattr(res, 'text') else res)
raise CdstarError('Unexpected HTTP status code', res, status_code)
return res | 0.00569 |
def _prepend_row_index(rows, index):
"""Add a left-most index column."""
if index is None or index is False:
return rows
if len(index) != len(rows):
print('index=', index)
print('rows=', rows)
raise ValueError('index must be as long as the number of data rows')
rows = [[v] + list(row) for v, row in zip(index, rows)]
return rows | 0.002625 |
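A quick illustration of the prepended index column, assuming the helper above is in scope.
rows = [[10, 20], [30, 40]]
print(_prepend_row_index(rows, ['a', 'b']))   # [['a', 10, 20], ['b', 30, 40]]
print(_prepend_row_index(rows, None))         # unchanged: [[10, 20], [30, 40]]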
def _parse(string):
"""
Parses given XML document content.
Returns the resulting root XML element node or None if the given XML
content is empty.
@param string: XML document content to parse.
@type string: I{bytes}
@return: Resulting root XML element node or None.
@rtype: L{Element}|I{None}
"""
if string:
return suds.sax.parser.Parser().parse(string=string) | 0.002439 |
def get_ipv6_neighbors_table(self):
"""
Get IPv6 neighbors table information.
Return a list of dictionaries having the following set of keys:
* interface (string)
* mac (string)
* ip (string)
* age (float) in seconds
* state (string)
For example::
[
{
'interface' : 'MgmtEth0/RSP0/CPU0/0',
'mac' : '5c:5e:ab:da:3c:f0',
'ip' : '2001:db8:1:1::1',
'age' : 1454496274.84,
'state' : 'REACH'
},
{
'interface': 'MgmtEth0/RSP0/CPU0/0',
'mac' : '66:0e:94:96:e0:ff',
'ip' : '2001:db8:1:1::2',
'age' : 1435641582.49,
'state' : 'STALE'
}
]
"""
ipv6_neighbors_table = []
command = "show ipv6 neighbors"
output = self._send_command(command)
ipv6_neighbors = ""
fields = re.split(r"^IPv6\s+Address.*Interface$", output, flags=(re.M | re.I))
if len(fields) == 2:
ipv6_neighbors = fields[1].strip()
for entry in ipv6_neighbors.splitlines():
# typical format of an entry in the IOS IPv6 neighbors table:
# 2002:FFFF:233::1 0 2894.0fed.be30 REACH Fa3/1/2.233
ip, age, mac, state, interface = entry.split()
mac = "" if mac == "-" else napalm.base.helpers.mac(mac)
ip = napalm.base.helpers.ip(ip)
ipv6_neighbors_table.append(
{
"interface": interface,
"mac": mac,
"ip": ip,
"age": float(age),
"state": state,
}
)
return ipv6_neighbors_table | 0.00154 |
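For reference, a self-contained sketch of how one neighbor line splits into the fields collected above; the napalm helper normalization of mac and ip is skipped here.
entry = "2002:FFFF:233::1 0 2894.0fed.be30 REACH Fa3/1/2.233"
ip, age, mac, state, interface = entry.split()
print({'interface': interface, 'mac': mac, 'ip': ip, 'age': float(age), 'state': state})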
def zip_and_upload(app_dir, bucket, key, session=None):
"""Zip built static site and upload to S3."""
if session:
s3_client = session.client('s3')
else:
s3_client = boto3.client('s3')
transfer = S3Transfer(s3_client)
filedes, temp_file = tempfile.mkstemp()
os.close(filedes)
LOGGER.info("staticsite: archiving app at %s to s3://%s/%s",
app_dir, bucket, key)
with zipfile.ZipFile(temp_file, 'w', zipfile.ZIP_DEFLATED) as filehandle:
with change_dir(app_dir):
for dirname, _subdirs, files in os.walk('./'):
if dirname != './':
filehandle.write(dirname)
for filename in files:
filehandle.write(os.path.join(dirname, filename))
transfer.upload_file(temp_file, bucket, key)
os.remove(temp_file) | 0.001171 |
def metadata(self, name):
"""Return value and metadata associated with the named value
Parameters
----------
name : str
name to retrieve. If the name contains '.'s it will be retrieved recursively
Raises
------
KeyError
if name is not defined in the ConfigTree
"""
if name in self._children:
return self._children[name].metadata()
else:
head, _, tail = name.partition('.')
if head in self._children:
return self._children[head].metadata(key=tail)
else:
raise KeyError(name) | 0.004552 |
def merge(self, ds, inplace=False, axis=1):
"""Merge two datasets.
Parameters
----------
axis : {0,1}
ds : `Dataset`
inplace : bool, default False
Returns
-------
`Dataset`
"""
if not isinstance(ds, Dataset):
raise ValueError('Expected `Dataset`, got %s.' % ds)
X_train = concat(ds.X_train, self.X_train, axis=axis)
y_train = concat(ds.y_train, self.y_train, axis=axis)
if ds.X_test is not None:
X_test = concat(ds.X_test, self.X_test, axis=axis)
else:
X_test = None
if ds.y_test is not None:
y_test = concat(ds.y_test, self.y_test, axis=axis)
else:
y_test = None
if inplace:
self._X_train = X_train
self._y_train = y_train
if X_test is not None:
self._X_test = X_test
if y_test is not None:
self._y_test = y_test
return None
return Dataset(X_train, y_train, X_test, y_test) | 0.001837 |
def create_endpoint(service_name: str, *,
ipv4: OptStr = None,
ipv6: OptStr = None,
port: OptInt = None) -> Endpoint:
"""Factory function to create Endpoint object.
"""
return Endpoint(service_name, ipv4, ipv6, port) | 0.003472 |
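A minimal usage sketch, assuming the factory above and its Endpoint record type are importable; the service name and address are hypothetical.
ep = create_endpoint('frontend-api', ipv4='127.0.0.1', port=8080)
# ipv6 is left as None; Endpoint is simply a record of (service_name, ipv4, ipv6, port)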
def _load_key(key_object):
"""
Common code to load public and private keys into PublicKey and PrivateKey
objects
:param key_object:
An asn1crypto.keys.PublicKeyInfo or asn1crypto.keys.PrivateKeyInfo
object
:raises:
ValueError - when any of the parameters contain an invalid value
TypeError - when any of the parameters are of the wrong type
oscrypto.errors.AsymmetricKeyError - when the key is incompatible with the OS crypto library
OSError - when an error is returned by the OS crypto library
:return:
A PublicKey or PrivateKey object
"""
if key_object.algorithm == 'ec':
curve_type, details = key_object.curve
if curve_type != 'named':
raise AsymmetricKeyError('OS X only supports EC keys using named curves')
if details not in set(['secp256r1', 'secp384r1', 'secp521r1']):
raise AsymmetricKeyError(pretty_message(
'''
OS X only supports EC keys using the named curves secp256r1,
secp384r1 and secp521r1
'''
))
elif key_object.algorithm == 'dsa' and key_object.hash_algo == 'sha2':
raise AsymmetricKeyError(pretty_message(
'''
OS X only supports DSA keys based on SHA1 (2048 bits or less) - this
key is based on SHA2 and is %s bits
''',
key_object.bit_size
))
elif key_object.algorithm == 'dsa' and key_object.hash_algo is None:
raise IncompleteAsymmetricKeyError(pretty_message(
'''
The DSA key does not contain the necessary p, q and g parameters
and can not be used
'''
))
if isinstance(key_object, keys.PublicKeyInfo):
source = key_object.dump()
key_class = Security.kSecAttrKeyClassPublic
else:
source = key_object.unwrap().dump()
key_class = Security.kSecAttrKeyClassPrivate
cf_source = None
cf_dict = None
cf_output = None
try:
cf_source = CFHelpers.cf_data_from_bytes(source)
key_type = {
'dsa': Security.kSecAttrKeyTypeDSA,
'ec': Security.kSecAttrKeyTypeECDSA,
'rsa': Security.kSecAttrKeyTypeRSA,
}[key_object.algorithm]
cf_dict = CFHelpers.cf_dictionary_from_pairs([
(Security.kSecAttrKeyType, key_type),
(Security.kSecAttrKeyClass, key_class),
(Security.kSecAttrCanSign, CoreFoundation.kCFBooleanTrue),
(Security.kSecAttrCanVerify, CoreFoundation.kCFBooleanTrue),
])
error_pointer = new(CoreFoundation, 'CFErrorRef *')
sec_key_ref = Security.SecKeyCreateFromData(cf_dict, cf_source, error_pointer)
handle_cf_error(error_pointer)
if key_class == Security.kSecAttrKeyClassPublic:
return PublicKey(sec_key_ref, key_object)
if key_class == Security.kSecAttrKeyClassPrivate:
return PrivateKey(sec_key_ref, key_object)
finally:
if cf_source:
CoreFoundation.CFRelease(cf_source)
if cf_dict:
CoreFoundation.CFRelease(cf_dict)
if cf_output:
CoreFoundation.CFRelease(cf_output) | 0.001533 |
def load(js_url='', css_url='', version='5.2.0'):
"""Load Dropzone resources with given version and init dropzone configuration.
.. versionchanged:: 1.4.3
Added ``js_url`` and ``css_url`` parameters to pass custom resource URL.
.. versionchanged:: 1.4.4
        This method was deprecated because it was inflexible. Now it's divided into three methods:
1. Use ``load_css()`` to load css resources.
2. Use ``load_js()`` to load js resources.
3. Use ``config()`` to configure Dropzone.
:param js_url: The JavaScript url for Dropzone.js.
:param css_url: The CSS url for Dropzone.js.
:param version: The version of Dropzone.js.
"""
warnings.warn('The method will be removed in 2.0, see docs for more details.')
js_filename = 'dropzone.min.js'
css_filename = 'dropzone.min.css'
upload_multiple = current_app.config['DROPZONE_UPLOAD_MULTIPLE']
parallel_uploads = current_app.config['DROPZONE_PARALLEL_UPLOADS']
if upload_multiple in [True, 'true', 'True', 1]:
upload_multiple = 'true'
else:
upload_multiple = 'false'
serve_local = current_app.config['DROPZONE_SERVE_LOCAL']
size = current_app.config['DROPZONE_MAX_FILE_SIZE']
param = current_app.config['DROPZONE_INPUT_NAME']
redirect_view = current_app.config['DROPZONE_REDIRECT_VIEW']
if redirect_view is not None:
redirect_js = '''
this.on("queuecomplete", function(file) {
// Called when all files in the queue finish uploading.
window.location = "%s";
});''' % url_for(redirect_view)
else:
redirect_js = ''
if not current_app.config['DROPZONE_ALLOWED_FILE_CUSTOM']:
allowed_type = allowed_file_extensions[
current_app.config['DROPZONE_ALLOWED_FILE_TYPE']]
else:
allowed_type = current_app.config['DROPZONE_ALLOWED_FILE_TYPE']
max_files = current_app.config['DROPZONE_MAX_FILES']
default_message = current_app.config['DROPZONE_DEFAULT_MESSAGE']
invalid_file_type = current_app.config['DROPZONE_INVALID_FILE_TYPE']
file_too_big = current_app.config['DROPZONE_FILE_TOO_BIG']
server_error = current_app.config['DROPZONE_SERVER_ERROR']
browser_unsupported = current_app.config['DROPZONE_BROWSER_UNSUPPORTED']
max_files_exceeded = current_app.config['DROPZONE_MAX_FILE_EXCEED']
cancelUpload = current_app.config['DROPZONE_CANCEL_UPLOAD']
removeFile = current_app.config['DROPZONE_REMOVE_FILE']
cancelConfirmation = current_app.config['DROPZONE_CANCEL_CONFIRMATION']
uploadCanceled = current_app.config['DROPZONE_UPLOAD_CANCELED']
timeout = current_app.config['DROPZONE_TIMEOUT']
if timeout:
timeout_js = 'timeout: %d,' % timeout
else:
timeout_js = ''
if serve_local:
js = '<script src="%s"></script>\n' % url_for('dropzone.static', filename=js_filename)
css = '<link rel="stylesheet" href="%s" type="text/css">\n' % \
url_for('dropzone.static', filename=css_filename)
else:
js = '<script src="https://cdn.jsdelivr.net/npm/dropzone@%s/dist/%s"></script>\n' % (version, js_filename)
css = '<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/dropzone@%s/dist/min/%s"' \
' type="text/css">\n' % (version, css_filename)
if js_url:
js = '<script src="%s"></script>\n' % js_url
if css_url:
css = '<link rel="stylesheet" href="%s" type="text/css">\n' % css_url
return Markup('''
%s%s<script>
Dropzone.options.myDropzone = {
init: function() {%s},
uploadMultiple: %s,
parallelUploads: %d,
paramName: "%s", // The name that will be used to transfer the file
maxFilesize: %d, // MB
acceptedFiles: "%s",
maxFiles: %s,
dictDefaultMessage: "%s", // message display on drop area
dictFallbackMessage: "%s",
dictInvalidFileType: "%s",
dictFileTooBig: "%s",
dictResponseError: "%s",
dictMaxFilesExceeded: "%s",
dictCancelUpload: "%s",
dictRemoveFile: "%s",
dictCancelUploadConfirmation: "%s",
dictUploadCanceled: "%s",
%s // timeout
};
</script>
''' % (css, js, redirect_js, upload_multiple, parallel_uploads, param, size, allowed_type, max_files,
default_message, browser_unsupported, invalid_file_type, file_too_big, server_error,
max_files_exceeded, cancelUpload, removeFile, cancelConfirmation, uploadCanceled, timeout_js)) | 0.003215 |
def list_data(self):
"""
Return all the data stored in the autocomplete index. If the data was
stored as serialized JSON, then it will be de-serialized before being
returned.
:rtype: list
"""
fn = (lambda v: json.loads(decode(v))) if self._use_json else decode
return map(fn, self._data.values()) | 0.00554 |
def prune(containers=False, networks=False, images=False,
build=False, volumes=False, system=None, **filters):
'''
.. versionadded:: 2019.2.0
Prune Docker's various subsystems
.. note::
This requires docker-py version 2.1.0 or later.
containers : False
If ``True``, prunes stopped containers (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/container_prune/#filtering
images : False
If ``True``, prunes unused images (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/image_prune/#filtering
networks : False
        If ``True``, prunes unreferenced networks (documentation__)
        .. __: https://docs.docker.com/engine/reference/commandline/network_prune/#filtering
build : False
If ``True``, clears the builder cache
.. note::
Only supported in Docker 17.07.x and newer. Additionally, filters
do not apply to this argument.
volumes : False
If ``True``, prunes unreferenced volumes (documentation__)
.. __: https://docs.docker.com/engine/reference/commandline/volume_prune/
system
If ``True``, prunes containers, images, networks, and builder cache.
Assumed to be ``True`` if none of ``containers``, ``images``,
``networks``, or ``build`` are set to ``True``.
.. note::
``volumes=True`` must still be used to prune volumes
filters
- ``dangling=True`` (images only) - remove only dangling images
- ``until=<timestamp>`` - only remove objects created before given
timestamp. Not applicable to volumes. See the documentation links
above for examples of valid time expressions.
- ``label`` - only remove objects matching the label expression. Valid
expressions include ``labelname`` or ``labelname=value``.
CLI Examples:
.. code-block:: bash
salt myminion docker.prune system=True
salt myminion docker.prune system=True until=12h
salt myminion docker.prune images=True dangling=True
salt myminion docker.prune images=True label=foo,bar=baz
'''
if system is None and not any((containers, images, networks, build)):
system = True
filters = __utils__['args.clean_kwargs'](**filters)
for fname in list(filters):
if not isinstance(filters[fname], bool):
# support comma-separated values
filters[fname] = salt.utils.args.split_input(filters[fname])
ret = {}
if system or containers:
ret['containers'] = _client_wrapper(
'prune_containers', filters=filters)
if system or images:
ret['images'] = _client_wrapper('prune_images', filters=filters)
if system or networks:
ret['networks'] = _client_wrapper('prune_networks', filters=filters)
if system or build:
try:
# Doesn't exist currently in docker-py as of 3.0.1
ret['build'] = _client_wrapper('prune_build', filters=filters)
except SaltInvocationError:
# It's not in docker-py yet, POST directly to the API endpoint
ret['build'] = _client_wrapper(
'_result',
_client_wrapper(
'_post',
_client_wrapper('_url', '/build/prune')
),
True
)
if volumes:
ret['volumes'] = _client_wrapper('prune_volumes', filters=filters)
return ret | 0.001411 |
def is_redirecting(path):
'''Returns True if path contains a .cpenv file'''
candidate = unipath(path, '.cpenv')
return os.path.exists(candidate) and os.path.isfile(candidate) | 0.005348 |
def sign(self, payload, signing_key_or_keys):
"""
Create a JWT with one or more keys.
Returns a compact-form serialized JWT if there is only one key to sign with
Returns a JSON-structured serialized JWT if there are multiple keys to sign with
"""
if isinstance(signing_key_or_keys, list):
return self._sign_multi(payload, signing_key_or_keys)
else:
return self._sign_single(payload, signing_key_or_keys) | 0.008264 |
def create_account(self, email_address, password=None, client_id=None, client_secret=None):
''' Create a new account.
If the account is created via an app, then Account.oauth will contain the
OAuth data that can be used to execute actions on behalf of the newly created account.
Args:
email_address (str): Email address of the new account to create
password (str): [DEPRECATED] This parameter will be ignored
client_id (str, optional): Client id of the app to use to create this account
client_secret (str, optional): Secret of the app to use to create this account
Returns:
The new Account object
'''
request = self._get_request()
params = {
'email_address': email_address
}
if client_id:
params['client_id'] = client_id
params['client_secret'] = client_secret
response = request.post(self.ACCOUNT_CREATE_URL, params)
if 'oauth_data' in response:
response["account"]["oauth"] = response['oauth_data']
return response | 0.007673 |
def load_candidate(self, filename=None, config=None):
"""
Loads a candidate configuration on the device.
In case the load fails at any point, will automatically rollback to last working configuration.
:param filename: Specifies the name of the file with the configuration content.
:param config: New configuration to be uploaded on the device.
:raise pyPluribus.exceptions.ConfigLoadError: When the configuration could not be uploaded to the device.
"""
configuration = ''
if filename is None:
configuration = config
else:
with open(filename) as config_file:
configuration = config_file.read()
return self._upload_config_content(configuration) | 0.006452 |
def load_params_from_file(self, fname: str, allow_missing_params: bool = False):
"""
Loads parameters from a file and sets the parameters of the underlying module and this model instance.
:param fname: File name to load parameters from.
:param allow_missing_params: If set, the given parameters are allowed to be a subset of the Module parameters.
"""
super().load_params_from_file(fname) # sets self.params & self.aux_params
self.module.set_params(arg_params=self.params,
aux_params=self.aux_params,
allow_missing=allow_missing_params) | 0.009146 |
async def send_message(
self,
message: Message,
sender: str = None,
recipients: RecipientsType = None,
mail_options: Iterable[str] = None,
rcpt_options: Iterable[str] = None,
timeout: DefaultNumType = _default,
) -> SendmailResponseType:
r"""
Sends an :class:`email.message.Message` object.
Arguments are as for :meth:`.sendmail`, except that message is an
:class:`email.message.Message` object. If sender is None or recipients
is None, these arguments are taken from the headers of the Message as
described in RFC 2822. Regardless of the values of sender and
recipients, any Bcc field (or Resent-Bcc field, when the Message is a
resent) of the Message object will not be transmitted. The Message
object is then serialized using :class:`email.generator.Generator` and
:meth:`.sendmail` is called to transmit the message.
'Resent-Date' is a mandatory field if the Message is resent (RFC 2822
Section 3.6.6). In such a case, we use the 'Resent-\*' fields.
However, if there is more than one 'Resent-' block there's no way to
unambiguously determine which one is the most recent in all cases,
so rather than guess we raise a ``ValueError`` in that case.
:raises ValueError: on more than one Resent header block
:raises SMTPRecipientsRefused: delivery to all recipients failed
:raises SMTPResponseException: on invalid response
"""
header_sender, header_recipients, flat_message = flatten_message(message)
if sender is None:
sender = header_sender
if recipients is None:
recipients = header_recipients
result = await self.sendmail(sender, recipients, flat_message, timeout=timeout)
return result | 0.002665 |
def highlight_block(self, text, block):
"""
Highlights the block using a pygments lexer.
        :param text: text of the block to highlight
:param block: block to highlight
"""
if self.color_scheme.name != self._pygments_style:
self._pygments_style = self.color_scheme.name
self._update_style()
original_text = text
if self.editor and self._lexer and self.enabled:
if block.blockNumber():
prev_data = self._prev_block.userData()
if prev_data:
if hasattr(prev_data, "syntax_stack"):
self._lexer._saved_state_stack = prev_data.syntax_stack
elif hasattr(self._lexer, '_saved_state_stack'):
del self._lexer._saved_state_stack
# Lex the text using Pygments
index = 0
usd = block.userData()
if usd is None:
usd = TextBlockUserData()
block.setUserData(usd)
tokens = list(self._lexer.get_tokens(text))
for token, text in tokens:
length = len(text)
fmt = self._get_format(token)
if token in [Token.Literal.String, Token.Literal.String.Doc,
Token.Comment]:
fmt.setObjectType(fmt.UserObject)
self.setFormat(index, length, fmt)
index += length
if hasattr(self._lexer, '_saved_state_stack'):
setattr(usd, "syntax_stack", self._lexer._saved_state_stack)
# Clean up for the next go-round.
del self._lexer._saved_state_stack
# spaces
text = original_text
expression = QRegExp(r'\s+')
index = expression.indexIn(text, 0)
while index >= 0:
index = expression.pos(0)
length = len(expression.cap(0))
self.setFormat(index, length, self._get_format(Whitespace))
index = expression.indexIn(text, index + length)
self._prev_block = block | 0.000926 |
def get_tileset_from_gid(self, gid):
""" Return tileset that owns the gid
Note: this is a slow operation, so if you are expecting to do this
often, it would be worthwhile to cache the results of this.
:param gid: gid of tile image
        :rtype: TiledTileset if found, otherwise raises ValueError
"""
try:
tiled_gid = self.tiledgidmap[gid]
except KeyError:
raise ValueError
for tileset in sorted(self.tilesets, key=attrgetter('firstgid'),
reverse=True):
if tiled_gid >= tileset.firstgid:
return tileset
raise ValueError | 0.002954 |
def invalidate_value(
cls,
value: Any,
exc: Type[Exception]=EncodingTypeError,
msg: Optional[str]=None,
) -> None:
"""
Throws a standard exception for when a value is not encodable by an
encoder.
"""
raise exc(
"Value `{rep}` of type {typ} cannot be encoded by {cls}{msg}".format(
rep=abbr(value),
typ=type(value),
cls=cls.__name__,
msg="" if msg is None else (": " + msg),
)
) | 0.014572 |
def fix_config(self, options):
"""
Fixes the options, if necessary. I.e., it adds all required elements to the dictionary.
:param options: the options to fix
:type options: dict
:return: the (potentially) fixed options
:rtype: dict
"""
opt = "db_url"
if opt not in options:
options[opt] = "jdbc:mysql://somehost:3306/somedatabase"
if opt not in self.help:
self.help[opt] = "The JDBC database URL to connect to (str)."
opt = "user"
if opt not in options:
options[opt] = "user"
if opt not in self.help:
self.help[opt] = "The database user to use for connecting (str)."
opt = "password"
if opt not in options:
options[opt] = "secret"
if opt not in self.help:
self.help[opt] = "The password for the database user (str)."
opt = "query"
if opt not in options:
options[opt] = "SELECT * FROM table"
if opt not in self.help:
self.help[opt] = "The SQL query for generating the dataset (str)."
opt = "sparse"
if opt not in options:
options[opt] = False
if opt not in self.help:
self.help[opt] = "Whether to return the data in sparse format (bool)."
opt = "custom_props"
if opt not in options:
options[opt] = ""
if opt not in self.help:
self.help[opt] = "Custom properties filename (str)."
return super(LoadDatabase, self).fix_config(options) | 0.002517 |
def _get_valid_endpoint(resp, name, entry_type):
"""
Parse the service catalog returned by the Identity API for an endpoint matching
the Nova service with the requested version
Sends a CRITICAL service check when no viable candidates are found in the Catalog
"""
catalog = resp.get('token', {}).get('catalog', [])
for entry in catalog:
if (
entry.get('name')
and entry.get('type')
and entry.get('name') == name
and entry.get('type') == entry_type
):
# Collect any endpoints on the public or internal interface
valid_endpoints = {}
for ep in entry.get('endpoints'):
interface = ep.get('interface', '')
if interface in ['public', 'internal']:
valid_endpoints[interface] = ep.get('url')
if valid_endpoints:
# Favor public endpoints over internal
return valid_endpoints.get('public', valid_endpoints.get('internal'))
return None | 0.004363 |
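A sketch of the Keystone catalog shape this parses, with hypothetical values and the helper above assumed to be in scope.
resp = {'token': {'catalog': [{
    'name': 'nova',
    'type': 'compute',
    'endpoints': [
        {'interface': 'internal', 'url': 'http://10.0.0.5:8774/v2.1'},
        {'interface': 'public', 'url': 'https://compute.example.com:8774/v2.1'},
    ],
}]}}

# The public interface is preferred over internal when both are present.
print(_get_valid_endpoint(resp, 'nova', 'compute'))   # https://compute.example.com:8774/v2.1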
def function_selector(method_name_and_signature):
"""
Makes a function hash id from a method signature
"""
s = sha3.keccak_256()
s.update(method_name_and_signature.encode())
return bytes(s.digest()[:4]) | 0.008 |
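A usage sketch, assuming the sha3 (pysha3) module imported by the snippet is available; the ERC-20 transfer signature hashes to the well-known selector a9059cbb.
print(function_selector('transfer(address,uint256)').hex())   # a9059cbb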
def rebuild(self, image):
"""
Rebuild the droplet with the specified image
A rebuild action functions just like a new create. [APIDocs]_
:param image: an image ID, an image slug, or an `Image` object
representing the image the droplet should use as a base
:type image: integer, string, or `Image`
:return: an `Action` representing the in-progress operation on the
droplet
:rtype: Action
:raises DOAPIError: if the API endpoint replies with an error
"""
if isinstance(image, Image):
image = image.id
return self.act(type='rebuild', image=image) | 0.002972 |
def appliance_device_snmp_v1_trap_destinations(self):
"""
Gets the ApplianceDeviceSNMPv1TrapDestinations API client.
Returns:
ApplianceDeviceSNMPv1TrapDestinations:
"""
if not self.__appliance_device_snmp_v1_trap_destinations:
self.__appliance_device_snmp_v1_trap_destinations = ApplianceDeviceSNMPv1TrapDestinations(self.__connection)
return self.__appliance_device_snmp_v1_trap_destinations | 0.006452 |
def get_model_list(class_list):
"""
Receives a list of strings with app_name.model_name format
and turns them into classes. If an item is already a class
it ignores it.
"""
for idx, item in enumerate(class_list):
if isinstance(item, six.string_types):
model_class = apps.get_model(item)
class_list[idx] = model_class | 0.002688 |
def pretty_date(the_datetime):
"""Attempt to return a human-readable time delta string."""
# Source modified from
# http://stackoverflow.com/a/5164027/176978
diff = datetime.utcnow() - the_datetime
if diff.days > 7 or diff.days < 0:
return the_datetime.strftime('%A %B %d, %Y')
elif diff.days == 1:
return '1 day ago'
elif diff.days > 1:
return '{0} days ago'.format(diff.days)
elif diff.seconds <= 1:
return 'just now'
elif diff.seconds < 60:
return '{0} seconds ago'.format(diff.seconds)
elif diff.seconds < 120:
return '1 minute ago'
elif diff.seconds < 3600:
return '{0} minutes ago'.format(int(round(diff.seconds / 60)))
elif diff.seconds < 7200:
return '1 hour ago'
else:
return '{0} hours ago'.format(int(round(diff.seconds / 3600))) | 0.001157 |
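A short usage sketch, assuming pretty_date above and its datetime import are in scope.
from datetime import datetime, timedelta

print(pretty_date(datetime.utcnow() - timedelta(minutes=5)))   # '5 minutes ago'
print(pretty_date(datetime.utcnow() - timedelta(days=3)))      # '3 days ago'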
def zrange(key, start, stop, host=None, port=None, db=None, password=None):
'''
Get a range of values from a sorted set in Redis by index
CLI Example:
.. code-block:: bash
salt '*' redis.zrange foo_sorted 0 10
'''
server = _connect(host, port, db, password)
return server.zrange(key, start, stop) | 0.002985 |
def get_object(self, resource_url):
"""Get remote resource information. Creates a local directory for the
resource if this is the first access to the resource. Downloads the
resource Json representation and writes it into a .json file in the
cache directory.
Raises ValueError if resource is not cached and does not exist. If the
resource no longer exists on the server but in the local cache, a
reference to the local copy is returned and the value of the is_active
flag is False.
Parameters
----------
resource_url : string
Url of the resource
Returns
-------
(string, Json, Boolean, string)
Returns a 4-tuple containing local resource directory, the Json
object representing the resource, an active flag indicating if
the resource still exists on the remote server or only in the local
cache, and the resource unique cache identifier.
"""
# Check if resource is in local cache. If not, create a new cache
# identifier and set is_cached flag to false
if resource_url in self.cache:
cache_id = self.cache[resource_url]
else:
cache_id = str(uuid.uuid4())
        # The local cache directory for the resource is given by the cache identifier
obj_dir = os.path.join(self.directory, cache_id)
# File for local copy of object's Json representation
f_json = os.path.join(obj_dir, '.json')
# Object active flag
is_active = True
# Read the remote resource representation
try:
obj_json = sco.JsonResource(resource_url).json
# Save local copy of Json object. Create local resource directory if
# it doesn't exist
if not os.path.isdir(obj_dir):
os.mkdir(obj_dir)
with open(f_json, 'w') as f:
json.dump(obj_json, f)
except ValueError as ex:
# If the resource does not exists but we have a local copy then read
# object from local disk. Set is_active flag to false. Raise
# ValueError if no local copy exists
if os.path.isfile(f_json):
with open(f_json, 'r') as f:
obj_json = json.load(f)
is_active = False
else:
raise ex
# Return object directory, Json, active flag, and cache identifier
return obj_dir, obj_json, is_active, cache_id | 0.00153 |
def _clamp_value(value, minimum, maximum):
"""
Clamp a value to fit between a minimum and a maximum.
* If ``value`` is between ``minimum`` and ``maximum``, return ``value``
* If ``value`` is below ``minimum``, return ``minimum``
    * If ``value`` is above ``maximum``, return ``maximum``
Args:
value (float or int): The number to clamp
minimum (float or int): The lowest allowed return value
maximum (float or int): The highest allowed return value
Returns:
float or int: the clamped value
Raises:
ValueError: if maximum < minimum
Example:
>>> _clamp_value(3, 5, 10)
5
>>> _clamp_value(11, 5, 10)
10
>>> _clamp_value(8, 5, 10)
8
"""
if maximum < minimum:
raise ValueError
if value < minimum:
return minimum
elif value > maximum:
return maximum
else:
return value | 0.001066 |
def mime_type(self, category=None):
"""
:param category: application|audio|image|message|model|multipart|text|video
"""
category = category if category else self.random_element(
list(self.mime_types.keys()))
return self.random_element(self.mime_types[category]) | 0.009585 |
def search(self, buf):
"""Search the provided buffer for matching text.
Search the provided buffer for matching text. If the *match* is found,
returns a :class:`SequenceMatch` object, otherwise returns ``None``.
:param buf: Buffer to search for a match.
:return: :class:`SequenceMatch` if matched, None if no match was found.
"""
self._check_type(buf)
normalized = unicodedata.normalize(self.FORM, buf)
idx = normalized.find(self._text)
if idx < 0:
return None
start = idx
end = idx + len(self._text)
return SequenceMatch(self, normalized[start:end], start, end) | 0.002941 |
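The normalization step above matters because visually identical strings can use different code point sequences. A quick standalone illustration (NFC is used here; the class normalizes with whatever form self.FORM is set to):
import unicodedata

precomposed = 'caf\u00e9'    # 'café' with é as a single code point
decomposed = 'cafe\u0301'    # 'e' followed by a combining acute accent

print(precomposed == decomposed)                     # False: raw strings differ
nfc_pre = unicodedata.normalize('NFC', precomposed)
nfc_dec = unicodedata.normalize('NFC', decomposed)
print(nfc_pre == nfc_dec)                            # True after normalization
print(nfc_dec.find(nfc_pre))                         # 0: substring search now succeeds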
def __update_stats(self, server):
"""
Update stats for the given server (picked from the server list)
"""
# Get the server URI
uri = self.__get_uri(server)
# Try to connect to the server
t = GlancesClientTransport()
t.set_timeout(3)
# Get common stats
try:
s = ServerProxy(uri, transport=t)
except Exception as e:
logger.warning(
"Client browser couldn't create socket {}: {}".format(uri, e))
else:
# Mandatory stats
try:
# CPU%
cpu_percent = 100 - json.loads(s.getCpu())['idle']
server['cpu_percent'] = '{:.1f}'.format(cpu_percent)
# MEM%
server['mem_percent'] = json.loads(s.getMem())['percent']
# OS (Human Readable name)
server['hr_name'] = json.loads(s.getSystem())['hr_name']
except (socket.error, Fault, KeyError) as e:
logger.debug(
"Error while grabbing stats form {}: {}".format(uri, e))
server['status'] = 'OFFLINE'
except ProtocolError as e:
if e.errcode == 401:
# Error 401 (Authentication failed)
# Password is not the good one...
server['password'] = None
server['status'] = 'PROTECTED'
else:
server['status'] = 'OFFLINE'
logger.debug("Cannot grab stats from {} ({} {})".format(uri, e.errcode, e.errmsg))
else:
# Status
server['status'] = 'ONLINE'
# Optional stats (load is not available on Windows OS)
try:
# LOAD
load_min5 = json.loads(s.getLoad())['min5']
server['load_min5'] = '{:.2f}'.format(load_min5)
except Exception as e:
logger.warning(
"Error while grabbing stats form {}: {}".format(uri, e))
return server | 0.001873 |
def get():
"""Subarray list.
This method will list all sub-arrays known to SDP.
"""
_url = get_root_url()
LOG.debug('GET Sub array list')
sub_array_ids = sorted(DB.get_sub_array_ids())
response = dict(sub_arrays=[])
for array_id in sub_array_ids:
        array_summary = dict(sub_array_id=array_id)
block_ids = DB.get_sub_array_sbi_ids(array_id)
LOG.debug('Subarray IDs: %s', array_id)
LOG.debug('SBI IDs: %s', block_ids)
array_summary['num_scheduling_blocks'] = len(block_ids)
array_summary['links'] = {
'detail': '{}/sub-array/{}'.format(_url, array_id)
}
response['sub_arrays'].append(array_summary)
response['links'] = dict(self=request.url, home=_url)
return response, status.HTTP_200_OK | 0.001244 |
def getATSTemplateMgtURL(self, CorpNum, UserID):
"""
        Returns the AlimTalk template management popup URL.
        :param CorpNum: Popbill member business registration number
        :param UserID: Popbill member user ID
        :return: Popbill URL
"""
result = self._httpget('/KakaoTalk/?TG=TEMPLATE', CorpNum, UserID)
return result.url | 0.00692 |
def get_planes(im, squeeze=True):
r"""
Extracts three planar images from the volumetric image, one for each
    principal axis. The planes are taken from the middle of the domain.
Parameters
----------
im : ND-array
The volumetric image from which the 3 planar images are to be obtained
squeeze : boolean, optional
If True (default) the returned images are 2D (i.e. squeezed). If
False, the images are 1 element deep along the axis where the slice
was obtained.
Returns
-------
planes : list
A list of 2D-images
"""
x, y, z = (sp.array(im.shape) / 2).astype(int)
planes = [im[x, :, :], im[:, y, :], im[:, :, z]]
if not squeeze:
imx = planes[0]
planes[0] = sp.reshape(imx, [1, imx.shape[0], imx.shape[1]])
imy = planes[1]
planes[1] = sp.reshape(imy, [imy.shape[0], 1, imy.shape[1]])
imz = planes[2]
planes[2] = sp.reshape(imz, [imz.shape[0], imz.shape[1], 1])
return planes | 0.000979 |
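A hedged usage sketch: build a small random volume and pull out the three mid-planes. numpy is used directly for the toy image; the function itself relies on the module's sp (scipy) import being available.
import numpy as np

im = np.random.rand(20, 30, 40) > 0.5     # toy 3D binary image
planes = get_planes(im)                   # assumes get_planes above is importable
for axis, plane in zip('xyz', planes):
    print(axis, plane.shape)              # x (30, 40), y (20, 40), z (20, 30)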
def bench(client, n):
""" Benchmark n requests """
items = list(range(n))
# Time client publish operations
# ------------------------------
started = time.time()
for i in items:
client.publish('test', i)
duration = time.time() - started
print('Publisher client stats:')
util.print_stats(n, duration) | 0.002899 |
def get_head_node_ip(config_file, override_cluster_name):
"""Returns head node IP for given configuration file if exists."""
    with open(config_file) as f:
        config = yaml.safe_load(f)
if override_cluster_name is not None:
config["cluster_name"] = override_cluster_name
provider = get_node_provider(config["provider"], config["cluster_name"])
try:
head_node = _get_head_node(config, config_file, override_cluster_name)
if config.get("provider", {}).get("use_internal_ips", False) is True:
head_node_ip = provider.internal_ip(head_node)
else:
head_node_ip = provider.external_ip(head_node)
finally:
provider.cleanup()
return head_node_ip | 0.001397 |
def reshuffle_batches(self, indices, rng):
"""
Permutes global batches
:param indices: torch.tensor with batch indices
:param rng: instance of torch.Generator
"""
indices = indices.view(-1, self.global_batch_size)
num_batches = indices.shape[0]
order = torch.randperm(num_batches, generator=rng)
indices = indices[order, :]
indices = indices.view(-1)
return indices | 0.004405 |
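A standalone sketch of the same permutation logic outside the class, so the effect is easy to see; the batch size and seed are illustrative.
import torch

global_batch_size = 4
indices = torch.arange(12)                          # three global batches of four samples
rng = torch.Generator()
rng.manual_seed(0)

batches = indices.view(-1, global_batch_size)       # shape (3, 4): one row per global batch
order = torch.randperm(batches.shape[0], generator=rng)
shuffled = batches[order, :].view(-1)               # batches reordered, samples within each batch intact
print(shuffled.tolist())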
def get_files_to_check(self):
"""Generate files and error codes to check on each one.
Walk dir trees under `self._arguments` and yield file names
that `match` under each directory that `match_dir`.
The method locates the configuration for each file name and yields a
        tuple of (filename, [error_codes], ignore_decorators) for each file.
        Discovering a new configuration file may raise `IllegalConfiguration`.
"""
def _get_matches(conf):
"""Return the `match` and `match_dir` functions for `config`."""
match_func = re(conf.match + '$').match
match_dir_func = re(conf.match_dir + '$').match
return match_func, match_dir_func
def _get_ignore_decorators(conf):
"""Return the `ignore_decorators` as None or regex."""
return (re(conf.ignore_decorators) if conf.ignore_decorators
else None)
for name in self._arguments:
if os.path.isdir(name):
for root, dirs, filenames in os.walk(name):
config = self._get_config(os.path.abspath(root))
match, match_dir = _get_matches(config)
ignore_decorators = _get_ignore_decorators(config)
# Skip any dirs that do not match match_dir
dirs[:] = [d for d in dirs if match_dir(d)]
for filename in filenames:
if match(filename):
full_path = os.path.join(root, filename)
yield (full_path, list(config.checked_codes),
ignore_decorators)
else:
config = self._get_config(os.path.abspath(name))
match, _ = _get_matches(config)
ignore_decorators = _get_ignore_decorators(config)
if match(name):
yield (name, list(config.checked_codes), ignore_decorators) | 0.000998 |
def has_space(self, length=1, offset=0):
"""Returns boolean if self.pos + length < working string length."""
return self.pos + (length + offset) - 1 < self.length | 0.011236 |
def _get_main_language():
"""
    Return the code of the main language.
    :return: the main language code, or 'es' if no main language is defined
"""
try:
main_language = TransLanguage.objects.filter(main_language=True).get()
return main_language.code
except TransLanguage.DoesNotExist:
return 'es' | 0.009967 |
def get_mr_filters(data_shape, opt='', coarse=False): # pragma: no cover
"""Get mr_transform filters
This method obtains wavelet filters by calling mr_transform
Parameters
----------
data_shape : tuple
2D data shape
opt : list, optional
        List of additional mr_transform options
coarse : bool, optional
Option to keep coarse scale (default is 'False')
Returns
-------
    np.ndarray
        3D array of wavelet filters
"""
# Adjust the shape of the input data.
data_shape = np.array(data_shape)
data_shape += data_shape % 2 - 1
# Create fake data.
fake_data = np.zeros(data_shape)
fake_data[tuple(zip(data_shape // 2))] = 1
# Call mr_transform.
mr_filters = call_mr_transform(fake_data, opt=opt)
# Return filters
if coarse:
return mr_filters
else:
return mr_filters[:-1] | 0.001121 |
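The zero image with a single centred 1 is the standard "impulse trick": applying a linear transform to a delta image returns the transform's own filters. mr_transform is an external binary, so a toy stand-in with a Gaussian blur illustrates the idea here:
import numpy as np
from scipy.ndimage import gaussian_filter

shape = np.array((9, 9))
fake_data = np.zeros(shape)
fake_data[tuple(shape // 2)] = 1                 # delta impulse at the centre

kernel = gaussian_filter(fake_data, sigma=1.0)   # the blur's effective filter kernel
print(kernel.shape, round(kernel.sum(), 3))      # (9, 9) and a sum close to 1.0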
def call_somatic(tumor_name, normal_name):
"""Call SOMATIC variants from tumor/normal calls, adding REJECT filters and SOMATIC flag.
Works from stdin and writes to stdout, finding positions of tumor and normal samples.
Uses MuTect like somatic filter based on implementation in speedseq:
https://github.com/cc2qe/speedseq/blob/e6729aa2589eca4e3a946f398c1a2bdc15a7300d/bin/speedseq#L62
Extracts the genotype likelihoods (GLs) from FreeBayes, which are like phred scores
except not multiplied by 10.0 (https://en.wikipedia.org/wiki/Phred_quality_score).
For tumors, we retrieve the best likelihood to not be reference (the first GL) and
    for normal, the best likelihood to be reference.
After calculating the likelihoods, we compare these to thresholds to pass variants
at tuned sensitivity/precision. Tuning done on DREAM synthetic 3 dataset evaluations.
We also check that the frequency of the tumor exceeds the frequency of the normal by
a threshold to avoid calls that are low frequency in both tumor and normal. This supports
both FreeBayes and VarDict output frequencies.
"""
# Thresholds are like phred scores, so 3.5 = phred35
tumor_thresh, normal_thresh = 3.5, 3.5
new_headers = ['##INFO=<ID=SOMATIC,Number=0,Type=Flag,Description="Somatic event">\n',
('##FILTER=<ID=REJECT,Description="Not somatic due to normal call frequency '
'or phred likelihoods: tumor: %s, normal %s.">\n')
% (int(tumor_thresh * 10), int(normal_thresh * 10))]
def _output_filter_line(line, indexes):
parts = line.split("\t")
if _check_lods(parts, tumor_thresh, normal_thresh, indexes) and _check_freqs(parts, indexes):
parts[7] = parts[7] + ";SOMATIC"
else:
if parts[6] in set([".", "PASS"]):
parts[6] = "REJECT"
else:
parts[6] += ";REJECT"
line = "\t".join(parts)
sys.stdout.write(line)
def _write_header(header):
for hline in header[:-1] + new_headers + [header[-1]]:
sys.stdout.write(hline)
header = []
indexes = None
for line in sys.stdin:
if not indexes:
if line.startswith("#"):
header.append(line)
else:
parts = header[-1].rstrip().split("\t")
indexes = {"tumor": parts.index(tumor_name), "normal": parts.index(normal_name)}
_write_header(header)
_output_filter_line(line, indexes)
else:
_output_filter_line(line, indexes)
# no calls, only output the header
if not indexes:
_write_header(header) | 0.005908 |
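The helpers _check_lods and _check_freqs are not included in this excerpt. As a hypothetical illustration of the comparison the docstring describes (FreeBayes GLs are log10 likelihoods, so a threshold of 3.5 behaves like phred 35), a check of that general shape might look like this — the GL ordering and field handling are simplified assumptions:
def _check_lods_sketch(tumor_gls, normal_gls, tumor_thresh=3.5, normal_thresh=3.5):
    """Hypothetical check: the tumor should look non-reference and the normal should look reference.

    tumor_gls / normal_gls are genotype log10-likelihoods with the
    homozygous-reference likelihood first (FreeBayes GL ordering).
    """
    tumor_lod = max(tumor_gls[1:]) - tumor_gls[0]      # evidence the tumor is NOT reference
    normal_lod = normal_gls[0] - max(normal_gls[1:])   # evidence the normal IS reference
    return tumor_lod >= tumor_thresh and normal_lod >= normal_thresh

print(_check_lods_sketch([-40.0, -0.1, -20.0], [-0.05, -38.0, -40.0]))  # True: strong somatic signal
print(_check_lods_sketch([-2.0, -1.0, -3.0], [-0.05, -38.0, -40.0]))    # False: weak tumor evidence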
def search_string_filter(i):
"""
Input: {
repo_uoa - repo UOA
module_uoa - module UOA
data_uoa - data UOA
path - path
(search_string) - search with expressions *?
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
              skip         - 'yes' if the entry does not match the search string
                             and should be filtered out, otherwise 'no'
            }
"""
# To be fast, load directly
p=i['path']
skip='yes'
ss=i.get('search_string','')
if ss=='':
skip='no'
else:
ic=i.get('ignore_case','')
p1=os.path.join(p,cfg['subdir_ck_ext'],cfg['file_meta'])
if not os.path.isfile(p1):
p1=os.path.join(p,cfg['subdir_ck_ext'],cfg['file_meta_old'])
if not os.path.isfile(p1):
return {'return':0, 'skip':'yes'}
r=load_json_file({'json_file':p1})
if r['return']>0: return r
d=r['dict']
# Check directly
rx=find_string_in_dict_or_list({'dict':d, 'search_string':ss, 'ignore_case':ic})
if rx['return']>0: return rx
found=rx['found']
if found=='yes': skip='no'
return {'return':0, 'skip':skip} | 0.034597 |
def get_route_io_data_types(self):
# type: () -> typing.List[UserDefined]
"""
Returns a list of all user-defined data types that are referenced as
either an argument, result, or error of a route. If a List or Nullable
data type is referenced, then the contained data type is returned
assuming it's a user-defined type.
"""
data_types = set() # type: typing.Set[UserDefined]
for route in self.routes:
data_types |= self.get_route_io_data_types_for_route(route)
return sorted(data_types, key=lambda dt: dt.name) | 0.004983 |
def get_ast_obj(belstr, bel_version, component_type: str = ""):
"""Convert AST partialparse dict to BELAst"""
ast_dict, errors = get_ast_dict(belstr, component_type)
spec = bel_specification.get_specification(bel_version)
subj = ast_dict["subject"]
subj_ast = add_ast_fn(subj, spec)
relation = None
obj = None
if "relation" in ast_dict:
relation = ast_dict["relation"]["name"]
if "object" in ast_dict:
obj = ast_dict["object"]
obj_ast = add_ast_fn(obj, spec)
return BELAst(subj_ast, relation, obj_ast, spec)
elif "nested" in ast_dict:
nested_subj = ast_dict["nested"]["subject"]
nested_subj_ast = add_ast_fn(nested_subj, spec)
nested_relation = ast_dict["nested"]["relation"]["name"]
nested_obj = ast_dict["nested"]["object"]
nested_obj_ast = add_ast_fn(nested_obj, spec)
return BELAst(
subj_ast,
relation,
BELAst(nested_subj_ast, nested_relation, nested_obj_ast, spec),
spec,
)
return BELAst(subj_ast, None, None, spec) | 0.000902 |
def from_dict(values):
'''
Instantiate a BlockadeConfig instance based on
a given dictionary of configuration values
'''
try:
containers = values['containers']
parsed_containers = {}
for name, container_dict in containers.items():
try:
# one config entry might result in many container
# instances (indicated by the 'count' config value)
for cnt in BlockadeContainerConfig.from_dict(name, container_dict):
# check for duplicate 'container_name' definitions
if cnt.container_name:
cname = cnt.container_name
existing = [c for c in parsed_containers.values() if c.container_name == cname]
if existing:
raise BlockadeConfigError("Duplicate 'container_name' definition: %s" % (cname))
parsed_containers[cnt.name] = cnt
except Exception as err:
raise BlockadeConfigError(
"Container '%s' config problem: %s" % (name, err))
network = values.get('network')
if network:
defaults = _DEFAULT_NETWORK_CONFIG.copy()
defaults.update(network)
network = defaults
else:
network = _DEFAULT_NETWORK_CONFIG.copy()
return BlockadeConfig(parsed_containers, network=network)
except KeyError as err:
raise BlockadeConfigError("Config missing value: " + str(err))
except Exception as err:
# TODO log this to some debug stream?
raise BlockadeConfigError("Failed to load config: " + str(err)) | 0.002726 |
def get_router_for_floatingip(self, context, internal_port,
internal_subnet, external_network_id):
"""We need to over-load this function so that we only return the
user visible router and never its redundancy routers (as they never
have floatingips associated with them).
"""
gw_port = orm.aliased(models_v2.Port, name="gw_port")
routerport_qry = context.session.query(
RouterPort.router_id, models_v2.IPAllocation.ip_address).join(
models_v2.Port, models_v2.IPAllocation).filter(
models_v2.Port.network_id == internal_port['network_id'],
RouterPort.port_type.in_(bc.constants.ROUTER_INTERFACE_OWNERS),
models_v2.IPAllocation.subnet_id == internal_subnet['id']
).join(gw_port, gw_port.device_id == RouterPort.router_id).filter(
gw_port.network_id == external_network_id,
gw_port.device_owner == bc.constants.DEVICE_OWNER_ROUTER_GW
).distinct()
# Ensure that redundancy routers (in a ha group) are not returned,
# since only the user visible router should have floatingips.
# This can be done by checking that the id of routers does not
# appear in the 'redundancy_router_id' column in the
# 'cisco_router_redundancy_bindings' table.
routerport_qry = routerport_qry.outerjoin(
RouterRedundancyBinding,
RouterRedundancyBinding.redundancy_router_id ==
RouterPort.router_id)
routerport_qry = routerport_qry.filter(
RouterRedundancyBinding.redundancy_router_id == expr.null())
first_router_id = None
for router_id, interface_ip in routerport_qry:
if interface_ip == internal_subnet['gateway_ip']:
return router_id
if not first_router_id:
first_router_id = router_id
if first_router_id:
return first_router_id
raise l3_exceptions.ExternalGatewayForFloatingIPNotFound(
subnet_id=internal_subnet['id'],
external_network_id=external_network_id,
port_id=internal_port['id']) | 0.00137 |
def set_parallel_value_for_key(self, key, value):
"""
Set a globally available key and value that can be accessed
from all the pabot processes.
"""
if self._remotelib:
self._remotelib.run_keyword('set_parallel_value_for_key',
[key, value], {})
else:
_PabotLib.set_parallel_value_for_key(self, key, value) | 0.004808 |
def __initialize_ui(self):
"""
Initializes the View ui.
"""
self.viewport().installEventFilter(ReadOnlyFilter(self))
if issubclass(type(self), QListView):
super(type(self), self).setUniformItemSizes(True)
elif issubclass(type(self), QTreeView):
super(type(self), self).setUniformRowHeights(True) | 0.00542 |
def get_connection_params(self):
"""
Default method to acquire database connection parameters.
Sets connection parameters to match settings.py, and sets
default values to blank fields.
"""
valid_settings = {
'NAME': 'name',
'HOST': 'host',
'PORT': 'port',
'USER': 'username',
'PASSWORD': 'password',
'AUTH_SOURCE': 'authSource',
'AUTH_MECHANISM': 'authMechanism',
'ENFORCE_SCHEMA': 'enforce_schema',
'REPLICASET': 'replicaset',
'SSL': 'ssl',
'SSL_CERTFILE': 'ssl_certfile',
'SSL_CA_CERTS': 'ssl_ca_certs',
'READ_PREFERENCE': 'read_preference'
}
connection_params = {
'name': 'djongo_test',
'enforce_schema': True
}
for setting_name, kwarg in valid_settings.items():
try:
setting = self.settings_dict[setting_name]
except KeyError:
continue
if setting or setting is False:
connection_params[kwarg] = setting
return connection_params | 0.001686 |
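For illustration, a settings dictionary like the one below (keys taken from valid_settings above) maps to the keyword arguments shown in the comment; the defaults 'djongo_test' and enforce_schema=True only apply when the corresponding settings are absent.
settings_dict = {
    'NAME': 'blog',
    'HOST': 'localhost',
    'PORT': 27017,
    'USER': 'blog_user',
    'PASSWORD': 's3cret',
    'ENFORCE_SCHEMA': False,   # explicitly False is still passed through
}

# Expected result of get_connection_params() with these settings:
# {
#     'name': 'blog',
#     'host': 'localhost',
#     'port': 27017,
#     'username': 'blog_user',
#     'password': 's3cret',
#     'enforce_schema': False,
# }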
def messaging(self):
"""
Access the Messaging Twilio Domain
:returns: Messaging Twilio Domain
:rtype: twilio.rest.messaging.Messaging
"""
if self._messaging is None:
from twilio.rest.messaging import Messaging
self._messaging = Messaging(self)
return self._messaging | 0.005764 |
def _login(session):
"""Login."""
_LOGGER.info("logging in (no valid cookie found)")
session.cookies.clear()
resp = session.post(SSO_URL, {
'USER': session.auth.username,
'PASSWORD': session.auth.password,
'TARGET': TARGET_URL
})
parsed = BeautifulSoup(resp.text, HTML_PARSER)
relay_state = parsed.find('input', {'name': 'RelayState'}).get('value')
saml_response = parsed.find('input', {'name': 'SAMLResponse'}).get('value')
session.post(SIGNIN_URL, {
'RelayState': relay_state,
'SAMLResponse': saml_response
})
session.get(SIGNIN_URL)
_save_cookies(session.cookies, session.auth.cookie_path) | 0.001471 |
def get_device_offset(self):
"""Returns the previous device offset set by :meth:`set_device_offset`.
:returns: ``(x_offset, y_offset)``
"""
offsets = ffi.new('double[2]')
cairo.cairo_surface_get_device_offset(
self._pointer, offsets + 0, offsets + 1)
return tuple(offsets) | 0.005988 |
def destinations(self, cluster='main'):
"""Return a list of destinations for a cluster."""
if not self.config.has_section(cluster):
raise SystemExit("Cluster '%s' not defined in %s"
% (cluster, self.config_file))
destinations = self.config.get(cluster, 'destinations')
return destinations.replace(' ', '').split(',') | 0.005141 |
def print_summary(graph, tails, node_id_map):
"""Print out summary and per-node comparison data."""
# Get comparison data
heads = get_heads(tails)
heights = get_heights(tails)
max_height = max(heights)
common_height, block_ids_at_common_height = get_common_height(tails)
lags = get_lags(heights, max_height)
common_ancestor = graph.root
divergences = get_divergences(heights, graph.root)
# Print summary info
col_1 = 8
col_n = 8
format_str = '{:<' + str(col_1) + '} ' + ('{:<' + str(col_n) + '} ') * 2
header = format_str.format("COMMON", "HEIGHT", "BLOCKS")
print(header)
print("-" * len(header))
print(format_str.format(
"ANCESTOR", common_ancestor.num, common_ancestor.ident[:col_n]))
print(format_str.format(
"HEIGHT", common_height, str(block_ids_at_common_height)))
print()
# Print per-node data
node_col_width = get_col_width_for_num(len(tails), len("NODE"))
num_col_width = get_col_width_for_num(max_height, len("HEIGHT"))
lag_col_width = get_col_width_for_num(max(lags), len("LAG"))
diverg_col_width = get_col_width_for_num(max(divergences), len("DIVERG"))
format_str = (
'{:<' + str(node_col_width) + '} '
'{:<8} '
'{:<' + str(num_col_width) + '} '
'{:<' + str(lag_col_width) + '} '
'{:<' + str(diverg_col_width) + '}'
)
header = format_str.format("NODE", "HEAD", "HEIGHT", "LAG", "DIVERG")
print(header)
print('-' * len(header))
for i, _ in enumerate(tails):
print(format_str.format(
node_id_map[i],
heads[i],
heights[i],
lags[i],
divergences[i],
))
print() | 0.000578 |
def list_view_row_clicked(self, list_view, path, view_column):
"""
        Opens the relevant link from the clicked row in the default web browser.
"""
model = list_view.get_model()
text = model[path][0]
match = URL_FINDER.search(text)
if match is not None:
url = match.group(1)
import webbrowser
webbrowser.open(url) | 0.005236 |
def bubble_sizes_ref(self, series):
"""
The Excel worksheet reference to the range containing the bubble
sizes for *series* (not including the column heading cell).
"""
top_row = self.series_table_row_offset(series) + 2
bottom_row = top_row + len(series) - 1
return "Sheet1!$C$%d:$C$%d" % (top_row, bottom_row) | 0.005464 |
def unregister_service(self, name):
"""
Implementation of :meth:`twitcher.api.IRegistry.unregister_service`.
"""
try:
self.store.delete_service(name=name)
except Exception:
LOGGER.exception('unregister failed')
return False
else:
return True | 0.005935 |
def get_child_account(self, account_name):
"""
Retrieves a child account.
This could be a descendant nested at any level.
:param account_name: The name of the account to retrieve.
:returns: The child account, if found, else None.
"""
if r'/' in account_name:
accs_in_path = account_name.split(r'/', 1)
curr_acc = self[accs_in_path[0]]
if curr_acc is None:
return None
return curr_acc.get_child_account(accs_in_path[1])
else:
return self[account_name] | 0.003273 |
def add_local_node(self, child_node, name=None):
"""Append a child that should alter the locals of this scope node.
:param child_node: The child node that will alter locals.
:type child_node: NodeNG
:param name: The name of the local that will be altered by
the given child node.
:type name: str or None
"""
if name != "__class__":
            # adding a __class__ node as a child would cause infinite recursion later!
self._append_node(child_node)
self.set_local(name or child_node.name, child_node) | 0.005137 |
def pfunc_multi(self, strands, permutation=None, temp=37.0, pseudo=False,
material=None, dangles='some', sodium=1.0, magnesium=0.0):
'''Compute the partition function for an ordered complex of strands.
Runs the \'pfunc\' command.
:param strands: List of strands to use as inputs to pfunc -multi.
:type strands: list
:param permutation: The circular permutation of strands to test in
complex. e.g. to test in the order that was input
for 4 strands, the permutation would be [1,2,3,4].
If set to None, defaults to the order of the
input strands.
:type permutation: list
:param temp: Temperature setting for the computation. Negative values
are not allowed.
:type temp: float
:param pseudo: Enable pseudoknots.
:type pseudo: bool
:param material: The material setting to use in the computation. If set
to None (the default), the material type is inferred
from the strands. Other settings available: 'dna' for
DNA parameters, 'rna' for RNA (1995) parameters, and
'rna1999' for the RNA 1999 parameters.
:type material: str
:param dangles: How to treat dangles in the computation. From the
user guide: For \'none\': Dangle energies are ignored.
For \'some\': \'A dangle energy is incorporated for
each unpaired base flanking a duplex\'. For 'all': all
dangle energy is considered.
:type dangles: str
:param sodium: Sodium concentration in solution (molar), only applies
to DNA.
:type sodium: float
:param magnesium: Magnesium concentration in solution (molar), only
                          applies to DNA.
:type magnesium: float
:returns: A 2-tuple of the free energy of the ordered complex
(float) and the partition function (float).
:rtype: tuple
'''
# Set the material (will be used to set command material flag)
material = self._set_material(strands, material, multi=True)
# Set up command flags
cmd_args = self._prep_cmd_args(temp, dangles, material, pseudo, sodium,
magnesium, multi=True)
# Set up the input file and run the command
if permutation is None:
permutation = range(1, len(strands) + 1)
lines = self._multi_lines(strands, permutation)
stdout = self._run('pfunc', cmd_args, lines).split('\n')
return (float(stdout[-3]), float(stdout[-2])) | 0.001058 |
def command(func):
"""Command line interface decorator.
Decorate a function for building a Bowtie
application and turn it into a command line interface.
"""
# pylint: disable=protected-access,unused-variable
nargs = numargs(func)
if nargs > 0:
raise WrongNumberOfArguments(
f'Decorated function "{func.__name__}" should have no arguments, it has {nargs}.'
)
app = func()
if app is None:
raise TypeError(
'No `App` instance was returned. '
'In the function decorated with @command, '
'return the `App` instance so it can be built.'
)
if not isinstance(app, App):
raise TypeError(
f'Returned value {app} is of type {type(app)}, '
'it needs to be a bowtie.App instance.'
)
@click.group(options_metavar='[--help]')
def cmd():
"""Bowtie CLI to help build and run your app."""
@cmd.command(add_help_option=False)
def build():
"""Write the app, downloads the packages, and bundles it with Webpack."""
app._build()
@cmd.command(add_help_option=False)
@click.option('--host', '-h', default='0.0.0.0', type=str)
@click.option('--port', '-p', default=9991, type=int)
def run(host, port):
"""Build the app and serve it."""
app._build()
app._serve(host, port)
@cmd.command(add_help_option=True)
@click.option('--host', '-h', default='0.0.0.0', type=str)
@click.option('--port', '-p', default=9991, type=int)
def serve(host, port):
"""Serve the Bowtie app."""
app._serve(host, port)
@cmd.command(context_settings=dict(ignore_unknown_options=True),
add_help_option=False)
@click.argument('extra', nargs=-1, type=click.UNPROCESSED)
def dev(extra):
"""Recompile the app for development."""
line = (_WEBPACK, '--config', 'webpack.dev.js') + extra
call(line, cwd=app._build_dir)
@cmd.command(context_settings=dict(ignore_unknown_options=True),
add_help_option=False)
@click.argument('extra', nargs=-1, type=click.UNPROCESSED)
def prod(extra):
"""Recompile the app for production."""
line = (_WEBPACK, '--config', 'webpack.prod.js', '--progress') + extra
call(line, cwd=app._build_dir)
locale = inspect.stack()[1][0].f_locals
module = locale.get("__name__")
if module == "__main__":
try:
arg = sys.argv[1:]
except IndexError:
arg = ('--help',)
# pylint: disable=too-many-function-args
sys.exit(cmd(arg))
return cmd | 0.00113 |
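A hedged usage sketch, assuming App and command are importable from bowtie as the docstring implies; building the actual widget layout is out of scope for this excerpt.
from bowtie import App, command

@command
def main():
    app = App()       # assumption: a bare App is enough for the CLI to build
    # ... add widgets and layout here ...
    return app

# With this in app.py, `python app.py build` writes and bundles the app,
# and `python app.py run` builds and then serves it.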
def _simulate(self, nreps, admix=None, Ns=500000, gen=20):
"""
Enter a baba.Tree object in which the 'tree' attribute (newick
derived tree) has edge lengths in units of generations. You can
use the 'gen' parameter to multiply branch lengths by a constant.
Parameters:
-----------
nreps: (int)
Number of reps (loci) to simulate under the demographic scenario
tree: (baba.Tree object)
A baba.Tree object initialized by calling baba.Tree(*args).
admix: (list)
A list of admixture events to occur on the tree. Nodes must be
reference by their index number, and events must occur in time
intervals when edges exist. Use the .draw() function of the
baba.Tree object to see node index numbers and coalescent times.
Ns: (float)
        Fixed effective population size for all lineages (may be allowed to vary
in the future).
gen: (int)
A multiplier applied to branch lengths to scale into units of
generations. Example, if all edges on a tree were 1 then you might
enter 50000 to multiply so that edges are 50K generations long.
"""
## node ages
Taus = np.array(list(set(self.verts[:, 1]))) * 1e4 * gen
## The tips samples, ordered alphanumerically
## Population IDs correspond to their indexes in pop config
ntips = len(self.tree)
#names = {name: idx for idx, name in enumerate(sorted(self.tree.get_leaf_names()))}
## rev ladderized leaf name order (left to right on downward facing tree)
names = {name: idx for idx, name in enumerate(self.tree.get_leaf_names()[::-1])}
pop_config = [
ms.PopulationConfiguration(sample_size=2, initial_size=Ns)
for i in range(ntips)
]
## migration matrix all zeros init
migmat = np.zeros((ntips, ntips)).tolist()
## a list for storing demographic events
demog = []
## coalescent times
coals = sorted(list(set(self.verts[:, 1])))[1:]
for ct in xrange(len(coals)):
## check for admix event before next coalescence
## ...
## print coals[ct], nidxs, time
nidxs = np.where(self.verts[:, 1] == coals[ct])[0]
time = Taus[ct+1]
## add coalescence at each node
for nidx in nidxs:
node = self.tree.search_nodes(name=str(nidx))[0]
                ## get destination (lowest child idx number), and other
dest = sorted(node.get_leaves(), key=lambda x: x.idx)[0]
otherchild = [i for i in node.children if not
i.get_leaves_by_name(dest.name)][0]
## get source
if otherchild.is_leaf():
source = otherchild
else:
source = sorted(otherchild.get_leaves(), key=lambda x: x.idx)[0]
## add coal events
event = ms.MassMigration(
time=int(time),
source=names[source.name],
destination=names[dest.name],
proportion=1.0)
#print(int(time), names[source.name], names[dest.name])
## ...
demog.append(event)
## sim the data
replicates = ms.simulate(
population_configurations=pop_config,
migration_matrix=migmat,
demographic_events=demog,
num_replicates=nreps,
length=100,
mutation_rate=1e-8)
return replicates | 0.011455 |
def setOutputNode(self, node):
"""
Sets the node that will be generating the output information for \
this connection.
:param node | <XNode>
"""
# if the output node matches the current, ignore
if node == self._outputNode:
return
# disconnect from an existing node
self.disconnectSignals(self._outputNode)
# set the current node
self._outputNode = node
self.connectSignals(self._outputNode)
# force the rebuilding of the path
self.setPath(self.rebuild()) | 0.004975 |
def process(self, data):
"""Process the results from episode processing.
:param list data: result instances
"""
fields = []
for res in data:
for epname, out in six.iteritems(res.status):
fields.append(
[out.get('state'),
epname,
out.get('formatted_filename'),
out.get('messages')])
if fields:
table.write_output(fields) | 0.004065 |
def more_like_this(self, model_instance, additional_query=None,
start_offset=0, end_offset=None,
limit_to_registered_models=True, result_class=None, **kwargs):
"""
Given a model instance, returns a result set of similar documents.
Required arguments:
`model_instance` -- The model instance to use as a basis for
retrieving similar documents.
Optional arguments:
`additional_query` -- An additional query to narrow results
`start_offset` -- The starting offset (default=0)
`end_offset` -- The ending offset (default=None), if None, then all documents
`limit_to_registered_models` -- Limit returned results to models registered in the search (default = True)
Returns:
A dictionary with the following keys:
`results` -- A list of `SearchResult`
`hits` -- The total available results
Opens a database connection, then builds a simple query using the
`model_instance` to build the unique identifier.
For each document retrieved(should always be one), adds an entry into
an RSet (relevance set) with the document id, then, uses the RSet
to query for an ESet (A set of terms that can be used to suggest
expansions to the original query), omitting any document that was in
the original query.
Finally, processes the resulting matches and returns.
"""
database = self._database()
if result_class is None:
result_class = SearchResult
query = xapian.Query(TERM_PREFIXES[ID] + get_identifier(model_instance))
enquire = xapian.Enquire(database)
enquire.set_query(query)
rset = xapian.RSet()
if not end_offset:
end_offset = database.get_doccount()
match = None
for match in self._get_enquire_mset(database, enquire, 0, end_offset):
rset.add_document(match.docid)
if match is None:
if not self.silently_fail:
raise InvalidIndexError('Instance %s with id "%d" not indexed' %
(get_identifier(model_instance), model_instance.id))
else:
return {'results': [],
'hits': 0}
query = xapian.Query(
xapian.Query.OP_ELITE_SET,
[expand.term for expand in enquire.get_eset(match.document.termlist_count(), rset, XHExpandDecider())],
match.document.termlist_count()
)
query = xapian.Query(
xapian.Query.OP_AND_NOT, [query, TERM_PREFIXES[ID] + get_identifier(model_instance)]
)
if limit_to_registered_models:
query = self._build_models_query(query)
if additional_query:
query = xapian.Query(
xapian.Query.OP_AND, query, additional_query
)
enquire.set_query(query)
results = []
matches = self._get_enquire_mset(database, enquire, start_offset, end_offset)
for match in matches:
app_label, model_name, pk, model_data = pickle.loads(self._get_document_data(database, match.document))
results.append(
result_class(app_label, model_name, pk, match.percent, **model_data)
)
return {
'results': results,
'hits': self._get_hit_count(database, enquire),
'facets': {
'fields': {},
'dates': {},
'queries': {},
},
'spelling_suggestion': None,
} | 0.004045 |
def get_locations():
'''
Compiles default locations
:returns:
A dictionary with folders as values:
* 'home_dir': Your home-directory (:file:`~`)
* 'call_dir': Where you called the first Python script from. (``argv[0]``)
* 'conf_dir': The :envvar:`XDG_CONFIG_HOME`-directory + \
``photon`` (:file:`~/.config/photon`)
* 'data_dir': The :envvar:`XDG_DATA_HOME`-directory + \
``photon`` (:file:`~/.local/share/photon`)
.. note::
* Both :func:`search_location` and :func:`make_locations` \
have the argument `locations`.
* |param_locations_none|
'''
home_dir = _path.expanduser('~')
conf_dir = _path.join(
_environ.get(
'XDG_CONFIG_HOME',
_path.join(home_dir, '.config')
),
IDENT
)
data_dir = _path.join(
_environ.get(
'XDG_DATA_HOME',
_path.join(home_dir, '.local', 'share')
),
IDENT
)
return {
'home_dir': home_dir,
'call_dir': _path.dirname(_path.abspath(_argv[0])),
'conf_dir': conf_dir,
'data_dir': data_dir
} | 0.000869 |
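A quick usage sketch; the actual paths depend on XDG_CONFIG_HOME, XDG_DATA_HOME, and the module-level IDENT constant.
locations = get_locations()
for key in ('home_dir', 'call_dir', 'conf_dir', 'data_dir'):
    print(key, '->', locations[key])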
def register_signals(self):
"""Register signals."""
from .receivers import OAIServerUpdater
        # Register Record signals to update OAI information
self.update_function = OAIServerUpdater()
records_signals.before_record_insert.connect(self.update_function,
weak=False)
records_signals.before_record_update.connect(self.update_function,
weak=False)
if self.app.config['OAISERVER_REGISTER_SET_SIGNALS']:
self.register_signals_oaiset() | 0.003317 |
def _affine_inv_mult(c, m):
"Applies the inverse affine transform described in `m` to `c`."
size = c.flow.size()
h,w = c.size
m[0,1] *= h/w
m[1,0] *= w/h
c.flow = c.flow.view(-1,2)
a = torch.inverse(m[:2,:2].t())
c.flow = torch.mm(c.flow - m[:2,2], a).view(size)
return c | 0.022801 |
def persist(self):
"""
        Recursively persist all subprojects (no-op when the app is in dry mode).
"""
if self.app.dry:
return
for proj in self.subprojects.values():
proj.persist() | 0.01087 |
def default_validity_start():
"""
Sets validity_start field to 1 day before the current date
(avoids "certificate not valid yet" edge case).
In some cases, because of timezone differences, when certificates
were just created they were considered valid in a timezone (eg: Europe)
but not yet valid in another timezone (eg: US).
This function intentionally returns naive datetime (not timezone aware),
so that certificates are valid from 00:00 AM in all timezones.
"""
start = datetime.now() - timedelta(days=1)
return start.replace(hour=0, minute=0, second=0, microsecond=0) | 0.001616 |
def generate(self, x, **kwargs):
"""
Return a tensor that constructs adversarial examples for the given
input. Generate uses tf.py_func in order to operate over tensors.
:param x: A tensor with the inputs.
:param kwargs: See `parse_params`
"""
assert self.sess is not None, \
'Cannot use `generate` when no `sess` was provided'
self.parse_params(**kwargs)
labels, nb_classes = self.get_or_guess_labels(x, kwargs)
attack = CWL2(self.sess, self.model, self.batch_size, self.confidence,
'y_target' in kwargs, self.learning_rate,
self.binary_search_steps, self.max_iterations,
self.abort_early, self.initial_const, self.clip_min,
self.clip_max, nb_classes,
x.get_shape().as_list()[1:])
def cw_wrap(x_val, y_val):
return np.array(attack.attack(x_val, y_val), dtype=self.np_dtype)
wrap = tf.py_func(cw_wrap, [x, labels], self.tf_dtype)
wrap.set_shape(x.get_shape())
return wrap | 0.001929 |
def get_last_update_of_model(self, model, **kwargs):
"""
Return the last time a given model's items were updated. Returns the
epoch if the items were never updated.
"""
qs = self.get_for_model(model)
if kwargs:
qs = qs.filter(**kwargs)
try:
return qs.order_by('-timestamp')[0].timestamp
except IndexError:
return datetime.datetime.fromtimestamp(0) | 0.004474 |
def _add_baseline_to_exclude_files(args):
"""
Modifies args.exclude_files in-place.
"""
baseline_name_regex = r'^{}$'.format(args.import_filename[0])
if not args.exclude_files:
args.exclude_files = baseline_name_regex
elif baseline_name_regex not in args.exclude_files:
args.exclude_files += r'|{}'.format(baseline_name_regex) | 0.002725 |
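A small usage sketch with a namespace object standing in for the parsed arguments. Note that the filename is interpolated without regex escaping, so '.secrets.baseline' yields the pattern '^.secrets.baseline$'.
import argparse

args = argparse.Namespace(import_filename=['.secrets.baseline'], exclude_files=None)
_add_baseline_to_exclude_files(args)
print(args.exclude_files)        # ^.secrets.baseline$

args = argparse.Namespace(import_filename=['.secrets.baseline'], exclude_files=r'tests/.*')
_add_baseline_to_exclude_files(args)
print(args.exclude_files)        # tests/.*|^.secrets.baseline$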