def get_bounds(self):
"""
Returns
-------
(start, end)
Datetime instants of beginning and end of data. If no data, will be: (None, None).
"""
start, end = None, None
if len(self._weather_series) == 0:
return start, end
for i in (0, -1):
# create or find instant
if self.has_tuple_instants:
row = self._weather_series.iloc[i, :]
instant = dt.datetime(row["year"], row["month"], row["day"], row["hour"], row["minute"])
else:
instant = self._weather_series.index[i].to_pydatetime()
# store
if i == 0:
start = instant
else:
end = instant
return start, end | 0.005006 |
def subset(self, interval: Interval,
flexibility: int = 2) -> "IntervalList":
"""
Returns an IntervalList that's a subset of this one, only containing
intervals that meet the "interval" parameter criterion. What "meet"
means is defined by the ``flexibility`` parameter.
``flexibility == 0``: permits only wholly contained intervals:
.. code-block:: none
interval:
I----------------I
intervals in self that will/won't be returned:
N---N N---N Y---Y N---N N---N
N---N N---N
``flexibility == 1``: permits overlapping intervals as well:
.. code-block:: none
I----------------I
N---N Y---Y Y---Y Y---Y N---N
N---N N---N
``flexibility == 2``: permits adjoining intervals as well:
.. code-block:: none
I----------------I
N---N Y---Y Y---Y Y---Y N---N
Y---Y Y---Y
"""
if flexibility not in [0, 1, 2]:
raise ValueError("subset: bad flexibility value")
permitted = []
for i in self.intervals:
if flexibility == 0:
ok = i.start > interval.start and i.end < interval.end
elif flexibility == 1:
ok = i.end > interval.start and i.start < interval.end
else:
ok = i.end >= interval.start and i.start <= interval.end
if ok:
permitted.append(i)
return IntervalList(permitted) | 0.001766 |
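# A minimal numeric sketch of the three flexibility tests above, using plain
# (start, end) tuples instead of the Interval/IntervalList classes (assumption).
ref = (10, 20)
candidates = [(12, 18), (5, 12), (20, 25), (0, 5)]
contained   = [c for c in candidates if c[0] > ref[0] and c[1] < ref[1]]    # flexibility == 0
overlapping = [c for c in candidates if c[1] > ref[0] and c[0] < ref[1]]    # flexibility == 1
adjoining   = [c for c in candidates if c[1] >= ref[0] and c[0] <= ref[1]]  # flexibility == 2
print(contained)    # [(12, 18)]
print(overlapping)  # [(12, 18), (5, 12)]
print(adjoining)    # [(12, 18), (5, 12), (20, 25)]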
async def update_pin(**payload):
"""Update the onboarding welcome message after recieving a "pin_added"
event from Slack. Update timestamp for welcome message as well.
"""
data = payload["data"]
web_client = payload["web_client"]
channel_id = data["channel_id"]
user_id = data["user"]
# Get the original tutorial sent.
onboarding_tutorial = onboarding_tutorials_sent[channel_id][user_id]
# Mark the pin task as completed.
onboarding_tutorial.pin_task_completed = True
# Get the new message payload
message = onboarding_tutorial.get_message_payload()
# Post the updated message in Slack
updated_message = await web_client.chat_update(**message)
# Update the timestamp saved on the onboarding tutorial object
onboarding_tutorial.timestamp = updated_message["ts"] | 0.0012 |
def complete_command_help(self, tokens: List[str], text: str, line: str, begidx: int, endidx: int) -> List[str]:
"""Supports the completion of sub-commands for commands through the cmd2 help command."""
for idx, token in enumerate(tokens):
if idx >= self._token_start_index:
if self._positional_completers:
# For now argparse only allows 1 sub-command group per level
# so this will only loop once.
for completers in self._positional_completers.values():
if token in completers:
return completers[token].complete_command_help(tokens, text, line, begidx, endidx)
else:
return self._cmd2_app.basic_complete(text, line, begidx, endidx, completers.keys())
return [] | 0.007982 |
def delete(self):
"""Delete the workspace from FireCloud.
Note:
This action cannot be undone. Be careful!
"""
r = fapi.delete_workspace(self.namespace, self.name)
fapi._check_response_code(r, 202) | 0.008032 |
def _generate_instances(self):
"""
ListNode item generator. Will be used internally by __iter__ and __getitem__
Yields:
ListNode items (instances)
"""
for node in self.node_stack:
yield node
while self._data:
yield self._make_instance(self._data.pop(0)) | 0.008876 |
def format_units(self, value, unit="B", optimal=5, auto=True, si=False):
"""
        Takes a value and formats it for user output, allowing a choice of unit,
        e.g. B, MiB, kbits/second. This is mainly intended for bytes/bits: it
        converts the value into a human-readable form. It has various
        additional options, but they are really only for special cases.
        The function returns a tuple containing the new value (kept as a number
        so that the user can still format it if required) and the unit that the
        value has been converted to.
        By supplying ``unit`` we can force those units to be used, e.g.
        ``unit=KiB`` forces the output to be in Kibibytes. By default we use
        non-SI units, but if the given unit is SI, e.g. kB, then we switch to
        SI units. Units can also be things like ``Mbit/sec``.
        If the ``auto`` parameter is False then the unit provided is used as-is.
        This only makes sense when the unit is singular, e.g. 'Bytes', and we
        want the result in bytes and not, say, converted to MBytes.
        ``optimal`` controls the size of the output value. We try to provide
        an output value of that number of characters (including the decimal
        point); it may also be less due to rounding. If a fixed unit is used
        the output may be more than this number of characters.
"""
UNITS = "KMGTPEZY"
DECIMAL_SIZE = 1000
BINARY_SIZE = 1024
CUTOFF = 1000
can_round = False
if unit:
            # try to guess the unit. Does it have a known prefix?
if unit[0].upper() in UNITS:
index = UNITS.index(unit[0].upper()) + 1
post = unit[1:]
si = len(unit) > 1 and unit[1] != "i"
if si:
post = post[1:]
if unit[1] == "b":
value *= 8
auto = False
else:
index = 0
post = unit
if si:
size = DECIMAL_SIZE
else:
size = BINARY_SIZE
if auto:
# we will try to use an appropriate prefix
if value < CUTOFF:
unit_out = post
else:
value /= size
for prefix in UNITS:
if abs(value) < CUTOFF:
break
value /= size
if si:
# si kilo is lowercase
if prefix == "K":
prefix = "k"
else:
post = "i" + post
unit_out = prefix + post
can_round = True
else:
# we are using a fixed unit
unit_out = unit
size = pow(size, index)
if size:
value /= size
can_round = True
if can_round and optimal and value:
# we will try to make the output value the desired size
            # we need to keep our value as a numeric type
places = int(log10(abs(value)))
if places >= optimal - 2:
value = int(value)
else:
value = round(value, max(optimal - places - 2, 0))
return value, unit_out | 0.000893 |
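# A standalone sketch of the auto-prefix scaling described in the docstring above,
# assuming binary (1024-based) steps and no forced unit; not the py3status API itself.
def human_bytes(value, size=1024, cutoff=1000):
    prefixes = "KMGTPEZY"
    if value < cutoff:
        return value, "B"
    value /= size
    for prefix in prefixes:
        if abs(value) < cutoff:
            break
        value /= size
    return round(value, 2), prefix + "iB"

print(human_bytes(512))          # (512, 'B')
print(human_bytes(5 * 1024**2))  # (5.0, 'MiB')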
def cached(fn, size=32):
''' this decorator creates a type safe lru_cache
around the decorated function. Unlike
functools.lru_cache, this will not crash when
unhashable arguments are passed to the function'''
assert callable(fn)
assert isinstance(size, int)
return overload(fn)(lru_cache(size, typed=True)(fn)) | 0.002959 |
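# A minimal illustration of the problem the decorator above guards against:
# a plain functools.lru_cache raises TypeError when given unhashable arguments.
from functools import lru_cache

@lru_cache(maxsize=32)
def double(x):
    return x * 2

print(double(3))         # 6, cached normally
try:
    double([1, 2, 3])    # lists are unhashable, so the cache lookup itself fails
except TypeError as exc:
    print("unhashable argument:", exc)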
def values_from(self, base):
"""
A reusable generator for increasing pointer-sized values from an address
(usually the stack).
"""
word_bytes = self._cpu.address_bit_size // 8
while True:
yield base
base += word_bytes | 0.010381 |
def refractive_index(CASRN, T=None, AvailableMethods=False, Method=None,
full_info=True):
r'''This function handles the retrieval of a chemical's refractive
index. Lookup is based on CASRNs. Will automatically select a data source
to use if no Method is provided; returns None if the data is not available.
Function has data for approximately 4500 chemicals.
Parameters
----------
CASRN : string
CASRN [-]
Returns
-------
RI : float
Refractive Index on the Na D line, [-]
T : float, only returned if full_info == True
Temperature at which refractive index reading was made
methods : list, only returned if AvailableMethods == True
List of methods which can be used to obtain RI with the given inputs
Other Parameters
----------------
Method : string, optional
A string for the method name to use, as defined by constants in
RI_methods
AvailableMethods : bool, optional
If True, function will determine which methods can be used to obtain
RI for the desired chemical, and will return methods instead of RI
full_info : bool, optional
If True, function will return the temperature at which the refractive
index reading was made
Notes
-----
Only one source is available in this function. It is:
    * 'CRC', a compilation of Organic RI data in [1]_.
Examples
--------
>>> refractive_index(CASRN='64-17-5')
(1.3611, 293.15)
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics, 95E. Boca Raton, FL: CRC press, 2014.
'''
def list_methods():
methods = []
if CASRN in CRC_RI_organic.index:
methods.append(CRC)
methods.append(NONE)
return methods
if AvailableMethods:
return list_methods()
if not Method:
Method = list_methods()[0]
if Method == CRC:
_RI = float(CRC_RI_organic.at[CASRN, 'RI'])
if full_info:
_T = float(CRC_RI_organic.at[CASRN, 'RIT'])
elif Method == NONE:
_RI, _T = None, None
else:
        raise Exception('Failure in function')
if full_info:
return _RI, _T
else:
return _RI | 0.00043 |
def set_char_callback(window, cbfun):
"""
Sets the Unicode character callback.
Wrapper for:
GLFWcharfun glfwSetCharCallback(GLFWwindow* window, GLFWcharfun cbfun);
"""
window_addr = ctypes.cast(ctypes.pointer(window),
ctypes.POINTER(ctypes.c_long)).contents.value
if window_addr in _char_callback_repository:
previous_callback = _char_callback_repository[window_addr]
else:
previous_callback = None
if cbfun is None:
cbfun = 0
c_cbfun = _GLFWcharfun(cbfun)
_char_callback_repository[window_addr] = (cbfun, c_cbfun)
cbfun = c_cbfun
_glfw.glfwSetCharCallback(window, cbfun)
if previous_callback is not None and previous_callback[0] != 0:
return previous_callback[0] | 0.001272 |
def load_empty(cls, path:PathOrStr, fn:PathOrStr):
"Load the state in `fn` to create an empty `LabelList` for inference."
return cls.load_state(path, pickle.load(open(Path(path)/fn, 'rb'))) | 0.019512 |
def merge_pres_feats(pres, features):
"""
Helper function to merge pres and features to support legacy features argument
"""
sub = []
for psub, fsub in zip(pres, features):
exp = []
for pexp, fexp in zip(psub, fsub):
lst = []
for p, f in zip(pexp, fexp):
p.update(f)
lst.append(p)
exp.append(lst)
sub.append(exp)
return sub | 0.004545 |
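# A tiny sketch of the nested merge above, assuming three levels of nesting
# (e.g. subject -> list -> event) of per-event dicts.
pres = [[[{'word': 'cat'}, {'word': 'dog'}]]]
features = [[[{'length': 3}, {'length': 3}]]]
print(merge_pres_feats(pres, features))
# [[[{'word': 'cat', 'length': 3}, {'word': 'dog', 'length': 3}]]]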
def register_default_prefixes(handler):
"""\
"""
for prefix, ns in _PREFIXES.iteritems():
handler.add_prefix(prefix, str(ns)) | 0.013793 |
def get_nonvaried_cfg_lbls(cfg_list, default_cfg=None, mainkey='_cfgname'):
r"""
TODO: this might only need to return a single value. Maybe not if the names
are different.
Args:
cfg_list (list):
default_cfg (None): (default = None)
Returns:
list: cfglbl_list
CommandLine:
python -m utool.util_gridsearch --exec-get_nonvaried_cfg_lbls
Example:
>>> # ENABLE_DOCTEST
>>> from utool.util_gridsearch import * # NOQA
>>> import utool as ut
>>> cfg_list = [{'_cfgname': 'test', 'f': 1, 'b': 1},
>>> {'_cfgname': 'test', 'f': 2, 'b': 1},
>>> {'_cfgname': 'test', 'f': 3, 'b': 1, 'z': 4}]
>>> default_cfg = None
>>> cfglbl_list = get_nonvaried_cfg_lbls(cfg_list, default_cfg)
>>> result = ('cfglbl_list = %s' % (ut.repr2(cfglbl_list),))
>>> print(result)
cfglbl_list = ['test:b=1', 'test:b=1', 'test:b=1']
"""
try:
cfgname_list = [cfg[mainkey] for cfg in cfg_list]
except KeyError:
cfgname_list = [''] * len(cfg_list)
nonvaried_cfg = partition_varied_cfg_list(cfg_list, default_cfg)[0]
cfglbl_list = [get_cfg_lbl(nonvaried_cfg, name) for name in cfgname_list]
return cfglbl_list | 0.000775 |
def dist(self, src, tar, max_offset=5, max_distance=0):
"""Return the normalized "common" Sift4 distance between two terms.
This is Sift4 distance, normalized to [0, 1].
Parameters
----------
src : str
Source string for comparison
tar : str
Target string for comparison
max_offset : int
The number of characters to search for matching letters
max_distance : int
The distance at which to stop and exit
Returns
-------
float
The normalized Sift4 distance
Examples
--------
>>> cmp = Sift4()
>>> round(cmp.dist('cat', 'hat'), 12)
0.333333333333
>>> cmp.dist('Niall', 'Neil')
0.4
>>> cmp.dist('Colin', 'Cuilen')
0.5
>>> cmp.dist('ATCG', 'TAGC')
0.5
"""
return self.dist_abs(src, tar, max_offset, max_distance) / (
max(len(src), len(tar), 1)
) | 0.00197 |
def get_v_distance(self, latlonalt1, latlonalt2):
        '''get the vertical distance between threat and vehicle'''
(lat1, lon1, alt1) = latlonalt1
(lat2, lon2, alt2) = latlonalt2
return alt2 - alt1 | 0.008889 |
def visit_Assign(self, node):
"""
Implement assignment walker.
Parse class properties defined via the property() function
"""
# [[[cog
# cog.out("print(pcolor('Enter assign visitor', 'magenta'))")
# ]]]
# [[[end]]]
# ###
# Class-level assignment may also be a class attribute that is not
# a managed attribute, record it anyway, no harm in doing so as it
# is not attached to a callable
if self._in_class(node):
element_full_name = self._pop_indent_stack(node, "prop")
code_id = (self._fname, node.lineno)
self._processed_line = node.lineno
self._callables_db[element_full_name] = {
"name": element_full_name,
"type": "prop",
"code_id": code_id,
"last_lineno": None,
}
self._reverse_callables_db[code_id] = element_full_name
# [[[cog
# code = """
# print(
# pcolor(
# 'Visiting property {0} @ {1}'.format(
# element_full_name, code_id[1]
# ),
# 'green'
# )
# )
# """
# cog.out(code)
# ]]]
# [[[end]]]
# Get property actions
self.generic_visit(node) | 0.001407 |
def mk_size(field):
"""Builds an identifier for a container type.
"""
name = field.type_id
if name == "string" and field.options.get('size', None):
return "%s[%d];" % (field.identifier, field.options.get('size').value)
elif name == "string":
return "%s[0];" % field.identifier
elif name == "array" and field.options.get('size', None):
return "%s[%d];" % (field.identifier, field.options.get('size').value)
elif name == "array":
return "%s[0];" % field.identifier
else:
return '%s;' % field.identifier | 0.014925 |
def to_sample_rdd(x, y, numSlices=None):
"""
    Convert x and y into RDD[Sample]
:param x: ndarray and the first dimension should be batch
:param y: ndarray and the first dimension should be batch
:param numSlices:
:return:
"""
sc = get_spark_context()
from bigdl.util.common import Sample
x_rdd = sc.parallelize(x, numSlices)
y_rdd = sc.parallelize(y, numSlices)
return x_rdd.zip(y_rdd).map(lambda item: Sample.from_ndarray(item[0], item[1])) | 0.004107 |
def get_bulb(self, mac):
"""
Returns a Bulb object corresponding to the bulb with the mac address
`mac` (a 6-byte bytestring).
"""
return self.bulbs.get(mac, Bulb('Bulb %s' % _bytes(mac), mac)) | 0.008584 |
def cc(self) -> Optional[Sequence[AddressHeader]]:
"""The ``Cc`` header."""
try:
return cast(Sequence[AddressHeader], self[b'cc'])
except KeyError:
return None | 0.009662 |
async def Prune(self, max_history_mb, max_history_time):
'''
max_history_mb : int
max_history_time : int
Returns -> None
'''
# map input types to rpc msg
_params = dict()
msg = dict(type='ActionPruner',
request='Prune',
version=1,
params=_params)
_params['max-history-mb'] = max_history_mb
_params['max-history-time'] = max_history_time
reply = await self.rpc(msg)
return reply | 0.003774 |
def execute_cleanup_tasks(ctx, cleanup_tasks, dry_run=False):
"""Execute several cleanup tasks as part of the cleanup.
REQUIRES: ``clean(ctx, dry_run=False)`` signature in cleanup tasks.
:param ctx: Context object for the tasks.
:param cleanup_tasks: Collection of cleanup tasks (as Collection).
:param dry_run: Indicates dry-run mode (bool)
"""
# pylint: disable=redefined-outer-name
executor = Executor(cleanup_tasks, ctx.config)
for cleanup_task in cleanup_tasks.tasks:
print("CLEANUP TASK: %s" % cleanup_task)
executor.execute((cleanup_task, dict(dry_run=dry_run))) | 0.00155 |
def trainGP(self,fast=False,scales0=None,fixed0=None,lambd=None):
"""
Train the gp
Args:
fast: if true and the gp has not been initialized, initializes a kronSum gp
scales0: initial variance components params
fixed0: initial fixed effect params
"""
assert self.n_terms>0, 'CVarianceDecomposition:: No variance component terms'
if not self.init: self.initGP(fast=fast)
# set lambda
        if lambd is not None: self.gp.setLambda(lambd)
# set scales0
        if scales0 is not None:
self.setScales(scales0)
# init gp params
self.vd.initGPparams()
# set fixed0
        if fixed0 is not None:
params = self.gp.getParams()
params['dataTerm'] = fixed0
self.gp.setParams(params)
# LIMIX CVARIANCEDECOMPOSITION TRAINING
conv =self.vd.trainGP()
self.cache['Sigma'] = None
self.cache['Hessian'] = None
return conv | 0.021033 |
def impute_element(self, records=('ATOM', 'HETATM'), inplace=False):
"""Impute element_symbol from atom_name section.
Parameters
----------
records : iterable, default: ('ATOM', 'HETATM')
Coordinate sections for which the element symbols should be
imputed.
        inplace : bool (default: False)
Performs the operation in-place if True and returns a copy of the
PDB DataFrame otherwise.
Returns
---------
DataFrame
"""
if inplace:
t = self.df
else:
t = self.df.copy()
for d in self.df:
t[d] = self.df[d].copy()
for sec in records:
t[sec]['element_symbol'] = \
t[sec][['atom_name', 'element_symbol']].\
apply(lambda x: x[0][1]
if len(x[1]) == 3
else x[0][0], axis=1)
return t | 0.002075 |
def get_appstruct(self):
""" return list of tuples keys and values corresponding to this model's
data """
result = []
for k in self._get_keys():
result.append((k, getattr(self, k)))
return result | 0.008097 |
def emoji(string):
    '''emot.emoji is used to detect emoji in text
>>> text = "I love python 👨 :-)"
>>> emot.emoji(text)
>>> {'value': ['👨'], 'mean': [':man:'], 'location': [[14, 14]], 'flag': True}
'''
__entities = {}
__value = []
__mean = []
__location = []
flag = True
try:
pro_string = str(string)
for pos,ej in enumerate(pro_string):
if ej in emo_unicode.UNICODE_EMO:
try:
__value.append(ej)
__mean.append(emo_unicode.UNICODE_EMO[ej])
__location.append([pos,pos])
except Exception as e:
flag = False
                    __entities = {"flag": False}
return __entities
except Exception as e:
flag = False
        __entities = {"flag": False}
return __entities
if len(__value) < 1:
flag = False
__entities = {
'value' : __value,
'mean' : __mean,
'location' : __location,
'flag' : flag
}
return __entities | 0.008065 |
def _exclude_paths_from_environ(env_prefix=''):
"""Environment value via `/login;/register`"""
paths = os.environ.get(env_prefix + 'WSGI_AUTH_EXCLUDE_PATHS')
if not paths:
return []
return paths.split(';') | 0.004367 |
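# Hypothetical usage sketch: the environment variable prefix and value are assumptions.
import os
os.environ['MYAPP_WSGI_AUTH_EXCLUDE_PATHS'] = '/login;/register'
print(_exclude_paths_from_environ('MYAPP_'))  # ['/login', '/register']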
def connection(self):
""" Provide the connection parameters for kombu's ConsumerMixin.
The `Connection` object is a declaration of connection parameters
that is lazily evaluated. It doesn't represent an established
connection to the broker at this point.
"""
heartbeat = self.container.config.get(
HEARTBEAT_CONFIG_KEY, DEFAULT_HEARTBEAT
)
transport_options = self.container.config.get(
TRANSPORT_OPTIONS_CONFIG_KEY, DEFAULT_TRANSPORT_OPTIONS
)
ssl = self.container.config.get(AMQP_SSL_CONFIG_KEY)
conn = Connection(self.amqp_uri,
transport_options=transport_options,
heartbeat=heartbeat,
ssl=ssl
)
return conn | 0.002392 |
def wait_for_host(self, host):
"""Throttle requests to one host."""
t = time.time()
if host in self.times:
due_time = self.times[host]
if due_time > t:
wait = due_time - t
time.sleep(wait)
t = time.time()
wait_time = random.uniform(self.wait_time_min, self.wait_time_max)
self.times[host] = t + wait_time | 0.004808 |
def get_variable_str(self):
"""
Utility method to get the variable value or 'var_name=value' if name is not None.
Note that values with large string representations will not get printed
:return:
"""
if self.var_name is None:
prefix = ''
else:
prefix = self.var_name
suffix = str(self.var_value)
if len(suffix) == 0:
suffix = "''"
elif len(suffix) > self.__max_str_length_displayed__:
suffix = ''
if len(prefix) > 0 and len(suffix) > 0:
return prefix + '=' + suffix
else:
return prefix + suffix | 0.004518 |
def index():
"""Display list of the user's repositories."""
github = GitHubAPI(user_id=current_user.id)
token = github.session_token
ctx = dict(connected=False)
if token:
# The user is authenticated and the token we have is still valid.
if github.account.extra_data.get('login') is None:
github.init_account()
db.session.commit()
# Sync if needed
if request.method == 'POST' or github.check_sync():
# When we're in an XHR request, we want to synchronously sync hooks
github.sync(async_hooks=(not request.is_xhr))
db.session.commit()
# Generate the repositories view object
extra_data = github.account.extra_data
repos = extra_data['repos']
if repos:
# 'Enhance' our repos dict, from our database model
db_repos = Repository.query.filter(
Repository.github_id.in_([int(k) for k in repos.keys()]),
).all()
for repo in db_repos:
repos[str(repo.github_id)]['instance'] = repo
repos[str(repo.github_id)]['latest'] = GitHubRelease(
repo.latest_release())
last_sync = humanize.naturaltime(
(utcnow() - parse_timestamp(extra_data['last_sync'])))
ctx.update({
'connected': True,
'repos': sorted(repos.items(), key=lambda x: x[1]['full_name']),
'last_sync': last_sync,
})
return render_template(current_app.config['GITHUB_TEMPLATE_INDEX'], **ctx) | 0.000634 |
def segment_length(curve, start, end, start_point, end_point,
error=LENGTH_ERROR, min_depth=LENGTH_MIN_DEPTH, depth=0):
"""Recursively approximates the length by straight lines"""
mid = (start + end)/2
mid_point = curve.point(mid)
length = abs(end_point - start_point)
first_half = abs(mid_point - start_point)
second_half = abs(end_point - mid_point)
length2 = first_half + second_half
if (length2 - length > error) or (depth < min_depth):
# Calculate the length of each segment:
depth += 1
return (segment_length(curve, start, mid, start_point, mid_point,
error, min_depth, depth) +
segment_length(curve, mid, end, mid_point, end_point,
error, min_depth, depth))
# This is accurate enough.
return length2 | 0.001152 |
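# A self-contained sketch of the same bisection idea, approximating the length of a
# unit quarter circle with complex numbers as points (assumption: fixed tolerances).
import cmath

def point(t):  # t in [0, 1] -> point on the quarter circle
    return cmath.exp(1j * t * cmath.pi / 2)

def seg_len(t0, t1, p0, p1, error=1e-9, min_depth=5, depth=0):
    tm = (t0 + t1) / 2
    pm = point(tm)
    coarse = abs(p1 - p0)               # one straight chord
    fine = abs(pm - p0) + abs(p1 - pm)  # two shorter chords
    if (fine - coarse > error) or (depth < min_depth):
        depth += 1
        return (seg_len(t0, tm, p0, pm, error, min_depth, depth) +
                seg_len(tm, t1, pm, p1, error, min_depth, depth))
    return fine

print(seg_len(0, 1, point(0), point(1)))  # ~1.5708, i.e. pi/2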
def read_namespaced_pod(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_pod # noqa: E501
read the specified Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Pod (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1Pod
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_pod_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
(data) = self.read_namespaced_pod_with_http_info(name, namespace, **kwargs) # noqa: E501
return data | 0.001471 |
def col2name(col_item):
"helper for SyntheticTable.columns. takes something from SelectX.cols, returns a string column name"
if isinstance(col_item, sqparse2.NameX): return col_item.name
elif isinstance(col_item, sqparse2.AliasX): return col_item.alias
else: raise TypeError(type(col_item), col_item) | 0.029221 |
def setup(app):
''' Required Sphinx extension setup function. '''
app.add_node(
bokehjs_content,
html=(
html_visit_bokehjs_content,
html_depart_bokehjs_content
)
)
app.add_directive('bokehjs-content', BokehJSContent) | 0.003571 |
def mirror_video(self, is_mirror, callback=None):
'''
Mirror video
``is_mirror``: 0 not mirror, 1 mirror
'''
params = {'isMirror': is_mirror}
return self.execute_command('mirrorVideo', params, callback=callback) | 0.007752 |
def plot_filter_transmissions(log, filterList):
"""
*Plot the filters on a single plot*
**Key Arguments:**
- ``log`` -- logger
- ``filterList`` -- list of absolute paths to plain text files containing filter transmission profiles
**Return:**
- None
"""
################ > IMPORTS ################
## STANDARD LIB ##
## THIRD PARTY ##
import matplotlib.pyplot as plt
import numpy as np
## LOCAL APPLICATION ##
################ > VARIABLE SETTINGS ######
################ >ACTION(S) ################
for filterFile in filterList:
data = np.genfromtxt(filterFile)
plt.plot(data[:, 0], data[:, 1])
plt.show()
return | 0.011127 |
def get_submission_filenames(self, tournament=None, round_num=None):
"""Get filenames of the submission of the user.
Args:
tournament (int): optionally filter by ID of the tournament
round_num (int): optionally filter round number
Returns:
list: list of user filenames (`dict`)
            Each filename in the list has the following structure:
* filename (`str`)
* round_num (`int`)
* tournament (`int`)
Example:
>>> NumerAPI().get_submission_filenames(3, 111)
[{'filename': 'model57-dMpHpYMPIUAF.csv',
'round_num': 111,
'tournament': 3}]
"""
query = '''
query {
user {
submissions {
filename
selected
round {
tournament
number
}
}
}
}
'''
data = self.raw_query(query, authorization=True)['data']['user']
filenames = [{"round_num": item['round']['number'],
"tournament": item['round']['tournament'],
"filename": item['filename']}
for item in data['submissions'] if item['selected']]
if round_num is not None:
filenames = [f for f in filenames if f['round_num'] == round_num]
if tournament is not None:
filenames = [f for f in filenames if f['tournament'] == tournament]
filenames.sort(key=lambda f: (f['round_num'], f['tournament']))
return filenames | 0.001203 |
def export_coreml(self, filename):
"""
Export the model in Core ML format.
Parameters
----------
filename: str
A valid filename where the model can be saved.
Examples
--------
>>> model.export_coreml("MyModel.mlmodel")
"""
from turicreate.toolkits import _coreml_utils
display_name = "boosted trees classifier"
short_description = _coreml_utils._mlmodel_short_description(display_name)
context = {"mode" : "classification",
"model_type" : "boosted_trees",
"version": _turicreate.__version__,
"class": self.__class__.__name__,
"short_description": short_description,
'user_defined':{
'turicreate_version': _turicreate.__version__
}
}
self._export_coreml_impl(filename, context) | 0.007368 |
def _create_credentials(self, n):
"""
Create security credentials, if necessary.
"""
if not n:
return n
elif isinstance(n, SecurityCreds):
return n
elif isinstance(n, dict):
return SecurityCreds(**n)
else:
raise TypeError("%s is not a valid security configuration"
% repr(n)) | 0.004914 |
def _parse_spectra(self, line):
"""Parse and store the spectral details
"""
if line in ['\n', '\r\n', '//\n', '//\r\n', '', '//']:
self.start_spectra = False
self.current_id_meta += 1
self.collect_meta = True
return
splist = line.split()
if len(splist) > 2 and not self.ignore_additional_spectra_info:
additional_info = ''.join(map(str, splist[2:len(splist)]))
else:
additional_info = ''
srow = (
self.current_id_spectra, float(splist[0]), float(splist[1]), additional_info,
self.current_id_meta)
self.spectra_all.append(srow)
self.current_id_spectra += 1 | 0.004121 |
async def addSignalHandlers(self):
'''
Register SIGINT signal handler with the ioloop to cancel the currently running cmdloop task.
'''
def sigint():
self.printf('<ctrl-c>')
if self.cmdtask is not None:
self.cmdtask.cancel()
self.loop.add_signal_handler(signal.SIGINT, sigint) | 0.00838 |
def copy_w_id_suffix(elem, suffix="_copy"):
"""Make a deep copy of the provided tree, altering ids."""
mycopy = deepcopy(elem)
for id_elem in mycopy.xpath('//*[@id]'):
id_elem.set('id', id_elem.get('id') + suffix)
return mycopy | 0.003984 |
def quokka_heatmap(size=None, extract=None):
"""
Returns a heatmap (here: depth map) for the standard example quokka image.
Parameters
----------
size : None or float or tuple of int, optional
See :func:`imgaug.quokka`.
extract : None or 'square' or tuple of number or imgaug.BoundingBox or imgaug.BoundingBoxesOnImage
See :func:`imgaug.quokka`.
Returns
-------
result : imgaug.HeatmapsOnImage
Depth map as an heatmap object. Values close to 0.0 denote objects that are close to
the camera. Values close to 1.0 denote objects that are furthest away (among all shown
objects).
"""
# TODO get rid of this deferred import
from imgaug.augmentables.heatmaps import HeatmapsOnImage
img = imageio.imread(QUOKKA_DEPTH_MAP_HALFRES_FP, pilmode="RGB")
img = imresize_single_image(img, (643, 960), interpolation="cubic")
if extract is not None:
bb = _quokka_normalize_extract(extract)
img = bb.extract_from_image(img)
if size is None:
size = img.shape[0:2]
shape_resized = _compute_resized_shape(img.shape, size)
img = imresize_single_image(img, shape_resized[0:2])
img_0to1 = img[..., 0] # depth map was saved as 3-channel RGB
img_0to1 = img_0to1.astype(np.float32) / 255.0
img_0to1 = 1 - img_0to1 # depth map was saved as 0 being furthest away
return HeatmapsOnImage(img_0to1, shape=img_0to1.shape[0:2] + (3,)) | 0.002736 |
def align_cell(fmt, elem, width):
"""Returns an aligned element."""
if fmt == "<":
return elem + ' ' * (width - len(elem))
if fmt == ">":
return ' ' * (width - len(elem)) + elem
return elem | 0.004525 |
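# Quick illustration of the alignment helper above.
print('[' + align_cell('<', 'abc', 6) + ']')  # [abc   ]
print('[' + align_cell('>', 'abc', 6) + ']')  # [   abc]
print('[' + align_cell('^', 'abc', 6) + ']')  # unknown format returns the element unchanged: [abc]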
def compute(self, xt1, yt1, xt, yt, theta1t1, theta2t1, theta1, theta2, learn):
"""
The main function to call.
If learn is False, it will print a prediction: (theta1, theta2)
"""
dx = xt - xt1
dy = yt - yt1
self.minDx = min(self.minDx, dx)
self.maxDx = max(self.maxDx, dx)
print >>sys.stderr, "Learn: ", learn
print >>sys.stderr, "Training iterations: ", self.trainingIterations
print >>sys.stderr, "Test iterations: ", self.testIterations
print >>sys.stderr, "Xt's: ", xt1, yt1, xt, yt, "Delta's: ", dx, dy
print >>sys.stderr, "Theta t-1: ", theta1t1, theta2t1, "t:",theta1, theta2
bottomUpSDR = self.encodeThetas(theta1, theta2)
self.decodeThetas(bottomUpSDR)
# Encode the inputs appropriately and train the HTM
externalSDR = self.encodeDeltas(dx,dy)
if learn:
# During learning we provide the current pose angle as bottom up input
bottomUpSDR = self.encodeThetas(theta1, theta2)
self.trainTM(bottomUpSDR, externalSDR)
self.trainingIterations += 1
else:
# During inference we provide the previous pose angle as bottom up input
# If we don't get a prediction, we keep trying random shifts until we get
# something.
predictedCells = []
newt1 = theta1t1
newt2 = theta2t1
newdx = dx
newdy = dy
angleRange = 10
numAttempts = 1
while len(predictedCells) == 0 and numAttempts < 3:
print >> sys.stderr, "Attempt:", numAttempts,
print >> sys.stderr, "Trying to predict using thetas:", newt1, newt2,
print >> sys.stderr, "and deltas:", newdx, newdy
externalSDR = self.encodeDeltas(newdx, newdy)
bottomUpSDR = self.encodeThetas(newt1, newt2)
predictedCells = self.inferTM(bottomUpSDR, externalSDR)
predictedValues = self.decodeThetas(predictedCells)
print >> sys.stderr, "Predicted values",predictedValues
newt1 = theta1t1 + random.randrange(-angleRange,angleRange)
newt2 = theta2t1 + random.randrange(-angleRange,angleRange)
newdx = dx + (random.random()/2.0 - 0.25)
newdy = dy + (random.random()/2.0 - 0.25)
# Ensure we are in bounds otherwise we get an exception
newt1 = min(self.maxTheta1, max(self.minTheta1, newt1))
newt2 = min(self.maxTheta2, max(self.minTheta2, newt2))
newdx = min(2.0, max(-2.0, newdx))
newdy = min(2.0, max(-2.0, newdy))
numAttempts += 1
if numAttempts % 10 == 0: angleRange += 2
print predictedValues
# Accumulate errors for our metrics
if len(predictedCells) == 0:
self.numMissedPredictions += 1
self.testIterations += 1
error = abs(predictedValues[0] - theta1) + abs(predictedValues[1] - theta2)
self.totalPredictionError += error
if self.maxPredictionError < error:
self.maxPredictionError = error
print >> sys.stderr, "Error: ", error
print >> sys.stderr | 0.01041 |
def get_cardinality(self, field=None):
"""
Create a cardinality aggregation object and add it to the aggregation dict
:param field: the field present in the index that is to be aggregated
:returns: self, which allows the method to be chainable with the other methods
"""
if not field:
raise AttributeError("Please provide field to apply aggregation to!")
agg = A("cardinality", field=field, precision_threshold=self.precision_threshold)
self.aggregations['cardinality_' + field] = agg
return self | 0.010309 |
def parse_qs(qs):
"""Helper func to parse query string with py2/py3 compatibility
Ensures that dict keys are native strings.
"""
result = {}
qs = bstr(qs, 'latin1')
pairs = [s2 for s1 in qs.split(b'&') for s2 in s1.split(b';')]
uq = urlparse.unquote if PY2 else urlparse.unquote_to_bytes
for name_value in pairs:
if not name_value:
continue
nv = name_value.split(b'=', 1)
if len(nv) != 2:
nv.append(b'')
name = nv[0].replace(b'+', b' ')
name = uq(name)
if not PY2: # pragma: no cover py2
name = ustr(name, 'latin1')
value = nv[1].replace(b'+', b' ')
value = uq(value)
result.setdefault(name, []).append(value)
return result | 0.001299 |
def clean_new(self, value):
"""Return a new object instantiated with cleaned data."""
value = self.schema_class(value).full_clean()
return self.object_class(**value) | 0.010582 |
def parse_line_headers(self, line):
"""We must build headers carefully: there are multiple blank values
in the header row, and the instrument may just add more for all
we know.
"""
headers = line.split(",")
for i, v in enumerate(headers):
if v:
headers[i] = v
else:
headers[i] = str(i)
self.headers = headers | 0.004762 |
def select_good_pixel_region(hits, col_span, row_span, min_cut_threshold=0.2, max_cut_threshold=2.0):
'''Takes the hit array and masks all pixels with a certain occupancy.
Parameters
----------
hits : array like
If dim > 2 the additional dimensions are summed up.
min_cut_threshold : float
A number to specify the minimum threshold, which pixel to take. Pixels are masked if
occupancy < min_cut_threshold * np.ma.median(occupancy)
0 means that no pixels are masked
max_cut_threshold : float
A number to specify the maximum threshold, which pixel to take. Pixels are masked if
occupancy > max_cut_threshold * np.ma.median(occupancy)
Can be set to None that no pixels are masked by max_cut_threshold
Returns
-------
numpy.ma.array, shape=(80,336)
The hits array with masked pixels.
'''
hits = np.sum(hits, axis=(-1)).astype('u8')
mask = np.ones(shape=(80, 336), dtype=np.uint8)
mask[min(col_span):max(col_span) + 1, min(row_span):max(row_span) + 1] = 0
ma = np.ma.masked_where(mask, hits)
if max_cut_threshold is not None:
return np.ma.masked_where(np.logical_or(ma < min_cut_threshold * np.ma.median(ma), ma > max_cut_threshold * np.ma.median(ma)), ma)
else:
return np.ma.masked_where(ma < min_cut_threshold * np.ma.median(ma), ma) | 0.004354 |
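# A hypothetical usage sketch: random occupancy data shaped like the 80x336 pixel
# matrix assumed by the function above, with one extra dimension to be summed away.
import numpy as np
np.random.seed(0)
hits = np.random.poisson(100, size=(80, 336, 1))
masked = select_good_pixel_region(hits, col_span=(10, 70), row_span=(20, 300))
print(masked.count(), "unmasked pixels out of", masked.size)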
def stream_data(self, ostream):
"""Writes our data directly to the given output stream
:param ostream: File object compatible stream object.
:return: self"""
istream = self.repo.odb.stream(self.binsha)
stream_copy(istream, ostream)
return self | 0.006873 |
def user_active_directory_deactivate(user, attributes, created, updated):
"""
Deactivate user accounts based on Active Directory's
userAccountControl flags. Requires 'userAccountControl'
to be included in LDAP_SYNC_USER_EXTRA_ATTRIBUTES.
"""
try:
user_account_control = int(attributes['userAccountControl'][0])
if user_account_control & 2:
user.is_active = False
except KeyError:
pass | 0.002179 |
def matches_rule(message, rule, destinations = None) :
"does Message message match against the specified rule."
if not isinstance(message, Message) :
raise TypeError("message must be a Message")
#end if
rule = unformat_rule(rule)
eavesdrop = rule.get("eavesdrop", "false") == "true"
def match_message_type(expect, actual) :
return \
actual == Message.type_from_string(expect)
#end match_message_type
def match_path_namespace(expect, actual) :
return \
(
actual != None
and
(
expect == actual
or
actual.startswith(expect) and (expect == "/" or actual[len(expect)] == "/")
)
)
#end match_path_namespace
def match_dotted_namespace(expect, actual) :
return \
(
actual != None
and
(
expect == actual
or
actual.startswith(expect) and actual[len(expect)] == "."
)
)
#end match_dotted_namespace
def get_nth_arg(msg, n, expect_types) :
msg_signature = parse_signature(msg.signature)
if n >= len(msg_signature) :
raise IndexError("arg nr %d beyond nr args %d" % (n, len(msg_signature)))
#end if
val = msg.all_objects[n]
valtype = msg_signature[n]
if valtype not in expect_types :
if False :
raise TypeError \
(
"expecting one of types %s, not %s for arg %d val %s"
%
((repr(expect_types), repr(valtype), n, repr(val)))
)
#end if
val = None # never match
#end if
return \
val
#end get_nth_arg
def get_arg_0_str(message) :
return \
get_nth_arg(message, 0, [BasicType(TYPE.STRING)])
#end get_arg_0_str
def match_arg_paths(expect, actual) :
return \
(
actual != None
and
(
expect == actual
or
expect.endswith("/") and actual.startswith(expect)
or
actual.endswith("/") and expect.startswith(actual)
)
)
#end match_arg_paths
match_types = \
( # note that message attribute value of None will fail to match
# any expected string value, which is exactly what we want
("type", None, match_message_type, None),
("sender", None, operator.eq, None),
("interface", None, operator.eq, None),
("member", None, operator.eq, None),
("path", None, operator.eq, None),
("destination", None, operator.eq, None),
("path_namespace", "path", match_path_namespace, None),
("arg0namespace", None, match_dotted_namespace, get_arg_0_str),
# “arg«n»path” handled specially below
)
#begin matches_rule
keys_used = set(rule.keys()) - {"eavesdrop"}
matches = \
(
eavesdrop
or
destinations == None
or
message.destination == None
or
message.destination in destinations
)
if matches :
try_matching = iter(match_types)
while True :
try_rule = next(try_matching, None)
if try_rule == None :
break
rulekey, attrname, action, accessor = try_rule
if attrname == None :
attrname = rulekey
#end if
if rulekey in rule :
if accessor != None :
val = accessor(message)
else :
val = getattr(message, attrname)
#end if
keys_used.remove(rulekey)
if not action(rule[rulekey], val) :
matches = False
break
#end if
#end if
#end while
#end if
if matches :
try_matching = iter(rule.keys())
while True :
try_key = next(try_matching, None)
if try_key == None :
break
if try_key.startswith("arg") and not try_key.endswith("namespace") :
argnr = try_key[3:]
is_path = argnr.endswith("path")
if is_path :
argnr = argnr[:-4]
#end if
argnr = int(argnr)
if not (0 <= argnr < 64) :
raise ValueError("argnr %d out of range" % argnr)
#end if
argval = get_nth_arg \
(
message,
argnr,
[BasicType(TYPE.STRING)] + ([], [BasicType(TYPE.OBJECT_PATH)])[is_path]
)
keys_used.remove(try_key)
if not (operator.eq, match_arg_paths)[is_path](rule[try_key], argval) :
matches = False
break
#end if
#end if
#end while
#end if
if matches and len(keys_used) != 0 :
# fixme: not checking for unrecognized rule keys if I didn’t try matching them all
raise KeyError("unrecognized rule keywords: %s" % ", ".join(sorted(keys_used)))
#end if
return \
matches | 0.01549 |
def _replace_rlhist_multiline(self, source_raw, hlen_before_cell):
"""Store multiple lines as a single entry in history"""
# do nothing without readline or disabled multiline
if not self.has_readline or not self.multiline_history:
return hlen_before_cell
# windows rl has no remove_history_item
if not hasattr(self.readline, "remove_history_item"):
return hlen_before_cell
# skip empty cells
if not source_raw.rstrip():
return hlen_before_cell
# nothing changed do nothing, e.g. when rl removes consecutive dups
hlen = self.readline.get_current_history_length()
if hlen == hlen_before_cell:
return hlen_before_cell
for i in range(hlen - hlen_before_cell):
self.readline.remove_history_item(hlen - i - 1)
stdin_encoding = get_stream_enc(sys.stdin, 'utf-8')
self.readline.add_history(py3compat.unicode_to_str(source_raw.rstrip(),
stdin_encoding))
return self.readline.get_current_history_length() | 0.00271 |
def search(self, search, **kwargs):
"""
Search for Repository Configurations based on internal or external url, ignoring the protocol and \".git\" suffix. The matching is done using LIKE.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.search(search, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str search: Url part to search for (required)
:param int page_index: Page Index
:param int page_size: Pagination size
:param str sort: Sorting RSQL
:return: RepositoryConfigurationPage
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.search_with_http_info(search, **kwargs)
else:
(data) = self.search_with_http_info(search, **kwargs)
return data | 0.003147 |
def gaussian_window(t, params):
"""
Calculates a Gaussian window function in the time domain which will broaden
peaks in the frequency domain by params["line_broadening"] Hertz.
:param t:
:param params:
:return:
"""
window = suspect.basis.gaussian(t, 0, 0, params["line_broadening"])
# the above gaussian function returns an area 1 fid, for a windowing
# function we need to be area preserving (first point must be 1)
return window / window[0] | 0.002049 |
def create_container_service(access_token, subscription_id, resource_group, service_name, \
agent_count, agent_vm_size, agent_dns, master_dns, admin_user, location, public_key=None,\
master_count=3, orchestrator='DCOS', app_id=None, app_secret=None, admin_password=None, \
ostype='Linux'):
'''Create a new container service - include app_id and app_secret if using Kubernetes.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
service_name (str): Name of container service.
agent_count (int): The number of agent VMs.
agent_vm_size (str): VM size of agents, e.g. Standard_D1_v2.
agent_dns (str): A unique DNS string for the agent DNS.
master_dns (str): A unique string for the master DNS.
admin_user (str): Admin user name.
location (str): Azure data center location, e.g. westus.
public_key (str): RSA public key (utf-8).
master_count (int): Number of master VMs.
orchestrator (str): Container orchestrator. E.g. DCOS, Kubernetes.
app_id (str): Application ID for Kubernetes.
app_secret (str): Application secret for Kubernetes.
admin_password (str): Admin user password.
        ostype (str): Operating system. Windows or Linux.
Returns:
HTTP response. Container service JSON model.
'''
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerService/ContainerServices/', service_name,
'?api-version=', ACS_API])
acs_body = {'location': location}
properties = {'orchestratorProfile': {'orchestratorType': orchestrator}}
properties['masterProfile'] = {'count': master_count, 'dnsPrefix': master_dns}
ap_profile = {'name': 'AgentPool1'}
ap_profile['count'] = agent_count
ap_profile['vmSize'] = agent_vm_size
ap_profile['dnsPrefix'] = agent_dns
properties['agentPoolProfiles'] = [ap_profile]
if ostype == 'Linux':
linux_profile = {'adminUsername': admin_user}
linux_profile['ssh'] = {'publicKeys': [{'keyData': public_key}]}
properties['linuxProfile'] = linux_profile
else: # Windows
windows_profile = {'adminUsername': admin_user, 'adminPassword': admin_password}
properties['windowsProfile'] = windows_profile
if orchestrator == 'Kubernetes':
sp_profile = {'ClientID': app_id}
sp_profile['Secret'] = app_secret
properties['servicePrincipalProfile'] = sp_profile
acs_body['properties'] = properties
body = json.dumps(acs_body)
return do_put(endpoint, body, access_token) | 0.005616 |
def scale_and_crop_with_subject_location(im, size, subject_location=False,
zoom=None, crop=False, upscale=False,
**kwargs):
"""
Like ``easy_thumbnails.processors.scale_and_crop``, but will use the
coordinates in ``subject_location`` to make sure that that part of the
image is in the center or at least somewhere on the cropped image.
    Please note that this does *not* work correctly if the image has been
resized by a previous processor (e.g ``autocrop``).
``crop`` needs to be set for this to work, but any special cropping
parameters will be ignored.
"""
subject_location = normalize_subject_location(subject_location)
if not (subject_location and crop):
# use the normal scale_and_crop
return processors.scale_and_crop(im, size, zoom=zoom, crop=crop,
upscale=upscale, **kwargs)
# for here on we have a subject_location and cropping is on
# --snip-- this is a copy and paste of the first few
# lines of ``scale_and_crop``
source_x, source_y = [float(v) for v in im.size]
target_x, target_y = [float(v) for v in size]
if crop or not target_x or not target_y:
scale = max(target_x / source_x, target_y / source_y)
else:
scale = min(target_x / source_x, target_y / source_y)
# Handle one-dimensional targets.
if not target_x:
target_x = source_x * scale
elif not target_y:
target_y = source_y * scale
if zoom:
if not crop:
target_x = round(source_x * scale)
target_y = round(source_y * scale)
scale *= (100 + int(zoom)) / 100.0
if scale < 1.0 or (scale > 1.0 and upscale):
im = im.resize((int(source_x * scale), int(source_y * scale)),
resample=Image.ANTIALIAS)
# --endsnip-- begin real code
# ===============================
# subject location aware cropping
# ===============================
# res_x, res_y: the resolution of the possibly already resized image
res_x, res_y = [float(v) for v in im.size]
# subj_x, subj_y: the position of the subject (maybe already re-scaled)
subj_x = res_x * float(subject_location[0]) / source_x
subj_y = res_y * float(subject_location[1]) / source_y
ex = (res_x - min(res_x, target_x)) / 2
ey = (res_y - min(res_y, target_y)) / 2
fx, fy = res_x - ex, res_y - ey
# box_width, box_height: dimensions of the target image
box_width, box_height = fx - ex, fy - ey
# try putting the box in the center around the subject point
# (this will be partially outside of the image in most cases)
tex, tey = subj_x - (box_width / 2), subj_y - (box_height / 2)
tfx, tfy = subj_x + (box_width / 2), subj_y + (box_height / 2)
if tex < 0:
# its out of the img to the left, move both to the right until tex is 0
tfx = tfx - tex # tex is negative!
tex = 0
elif tfx > res_x:
# its out of the img to the right
tex = tex - (tfx - res_x)
tfx = res_x
if tey < 0:
# its out of the img to the top, move both to the bottom until tey is 0
        tfy = tfy - tey # tey is negative!
tey = 0
elif tfy > res_y:
# its out of the img to the bottom
tey = tey - (tfy - res_y)
tfy = res_y
if ex or ey:
crop_box = ((int(tex), int(tey), int(tfx), int(tfy)))
if FILER_SUBJECT_LOCATION_IMAGE_DEBUG:
# draw elipse on focal point for Debugging
draw = ImageDraw.Draw(im)
esize = 10
draw.ellipse(((subj_x - esize, subj_y - esize),
(subj_x + esize, subj_y + esize)), outline="#FF0000")
im = im.crop(crop_box)
return im | 0.00026 |
def readinto(self, b):
"""Read up to len(b) bytes into the writable buffer *b* and return
the number of bytes read. If the socket is non-blocking and no bytes
are available, None is returned.
If *b* is non-empty, a 0 return value indicates that the connection
was shutdown at the other end.
"""
self._checkClosed()
self._checkReadable()
if self._timeout_occurred:
raise IOError("cannot read from timed out object")
while True:
try:
return self._sock.recv_into(b)
except timeout:
self._timeout_occurred = True
raise
# except InterruptedError:
# continue
except error as e:
if e.args[0] in _blocking_errnos:
return None
raise | 0.00227 |
def disconnect_socket(self):
"""
Disconnect the underlying socket connection
"""
self.running = False
if self.socket is not None:
if self.__need_ssl():
#
# Even though we don't want to use the socket, unwrap is the only API method which does a proper SSL
# shutdown
#
try:
self.socket = self.socket.unwrap()
except Exception:
#
# unwrap seems flaky on Win with the back-ported ssl mod, so catch any exception and log it
#
_, e, _ = sys.exc_info()
log.warning(e)
elif hasattr(socket, 'SHUT_RDWR'):
try:
self.socket.shutdown(socket.SHUT_RDWR)
except socket.error:
_, e, _ = sys.exc_info()
# ignore when socket already closed
if get_errno(e) != errno.ENOTCONN:
log.warning("Unable to issue SHUT_RDWR on socket because of error '%s'", e)
#
# split this into a separate check, because sometimes the socket is nulled between shutdown and this call
#
if self.socket is not None:
try:
self.socket.close()
except socket.error:
_, e, _ = sys.exc_info()
log.warning("Unable to close socket because of error '%s'", e)
self.current_host_and_port = None
self.socket = None
self.notify('disconnected') | 0.003679 |
def iter_nautilus(method):
""" Iterate NAUTILUS method either interactively, or using given preferences if given
Parameters
----------
method : instance of NAUTILUS subclass
Fully initialized NAUTILUS method instance
"""
solution = None
while method.current_iter:
preference_class = init_nautilus(method)
pref = preference_class(method, None)
default = ",".join(map(str, pref.default_input()))
while method.current_iter:
method.print_current_iteration()
pref_input = _prompt_wrapper(
u"Preferences: ",
default=default,
validator=VectorValidator(method, pref),
)
cmd = _check_cmd(pref_input)
if cmd:
solution = method.zh
break
pref = preference_class(
method, np.fromstring(pref_input, dtype=np.float, sep=",")
)
default = ",".join(map(str, pref.pref_input))
solution, _ = method.next_iteration(pref)
if cmd and list(cmd)[0] == "c":
break
return solution | 0.001735 |
async def extended_analog(self, pin, data):
"""
This method will send an extended-data analog write command to the
selected pin.
:param pin: 0 - 127
:param data: 0 - 0xfffff
:returns: No return value
"""
analog_data = [pin, data & 0x7f, (data >> 7) & 0x7f, (data >> 14) & 0x7f]
await self._send_sysex(PrivateConstants.EXTENDED_ANALOG, analog_data) | 0.007109 |
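# A small sketch of the 7-bit packing performed above; Firmata sysex payload bytes
# must each stay below 0x80, so the larger value is split into three 7-bit chunks.
pin, data = 9, 0x12345
packed = [pin, data & 0x7f, (data >> 7) & 0x7f, (data >> 14) & 0x7f]
print([hex(b) for b in packed])  # ['0x9', '0x45', '0x46', '0x4']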
def binary(self):
"""
Get the object this function belongs to.
:return: The object this function belongs to.
"""
return self._project.loader.find_object_containing(self.addr, membership_check=False) | 0.012552 |
def clean_download_cache(self, args):
""" Deletes a download cache for recipes passed as arguments. If no
argument is passed, it'll delete *all* downloaded caches. ::
p4a clean_download_cache kivy,pyjnius
This does *not* delete the build caches or final distributions.
"""
ctx = self.ctx
if hasattr(args, 'recipes') and args.recipes:
for package in args.recipes:
remove_path = join(ctx.packages_path, package)
if exists(remove_path):
shutil.rmtree(remove_path)
info('Download cache removed for: "{}"'.format(package))
else:
warning('No download cache found for "{}", skipping'.format(
package))
else:
if exists(ctx.packages_path):
shutil.rmtree(ctx.packages_path)
info('Download cache removed.')
else:
print('No cache found at "{}"'.format(ctx.packages_path)) | 0.002871 |
def sign_in(self, timeout=60, safe=True, tries=1, channel=None):
'''
Send a sign in request to the master, sets the key information and
returns a dict containing the master publish interface to bind to
and the decrypted aes key for transport decryption.
:param int timeout: Number of seconds to wait before timing out the sign-in request
:param bool safe: If True, do not raise an exception on timeout. Retry instead.
:param int tries: The number of times to try to authenticate before giving up.
:raises SaltReqTimeoutError: If the sign-in request has timed out and :param safe: is not set
:return: Return a string on failure indicating the reason for failure. On success, return a dictionary
with the publication port and the shared AES key.
'''
auth = {}
auth_timeout = self.opts.get('auth_timeout', None)
if auth_timeout is not None:
timeout = auth_timeout
auth_safemode = self.opts.get('auth_safemode', None)
if auth_safemode is not None:
safe = auth_safemode
auth_tries = self.opts.get('auth_tries', None)
if auth_tries is not None:
tries = auth_tries
m_pub_fn = os.path.join(self.opts['pki_dir'], self.mpub)
auth['master_uri'] = self.opts['master_uri']
close_channel = False
if not channel:
close_channel = True
channel = salt.transport.client.AsyncReqChannel.factory(self.opts,
crypt='clear',
io_loop=self.io_loop)
sign_in_payload = self.minion_sign_in_payload()
try:
payload = yield channel.send(
sign_in_payload,
tries=tries,
timeout=timeout
)
except SaltReqTimeoutError as e:
if safe:
log.warning('SaltReqTimeoutError: %s', e)
raise tornado.gen.Return('retry')
if self.opts.get('detect_mode') is True:
raise tornado.gen.Return('retry')
else:
raise SaltClientError('Attempt to authenticate with the salt master failed with timeout error')
finally:
if close_channel:
channel.close()
if not isinstance(payload, dict):
log.error('Sign-in attempt failed: %s', payload)
raise tornado.gen.Return(False)
if 'load' in payload:
if 'ret' in payload['load']:
if not payload['load']['ret']:
if self.opts['rejected_retry']:
log.error(
'The Salt Master has rejected this minion\'s public '
'key.\nTo repair this issue, delete the public key '
'for this minion on the Salt Master.\nThe Salt '
                            'Minion will attempt to re-authenticate.'
)
raise tornado.gen.Return('retry')
else:
log.critical(
'The Salt Master has rejected this minion\'s public '
'key!\nTo repair this issue, delete the public key '
'for this minion on the Salt Master and restart this '
'minion.\nOr restart the Salt Master in open mode to '
'clean out the keys. The Salt Minion will now exit.'
)
# Add a random sleep here for systems that are using a
# a service manager to immediately restart the service
# to avoid overloading the system
time.sleep(random.randint(10, 20))
sys.exit(salt.defaults.exitcodes.EX_NOPERM)
# has the master returned that its maxed out with minions?
elif payload['load']['ret'] == 'full':
raise tornado.gen.Return('full')
else:
log.error(
'The Salt Master has cached the public key for this '
'node, this salt minion will wait for %s seconds '
'before attempting to re-authenticate',
self.opts['acceptance_wait_time']
)
raise tornado.gen.Return('retry')
auth['aes'] = self.verify_master(payload, master_pub='token' in sign_in_payload)
if not auth['aes']:
log.critical(
'The Salt Master server\'s public key did not authenticate!\n'
'The master may need to be updated if it is a version of Salt '
'lower than %s, or\n'
'If you are confident that you are connecting to a valid Salt '
'Master, then remove the master public key and restart the '
'Salt Minion.\nThe master public key can be found '
'at:\n%s', salt.version.__version__, m_pub_fn
)
raise SaltClientError('Invalid master key')
if self.opts.get('syndic_master', False): # Is syndic
syndic_finger = self.opts.get('syndic_finger', self.opts.get('master_finger', False))
if syndic_finger:
if salt.utils.crypt.pem_finger(m_pub_fn, sum_type=self.opts['hash_type']) != syndic_finger:
self._finger_fail(syndic_finger, m_pub_fn)
else:
if self.opts.get('master_finger', False):
if salt.utils.crypt.pem_finger(m_pub_fn, sum_type=self.opts['hash_type']) != self.opts['master_finger']:
self._finger_fail(self.opts['master_finger'], m_pub_fn)
auth['publish_port'] = payload['publish_port']
raise tornado.gen.Return(auth) | 0.003499 |
def _bsecurate_cli_view_graph(args):
'''Handles the view-graph subcommand'''
curate.view_graph(args.basis, args.version, args.data_dir)
return '' | 0.006329 |
def get_logger(name, log_level=logging.INFO, log_file=None, global_log_file=False, silence=False):
"""
Build a logger. All logs will be propagated up to the root logger if not silenced. If log_file is provided, logs will be written out to that file. If global_log_file is true, log_file will be handed the root logger, otherwise it will only be used by this particular logger.
Parameters
----------
name :obj:`str`
The name of the logger to be built.
log_level : `int`
The log level. See the python logging module documentation for possible enum values.
log_file :obj:`str`
The path to the log file to log to.
global_log_file :obj:`bool`
Whether or not to use the given log_file for this particular logger or for the root logger.
silence :obj:`bool`
Whether or not to silence this logger. If it is silenced, the only way to get output from this logger is through a non-global log file.
Returns
-------
:obj:`logging.Logger`
A custom logger.
"""
no_op = False
# some checks for silencing/no-op logging
if silence and global_log_file:
raise ValueError("You can't silence a logger and log to a global log file!")
if silence and log_file is None:
logging.warning('You are creating a no-op logger!')
no_op = True
# configure the root logger if it hasn't been already
if not Logger.ROOT_CONFIGURED:
configure_root()
Logger.ROOT_CONFIGURED = True
# build a logger
logger = logging.getLogger(name)
logger.setLevel(log_level)
# silence the logger by preventing it from propagating upwards to the root
logger.propagate = not silence
# configure the log file stream
if log_file is not None:
# if the log file is global, add it to the root logger
if global_log_file:
add_root_log_file(log_file)
# otherwise add it to this particular logger
else:
hdlr = logging.FileHandler(log_file)
formatter = logging.Formatter('%(asctime)s %(name)-10s %(levelname)-8s %(message)s', datefmt='%m-%d %H:%M:%S')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
# add a no-op handler to suppress warnings about there being no handlers
if no_op:
logger.addHandler(logging.NullHandler())
return logger | 0.005401 |
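A minimal usage sketch for the helper above (module names and file paths are hypothetical):

import logging

# Per-module logger that also writes to its own, non-global log file.
logger = get_logger("my_module", log_level=logging.DEBUG,
                    log_file="/tmp/my_module.log")
logger.info("pipeline started")

# Silenced logger: nothing propagates to the console, but the private file still records it.
quiet = get_logger("worker", log_file="/tmp/worker.log", silence=True)
quiet.info("only visible in /tmp/worker.log")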
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a Mac AppFirewall log file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
line (str): line from a text file.
Returns:
bool: True if the line is in the expected format, False if not.
"""
self._last_month = 0
self._year_use = parser_mediator.GetEstimatedYear()
try:
structure = self.FIREWALL_LINE.parseString(line)
except pyparsing.ParseException as exception:
logger.debug((
'Unable to parse file as a Mac AppFirewall log file with error: '
'{0!s}').format(exception))
return False
if structure.action != 'creating /var/log/appfirewall.log':
logger.debug(
'Not a Mac AppFirewall log file, invalid action: {0!s}'.format(
structure.action))
return False
if structure.status != 'Error':
logger.debug(
'Not a Mac AppFirewall log file, invalid status: {0!s}'.format(
structure.status))
return False
time_elements_tuple = self._GetTimeElementsTuple(structure)
try:
dfdatetime_time_elements.TimeElements(
time_elements_tuple=time_elements_tuple)
except ValueError:
logger.debug((
'Not a Mac AppFirewall log file, invalid date and time: '
'{0!s}').format(structure.date_time))
return False
self._last_month = time_elements_tuple[1]
return True | 0.007106 |
def _output_marc(output_complete, categories,
kw_field=None,
auth_field=None,
acro_field=None,
provenience='Classifier'):
"""Output the keywords in the MARCXML format.
:var skw_matches: list of single keywords
:var ckw_matches: list of composite keywords
:var author_keywords: dictionary of extracted author keywords
:var acronyms: dictionary of acronyms
:var spires: boolean, True=generate spires output - BUT NOTE: it is
here only not to break compatibility, in fact spires output
should never be used for xml because if we read marc back
into the KeywordToken objects, we would not find them
:keyword provenience: string that identifies source (authority) that
assigned the contents of the field
:return: string, formatted MARC
"""
if kw_field is None:
kw_field = current_app.config["CLASSIFIER_RECORD_KEYWORD_FIELD"]
if auth_field is None:
auth_field = current_app.config[
"CLASSIFIER_RECORD_KEYWORD_AUTHOR_FIELD"
]
if acro_field is None:
acro_field = current_app.config[
"CLASSIFIER_RECORD_KEYWORD_ACRONYM_FIELD"
]
kw_template = ('<datafield tag="%s" ind1="%s" ind2="%s">\n'
' <subfield code="2">%s</subfield>\n'
' <subfield code="a">%s</subfield>\n'
' <subfield code="n">%s</subfield>\n'
' <subfield code="9">%s</subfield>\n'
'</datafield>\n')
output = []
tag, ind1, ind2 = _parse_marc_code(kw_field)
for keywords in (output_complete["single_keywords"],
output_complete["core_keywords"]):
for kw in keywords:
output.append(kw_template % (tag, ind1, ind2,
encode_for_xml(provenience),
encode_for_xml(kw), keywords[kw],
encode_for_xml(categories[kw])))
author_keywords = [keyword['author_keyword'] for keyword in
output_complete["author_keywords"]]
for field, keywords in ((auth_field, author_keywords),
(acro_field, output_complete["acronyms"])):
# field='' we shall not save the keywords
if keywords and len(keywords) and field:
tag, ind1, ind2 = _parse_marc_code(field)
for kw, info in keywords.items():
output.append(kw_template % (tag, ind1, ind2,
encode_for_xml(provenience),
encode_for_xml(kw), '',
encode_for_xml(categories[kw])))
return "".join(output) | 0.000352 |
def validate_is_non_abstract_edge_type(self, edge_classname):
"""Validate that a edge classname corresponds to a non-abstract edge class."""
element = self.get_edge_schema_element_or_raise(edge_classname)
if element.abstract:
        raise InvalidClassError(u'Expected a non-abstract edge class, but {} is abstract'
.format(edge_classname)) | 0.009828 |
def as_dict(self):
"""Dict representation of parsed VCF data"""
self_as_dict = {'chrom': self.chrom,
'start': self.start,
'ref_allele': self.ref_allele,
'alt_alleles': self.alt_alleles,
'alleles': [x.as_dict() for x in self.alleles]}
try:
self_as_dict['info'] = self.info
except AttributeError:
pass
return self_as_dict | 0.004175 |
def _get_seq2c_options(data):
"""Get adjustable, through resources, or default options for seq2c.
"""
cov2lr_possible_opts = ["-F"]
defaults = {}
ropts = config_utils.get_resources("seq2c", data["config"]).get("options", [])
    assert len(ropts) % 2 == 0, "Expect even number of options for seq2c: %s" % ropts
defaults.update(dict(tz.partition(2, ropts)))
cov2lr_out, lr2gene_out = [], []
for k, v in defaults.items():
if k in cov2lr_possible_opts:
cov2lr_out += [str(k), str(v)]
else:
lr2gene_out += [str(k), str(v)]
return cov2lr_out, lr2gene_out | 0.004831 |
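A hypothetical illustration of how the flag/value pairs above are split and routed (values are made up, not real seq2c defaults):

from toolz import partition

ropts = ["-F", "0.05", "-M", "50"]       # hypothetical resource options
defaults = dict(partition(2, ropts))     # {'-F': '0.05', '-M': '50'}

cov2lr_out, lr2gene_out = [], []
for k, v in defaults.items():
    # '-F' is the only cov2lr-side flag in the snippet above.
    (cov2lr_out if k in ["-F"] else lr2gene_out).extend([str(k), str(v)])
# cov2lr_out == ['-F', '0.05'], lr2gene_out == ['-M', '50']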
def get_version(self):
"""Get the DCNM version."""
url = '%s://%s/rest/dcnm-version' % (self.dcnm_protocol, self._ip)
payload = {}
try:
res = self._send_request('GET', url, payload, 'dcnm-version')
if res and res.status_code in self._resp_ok:
return res.json().get('Dcnm-Version')
except dexc.DfaClientRequestFailed as exc:
LOG.error("Failed to get DCNM version.")
            sys.exit("ERROR: Failed to connect to DCNM: %s" % exc) | 0.003817 |
def get_derived_from(self, address):
"""Get the target the specified target was derived from.
If a Target was injected programmatically, e.g. from codegen, this allows us to trace its
ancestry. If a Target is not derived, default to returning itself.
:API: public
"""
parent_address = self._derived_from_by_derivative.get(address, address)
return self.get_target(parent_address) | 0.00489 |
def build(self, builder):
"""Build XML by appending to builder"""
builder.start("BasicDefinitions", {})
for child in self.measurement_units:
child.build(builder)
builder.end("BasicDefinitions") | 0.008439 |
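A hedged sketch of driving the build method above with an ElementTree TreeBuilder (basic_definitions is a hypothetical instance whose children also implement build):

import xml.etree.ElementTree as ET

builder = ET.TreeBuilder()
basic_definitions.build(builder)     # emits <BasicDefinitions>...</BasicDefinitions>
element = builder.close()            # returns the root Element
print(ET.tostring(element))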
def add_schemas(path, ext="json"):
"""Add schemas from files in 'path'.
:param path: Path with schema files. Schemas are named by their file,
with the extension stripped. e.g., if path is "/tmp/foo",
then the schema in "/tmp/foo/bar.json" will be named "bar".
:type path: str
:param ext: File extension that identifies schema files
:type ext: str
:return: None
:raise: SchemaPathError, if no such path. SchemaParseError, if a schema
is not valid JSON.
"""
if not os.path.exists(path):
raise SchemaPathError()
filepat = "*." + ext if ext else "*"
for f in glob.glob(os.path.join(path, filepat)):
with open(f, 'r') as fp:
try:
schema = json.load(fp)
except ValueError:
raise SchemaParseError("error parsing '{}'".format(f))
name = os.path.splitext(os.path.basename(f))[0]
schemata[name] = Schema(schema) | 0.00102 |
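A hedged usage sketch, assuming the module-level schemata registry and Schema class used above (the directory and schema content are made up):

import json, os, tempfile

schema_dir = tempfile.mkdtemp()
with open(os.path.join(schema_dir, "bar.json"), "w") as fp:
    json.dump({"type": "object"}, fp)    # minimal example schema

add_schemas(schema_dir)                  # registered under the name "bar"
print("bar" in schemata)                 # -> True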
def solve(self):
'''
Solves a one period consumption saving problem with risky income, with
persistent income explicitly tracked as a state variable.
Parameters
----------
None
Returns
-------
solution : ConsumerSolution
The solution to the one period problem, including a consumption
function (defined over market resources and persistent income), a
marginal value function, bounding MPCs, and human wealth as a func-
tion of persistent income. Might also include a value function and
marginal marginal value function, depending on options selected.
'''
aLvl,pLvl = self.prepareToCalcEndOfPrdvP()
EndOfPrdvP = self.calcEndOfPrdvP()
if self.vFuncBool:
self.makeEndOfPrdvFunc(EndOfPrdvP)
if self.CubicBool:
interpolator = self.makeCubiccFunc
else:
interpolator = self.makeLinearcFunc
solution = self.makeBasicSolution(EndOfPrdvP,aLvl,pLvl,interpolator)
solution = self.addMPCandHumanWealth(solution)
if self.vFuncBool:
solution.vFunc = self.makevFunc(solution)
if self.CubicBool:
solution = self.addvPPfunc(solution)
return solution | 0.006823 |
def initialize_slot(obj, name, value):
"""Initalize an unitialized slot to a value.
If there is already a value for this slot, this is a nop.
Parameters
----------
obj : immutable
An immutable object.
name : str
The name of the slot to initialize.
value : any
The value to initialize the slot to.
"""
if not hasattr(obj, name):
object_setattr(obj, name, value) | 0.002326 |
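An illustrative sketch of write-once initialization with the helper above; the Point class is made up, and object_setattr is assumed to be bound to object.__setattr__ in the helper's module:

class Point(object):
    __slots__ = ('_norm',)            # slot starts out unset

p = Point()
initialize_slot(p, '_norm', 5.0)      # first call sets the slot
initialize_slot(p, '_norm', 99.0)     # no-op: the slot already holds 5.0
print(p._norm)                        # -> 5.0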
def get_server_data(self, UUID):
"""
Return '/server/uuid' data in Python dict.
Creates object representations of any IP-address and Storage.
"""
data = self.get_request('/server/{0}'.format(UUID))
server = data['server']
# Populate subobjects
IPAddresses = IPAddress._create_ip_address_objs(server.pop('ip_addresses'),
cloud_manager=self)
storages = Storage._create_storage_objs(server.pop('storage_devices'),
cloud_manager=self)
return server, IPAddresses, storages | 0.006088 |
def get_bookmarks(self, **filters):
"""
Get Bookmarks for the current user.
Filters:
:param archive: Filter Bookmarks returned by archived status.
:param favorite: Filter Bookmarks returned by favorite status.
:param domain: Filter Bookmarks returned by a domain.
:param added_since: Filter bookmarks by date added (since this date).
:param added_until: Filter bookmarks by date added (until this date).
:param opened_since: Filter bookmarks by date opened (since this date).
:param opened_until: Filter bookmarks by date opened (until this date).
:param archived_since: Filter bookmarks by date archived (since this date.)
:param archived_until: Filter bookmarks by date archived (until this date.)
:param updated_since: Filter bookmarks by date updated (since this date.)
:param updated_until: Filter bookmarks by date updated (until this date.)
:param page: What page of results to return. Default is 1.
:param per_page: How many results to return per page. Default is 20, max is 50.
:param only_deleted: Return only bookmarks that this user has deleted.
:param tags: Comma separated string of tags to filter bookmarks.
"""
filter_dict = filter_args_to_dict(filters, ACCEPTED_BOOKMARK_FILTERS)
url = self._generate_url('bookmarks', query_params=filter_dict)
return self.get(url) | 0.004798 |
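A hedged example of calling the method above on a client instance (the client construction and the date format shown are illustrative assumptions):

# Favorites from one domain, 50 per page.
favorites = client.get_bookmarks(favorite=True, domain="example.com", per_page=50)

# Second page of everything archived since a given date.
archived = client.get_bookmarks(archived_since="2023-01-01", page=2)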
def load_graphs():
'''load graphs from mavgraphs.xml'''
mestate.graphs = []
gfiles = ['mavgraphs.xml']
if 'HOME' in os.environ:
for dirname, dirnames, filenames in os.walk(os.path.join(os.environ['HOME'], ".mavproxy")):
for filename in filenames:
if filename.lower().endswith('.xml'):
gfiles.append(os.path.join(dirname, filename))
elif 'LOCALAPPDATA' in os.environ:
for dirname, dirnames, filenames in os.walk(os.path.join(os.environ['LOCALAPPDATA'], "MAVProxy")):
for filename in filenames:
if filename.lower().endswith('.xml'):
gfiles.append(os.path.join(dirname, filename))
for file in gfiles:
if not os.path.exists(file):
continue
graphs = load_graph_xml(open(file).read(), file)
if graphs:
mestate.graphs.extend(graphs)
mestate.console.writeln("Loaded %s" % file)
# also load the built in graphs
try:
dlist = pkg_resources.resource_listdir("MAVProxy", "tools/graphs")
for f in dlist:
raw = pkg_resources.resource_stream("MAVProxy", "tools/graphs/%s" % f).read()
graphs = load_graph_xml(raw, None)
if graphs:
mestate.graphs.extend(graphs)
mestate.console.writeln("Loaded %s" % f)
except Exception:
#we're in a Windows exe, where pkg_resources doesn't work
import pkgutil
for f in ["ekf3Graphs.xml", "ekfGraphs.xml", "mavgraphs.xml", "mavgraphs2.xml"]:
raw = pkgutil.get_data( 'MAVProxy', 'tools//graphs//' + f)
graphs = load_graph_xml(raw, None)
if graphs:
mestate.graphs.extend(graphs)
mestate.console.writeln("Loaded %s" % f)
mestate.graphs = sorted(mestate.graphs, key=lambda g: g.name) | 0.003717 |
def uninstall(ctx, plugin):
"""
Uninstall the given plugin.
"""
ensure_inside_venv(ctx)
if plugin not in get_installed_plugins():
echo_error("Plugin {} does not seem to be installed.".format(plugin))
sys.exit(1)
plugin_name = get_plugin_name(plugin)
try:
run_command([sys.executable, '-m', 'pip', 'uninstall', '-y',
plugin_name])
except subprocess.CalledProcessError as e:
echo_error(
"Error when trying to uninstall plugin {}. Error message "
"was:\n\n{}".format(plugin, e.output.decode())
)
sys.exit(1)
else:
echo_success("Plugin {} uninstalled successfully.".format(plugin)) | 0.001397 |
def get_key_for_enctype(self, etype):
"""
        Returns the encryption key bytes for the encryption type.
"""
if etype == EncryptionType.AES256_CTS_HMAC_SHA1_96:
if self.kerberos_key_aes_256:
return bytes.fromhex(self.kerberos_key_aes_256)
if self.password is not None:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.AES256, self.password.encode(), salt).contents
raise Exception('There is no key for AES256 encryption')
elif etype == EncryptionType.AES128_CTS_HMAC_SHA1_96:
if self.kerberos_key_aes_128:
return bytes.fromhex(self.kerberos_key_aes_128)
if self.password is not None:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.AES128, self.password.encode(), salt).contents
raise Exception('There is no key for AES128 encryption')
elif etype == EncryptionType.ARCFOUR_HMAC_MD5:
if self.kerberos_key_rc4:
return bytes.fromhex(self.kerberos_key_rc4)
if self.nt_hash:
return bytes.fromhex(self.nt_hash)
elif self.password:
self.nt_hash = hashlib.new('md4', self.password.encode('utf-16-le')).hexdigest().upper()
return bytes.fromhex(self.nt_hash)
else:
raise Exception('There is no key for RC4 encryption')
elif etype == EncryptionType.DES3_CBC_SHA1:
if self.kerberos_key_des3:
                return bytes.fromhex(self.kerberos_key_des3)
elif self.password:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.DES3, self.password.encode(), salt).contents
else:
raise Exception('There is no key for DES3 encryption')
elif etype == EncryptionType.DES_CBC_MD5: #etype == EncryptionType.DES_CBC_CRC or etype == EncryptionType.DES_CBC_MD4 or
if self.kerberos_key_des:
return bytes.fromhex(self.kerberos_key_des)
elif self.password:
salt = (self.domain.upper() + self.username).encode()
return string_to_key(Enctype.DES_MD5, self.password.encode(), salt).contents
else:
                raise Exception('There is no key for DES encryption')
else:
raise Exception('Unsupported encryption type: %s' % etype.name) | 0.026836 |
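A small sketch of the RC4 key derivation performed in the branch above (the credential is hypothetical; note that hashlib's 'md4' may be unavailable on OpenSSL builds without legacy digests):

import hashlib

password = "Summer2024!"                       # hypothetical credential
nt_hash = hashlib.new('md4', password.encode('utf-16-le')).hexdigest().upper()
rc4_key = bytes.fromhex(nt_hash)               # 16-byte RC4-HMAC key
print(len(rc4_key))                            # -> 16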
def update_ip_address(context, id, ip_address):
"""Due to NCP-1592 ensure that address_type cannot change after update."""
LOG.info("update_ip_address %s for tenant %s" % (id, context.tenant_id))
ports = []
if 'ip_address' not in ip_address:
raise n_exc.BadRequest(resource="ip_addresses",
msg="Invalid request body.")
with context.session.begin():
db_address = db_api.ip_address_find(context, id=id, scope=db_api.ONE)
if not db_address:
raise q_exc.IpAddressNotFound(addr_id=id)
iptype = db_address.address_type
if iptype == ip_types.FIXED and not CONF.QUARK.ipaddr_allow_fixed_ip:
raise n_exc.BadRequest(
resource="ip_addresses",
msg="Fixed ips cannot be updated using this interface.")
reset = ip_address['ip_address'].get('reset_allocation_time', False)
if reset and db_address['deallocated'] == 1:
if context.is_admin:
LOG.info("IP's deallocated time being manually reset")
db_address['deallocated_at'] = _get_deallocated_override()
else:
msg = "Modification of reset_allocation_time requires admin"
raise webob.exc.HTTPForbidden(detail=msg)
port_ids = ip_address['ip_address'].get('port_ids', None)
if port_ids is not None and not port_ids:
raise n_exc.BadRequest(
resource="ip_addresses",
msg="Cannot be updated with empty port_id list")
if iptype == ip_types.SHARED:
has_owner = db_address.has_any_shared_owner()
if port_ids:
if iptype == ip_types.FIXED and len(port_ids) > 1:
raise n_exc.BadRequest(
resource="ip_addresses",
msg="Fixed ips cannot be updated with more than one port.")
_raise_if_shared_and_enabled(ip_address, db_address)
ports = db_api.port_find(context, tenant_id=context.tenant_id,
id=port_ids, scope=db_api.ALL)
# NOTE(name): could be considered inefficient because we're
# converting to a list to check length. Maybe revisit
if len(ports) != len(port_ids):
raise n_exc.PortNotFound(port_id=port_ids)
validate_and_fetch_segment(ports, db_address["network_id"])
validate_port_ip_quotas(context, db_address.network_id, ports)
if iptype == ip_types.SHARED and has_owner:
for assoc in db_address.associations:
pid = assoc.port_id
if pid not in port_ids and 'none' != assoc.service:
raise q_exc.PortRequiresDisassociation()
LOG.info("Updating IP address, %s, to only be used by the"
"following ports: %s" % (db_address.address_readable,
[p.id for p in ports]))
new_address = db_api.update_port_associations_for_ip(context,
ports,
db_address)
elif iptype == ip_types.SHARED and has_owner:
raise q_exc.PortRequiresDisassociation()
elif 'deallocated' in ip_address['ip_address']\
and context.is_admin:
# Verify no port associations
if len(db_address.associations) != 0:
exc_msg = ("IP %s cannot be deallocated or allocated while"
" still associated with ports: %s"
% (db_address['address_readable'],
db_address.associations))
raise q_exc.ActionNotAuthorized(msg=exc_msg)
# NOTE: If an admin, allow a user to set deallocated to false
# in order to reserve a deallocated IP. Alternatively, allow them
# reverse that choice if a mistake was made.
if ip_address['ip_address']['deallocated'] == 'False':
db_address['deallocated'] = False
else:
db_address['deallocated'] = True
return v._make_ip_dict(db_address, context.is_admin)
else:
ipam_driver.deallocate_ip_address(context, db_address)
return v._make_ip_dict(db_address, context.is_admin)
return v._make_ip_dict(new_address, context.is_admin) | 0.000222 |
def reset(self):
"""Reset this instance. Loses all unprocessed data."""
self.rawdata = ''
self.lasttag = '???'
self.interesting = interesting_normal
self.cdata_elem = None
_markupbase.ParserBase.reset(self) | 0.007843 |
def render_item(self, all_posts):
"""
Renders the Post as HTML using the template specified in :attr:`html_template_path`.
:param all_posts: An optional :class:`PostCollection` containing all of the posts in the site.
:return: The rendered HTML as a string.
"""
index = all_posts.index(self)
if index > 0: # has newer posts
newer_post = all_posts[index - 1]
else:
newer_post = None
if index < len(all_posts) - 1: # has older posts
older_post = all_posts[index + 1]
else:
older_post = None
return settings.JINJA_ENV.get_template(self.template).render(
post=self,
newer_post=newer_post,
older_post=older_post,
all_posts=all_posts,
nav_context='post'
) | 0.004667 |
def stop(self, msg=None):
        '''Stop a run gracefully. Used to control loops.
        This event provides a gentler abort: the run should stop as soon as possible, but it is still considered complete.
'''
if not self.stop_run.is_set():
if msg:
logging.info('%s%s Stopping run...', msg, ('' if msg[-1] in punctuation else '.'))
else:
logging.info('Stopping run...')
self.stop_run.set() | 0.008386 |
def to_xml(self):
"""
Serialize all properties as XML
"""
ret = '<exif>'
for k in self.__dict__:
ret += '<%s>%s</%s>' % (k, self.__dict__[k], k)
ret += '</exif>'
return ret | 0.008333 |
def set_timezone(self, timezone: str):
""" sets the timezone for the AP. e.g. "Europe/Berlin"
Args:
timezone(str): the new timezone
"""
data = {"timezoneId": timezone}
return self._restCall("home/setTimezone", body=json.dumps(data)) | 0.010309 |
def flatten_all_paths(group, group_filter=lambda x: True,
path_filter=lambda x: True, path_conversions=CONVERSIONS,
group_search_xpath=SVG_GROUP_TAG):
"""Returns the paths inside a group (recursively), expressing the
paths in the base coordinates.
Note that if the group being passed in is nested inside some parent
group(s), we cannot take the parent group(s) into account, because
xml.etree.Element has no pointer to its parent. You should use
Document.flatten_group(group) to flatten a specific nested group into
the root coordinates.
Args:
group is an Element
path_conversions (dict):
A dictionary to convert from an SVG element to a path data
string. Any element tags that are not included in this
dictionary will be ignored (including the `path` tag). To
only convert explicit path elements, pass in
`path_conversions=CONVERT_ONLY_PATHS`.
"""
if not isinstance(group, Element):
raise TypeError('Must provide an xml.etree.Element object. '
'Instead you provided {0}'.format(type(group)))
# Stop right away if the group_selector rejects this group
if not group_filter(group):
return []
# To handle the transforms efficiently, we'll traverse the tree of
# groups depth-first using a stack of tuples.
# The first entry in the tuple is a group element and the second
# entry is its transform. As we pop each entry in the stack, we
# will add all its child group elements to the stack.
StackElement = collections.namedtuple('StackElement',
['group', 'transform'])
def new_stack_element(element, last_tf):
return StackElement(element, last_tf.dot(
parse_transform(element.get('transform'))))
def get_relevant_children(parent, last_tf):
children = []
for elem in filter(group_filter,
parent.iterfind(group_search_xpath, SVG_NAMESPACE)):
children.append(new_stack_element(elem, last_tf))
return children
stack = [new_stack_element(group, np.identity(3))]
FlattenedPath = collections.namedtuple('FlattenedPath',
['path', 'element', 'transform'])
paths = []
while stack:
top = stack.pop()
# For each element type that we know how to convert into path
# data, parse the element after confirming that the path_filter
# accepts it.
for key, converter in path_conversions.items():
for path_elem in filter(path_filter, top.group.iterfind(
'svg:'+key, SVG_NAMESPACE)):
path_tf = top.transform.dot(
parse_transform(path_elem.get('transform')))
path = transform(parse_path(converter(path_elem)), path_tf)
paths.append(FlattenedPath(path, path_elem, path_tf))
stack.extend(get_relevant_children(top.group, top.transform))
return paths | 0.000322 |
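Illustrative only: a usage sketch assuming the module-level helpers the function relies on (parse_path, parse_transform, CONVERSIONS, SVG_NAMESPACE, SVG_GROUP_TAG) are available, as in an svgpathtools-style module:

import xml.etree.ElementTree as ET

svg = ET.fromstring(
    '<svg xmlns="http://www.w3.org/2000/svg">'
    '<g transform="translate(10,0)"><path d="M 0 0 L 5 5"/></g>'
    '</svg>')
group = svg.find('{http://www.w3.org/2000/svg}g')
for flat in flatten_all_paths(group):
    print(flat.path, flat.transform)   # path expressed in root coordinates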
def _mid(string, start, end=None):
"""
    Returns a substring of at most end characters starting at position start (the rest of the string when end is None).
"""
if end is None:
end = len(string)
return string[start:start + end] | 0.005155 |
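A few worked calls against the helper above:

print(_mid("hello world", 6, 5))   # -> 'world' (5 characters from index 6)
print(_mid("hello world", 6))      # -> 'world' (rest of the string)
print(_mid("hello", 1, 3))         # -> 'ell'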
def get_or_create(cls, filter_key=None, with_status=False, **kwargs):
"""
Convenience method to retrieve an Element or create if it does not
exist. If an element does not have a `create` classmethod, then it
is considered read-only and the request will be redirected to :meth:`~get`.
Any keyword arguments passed except the optional filter_key
will be used in a create() call. If filter_key is provided, this
should define an attribute and value to use for an exact match on
the element. Valid attributes are ones required on the elements
``create`` method or can be viewed by the elements class docs.
If no filter_key is provided, the name field will be used to
find the element.
::
>>> Network.get_or_create(
filter_key={'ipv4_network': '123.123.123.0/24'},
name='mynetwork',
ipv4_network='123.123.123.0/24')
Network(name=mynetwork)
The kwargs should be used to satisfy the elements ``create``
classmethod parameters to create in the event it cannot be found.
:param dict filter_key: filter key represents the data attribute and
value to use to find the element. If none is provided, the name
field will be used.
:param kwargs: keyword arguments mapping to the elements ``create``
method.
:param bool with_status: if set to True, a tuple is returned with
(Element, created), where the second tuple item indicates if
the element has been created or not.
:raises CreateElementFailed: could not create element with reason
:raises ElementNotFound: if read-only element does not exist
:return: element instance by type
:rtype: Element
"""
was_created = False
if 'name' not in kwargs:
raise ElementNotFound('Name field is a required parameter '
'for all create or update_or_create type operations on an element')
if filter_key:
elements = cls.objects.filter(**filter_key)
element = elements.first() if elements.exists() else None
else:
try:
element = cls.get(kwargs.get('name'))
except ElementNotFound:
if not hasattr(cls, 'create'):
raise CreateElementFailed('%s: %r not found and this element '
'type does not have a create method.' %
(cls.__name__, kwargs['name']))
element = None
if not element:
params = {k: v() if callable(v) else v
for k, v in kwargs.items()}
try:
element = cls.create(**params)
was_created = True
except TypeError:
raise CreateElementFailed('%s: %r not found and missing '
'constructor arguments to properly create.' %
(cls.__name__, kwargs['name']))
if with_status:
return element, was_created
return element | 0.00347 |
def _prep_jid(self, clear_load, extra):
'''
Return a jid for this publication
'''
# the jid in clear_load can be None, '', or something else. this is an
# attempt to clean up the value before passing to plugins
passed_jid = clear_load['jid'] if clear_load.get('jid') else None
nocache = extra.get('nocache', False)
# Retrieve the jid
fstr = '{0}.prep_jid'.format(self.opts['master_job_cache'])
try:
# Retrieve the jid
jid = self.mminion.returners[fstr](nocache=nocache,
passed_jid=passed_jid)
except (KeyError, TypeError):
# The returner is not present
msg = (
'Failed to allocate a jid. The requested returner \'{0}\' '
'could not be loaded.'.format(fstr.split('.')[0])
)
log.error(msg)
return {'error': msg}
return jid | 0.002041 |
def scaledBy(self, scale):
""" Return a new Selector with scale denominators scaled by a number.
"""
scaled = deepcopy(self)
for test in scaled.elements[0].tests:
if type(test.value) in (int, float):
if test.property == 'scale-denominator':
test.value /= scale
elif test.property == 'zoom':
test.value += log(scale)/log(2)
return scaled | 0.008439 |
def w_diffuser_outer(sed_inputs=sed_dict):
"""Return the outer width of each diffuser in the sedimentation tank.
Parameters
----------
sed_inputs : dict
        A dictionary of all of the constant inputs needed for sedimentation
        tank calculations; these can be found in sed.yaml.
Returns
-------
float
Outer width of each diffuser in the sedimentation tank
Examples
--------
>>> from aide_design.play import*
>>>
"""
return (w_diffuser_inner_min(sed_inputs['tank']['W']) +
(2 * sed_inputs['manifold']['diffuser']['thickness_wall'])).to(u.m).magnitude | 0.004823 |
def _call(self, method, auth, arg, defer, notimeout=False):
"""Calls the Exosite One Platform RPC API.
If `defer` is False, result is a tuple with this structure:
(success (boolean), response)
Otherwise, the result is just True.
notimeout, if True, ignores the reuseconnection setting, creating
a new connection with no timeout.
"""
if defer:
self.deferred.add(auth, method, arg, notimeout=notimeout)
return True
else:
calls = self._composeCalls([(method, arg)])
return self._callJsonRPC(auth, calls, notimeout=notimeout) | 0.003008 |
def is_number(string):
""" checks if a string is a number (int/float) """
string = str(string)
if string.isnumeric():
return True
try:
float(string)
return True
except ValueError:
return False | 0.004098 |
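A few illustrative calls for the predicate above:

print(is_number("42"))      # -> True  (isnumeric)
print(is_number("3.14"))    # -> True  (parses as float)
print(is_number("-7"))      # -> True  (float() handles the sign)
print(is_number("abc"))     # -> False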