def get_library(self, library):
"""
Return the library instance. Can generally use slicing to return the library:
arctic_store[library]
Parameters
----------
library : `str`
The name of the library. e.g. 'library' or 'user.library'
"""
if library in self._library_cache:
return self._library_cache[library]
try:
error = None
lib = ArcticLibraryBinding(self, library)
lib_type = lib.get_library_type()
except (OperationFailure, AutoReconnect) as e:
error = e
if error:
raise LibraryNotFoundException("Library %s was not correctly initialized in %s.\nReason: %r)" %
(library, self, error))
elif not lib_type:
raise LibraryNotFoundException("Library %s was not correctly initialized in %s." %
(library, self))
elif lib_type not in LIBRARY_TYPES:
raise LibraryNotFoundException("Couldn't load LibraryType '%s' for '%s' (has the class been registered?)" %
(lib_type, library))
instance = LIBRARY_TYPES[lib_type](lib)
self._library_cache[library] = instance
# The library official name may be different from 'library': e.g. 'library' vs 'user.library'
self._library_cache[lib.get_name()] = instance
return self._library_cache[library]
def timed_request(self, subject, payload, timeout=0.5):
"""
Implements the request/response pattern via pub/sub,
using an ephemeral subscription whose interest is limited
to a single reply; returns the response or raises a
Timeout error if no reply arrives in time.
->> SUB _INBOX.2007314fe0fcb2cdc2a2914c1 90
->> UNSUB 90 1
->> PUB hello _INBOX.2007314fe0fcb2cdc2a2914c1 5
->> MSG_PAYLOAD: world
<<- MSG hello 2 _INBOX.2007314fe0fcb2cdc2a2914c1 5
"""
next_inbox = INBOX_PREFIX[:]
next_inbox.extend(self._nuid.next())
inbox = next_inbox.decode()
future = asyncio.Future(loop=self._loop)
sid = yield from self.subscribe(inbox, future=future, max_msgs=1)
yield from self.auto_unsubscribe(sid, 1)
yield from self.publish_request(subject, inbox, payload)
try:
msg = yield from asyncio.wait_for(future, timeout, loop=self._loop)
return msg
except asyncio.TimeoutError:
future.cancel()
raise ErrTimeout
def remove_thumbnail(self, thumbnail):
"""Remove thumbnail."""
if thumbnail in self._thumbnails:
index = self._thumbnails.index(thumbnail)
self._thumbnails.remove(thumbnail)
self.layout().removeWidget(thumbnail)
thumbnail.deleteLater()
thumbnail.sig_canvas_clicked.disconnect()
thumbnail.sig_remove_figure.disconnect()
thumbnail.sig_save_figure.disconnect()
# Select a new thumbnail if any :
if thumbnail == self.current_thumbnail:
if len(self._thumbnails) > 0:
self.set_current_index(min(index, len(self._thumbnails)-1))
else:
self.current_thumbnail = None
self.figure_viewer.figcanvas.clear_canvas()
def extract_intro(filename):
""" Extract the first paragraph of module-level docstring. max:95 char"""
docstring, _ = get_docstring_and_rest(filename)
# lstrip is just in case docstring has a '\n\n' at the beginning
paragraphs = docstring.lstrip().split('\n\n')
if len(paragraphs) > 1:
first_paragraph = re.sub('\n', ' ', paragraphs[1])
first_paragraph = (first_paragraph[:95] + '...'
if len(first_paragraph) > 95 else first_paragraph)
else:
raise ValueError(
"Example docstring should have a header for the example title "
"and at least a paragraph explaining what the example is about. "
"Please check the example file:\n {}\n".format(filename))
return first_paragraph
def help_center_user_comments(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/comments#list-comments"
api_path = "/api/v2/help_center/users/{id}/comments.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs)
def getReadGroup(self, id_):
"""
Returns the ReadGroup with the specified id if it exists in this
ReadGroupSet, or raises a ReadGroupNotFoundException otherwise.
"""
if id_ not in self._readGroupIdMap:
raise exceptions.ReadGroupNotFoundException(id_)
return self._readGroupIdMap[id_]
def _data_dict_to_bokeh_chart_data(self, data):
"""
Take a dictionary of data, as returned by the :py:class:`~.ProjectStats`
per_*_data properties, return a 2-tuple of data dict and x labels list
usable by bokeh.charts.
:param data: data dict from :py:class:`~.ProjectStats` property
:type data: dict
:return: 2-tuple of data dict, x labels list
:rtype: tuple
"""
labels = []
# find all the data keys
keys = set()
for date in data:
for k in data[date]:
keys.add(k)
# final output dict
out_data = {}
for k in keys:
out_data[k] = []
# transform the data; deal with sparse data
for data_date, data_dict in sorted(data.items()):
labels.append(data_date)
for k in out_data:
if k in data_dict:
out_data[k].append(data_dict[k])
else:
out_data[k].append(0)
return out_data, labels
def get_pkg_info_revision():
"""
Get a -r### off of PKG-INFO Version in case this is an sdist of
a subversion revision.
"""
warnings.warn("get_pkg_info_revision is deprecated.", EggInfoDeprecationWarning)
if os.path.exists('PKG-INFO'):
with io.open('PKG-INFO') as f:
for line in f:
match = re.match(r"Version:.*-r(\d+)\s*$", line)
if match:
return int(match.group(1))
return 0
def getData4cryptID(self, tablename, ID):
"""get the whole row from the database and store it in a dict"""
fields = self._getFieldsInDB(tablename)
SQL = 'SELECT *,MAKETIME(0,0,TIMESTAMPDIFF(SECOND, StartDate, EndDate)),DATE_ADD(EndDate, INTERVAL %s DAY),TIMESTAMPDIFF(DAY,DATE_ADD(EndDate, INTERVAL %s DAY), NOW()),TIMESTAMPDIFF(HOUR,DATE_ADD(EndDate, INTERVAL %s DAY), NOW()) FROM %s WHERE cryptID="%s"' % (self.store_time, self.store_time, self.store_time, tablename, ID)
array_data = self.execQuery(SQL)
if len(array_data) > 0:
for x in range( len(fields) ):
self.data[fields[x]] = array_data[0][x]
self.data['date_expiration'] = array_data[0][-3]
time_expiration = None
if array_data[0][-2] and array_data[0][-1]:
time_expiration = "%d days, %d hours" % (abs(array_data[0][-2]),abs(array_data[0][-1]) - abs(array_data[0][-2] * 24))
self.data['time_expiration'] = time_expiration
self.data['time_computation'] = array_data[0][-4]
return self.data
def _validate_exp(claims, leeway=0):
"""Validates that the 'exp' claim is valid.
The "exp" (expiration time) claim identifies the expiration time on
or after which the JWT MUST NOT be accepted for processing. The
processing of the "exp" claim requires that the current date/time
MUST be before the expiration date/time listed in the "exp" claim.
Implementers MAY provide for some small leeway, usually no more than
a few minutes, to account for clock skew. Its value MUST be a number
containing a NumericDate value. Use of this claim is OPTIONAL.
Args:
claims (dict): The claims dictionary to validate.
leeway (int): The number of seconds of skew that is allowed.
"""
if 'exp' not in claims:
return
try:
exp = int(claims['exp'])
except ValueError:
raise JWTClaimsError('Expiration Time claim (exp) must be an integer.')
now = timegm(datetime.utcnow().utctimetuple())
if exp < (now - leeway):
raise ExpiredSignatureError('Signature has expired.')
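# A hedged usage sketch for _validate_exp above. The 'exp' below is one hour in the past;
# with a generous leeway the claim passes, while with the default leeway of 0 the call
# raises ExpiredSignatureError (timegm, datetime and the exception classes are assumed to
# be the ones imported by the surrounding module, as in python-jose).
from calendar import timegm
from datetime import datetime, timedelta
past = timegm((datetime.utcnow() - timedelta(hours=1)).utctimetuple())
_validate_exp({'exp': past}, leeway=7200)   # within the two-hour leeway, returns None
_validate_exp({'exp': past})                # raises ExpiredSignatureError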
def pool_list(self, name_matches=None, pool_ids=None, category=None,
description_matches=None, creator_name=None, creator_id=None,
is_deleted=None, is_active=None, order=None):
"""Get a list of pools.
Parameters:
name_matches (str):
pool_ids (str): Can search for multiple ID's at once, separated by
commas.
description_matches (str):
creator_name (str):
creator_id (int):
is_active (bool): Can be: true, false.
is_deleted (bool): Can be: True, False.
order (str): Can be: name, created_at, post_count, date.
category (str): Can be: series, collection.
"""
params = {
'search[name_matches]': name_matches,
'search[id]': pool_ids,
'search[description_matches]': description_matches,
'search[creator_name]': creator_name,
'search[creator_id]': creator_id,
'search[is_active]': is_active,
'search[is_deleted]': is_deleted,
'search[order]': order,
'search[category]': category
}
return self._get('pools.json', params)
def get_task_doc(self, path):
"""
Get the entire task doc for a path, including any post-processing.
"""
logger.info("Getting task doc for base dir :{}".format(path))
files = os.listdir(path)
vasprun_files = OrderedDict()
if "STOPCAR" in files:
#Stopped runs. Try to parse as much as possible.
logger.info(path + " contains stopped run")
for r in self.runs:
if r in files: #try subfolder schema
for f in os.listdir(os.path.join(path, r)):
if fnmatch(f, "vasprun.xml*"):
vasprun_files[r] = os.path.join(r, f)
else: #try extension schema
for f in files:
if fnmatch(f, "vasprun.xml.{}*".format(r)):
vasprun_files[r] = f
if len(vasprun_files) == 0:
for f in files: #get any vasprun from the folder
if fnmatch(f, "vasprun.xml*") and \
f not in vasprun_files.values():
vasprun_files['standard'] = f
if len(vasprun_files) > 0:
d = self.generate_doc(path, vasprun_files)
if not d:
d = self.process_killed_run(path)
self.post_process(path, d)
elif (not (path.endswith("relax1") or
path.endswith("relax2"))) and contains_vasp_input(path):
#If not Materials Project style, process as a killed run.
logger.warning(path + " contains killed run")
d = self.process_killed_run(path)
self.post_process(path, d)
else:
raise ValueError("No VASP files found!")
return d
def finalize(self):
"""
Connects the wires.
"""
self._check_finalized()
self._final = True
for dest_w, values in self.dest_instrs_info.items():
mux_vals = dict(zip(self.instructions, values))
dest_w <<= sparse_mux(self.signal_wire, mux_vals)
def raise_error(e):
"""Take a bravado-core Error model and raise it as an exception"""
code = e.error
if code in code_to_class:
raise code_to_class[code](e.error_description)
else:
raise InternalServerError(e.error_description)
def handle_exit_code(d, code):
"""Sample function showing how to interpret the dialog exit codes.
This function is not used after every call to dialog in this demo
for two reasons:
1. For some boxes, unfortunately, dialog returns the code for
ERROR when the user presses ESC (instead of the one chosen
for ESC). As these boxes only have an OK button, and an
exception is raised and correctly handled here in case of
real dialog errors, there is no point in testing the dialog
exit status (it can't be CANCEL as there is no CANCEL
button; it can't be ESC as unfortunately, the dialog makes
it appear as an error; it can't be ERROR as this is handled
in dialog.py to raise an exception; therefore, it *is* OK).
2. To not clutter simple code with things that are
demonstrated elsewhere.
"""
# d is supposed to be a Dialog instance
if code in (d.DIALOG_CANCEL, d.DIALOG_ESC):
if code == d.DIALOG_CANCEL:
msg = "You chose cancel in the last dialog box. Do you want to " \
"exit this demo?"
else:
msg = "You pressed ESC in the last dialog box. Do you want to " \
"exit this demo?"
# "No" or "ESC" will bring the user back to the demo.
# DIALOG_ERROR is propagated as an exception and caught in main().
# So we only need to handle OK here.
if d.yesno(msg) == d.DIALOG_OK:
sys.exit(0)
return 0
else:
return 1
def _generate_validation_scripts(self):
"""
Include the scripts used by solutions.
"""
id_script_list_validation_fields = (
AccessibleFormImplementation.ID_SCRIPT_LIST_VALIDATION_FIELDS
)
local = self.parser.find('head,body').first_result()
if local is not None:
if (
self.parser.find(
'#'
+ AccessibleEventImplementation.ID_SCRIPT_COMMON_FUNCTIONS
).first_result() is None
):
common_functions_file = open(
os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)
))),
'js',
'common.js'
),
'r'
)
common_functions_content = common_functions_file.read()
common_functions_file.close()
common_functions_script = self.parser.create_element('script')
common_functions_script.set_attribute(
'id',
AccessibleEventImplementation.ID_SCRIPT_COMMON_FUNCTIONS
)
common_functions_script.set_attribute(
'type',
'text/javascript'
)
common_functions_script.append_text(common_functions_content)
local.prepend_element(common_functions_script)
self.script_list_fields_with_validation = self.parser.find(
'#'
+ id_script_list_validation_fields
).first_result()
if self.script_list_fields_with_validation is None:
script_list_file = open(
os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)
))),
'js',
'scriptlist_validation_fields.js'
),
'r'
)
script_list_content = script_list_file.read()
script_list_file.close()
self.script_list_fields_with_validation = (
self.parser.create_element('script')
)
self.script_list_fields_with_validation.set_attribute(
'id',
id_script_list_validation_fields
)
self.script_list_fields_with_validation.set_attribute(
'type',
'text/javascript'
)
self.script_list_fields_with_validation.append_text(
script_list_content
)
local.append_element(self.script_list_fields_with_validation)
if (
self.parser.find(
'#'
+ AccessibleFormImplementation.ID_SCRIPT_EXECUTE_VALIDATION
).first_result() is None
):
script_function_file = open(
os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(
os.path.realpath(__file__)
))),
'js',
'validation.js'
),
'r'
)
script_function_content = script_function_file.read()
script_function_file.close()
script_function = self.parser.create_element('script')
script_function.set_attribute(
'id',
AccessibleFormImplementation.ID_SCRIPT_EXECUTE_VALIDATION
)
script_function.set_attribute('type', 'text/javascript')
script_function.append_text(script_function_content)
self.parser.find('body').first_result().append_element(
script_function
)
self.scripts_added = True
def _decrypt_object(obj):
'''
Recursively try to find a pass path (string) that can be handed off to pass
'''
if isinstance(obj, six.string_types):
return _fetch_secret(obj)
elif isinstance(obj, dict):
for pass_key, pass_path in six.iteritems(obj):
obj[pass_key] = _decrypt_object(pass_path)
elif isinstance(obj, list):
for pass_key, pass_path in enumerate(obj):
obj[pass_key] = _decrypt_object(pass_path)
return obj
def fix(x, digs):
"""Format x as [-]ddd.ddd with 'digs' digits after the point
and at least one digit before.
If digs <= 0, the point is suppressed."""
if type(x) != type(''): x = repr(x)
try:
sign, intpart, fraction, expo = extract(x)
except NotANumber:
return x
intpart, fraction = unexpo(intpart, fraction, expo)
intpart, fraction = roundfrac(intpart, fraction, digs)
while intpart and intpart[0] == '0': intpart = intpart[1:]
if intpart == '': intpart = '0'
if digs > 0: return sign + intpart + '.' + fraction
else: return sign + intpart
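# A hedged usage sketch for fix() above; it relies on the extract/unexpo/roundfrac helpers
# and the NotANumber exception from the same fpformat-style module, so it only runs where
# those are defined alongside it.
print(fix(3.14159, 2))   # 3.14
print(fix('-12', 3))     # -12.000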
def override(func, auto = False):
"""Decorator applicable to methods only.
For a version applicable also to classes or modules use auto_override.
Asserts that for the decorated method a parent method exists in its mro.
If both the decorated method and its parent method are type annotated,
the decorator additionally asserts compatibility of the annotated types.
Note that the return type is checked in contravariant manner.
A successful check guarantees that the child method can always be used in
places that support the parent method's signature.
Use pytypes.check_override_at_runtime and pytypes.check_override_at_class_definition_time
to control whether checks happen at class definition time or at "actual runtime".
"""
if not pytypes.checking_enabled:
return func
# notes:
# - don't use @override on __init__ (raise warning? Error for now!),
# because __init__ is not intended to be called after creation
# - @override applies typechecking to every match in mro, because class might be used as
# replacement for each class in its mro. So each must be compatible.
# - @override does not/cannot check signature of builtin ancestors (for now).
# - @override starts checking only at its declaration level. If in a subclass an @override
# annotated method is not s.t. @override any more.
# This is difficult to achieve in case of a call to super. Runtime-override checking
# would use the subclass-self and thus unintentionally would also check the submethod's
# signature. We actively avoid this here.
func.override_checked = True
_actualfunc(func).override_checked = True
if pytypes.check_override_at_class_definition_time:
# We need some trickery here, because details of the class are not yet available
# as it is just getting defined. Luckily we can get base-classes via inspect.stack():
stack = inspect.stack()
try:
base_classes = _re.search(r'class.+\((.+)\)\s*\:', stack[2][4][0]).group(1)
except IndexError:
raise _function_instead_of_method_error(func)
except AttributeError:
base_classes = 'object'
meth_cls_name = stack[1][3]
if func.__name__ == '__init__':
raise OverrideError(
'Invalid use of @override in %s:\n @override must not be applied to __init__.'
% util._fully_qualified_func_name(func, True, None, meth_cls_name))
# handle multiple inheritance
base_classes = [s.strip() for s in base_classes.split(',')]
if not base_classes:
raise ValueError('@override: unable to determine base class')
# stack[0]=overrides, stack[1]=inside class def'n, stack[2]=outside class def'n
derived_class_locals = stack[2][0].f_locals
derived_class_globals = stack[2][0].f_globals
# replace each class name in base_classes with the actual class type
for i, base_class in enumerate(base_classes):
if '.' not in base_class:
if base_class in derived_class_locals:
base_classes[i] = derived_class_locals[base_class]
elif base_class in derived_class_globals:
base_classes[i] = derived_class_globals[base_class]
elif base_class in types.__builtins__:
base_classes[i] = types.__builtins__[base_class]
else:
raise TypeError("Could not lookup type: "+base_class)
else:
components = base_class.split('.')
# obj is either a module or a class
if components[0] in derived_class_locals:
obj = derived_class_locals[components[0]]
elif components[0] in derived_class_globals:
obj = derived_class_globals[components[0]]
elif components[0] in types.__builtins__:
obj = types.__builtins__[components[0]]
elif components[0] in sys.modules:
obj = sys.modules[components[0]]
else:
raise TypeError("Could not lookup type or module: "+base_class)
for c in components[1:]:
assert(ismodule(obj) or isclass(obj))
obj = getattr(obj, c)
base_classes[i] = obj
mro_set = set() # contains everything in would-be-mro, however in unspecified order
mro_pool = [base_classes]
while len(mro_pool) > 0:
lst = mro_pool.pop()
for base_cls in lst:
if not is_builtin_type(base_cls):
mro_set.add(base_cls)
mro_pool.append(base_cls.__bases__)
base_method_exists = False
argSpecs = util.getargspecs(func)
for cls in mro_set:
if hasattr(cls, func.__name__):
base_method_exists = True
base_method = getattr(cls, func.__name__)
_check_override_argspecs(func, argSpecs, meth_cls_name, base_method, cls)
if has_type_hints(func):
try:
_check_override_types(func, _funcsigtypes(func, True, cls), meth_cls_name,
base_method, cls)
except NameError:
_delayed_checks.append(_DelayedCheck(func, func, meth_cls_name, base_method,
cls, sys.exc_info()))
if not base_method_exists:
if not auto:
raise _no_base_method_error(func)
if pytypes.check_override_at_runtime:
specs = util.getargspecs(func)
argNames = util.getargnames(specs)
def checker_ov(*args, **kw):
if hasattr(checker_ov, '__annotations__') and len(checker_ov.__annotations__) > 0:
checker_ov.ov_func.__annotations__ = checker_ov.__annotations__
args_kw = util.getargskw(args, kw, specs)
if len(argNames) > 0 and argNames[0] == 'self':
if hasattr(args_kw[0].__class__, func.__name__) and \
ismethod(getattr(args_kw[0], func.__name__)):
actual_class = args_kw[0].__class__
if _actualfunc(getattr(args_kw[0], func.__name__)) != func:
for acls in util.mro(args_kw[0].__class__):
if not is_builtin_type(acls):
if hasattr(acls, func.__name__) and func.__name__ in acls.__dict__ and \
_actualfunc(acls.__dict__[func.__name__]) == func:
actual_class = acls
if func.__name__ == '__init__':
raise OverrideError(
'Invalid use of @override in %s:\n @override must not be applied to __init__.'
% util._fully_qualified_func_name(func, True, actual_class))
ovmro = []
base_method_exists = False
for mc in util.mro(actual_class)[1:]:
if hasattr(mc, func.__name__):
ovf = getattr(mc, func.__name__)
base_method_exists = True
if not is_builtin_type(mc):
ovmro.append(mc)
if not base_method_exists:
if not auto:
raise _no_base_method_error(func)
else:
return func(*args, **kw)
# Not yet support overloading
# Check arg-count compatibility
for ovcls in ovmro:
ovf = getattr(ovcls, func.__name__)
_check_override_argspecs(func, specs, actual_class.__name__, ovf, ovcls)
# Check arg/res-type compatibility
meth_types = _funcsigtypes(func, True, args_kw[0].__class__)
if has_type_hints(func):
for ovcls in ovmro:
ovf = getattr(ovcls, func.__name__)
_check_override_types(func, meth_types, actual_class.__name__, ovf, ovcls)
else:
raise OverrideError('@override was applied to a non-method: %s.%s.\n'
% (func.__module__, func.__name__)
+ "that declares 'self' although not a method.")
else:
raise _function_instead_of_method_error(func)
return func(*args, **kw)
checker_ov.ov_func = func
if hasattr(func, '__func__'):
checker_ov.__func__ = func.__func__
checker_ov.__name__ = func.__name__
checker_ov.__module__ = func.__module__
checker_ov.__globals__.update(func.__globals__)
if hasattr(func, '__annotations__'):
checker_ov.__annotations__ = func.__annotations__
if hasattr(func, '__qualname__'):
checker_ov.__qualname__ = func.__qualname__
checker_ov.__doc__ = func.__doc__
# Todo: Check what other attributes might be needed (e.g. by debuggers).
checker_ov._check_parent_types = True
return checker_ov
else:
func._check_parent_types = True
return func
def to_one_hot(dataY):
"""Convert the vector of labels dataY into one-hot encoding.
:param dataY: vector of labels
:return: one-hot encoded labels
"""
nc = 1 + np.max(dataY)
onehot = [np.zeros(nc, dtype=np.int8) for _ in dataY]
for i, j in enumerate(dataY):
onehot[i][j] = 1
return onehot
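# A minimal usage sketch for to_one_hot above (assumes numpy is imported as np, as the
# function itself does): three labels become three one-hot rows of length max(label) + 1.
labels = [0, 2, 1]
print(to_one_hot(labels))
# three one-hot rows: [1, 0, 0], [0, 0, 1], [0, 1, 0] (as int8 arrays)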
def lazily(self, name, callable, args):
"""
Load something lazily
"""
self._lazy[name] = callable, args
self._all.add(name)
async def multi_set(self, pairs, ttl=SENTINEL, dumps_fn=None, namespace=None, _conn=None):
"""
Stores multiple values in the given keys.
:param pairs: list of two element iterables. First is key and second is value
:param ttl: int the expiration time in seconds. Due to memcached
restrictions if you want compatibility use int. In case you
need milliseconds, redis and memory support float ttls
:param dumps_fn: callable alternative to use as dumps function
:param namespace: str alternative namespace to use
:param timeout: int or float in seconds specifying maximum timeout
for the operations to last
:returns: True
:raises: :class:`asyncio.TimeoutError` if it lasts more than self.timeout
"""
start = time.monotonic()
dumps = dumps_fn or self._serializer.dumps
tmp_pairs = []
for key, value in pairs:
tmp_pairs.append((self.build_key(key, namespace=namespace), dumps(value)))
await self._multi_set(tmp_pairs, ttl=self._get_ttl(ttl), _conn=_conn)
logger.debug(
"MULTI_SET %s %d (%.4f)s",
[key for key, value in tmp_pairs],
len(pairs),
time.monotonic() - start,
)
return True
def renew_compose(self, compose_id):
"""Renew, or extend, existing compose
If the compose has already been removed, ODCS creates a new compose.
Otherwise, it extends the time_to_expire of existing compose. In most
cases, caller should assume the compose ID will change.
:param compose_id: int, compose ID to renew
:return: dict, status of compose being renewed.
"""
logger.info("Renewing compose %d", compose_id)
response = self.session.patch('{}composes/{}'.format(self.url, compose_id))
response.raise_for_status()
response_json = response.json()
compose_id = response_json['id']
logger.info("Renewed compose is %d", compose_id)
return response_json
def wait_for_ajax_calls_to_complete(self, timeout=5):
"""
Waits until there are no active or pending ajax requests.
Raises TimeoutException if the requests have not settled within the timeout.
:param timeout: time to wait for silence (default: 5 seconds)
:return: None
"""
from selenium.webdriver.support.ui import WebDriverWait
WebDriverWait(self.driver, timeout).until(lambda s: s.execute_script("return jQuery.active === 0"))
def splitstring(string):
"""
>>> string = 'apple orange "banana tree" green'
>>> splitstring(string)
['apple', 'orange', 'green', '"banana tree"']
"""
patt = re.compile(r'"[\w ]+"')
if patt.search(string):
quoted_item = patt.search(string).group()
newstring = patt.sub('', string)
return newstring.split() + [quoted_item]
else:
return string.split()
def listTheExtras(self, deleteAlso):
""" Use ConfigObj's get_extra_values() call to find any extra/unknown
parameters we may have loaded. Return a string similar to findTheLost.
If deleteAlso is True, this will also delete any extra/unknown items.
"""
# get list of extras
extras = configobj.get_extra_values(self)
# extras is in format: [(sections, key), (sections, key), ]
# but we need: [(sections, key, result), ...] - set all results to
# a bool just to make it the right shape. BUT, since we are in
# here anyway, make that bool mean something - hide info in it about
# whether that extra item is a section (1) or just a single par (0)
#
# simplified, this is: expanded = [ (x+(abool,)) for x in extras]
expanded = [ (x+ \
( bool(len(x[0])<1 and hasattr(self[x[1]], 'keys')), ) \
) for x in extras]
retval = ''
if expanded:
retval = flattened2str(expanded, extra=1)
# but before we return, delete them (from ourself!) if requested to
if deleteAlso:
for tup_to_del in extras:
target = self
# descend the tree to the dict where this items is located.
# (this works because target is not a copy (because the dict
# type is mutable))
location = tup_to_del[0]
for subdict in location: target = target[subdict]
# delete it
target.pop(tup_to_del[1])
return retval
def get_stats(a, full=False):
"""Compute and print statistics for input array
Needs to be cleaned up, return a stats object
"""
from scipy.stats.mstats import mode
a = checkma(a)
thresh = 4E6
if full or a.count() < thresh:
q = (iqr(a))
p16, p84, spread = robust_spread(a)
#There has to be a better way to compute the mode for a ma
#mstats.mode returns tuple of (array[mode], array[count])
a_mode = float(mode(a, axis=None)[0])
stats = (a.count(), a.min(), a.max(), a.mean(dtype='float64'), a.std(dtype='float64'), \
fast_median(a), mad(a), q[0], q[1], q[2], a_mode, p16, p84, spread)
else:
ac = a.compressed()
stride = int(np.around(ac.size / thresh))
ac = np.ma.array(ac[::stride])
#idx = np.random.permutation(ac.size)
#Note: need the ma cast here b/c of a.count() below
#ac = np.ma.array(ac[idx[::stride]])
q = (iqr(ac))
p16, p84, spread = robust_spread(ac)
ac_mode = float(mode(ac, axis=None)[0])
stats = (a.count(), a.min(), a.max(), a.mean(dtype='float64'), a.std(dtype='float64'), \
fast_median(ac), mad(ac), q[0], q[1], q[2], ac_mode, p16, p84, spread)
return stats
def read_wv_master_file(wv_master_file, lines='brightest', debugplot=0):
"""read arc line wavelengths from external file.
Parameters
----------
wv_master_file : string
File name of txt file containing the wavelength database.
lines : string
Indicates which lines to read. For files with a single column
or two columns this parameter is irrelevant. For files with
three columns, lines='brightest' indicates that only the
brightest lines are read, whereas lines='all' means that all
the lines are considered.
debugplot : int
Determines whether intermediate computations and/or plots
are displayed. The valid codes are defined in
numina.array.display.pause_debugplot.
Returns
-------
wv_master : 1d numpy array
Array with arc line wavelengths.
"""
# protection
if lines not in ['brightest', 'all']:
raise ValueError('Unexpected lines=' + str(lines))
# read table from txt file
master_table = np.genfromtxt(wv_master_file)
wv_master = read_wv_master_from_array(master_table, lines)
if abs(debugplot) >= 10:
print("Reading master table: " + wv_master_file)
print("wv_master:\n", wv_master)
return wv_master
def search(self, q=''):
"""GET /v1/search"""
if q:
q = '?q=' + q
return self._http_call('/v1/search' + q, get)
def import_class(name):
"""Load class from fully-qualified python module name.
ex: import_class('bulbs.content.models.Content')
"""
module, _, klass = name.rpartition('.')
mod = import_module(module)
return getattr(mod, klass)
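# A usage sketch for import_class above: the dotted path is split on the final '.', the
# module part is imported, and the trailing attribute is looked up on it (import_module is
# assumed to come from importlib in the surrounding module).
OrderedDict = import_class('collections.OrderedDict')
print(OrderedDict([('a', 1)]))   # an OrderedDict with a single entry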
def get_start_stops(transcript_sequence, start_codons=None, stop_codons=None):
"""Return start and stop positions for all frames in the given
transcript.
"""
transcript_sequence = transcript_sequence.upper() # for comparison with codons below
if not start_codons:
start_codons = ['ATG']
if not stop_codons:
stop_codons = ['TAA', 'TAG', 'TGA']
seq_frames = {1: {'starts': [], 'stops': []},
2: {'starts': [], 'stops': []},
3: {'starts': [], 'stops': []}}
for codons, positions in ((start_codons, 'starts'),
(stop_codons, 'stops')):
if len(codons) > 1:
pat = re.compile('|'.join(codons))
else:
pat = re.compile(codons[0])
for m in re.finditer(pat, transcript_sequence):
# Increment position by 1, Frame 1 starts at position 1 not 0
start = m.start() + 1
rem = start % 3
if rem == 1: # frame 1
seq_frames[1][positions].append(start)
elif rem == 2: # frame 2
seq_frames[2][positions].append(start)
elif rem == 0: # frame 3
seq_frames[3][positions].append(start)
return seq_frames
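# A small usage sketch for get_start_stops above (assumes re is imported, as the function
# uses it): the ATG at position 1 and the TAG at position 7 fall in frame 1, while the TGA
# starting at position 2 is recorded as a frame-2 stop.
frames = get_start_stops('ATGAAATAG')
print(frames[1])   # {'starts': [1], 'stops': [7]}
print(frames[2])   # {'starts': [], 'stops': [2]}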
def fi_ssn(ssn, allow_temporal_ssn=True):
"""
Validate a Finnish Social Security Number.
This validator is based on `django-localflavor-fi`_.
.. _django-localflavor-fi:
https://github.com/django/django-localflavor-fi/
Examples::
>>> fi_ssn('010101-0101')
True
>>> fi_ssn('101010-0102')
ValidationFailure(func=fi_ssn, args=...)
.. versionadded:: 0.5
:param ssn: Social Security Number to validate
:param allow_temporal_ssn:
Whether to accept temporal SSN numbers. Temporal SSN numbers are the
ones where the serial is in the range [900-999]. By default temporal
SSN numbers are valid.
"""
if not ssn:
return False
result = re.match(ssn_pattern, ssn)
if not result:
return False
gd = result.groupdict()
checksum = int(gd['date'] + gd['serial'])
return (
int(gd['serial']) >= 2 and
(allow_temporal_ssn or int(gd['serial']) <= 899) and
ssn_checkmarks[checksum % len(ssn_checkmarks)] ==
gd['checksum']
)
def count_var(nex):
"""
count number of sites with cov=4, and number of variable sites.
"""
arr = np.array([list(i.split()[-1]) for i in nex])
miss = np.any(arr=="N", axis=0)
nomiss = arr[:, ~miss]
nsnps = np.invert(np.all(nomiss==nomiss[0, :], axis=0)).sum()
return nomiss.shape[1], nsnps
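# A usage sketch for count_var above (assumes numpy as np): two aligned "name SEQUENCE"
# rows; the third column is dropped because one row has an N there, and one of the three
# remaining columns differs between the rows.
nex = ["taxonA ACGT", "taxonB ACNA"]
print(count_var(nex))   # 3 sites without missing data, 1 of them variable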
def hparams_to_batching_scheme(hparams,
drop_long_sequences=False,
shard_multiplier=1,
length_multiplier=1):
"""Wrapper around _batching_scheme with hparams."""
return batching_scheme(
batch_size=hparams.batch_size,
min_length=hparams.min_length,
max_length=hparams.max_length,
min_length_bucket=hparams.min_length_bucket,
length_bucket_step=hparams.length_bucket_step,
drop_long_sequences=drop_long_sequences,
shard_multiplier=shard_multiplier,
length_multiplier=length_multiplier)
def inicializar_y_capturar_excepciones(func):
"Decorador para inicializar y capturar errores (version para webservices)"
@functools.wraps(func)
def capturar_errores_wrapper(self, *args, **kwargs):
try:
# initialize (clear variables)
self.Errores = [] # lists of str for legacy languages
self.Observaciones = []
self.errores = [] # lists of dict for use from python
self.observaciones = []
self.Eventos = []
self.Traceback = self.Excepcion = ""
self.ErrCode = self.ErrMsg = self.Obs = ""
# clear webservice-specific variables:
self.inicializar()
# update the parameters
kwargs.update(self.params_in)
# clear the parameters
self.params_in = {}
self.params_out = {}
# call the function (with retries)
retry = self.reintentos + 1
while retry:
try:
retry -= 1
return func(self, *args, **kwargs)
except socket.error, e:
if e[0] not in (10054, 10053):
# only retry if the error is connection-related
# (10054, 'Connection reset by peer')
# (10053, 'Software caused connection abort')
raise
else:
if DEBUG: print e, "Reintentando..."
self.log(exception_info().get("msg", ""))
except SoapFault, e:
# store details of the SOAP exception
self.ErrCode = unicode(e.faultcode)
self.ErrMsg = unicode(e.faultstring)
self.Excepcion = u"%s: %s" % (e.faultcode, e.faultstring, )
if self.LanzarExcepciones:
raise
except Exception, e:
ex = exception_info()
self.Traceback = ex.get("tb", "")
try:
self.Excepcion = ex.get("msg", "")
except:
self.Excepcion = u"<no disponible>"
if self.LanzarExcepciones:
raise
else:
self.ErrMsg = self.Excepcion
finally:
# store debugging data
if self.client:
self.XmlRequest = self.client.xml_request
self.XmlResponse = self.client.xml_response
return capturar_errores_wrapper
def cmServiceAccept():
"""CM SERVICE ACCEPT Section 9.2.5"""
a = TpPd(pd=0x5)
b = MessageType(mesType=0x21) # 00100001
packet = a / b
return packet
def get_value(self):
"""Retreive usage value within report"""
if self.__is_value_array:
if self.__bit_size == 8: #matching c_ubyte
return list(self.__value)
else:
result = []
for i in range(self.__report_count):
result.append(self.__getitem__(i))
return result
else:
return self.__value
def update_notebooks(source_path, dest_path=None, update_html=True, document_new_fns=False,
update_nb_links=True, html_path=None, force=False):
"`source_path` can be a directory or a file. Assume all modules reside in the fastai directory."
from .convert2html import convert_nb
source_path = Path(source_path)
if source_path.is_file():
dest_path = source_path.parent if dest_path is None else Path(dest_path)
html_path = dest_path/'..'/'docs' if html_path is None else Path(html_path)
doc_path = source_path
assert source_path.suffix == '.ipynb', 'Must update from notebook or module'
if document_new_fns:
mod = import_mod(get_module_from_notebook(source_path))
if not mod: print('Could not find module for path:', source_path)
elif mod.__file__.endswith('__init__.py'): pass
else: update_module_page(mod, dest_path)
generate_missing_metadata(doc_path)
if update_nb_links:
print(f'Updating notebook {doc_path}. Please wait...')
link_nb(doc_path)
execute_nb(doc_path, {'metadata': {'path': doc_path.parent}}, show_doc_only=True)
if update_html:
check_nbconvert_version()
html_fn = html_path/doc_path.with_suffix('.html').name
if not force and html_fn.is_file():
in_mod = os.path.getmtime(doc_path)
out_mod = os.path.getmtime(html_fn)
if in_mod < out_mod: return
convert_nb(doc_path, html_path)
elif (source_path.name.startswith('fastai.')):
# Do module update
assert dest_path is not None, 'To update a module, you must specify a destination folder for where notebook resides'
mod = import_mod(source_path.name)
if not mod: return print('Could not find module for:', source_path)
doc_path = Path(dest_path)/(strip_fastai(mod.__name__)+'.ipynb')
if not doc_path.exists():
print('Notebook does not exist. Creating:', doc_path)
create_module_page(mod, dest_path)
update_notebooks(doc_path, dest_path=dest_path, update_html=update_html, document_new_fns=document_new_fns,
update_nb_links=update_nb_links, html_path=html_path)
elif source_path.is_dir():
for f in sorted(Path(source_path).glob('*.ipynb')):
update_notebooks(f, dest_path=dest_path, update_html=update_html, document_new_fns=document_new_fns,
update_nb_links=update_nb_links, html_path=html_path)
else: print('Could not resolve source file:', source_path)
def SPEEDY_band_fraction(T):
'''Python / numpy implementation of the formula used by SPEEDY and MITgcm
to partition longwave emissions into 4 spectral bands.
Input: temperature in Kelvin
returns: a four-element array of band fraction
Reproducing here the FORTRAN code from MITgcm/pkg/aim_v23/phy_radiat.F
.. code-block:: fortran
EPS3=0.95 _d 0
DO JTEMP=200,320
FBAND(JTEMP,0)= EPSLW
FBAND(JTEMP,2)= 0.148 _d 0 - 3.0 _d -6 *(JTEMP-247)**2
FBAND(JTEMP,3)=(0.375 _d 0 - 5.5 _d -6 *(JTEMP-282)**2)*EPS3
FBAND(JTEMP,4)= 0.314 _d 0 + 1.0 _d -5 *(JTEMP-315)**2
FBAND(JTEMP,1)= 1. _d 0 -(FBAND(JTEMP,0)+FBAND(JTEMP,2)
& +FBAND(JTEMP,3)+FBAND(JTEMP,4))
ENDDO
DO JB=0,NBAND
DO JTEMP=lwTemp1,199
FBAND(JTEMP,JB)=FBAND(200,JB)
ENDDO
DO JTEMP=321,lwTemp2
FBAND(JTEMP,JB)=FBAND(320,JB)
ENDDO
ENDDO
'''
# EPSLW is the fraction of longwave emission that goes directly to space
# It is set to zero by default in MITgcm code. We won't use it here.
Tarray = np.array(T)
Tarray = np.minimum(Tarray, 320.)
Tarray = np.maximum(Tarray, 200.)
num_band = 4
dims = [num_band]
dims.extend(Tarray.shape)
FBAND = np.zeros(dims)
EPS2=0.95
FBAND[1,:] = 0.148 - 3.0E-6 *(Tarray-247.)**2
FBAND[2,:] = (0.375 - 5.5E-6 *(Tarray-282.)**2)*EPS2
FBAND[3,:] = 0.314 + 1.0E-5 *(Tarray-315.)**2
FBAND[0,:] = 1. - np.sum(FBAND, axis=0)
return FBAND
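# A quick usage sketch for SPEEDY_band_fraction above: at 280 K the four band fractions
# are non-negative and, by construction of band 0 as the residual, sum to one.
import numpy as np
fb = SPEEDY_band_fraction(np.array([280.0]))
print(fb.ravel(), fb.sum())   # four fractions, total 1.0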
def _add_comments(self, comments, original_string=""):
"""
Returns a string with comments added
"""
return comments and "{0} # {1}".format(self._strip_comments(original_string)[0],
"; ".join(comments)) or original_string | 0.013115 |
def murmur3_64(data: Union[bytes, bytearray], seed: int = 19820125) -> int:
"""
Pure 64-bit Python implementation of MurmurHash3; see
http://stackoverflow.com/questions/13305290/is-there-a-pure-python-implementation-of-murmurhash
(plus RNC bugfixes).
Args:
data: data to hash
seed: seed
Returns:
integer hash
""" # noqa
m = 0xc6a4a7935bd1e995
r = 47
mask = 2 ** 64 - 1
length = len(data)
h = seed ^ ((m * length) & mask)
offset = (length // 8) * 8
# RNC: was /, but for Python 3 that gives float; brackets added for clarity
for ll in range(0, offset, 8):
k = bytes_to_long(data[ll:ll + 8])
k = (k * m) & mask
k ^= (k >> r) & mask
k = (k * m) & mask
h = (h ^ k)
h = (h * m) & mask
l = length & 7
if l >= 7:
h = (h ^ (data[offset + 6] << 48))
if l >= 6:
h = (h ^ (data[offset + 5] << 40))
if l >= 5:
h = (h ^ (data[offset + 4] << 32))
if l >= 4:
h = (h ^ (data[offset + 3] << 24))
if l >= 3:
h = (h ^ (data[offset + 2] << 16))
if l >= 2:
h = (h ^ (data[offset + 1] << 8))
if l >= 1:
h = (h ^ data[offset])
h = (h * m) & mask
h ^= (h >> r) & mask
h = (h * m) & mask
h ^= (h >> r) & mask
return h
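# A usage sketch for murmur3_64 above: equal input bytes and seed give equal 64-bit hashes.
# bytes_to_long is assumed to be available in the same module (e.g. Crypto.Util.number's
# bytes_to_long), since the implementation above calls it for each 8-byte block.
h1 = murmur3_64(b"hello, world")
h2 = murmur3_64(b"hello, world")
assert h1 == h2 and 0 <= h1 < 2 ** 64
print(hex(h1))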
async def publish(self, endpoint: str, payload: str):
"""
Publish to an endpoint.
:param str endpoint: Key by which the endpoint is recognised.
Subscribers will use this key to listen to events
:param str payload: Payload to publish with the event
:return: A boolean indicating if the publish was successful
"""
if self._conn is not None:
try:
await self._conn.publish(endpoint, payload)
return True
except redis.Error as e:
self._logger.error('Publish failed with error %s', repr(e))
return False
def is_equal_type(type_a: GraphQLType, type_b: GraphQLType):
"""Check whether two types are equal.
Provided two types, return true if the types are equal (invariant)."""
# Equivalent types are equal.
if type_a is type_b:
return True
# If either type is non-null, the other must also be non-null.
if is_non_null_type(type_a) and is_non_null_type(type_b):
# noinspection PyUnresolvedReferences
return is_equal_type(type_a.of_type, type_b.of_type) # type:ignore
# If either type is a list, the other must also be a list.
if is_list_type(type_a) and is_list_type(type_b):
# noinspection PyUnresolvedReferences
return is_equal_type(type_a.of_type, type_b.of_type) # type:ignore
# Otherwise the types are not equal.
return False
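# A hedged usage sketch assuming graphql-core's wrapper and scalar types (GraphQLList,
# GraphQLNonNull, GraphQLString): wrapped types are compared structurally, not by identity.
from graphql import GraphQLList, GraphQLNonNull, GraphQLString
print(is_equal_type(GraphQLList(GraphQLString), GraphQLList(GraphQLString)))     # True
print(is_equal_type(GraphQLNonNull(GraphQLString), GraphQLList(GraphQLString)))  # False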
def get_all_manifests(image, registry, insecure=False, dockercfg_path=None,
versions=('v1', 'v2', 'v2_list')):
"""Return manifest digests for image.
:param image: ImageName, the remote image to inspect
:param registry: str, URI for registry, if URI schema is not provided,
https:// will be used
:param insecure: bool, when True registry's cert is not verified
:param dockercfg_path: str, dirname of .dockercfg location
:param versions: tuple, for which manifest schema versions to fetch manifests
:return: dict of successful responses, with versions as keys
"""
digests = {}
registry_session = RegistrySession(registry, insecure=insecure, dockercfg_path=dockercfg_path)
for version in versions:
response, _ = get_manifest(image, registry_session, version)
if response:
digests[version] = response
return digests
def delete(self):
"""Delete the note, removing it from it's task.
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> project = user.get_project('PyTodoist')
>>> task = project.add_task('Install PyTodoist.')
>>> note = task.add_note('https://pypi.python.org/pypi')
>>> note.delete()
>>> notes = task.get_notes()
>>> print(len(notes))
0
"""
args = {'id': self.id}
owner = self.task.project.owner
_perform_command(owner, 'note_delete', args)
def graphdata(data):
"""returns ratings and episode number
to be used for making graphs"""
data = jh.get_ratings(data)
num = 1
rating_final = []
episode_final = []
for k,v in data.iteritems():
rating=[]
epinum=[]
for r in v:
if r != None:
rating.append(float(r))
epinum.append(num)
num+=1
rating_final.append(rating)
episode_final.append(epinum)
return rating_final,episode_final
def setpurpose(self, purpose):
"""
Sets the certificate purpose which the verified certificate should match
@param purpose - number from 1 to 9 or standard string defined
in OpenSSL
possible strings - sslclient, sslserver, nssslserver, smimesign,
smimeencrypt, crlsign, any, ocsphelper
"""
if isinstance(purpose, str):
purp_no = libcrypto.X509_PURPOSE_get_by_sname(purpose)
if purp_no <= 0:
raise X509Error("Invalid certificate purpose '%s'" % purpose)
elif isinstance(purpose, int):
purp_no = purpose
if libcrypto.X509_STORE_set_purpose(self.store, purp_no) <= 0:
raise X509Error("cannot set purpose") | 0.002594 |
def make_frequency_series(vec):
"""Return a frequency series of the input vector.
If the input is a frequency series it is returned, else if the input
vector is a real time series it is fourier transformed and returned as a
frequency series.
Parameters
----------
vector : TimeSeries or FrequencySeries
Returns
-------
Frequency Series: FrequencySeries
A frequency domain version of the input vector.
"""
if isinstance(vec, FrequencySeries):
return vec
if isinstance(vec, TimeSeries):
N = len(vec)
n = N/2+1
delta_f = 1.0 / N / vec.delta_t
vectilde = FrequencySeries(zeros(n, dtype=complex_same_precision_as(vec)),
delta_f=delta_f, copy=False)
fft(vec, vectilde)
return vectilde
else:
raise TypeError("Can only convert a TimeSeries to a FrequencySeries") | 0.003254 |
def cluster_types(types, max_clust=12):
"""
Generates a dictionary mapping each binary number in types to an integer
from 0 to max_clust. Hierarchical clustering is used to determine
which binary numbers should map to the same integer.
"""
if len(types) < max_clust:
max_clust = len(types)
# Do actual clustering
cluster_dict = do_clustering(types, max_clust)
cluster_ranks = rank_clusters(cluster_dict)
# Create a dictionary mapping binary numbers to indices
ranks = {}
for key in cluster_dict:
for typ in cluster_dict[key]:
ranks[typ] = cluster_ranks[key]
return ranks
def delete_database(self, name_or_obj):
"""
Deletes the specified database. If no database by that name
exists, no exception will be raised; instead, nothing at all
is done.
"""
name = utils.get_name(name_or_obj)
self._database_manager.delete(name)
def search_ipv6_environment(self, ipv6, id_environment):
"""Get IPv6 with an associated environment.
:param ipv6: IPv6 address in the format x1:x2:x3:x4:x5:x6:x7:x8.
:param id_environment: Environment identifier. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{'ipv6': {'id': < id >,
'id_vlan': < id_vlan >,
'bloco1': < bloco1 >,
'bloco2': < bloco2 >,
'bloco3': < bloco3 >,
'bloco4': < bloco4 >,
'bloco5': < bloco5 >,
'bloco6': < bloco6 >,
'bloco7': < bloco7 >,
'bloco8': < bloco8 >,
'descricao': < descricao > }}
:raise IpNaoExisteError: IPv6 is not registered or is not associated to the environment.
:raise AmbienteNaoExisteError: Environment not found.
:raise InvalidParameterError: Environment identifier and/or IPv6 string is/are none or invalid.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_environment):
raise InvalidParameterError(
u'Environment identifier is invalid or was not informed.')
ipv6_map = dict()
ipv6_map['ipv6'] = ipv6
ipv6_map['id_environment'] = id_environment
code, xml = self.submit(
{'ipv6_map': ipv6_map}, 'POST', 'ipv6/environment/')
return self.response(code, xml)
def update_asset(self, asset_form=None):
"""Updates an existing asset.
:param asset_form: the form containing the elements to be updated
:type asset_form: ``osid.repository.AssetForm``
:raise: ``IllegalState`` -- ``asset_form`` already used in an update transaction
:raise: ``InvalidArgument`` -- the form contains an invalid value
:raise: ``NullArgument`` -- ``asset_form`` is ``null``
:raise: ``OperationFailed`` -- unable to complete request
:raise: ``PermissionDenied`` -- authorization failure
:raise: ``Unsupported`` -- ``asset_form`` did not originate from ``get_asset_form_for_update()``
*compliance: mandatory -- This method must be implemented.*
"""
if asset_form is None:
raise NullArgument()
if not isinstance(asset_form, abc_repository_objects.AssetForm):
raise InvalidArgument('argument type is not an AssetForm')
if not asset_form.is_for_update():
raise InvalidArgument('form is for create only, not update')
try:
if self._forms[asset_form.get_id().get_identifier()] == UPDATED:
raise IllegalState('form already used in an update transaction')
except KeyError:
raise Unsupported('form did not originate from this session')
if not asset_form.is_valid():
raise InvalidArgument('one or more of the form elements is invalid')
url_path = construct_url('assets',
bank_id=self._catalog_idstr)
try:
result = self._put_request(url_path, asset_form._my_map)
except Exception:
raise # OperationFailed()
self._forms[asset_form.get_id().get_identifier()] = UPDATED
return objects.Asset(result)
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UsersCollector, self).get_default_config()
config.update({
'path': 'users',
'utmp': None,
})
return config
def yield_expr__26(self, yield_loc, exprs):
"""(2.6, 2.7, 3.0, 3.1, 3.2) yield_expr: 'yield' [testlist]"""
if exprs is not None:
return ast.Yield(value=exprs,
yield_loc=yield_loc, loc=yield_loc.join(exprs.loc))
else:
return ast.Yield(value=None,
yield_loc=yield_loc, loc=yield_loc)
def _encode_gif(images, fps):
"""Encodes numpy images into gif string.
Args:
images: A 4-D `uint8` `np.array` (or a list of 3-D images) of shape
`[time, height, width, channels]` where `channels` is 1 or 3.
fps: frames per second of the animation
Returns:
The encoded gif string.
Raises:
IOError: If the ffmpeg command returns an error.
"""
writer = WholeVideoWriter(fps)
writer.write_multi(images)
return writer.finish()
def account(self):
""" Returns the :class:`~plexapi.server.Account` object this server belongs to. """
data = self.query(Account.key)
return Account(self, data)
def remove_subscriptions(self, server_id, sub_paths):
# pylint: disable=line-too-long
"""
Remove indication subscription(s) from a WBEM server, by deleting the
indication subscription instances in the server.
The indication subscriptions must be owned or permanent (i.e. not
static).
Parameters:
server_id (:term:`string`):
The server ID of the WBEM server, returned by
:meth:`~pywbem.WBEMSubscriptionManager.add_server`.
sub_paths (:class:`~pywbem.CIMInstanceName` or list of :class:`~pywbem.CIMInstanceName`):
Instance path(s) of the indication subscription instance(s) in the
WBEM server.
Raises:
Exceptions raised by :class:`~pywbem.WBEMConnection`.
""" # noqa: E501
# Validate server_id
server = self._get_server(server_id)
# If list, recursively call this function with each list item.
if isinstance(sub_paths, list):
for sub_path in sub_paths:
self.remove_subscriptions(server_id, sub_path)
return
# Here, the variable will be a single list item.
sub_path = sub_paths
server.conn.DeleteInstance(sub_path)
inst_list = self._owned_subscriptions[server_id]
# We iterate backwards because we change the list
for i in six.moves.range(len(inst_list) - 1, -1, -1):
inst = inst_list[i]
if inst.path == sub_path:
del inst_list[i]
def iterator(self, symbol, chunk_range=None):
"""
Returns a generator that accesses each chunk in ascending order
Parameters
----------
symbol: str
the symbol for the given item in the DB
chunk_range: None, or a range object
allows you to subset the chunks by range
Returns
-------
generator
"""
sym = self._get_symbol_info(symbol)
if not sym:
raise NoDataFoundException("Symbol does not exist.")
c = CHUNKER_MAP[sym[CHUNKER]]
for chunk in list(self.get_chunk_ranges(symbol, chunk_range=chunk_range)):
yield self.read(symbol, chunk_range=c.to_range(chunk[0], chunk[1]))
def init_widget(self):
""" Our widget may not exist yet so we have to diverge from the normal
way of doing initialization. See `update_widget`
"""
if not self.toast:
return
super(AndroidToast, self).init_widget()
d = self.declaration
if not self.made_toast:
#: Set it to LONG
self.toast.setDuration(1)
if d.gravity:
self.set_gravity(d.gravity)
if d.show:
self.set_show(d.show)
def plot_confusion_matrix(y_true, y_pred, labels=None, true_labels=None,
pred_labels=None, title=None, normalize=False,
hide_zeros=False, x_tick_rotation=0, ax=None,
figsize=None, cmap='Blues', title_fontsize="large",
text_fontsize="medium"):
"""Generates confusion matrix plot from predictions and true labels
Args:
y_true (array-like, shape (n_samples)):
Ground truth (correct) target values.
y_pred (array-like, shape (n_samples)):
Estimated targets as returned by a classifier.
labels (array-like, shape (n_classes), optional): List of labels to
index the matrix. This may be used to reorder or select a subset
of labels. If none is given, those that appear at least once in
``y_true`` or ``y_pred`` are used in sorted order. (new in v0.2.5)
true_labels (array-like, optional): The true labels to display.
If none is given, then all of the labels are used.
pred_labels (array-like, optional): The predicted labels to display.
If none is given, then all of the labels are used.
title (string, optional): Title of the generated plot. Defaults to
"Confusion Matrix" if `normalize` is True. Else, defaults to
"Normalized Confusion Matrix.
normalize (bool, optional): If True, normalizes the confusion matrix
before plotting. Defaults to False.
hide_zeros (bool, optional): If True, does not plot cells containing a
value of zero. Defaults to False.
x_tick_rotation (int, optional): Rotates x-axis tick labels by the
specified angle. This is useful in cases where there are numerous
categories and the labels overlap each other.
ax (:class:`matplotlib.axes.Axes`, optional): The axes upon which to
plot the curve. If None, the plot is drawn on a new set of axes.
figsize (2-tuple, optional): Tuple denoting figure size of the plot
e.g. (6, 6). Defaults to ``None``.
cmap (string or :class:`matplotlib.colors.Colormap` instance, optional):
Colormap used for plotting the projection. View Matplotlib Colormap
documentation for available options.
https://matplotlib.org/users/colormaps.html
title_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"large".
text_fontsize (string or int, optional): Matplotlib-style fontsizes.
Use e.g. "small", "medium", "large" or integer-values. Defaults to
"medium".
Returns:
ax (:class:`matplotlib.axes.Axes`): The axes on which the plot was
drawn.
Example:
>>> import scikitplot.plotters as skplt
>>> rf = RandomForestClassifier()
>>> rf = rf.fit(X_train, y_train)
>>> y_pred = rf.predict(X_test)
>>> skplt.plot_confusion_matrix(y_test, y_pred, normalize=True)
<matplotlib.axes._subplots.AxesSubplot object at 0x7fe967d64490>
>>> plt.show()
.. image:: _static/examples/plot_confusion_matrix.png
:align: center
:alt: Confusion matrix
"""
if ax is None:
fig, ax = plt.subplots(1, 1, figsize=figsize)
cm = confusion_matrix(y_true, y_pred, labels=labels)
if labels is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(labels)
if normalize:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
cm = np.around(cm, decimals=2)
cm[np.isnan(cm)] = 0.0
if true_labels is None:
true_classes = classes
else:
validate_labels(classes, true_labels, "true_labels")
true_label_indexes = np.in1d(classes, true_labels)
true_classes = classes[true_label_indexes]
cm = cm[true_label_indexes]
if pred_labels is None:
pred_classes = classes
else:
validate_labels(classes, pred_labels, "pred_labels")
pred_label_indexes = np.in1d(classes, pred_labels)
pred_classes = classes[pred_label_indexes]
cm = cm[:, pred_label_indexes]
if title:
ax.set_title(title, fontsize=title_fontsize)
elif normalize:
ax.set_title('Normalized Confusion Matrix', fontsize=title_fontsize)
else:
ax.set_title('Confusion Matrix', fontsize=title_fontsize)
image = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.get_cmap(cmap))
plt.colorbar(mappable=image)
x_tick_marks = np.arange(len(pred_classes))
y_tick_marks = np.arange(len(true_classes))
ax.set_xticks(x_tick_marks)
ax.set_xticklabels(pred_classes, fontsize=text_fontsize,
rotation=x_tick_rotation)
ax.set_yticks(y_tick_marks)
ax.set_yticklabels(true_classes, fontsize=text_fontsize)
thresh = cm.max() / 2.
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
if not (hide_zeros and cm[i, j] == 0):
ax.text(j, i, cm[i, j],
horizontalalignment="center",
verticalalignment="center",
fontsize=text_fontsize,
color="white" if cm[i, j] > thresh else "black")
ax.set_ylabel('True label', fontsize=text_fontsize)
ax.set_xlabel('Predicted label', fontsize=text_fontsize)
ax.grid('off')
return ax
def _extract_t_indices(self, X, X2=None, dL_dK=None):
"""Extract times and output indices from the input matrix X. Times are ordered according to their index for convenience of computation, this ordering is stored in self._order and self.order2. These orderings are then mapped back to the original ordering (in X) using self._rorder and self._rorder2. """
# TODO: some fast checking here to see if this needs recomputing?
self._t = X[:, 0]
if not X.shape[1] == 2:
raise ValueError('Input matrix for ode1 covariance should have two columns, one containing times, the other output indices')
self._index = np.asarray(X[:, 1],dtype=np.int)
# Sort indices so that outputs are in blocks for computational
# convenience.
self._order = self._index.argsort()
self._index = self._index[self._order]
self._t = self._t[self._order]
self._rorder = self._order.argsort() # rorder is for reversing the order
if X2 is None:
self._t2 = None
self._index2 = None
self._order2 = self._order
self._rorder2 = self._rorder
else:
if not X2.shape[1] == 2:
raise ValueError('Input matrix for ode1 covariance should have two columns, one containing times, the other output indices')
self._t2 = X2[:, 0]
self._index2 = np.asarray(X2[:, 1],dtype=np.int)
self._order2 = self._index2.argsort()
self._index2 = self._index2[self._order2]
self._t2 = self._t2[self._order2]
self._rorder2 = self._order2.argsort() # rorder2 is for reversing order
if dL_dK is not None:
self._dL_dK = dL_dK[self._order, :]
self._dL_dK = self._dL_dK[:, self._order2]
def _draw_button(self, overlay, text, location):
"""Draws a button on the won and lost overlays, and return its hitbox."""
label = self.button_font.render(text, True, (119, 110, 101))
w, h = label.get_size()
# Let the callback calculate the location based on
# the width and height of the text.
x, y = location(w, h)
# Draw a box with some border space.
pygame.draw.rect(overlay, (238, 228, 218), (x - 5, y - 5, w + 10, h + 10))
overlay.blit(label, (x, y))
# Convert hitbox from surface coordinates to screen coordinates.
x += self.origin[0] - 5
y += self.origin[1] - 5
# Return the hitbox.
return x - 5, y - 5, x + w + 10, y + h + 10
def _put_policy_set(self, policy_set_id, body):
"""
Will create or update a policy set for the given path.
"""
assert isinstance(body, (dict)), "PUT requires body to be a dict."
uri = self._get_policy_set_uri(guid=policy_set_id)
return self.service._put(uri, body)
def _reduce(self):
"""Perform a greedy reduction of token stream.
If a reducer method matches, it will be executed, then the
:meth:`reduce` method will be called recursively to search
for any more possible reductions.
"""
for reduction, methname in self.reducers:
token_num = len(reduction)
if (len(self.tokens) >= token_num and
self.tokens[-token_num:] == reduction):
# Get the reduction method
meth = getattr(self, methname)
# Reduce the token stream
results = meth(*self.values[-token_num:])
self.tokens[-token_num:] = [r[0] for r in results]
self.values[-token_num:] = [r[1] for r in results]
# Check for any more reductions
return self._reduce() | 0.002286 |
def _store_basic_estimation_results(self, results_dict):
"""
Extracts the basic estimation results (i.e. those that need no further
calculation or logic applied to them) and stores them on the model
object.
Parameters
----------
results_dict : dict.
The estimation result dictionary that is output from
scipy.optimize.minimize. In addition to the standard keys which are
included, it should also contain the following keys:
`["final_log_likelihood", "chosen_probs", "long_probs",
"residuals", "ind_chi_squareds", "sucess", "message",
"rho_squared", "rho_bar_squared", "log_likelihood_null"]`
Returns
-------
None.
"""
        # Store the log-likelihood, fitted probabilities, residuals, and
# individual chi-square statistics
self.log_likelihood = results_dict["final_log_likelihood"]
self.fitted_probs = results_dict["chosen_probs"]
self.long_fitted_probs = results_dict["long_probs"]
self.long_residuals = results_dict["residuals"]
self.ind_chi_squareds = results_dict["ind_chi_squareds"]
self.chi_square = self.ind_chi_squareds.sum()
# Store the 'estimation success' of the optimization
self.estimation_success = results_dict["success"]
self.estimation_message = results_dict["message"]
# Store the summary measures of the model fit
self.rho_squared = results_dict["rho_squared"]
self.rho_bar_squared = results_dict["rho_bar_squared"]
# Store the initial and null log-likelihoods
self.null_log_likelihood = results_dict["log_likelihood_null"]
return None | 0.001138 |
def load_data_and_labels(filename, encoding='utf-8'):
"""Loads data and label from a file.
Args:
filename (str): path to the file.
encoding (str): file encoding format.
The file format is tab-separated values.
A blank line is required at the end of a sentence.
For example:
```
EU B-ORG
rejects O
German B-MISC
call O
to O
boycott O
British B-MISC
lamb O
. O
Peter B-PER
Blackburn I-PER
...
```
Returns:
tuple(numpy array, numpy array): data and labels.
Example:
>>> filename = 'conll2003/en/ner/train.txt'
>>> data, labels = load_data_and_labels(filename)
"""
sents, labels = [], []
words, tags = [], []
with open(filename, encoding=encoding) as f:
for line in f:
line = line.rstrip()
if line:
word, tag = line.split('\t')
words.append(word)
tags.append(tag)
else:
sents.append(words)
labels.append(tags)
words, tags = [], []
return sents, labels | 0.000829 |
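A quick usage sketch, assuming `load_data_and_labels` is importable; the file contents are made up:
import os
import tempfile
conll = 'EU\tB-ORG\nrejects\tO\nGerman\tB-MISC\n\nPeter\tB-PER\nBlackburn\tI-PER\n\n'
with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False, encoding='utf-8') as f:
    f.write(conll)
    path = f.name
sents, labels = load_data_and_labels(path)
print(sents)   # [['EU', 'rejects', 'German'], ['Peter', 'Blackburn']]
print(labels)  # [['B-ORG', 'O', 'B-MISC'], ['B-PER', 'I-PER']]
os.remove(path)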
def nbody_separation(expr, qs):
"""Convert n-body problem to 2-body problem.
Args:
expr: sympy expressions to be separated.
qs: sympy's symbols to be used as supplementary variable.
Return:
new_expr(sympy expr), constraints(sympy expr), mapping(dict(str, str -> Symbol)):
`new_expr` is converted problem, `constraints` is constraints for supplementary variable.
You may use `expr = new_expr + delta * constraints`, delta is floating point variable.
mapping is supplementary variable's mapping.
"""
try:
import sympy
except ImportError:
raise ImportError("This function requires sympy. Please install it.")
logging.debug(expr)
free_symbols = expr.free_symbols
logging.debug(free_symbols)
assert type(expr) == sympy.Add
logging.debug(expr.args)
mapping = {}
new_expr = sympy.expand(0)
constraints = sympy.expand(0)
i_var = 0
for arg in expr.args:
if isinstance(arg, sympy.Symbol):
new_expr += arg
continue
if not arg.free_symbols:
new_expr += arg
continue
assert type(arg) == sympy.Mul
syms = arg.free_symbols.copy()
while len(syms) > 2:
it = iter(syms)
for v1, v2 in zip(it, it):
if (str(v1), str(v2)) in mapping:
v = mapping[str(v1), str(v2)]
logging.debug(f"{v1}*{v2} -> {v} (Existed variable)")
else:
v = qs[i_var]
i_var += 1
mapping[(str(v1), str(v2))] = v
logging.debug(f"{v1}*{v2} -> {v} (New variable)")
constraints += 3*v + v1*v2 - 2*v1*v - 2*v2*v
logging.debug(f"constraints: {constraints}")
arg = arg.subs(v1*v2, v)
syms = arg.free_symbols.copy()
new_expr += arg
logging.debug(f"new_expr: {new_expr}")
return new_expr, constraints, mapping | 0.002444 |
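A small usage sketch, assuming `nbody_separation` is in scope; the symbols are arbitrary and `delta` is a hand-picked penalty weight:
import sympy
x, y, z = sympy.symbols('x y z')
q0, q1 = sympy.symbols('q0 q1')        # supplementary variables
expr = x*y*z + x                        # contains a 3-body term
new_expr, constraints, mapping = nbody_separation(expr, [q0, q1])
delta = 10.0                            # penalty weight for the constraint term
penalized = new_expr + delta * constraints
print(mapping)                          # e.g. {('x', 'y'): q0}, depending on iteration order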
def usermacro_get(macro=None, hostids=None, templateids=None, hostmacroids=None,
globalmacroids=None, globalmacro=False, **kwargs):
'''
Retrieve user macros according to the given parameters.
Args:
macro: name of the usermacro
hostids: Return macros for the given hostids
templateids: Return macros for the given templateids
hostmacroids: Return macros with the given hostmacroids
globalmacroids: Return macros with the given globalmacroids (implies globalmacro=True)
globalmacro: if True, returns only global macros
optional kwargs:
_connection_user: zabbix user (can also be set in opts or pillar, see module's docstring)
_connection_password: zabbix password (can also be set in opts or pillar, see module's docstring)
_connection_url: url of zabbix frontend (can also be set in opts or pillar, see module's docstring)
Returns:
Array with usermacro details, False if no usermacro found or on failure.
CLI Example:
.. code-block:: bash
salt '*' zabbix.usermacro_get macro='{$SNMP_COMMUNITY}'
'''
conn_args = _login(**kwargs)
ret = {}
try:
if conn_args:
method = 'usermacro.get'
params = {"output": "extend", "filter": {}}
if macro:
                # Macro names wrapped in '{' and '}' can arrive as a dict once the CLI/YAML layer has parsed them
                if isinstance(macro, dict):
                    macro = "{" + six.text_type(next(iter(macro))) + "}"
if not macro.startswith('{') and not macro.endswith('}'):
macro = "{" + macro + "}"
params['filter'].setdefault('macro', macro)
if hostids:
params.setdefault('hostids', hostids)
elif templateids:
                params.setdefault('templateids', templateids)
if hostmacroids:
params.setdefault('hostmacroids', hostmacroids)
elif globalmacroids:
globalmacro = True
params.setdefault('globalmacroids', globalmacroids)
if globalmacro:
params = _params_extend(params, globalmacro=True)
params = _params_extend(params, **kwargs)
ret = _query(method, params, conn_args['url'], conn_args['auth'])
return ret['result'] if ret['result'] else False
else:
raise KeyError
except KeyError:
return ret | 0.003532 |
def start(name, call=None):
'''
Start a VM in Linode.
name
The name of the VM to start.
CLI Example:
.. code-block:: bash
        salt-cloud -a start vm_name
'''
if call != 'action':
raise SaltCloudException(
'The start action must be called with -a or --action.'
)
node_id = get_linode_id_from_name(name)
node = get_linode(kwargs={'linode_id': node_id})
if node['STATUS'] == 1:
return {'success': True,
'action': 'start',
'state': 'Running',
'msg': 'Machine already running'}
response = _query('linode', 'boot', args={'LinodeID': node_id})['DATA']
if _wait_for_job(node_id, response['JobID']):
return {'state': 'Running',
'action': 'start',
'success': True}
else:
return {'action': 'start',
'success': False} | 0.001082 |
def svd_convolution(inp, outmaps, kernel, r, pad=None, stride=None,
dilation=None, uv_init=None, b_init=None, base_axis=1,
fix_parameters=False, rng=None, with_bias=True):
"""SVD convolution is a low rank approximation of the convolution
    layer. It can be seen as a depthwise convolution followed by a
1x1 convolution.
The flattened kernels for the i-th input map are expressed by
their low rank approximation. The kernels for the i-th input
:math:`{\\mathbf W_i}` are approximated with the singular value
decomposition (SVD) and by selecting the :math:`{R}` dominant
singular values and the corresponding singular vectors.
.. math::
{\\mathbf W_{:,i,:}} ~ {\\mathbf U_i} {\\mathbf V_i}.
:math:`{\\mathbf U}` contains the weights of the depthwise
convolution with multiplier :math:`{R}` and :math:`{\\mathbf V}`
contains the weights of the 1x1 convolution.
If `uv_init` is a numpy array, :math:`{\\mathbf U}` and
:math:`{\\mathbf V}` are computed such that `uv_init` is
approximated by :math:`{\\mathbf{UV}}`. If `uv_init` is `None` or
an initializer, the product of :math:`{\\mathbf U}` and
:math:`{\\mathbf V}` approximates the random initialization.
If :math:`{\\mathbf U}` and :math:`{\\mathbf V}` exist in the
context, they take precedence over `uv_init`.
Suppose the kernel tensor of the convolution is of :math:`{O \\times I \\times K \\times K}` and
the compression rate you want to specify is :math:`{CR}`, then you
set :math:`{R}` as
.. math::
R = \\left\\lfloor \\frac{(1 - CR)OIK^2}{I(O + K^2)} \\right\\rfloor.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of convolution kernels (which is equal
to the number of output channels). For example, to apply
convolution on an input with 16 types of filters, specify
16.
kernel (tuple): Convolution kernel size. For example,
to apply convolution on an image with a 3 (height) by 5
(width) two-dimensional kernel, specify (3, 5).
r (int): Rank of the factorized layer.
pad (tuple): Padding sizes (`int`) for dimensions.
stride (tuple): Stride sizes (`int`) for dimensions.
dilation (tuple): Dilation sizes (`int`) for dimensions.
uv_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`):
Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the
sample dimensions.
fix_parameters (bool): When set to `True`, the weights and
biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
Returns:
:class:`~nnabla.Variable`: :math:`(B + 1)`-D array.
(:math:`M_0 \\times \ldots \\times M_{B-1} \\times L`)
"""
    assert r > 0, "svd_convolution: The rank must be larger than zero"
if uv_init is None:
uv_init = UniformInitializer(
calc_uniform_lim_glorot(inp.shape[base_axis], outmaps,
tuple(kernel)), rng=rng)
if type(uv_init) is np.ndarray:
# TODO: Assert that size of uv_init is correct
# uv is initialize with numpy array
uv = uv_init
else:
# uv is initialize from initializer
uv = uv_init((outmaps, inp.shape[base_axis]) + tuple(kernel))
# flatten kernels
uv = uv.reshape((outmaps, inp.shape[base_axis], np.prod(kernel)))
u = get_parameter('U')
v = get_parameter('V')
if (u is None) or (v is None):
inmaps = inp.shape[base_axis]
u_low_rank = np.zeros((inmaps, np.prod(kernel), r))
v_low_rank = np.zeros((inmaps, r, outmaps))
for i in range(inmaps):
K = np.transpose(uv[:, i, :])
u_, s_, v_ = np.linalg.svd(K, full_matrices=False)
u_low_rank[i, :, :] = np.dot(u_[:, :r], np.diag(s_[:r]))
v_low_rank[i, :, :] = v_[:r, :]
# reshape U : (I,K*K,r) -> (I*r,K,K) for depthwise conv
u = nn.Variable((inmaps * r,) + tuple(kernel),
need_grad=True)
u.d = (np.transpose(u_low_rank, axes=(0, 2, 1))
.reshape((inmaps * r,) + tuple(kernel)))
nn.parameter.set_parameter("U", u)
# reshape V : (I,r,O) -> (O,I*r,1,1) for 1X1 conv
kernel_one = (1,) * len(kernel) # 1x1 for 2D convolution
v = nn.Variable((outmaps, inmaps * r) + kernel_one,
need_grad=True)
v.d = (np.transpose(v_low_rank, axes=(2, 0, 1))
.reshape((outmaps, inmaps * r) + kernel_one))
nn.parameter.set_parameter("V", v)
if fix_parameters == u.need_grad:
u = u.get_unlinked_variable(need_grad=not fix_parameters)
if fix_parameters == v.need_grad:
v = v.get_unlinked_variable(need_grad=not fix_parameters)
if with_bias and b_init is None:
b_init = ConstantInitializer()
b = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
y = F.depthwise_convolution(inp, u, bias=None, base_axis=base_axis,
pad=pad, stride=stride, dilation=dilation,
multiplier=r)
y = F.convolution(y, v, bias=b, base_axis=base_axis, pad=None,
stride=None, dilation=None, group=1)
return y | 0.001184 |
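A small helper sketch for choosing `r` from a target compression rate `CR`, following the formula in the docstring; the helper name is illustrative, not part of the API:
import math
def rank_for_compression(outmaps, inmaps, kernel, cr):
    # R = floor((1 - CR) * O * I * K^2 / (I * (O + K^2)))
    k2 = kernel[0] * kernel[1]
    return int(math.floor((1.0 - cr) * outmaps * inmaps * k2 /
                          (inmaps * (outmaps + k2))))
r = rank_for_compression(64, 64, (3, 3), cr=0.75)   # -> 1 for a 64->64 3x3 conv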
def reference(self, ):
"""Reference a file
:returns: None
:rtype: None
:raises: None
"""
tfi = self.get_taskfileinfo_selection()
if tfi:
self.reftrack.reference(tfi) | 0.008547 |
def dump_csv(data: List[dict], fieldnames: Sequence[str], with_header: bool = False, crlf: bool = False,
tsv: bool = False) -> str:
"""
:param data:
:param fieldnames:
:param with_header:
:param crlf:
:param tsv:
:return: unicode
"""
def force_str(v):
# XXX: Double quotation behaves strangely... so replace (why?)
return dump_json(v).replace('"', "'") if isinstance(v, (dict, list)) else v
with io.StringIO() as sio:
dialect = get_dialect_name(crlf, tsv)
writer = csv.DictWriter(sio, fieldnames=fieldnames, dialect=dialect, extrasaction='ignore')
if with_header:
writer.writeheader()
for x in data:
writer.writerow({k: force_str(v) for k, v in x.items()})
sio.seek(0)
return sio.read() | 0.004825 |
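A usage sketch, assuming `dump_csv` and its helpers (`get_dialect_name`, `dump_json`) are importable; the rows are made up:
rows = [
    {'id': 1, 'name': 'alice', 'meta': {'active': True}},
    {'id': 2, 'name': 'bob', 'extra': 'dropped'},
]
# Nested dicts/lists are serialized to strings; keys outside fieldnames are ignored.
print(dump_csv(rows, fieldnames=['id', 'name', 'meta'], with_header=True))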
def build_docker_run_command(configuration):
"""
Translate a declarative docker `configuration` to a `docker run` command.
Parameters
----------
configuration : dict
configuration
Returns
-------
args : list
sequence of command line arguments to run a command in a container
"""
parts = configuration.pop('docker').split()
parts.append('run')
run = configuration.pop('run')
# Ensure all env-files have proper paths
if 'env-file' in run:
run['env-file'] = [os.path.join(configuration['workspace'], env_file)
for env_file in run['env-file']]
parts.extend(build_parameter_parts(
run, 'user', 'workdir', 'rm', 'interactive', 'tty', 'env-file', 'cpu-shares', 'name',
'network', 'label', 'memory', 'entrypoint', 'runtime', 'privileged', 'group-add'
))
# Add the mounts
# The following code requires docker >= 17.06
'''for mount in run.pop('mount', []):
if mount['type'] == 'bind':
mount['source'] = os.path.join(
configuration['workspace'], mount['source'])
parts.extend(['--mount', ",".join(["%s=%s" % item for item in mount.items()])])'''
# Add the mounts
for mount in run.pop('mount', []):
if mount['type'] == 'tmpfs':
raise RuntimeError('tmpfs-mounts are currently not supported via the mount ' +
'directive in docker_interface. Consider using the tmpfs ' +
'directive instead.')
if mount['type'] == 'bind':
mount['source'] = os.path.abspath(
os.path.join(configuration['workspace'], mount['source']))
vol_config = '--volume=%s:%s' % (mount['source'], mount['destination'])
if 'readonly' in mount and mount['readonly']:
vol_config += ':ro'
parts.append(vol_config)
# Set or forward environment variables
for key, value in run.pop('env', {}).items():
if value is None:
parts.append('--env=%s' % key)
else:
parts.append('--env=%s=%s' % (key, value))
parts.append('--env=DOCKER_INTERFACE=true')
# Forward ports
for publish in run.pop('publish', []):
parts.append('--publish=%s:%s:%s' % tuple([
publish.get(key, '') for key in "ip host container".split()]))
# Add temporary file systems
for tmpfs in run.pop('tmpfs', []):
destination = tmpfs['destination']
options = tmpfs.pop('options', [])
for key in ['mode', 'size']:
if key in tmpfs:
options.append('%s=%s' % (key, tmpfs[key]))
if options:
destination = "%s:%s" % (destination, ",".join(options))
parts.extend(['--tmpfs', destination])
parts.append(run.pop('image'))
parts.extend(run.pop('cmd', []))
return parts | 0.002415 |
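A sketch of the declarative configuration this function expects, assuming it and its `build_parameter_parts` helper are importable and that the helper emits nothing for absent options; the workspace path and image name are made up:
configuration = {
    'workspace': '/tmp/project',   # used to resolve relative mount/env-file paths
    'docker': 'docker',
    'run': {
        'image': 'python:3.8',
        'env': {'DEBUG': '1'},
        'publish': [{'ip': '127.0.0.1', 'host': 8080, 'container': 80}],
        'cmd': ['python', '--version'],
    },
}
args = build_docker_run_command(configuration)
# roughly: ['docker', 'run', '--env=DEBUG=1', '--env=DOCKER_INTERFACE=true',
#           '--publish=127.0.0.1:8080:80', 'python:3.8', 'python', '--version']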
def fspaths(draw, allow_pathlike=None):
"""A strategy which generates filesystem path values.
The generated values include everything which the builtin
:func:`python:open` function accepts i.e. which won't lead to
:exc:`ValueError` or :exc:`TypeError` being raised.
Note that the range of the returned values depends on the operating
system, the Python version, and the filesystem encoding as returned by
:func:`sys.getfilesystemencoding`.
:param allow_pathlike:
If :obj:`python:None` makes the strategy include objects implementing
the :class:`python:os.PathLike` interface when Python >= 3.6 is used.
If :obj:`python:False` no pathlike objects will be generated. If
:obj:`python:True` pathlike will be generated (Python >= 3.6 required)
:type allow_pathlike: :obj:`python:bool` or :obj:`python:None`
.. versionadded:: 3.15
"""
has_pathlike = hasattr(os, 'PathLike')
if allow_pathlike is None:
allow_pathlike = has_pathlike
if allow_pathlike and not has_pathlike:
raise InvalidArgument(
'allow_pathlike: os.PathLike not supported, use None instead '
'to enable it only when available')
result_type = draw(sampled_from([bytes, text_type]))
def tp(s=''):
return _str_to_path(s, result_type)
special_component = sampled_from([tp(os.curdir), tp(os.pardir)])
normal_component = _filename(result_type)
path_component = one_of(normal_component, special_component)
extension = normal_component.map(lambda f: tp(os.extsep) + f)
root = _path_root(result_type)
def optional(st):
return one_of(st, just(result_type()))
sep = sampled_from([os.sep, os.altsep or os.sep]).map(tp)
path_part = builds(lambda s, l: s.join(l), sep, lists(path_component))
main_strategy = builds(lambda *x: tp().join(x),
optional(root), path_part, optional(extension))
if allow_pathlike and hasattr(os, 'fspath'):
pathlike_strategy = main_strategy.map(lambda p: _PathLike(p))
main_strategy = one_of(main_strategy, pathlike_strategy)
return draw(main_strategy) | 0.00046 |
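A usage sketch with Hypothesis, assuming `fspaths` is exposed as a ready-to-use strategy (e.g. wrapped with `@st.composite`, which supplies the `draw` argument):
from hypothesis import given
@given(fspaths())
def test_open_accepts_generated_paths(path):
    # open() may fail because the file does not exist, but it should not
    # reject the generated value itself with TypeError or ValueError.
    try:
        open(path).close()
    except OSError:
        pass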
def init_argument_parser(name=None, **kwargs):
"""Creates a global ArgumentParser instance with the given name,
passing any args other than "name" to the ArgumentParser constructor.
This instance can then be retrieved using get_argument_parser(..)
"""
if name is None:
name = "default"
if name in _parsers:
raise ValueError(("kwargs besides 'name' can only be passed in the"
" first time. '%s' ArgumentParser already exists: %s") % (
name, _parsers[name]))
kwargs.setdefault('formatter_class', argparse.ArgumentDefaultsHelpFormatter)
kwargs.setdefault('conflict_handler', 'resolve')
_parsers[name] = ArgumentParser(**kwargs) | 0.004267 |
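A usage sketch, assuming the companion `get_argument_parser` accessor mentioned in the docstring is importable; the parser name and option are arbitrary:
init_argument_parser('cli', description='Example command line tool')
parser = get_argument_parser('cli')
parser.add_argument('--verbose', action='store_true', help='enable verbose output')
args = parser.parse_args(['--verbose'])
print(args.verbose)  # True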
def outgoing_caller_ids(self):
"""
Access the outgoing_caller_ids
:returns: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList
:rtype: twilio.rest.api.v2010.account.outgoing_caller_id.OutgoingCallerIdList
"""
if self._outgoing_caller_ids is None:
self._outgoing_caller_ids = OutgoingCallerIdList(self._version, account_sid=self._solution['sid'], )
return self._outgoing_caller_ids | 0.010684 |
def build_from_source(version, **kwargs):
"""
Builds specified Spark version from source.
:param version:
:param kwargs:
:return: (Integer) Status code of build/mvn command.
"""
mvn = os.path.join(Spark.svm_version_path(version), 'build', 'mvn')
Spark.chmod_add_excute(mvn)
p = subprocess.Popen([mvn, '-DskipTests', 'clean', 'package'], cwd=Spark.svm_version_path(version))
p.wait()
return p.returncode | 0.006122 |
def add_log_level(value, name):
"""
Add a new log level to the :mod:`logging` module.
:param value: The log level's number (an integer).
:param name: The name for the log level (a string).
"""
logging.addLevelName(value, name)
setattr(logging, name, value) | 0.003509 |
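A short usage sketch; the level name and number below are arbitrary:
import logging
add_log_level(15, 'VERBOSE')   # registers logging.VERBOSE == 15
logging.basicConfig(level=logging.VERBOSE)
logging.log(logging.VERBOSE, 'sits between DEBUG (10) and INFO (20)')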
def get_parser():
"""Return a parser for the command-line arguments."""
parser = argparse.ArgumentParser(
add_help=False,
description='Analysis of your architecture strength based on DSM data')
parser.add_argument(
'-c', '--config', action='store', type=valid_file, dest='config_file',
metavar='FILE', help='Configuration file to use.')
parser.add_argument(
'-h', '--help', action='help', default=argparse.SUPPRESS,
help='Show this help message and exit.')
parser.add_argument(
'-i', '--input', action='store', type=valid_file, dest='input_file',
metavar='FILE', help='Input file containing CSV data.')
parser.add_argument(
'-l', '--list-plugins', action='store_true', dest='list_plugins',
default=False, help='Show the available plugins. Default: false.')
parser.add_argument(
'--no-color', action='store_true', dest='no_color', default=False,
help='Do not use colors. Default: false.')
parser.add_argument(
'--no-config', action='store_true', dest='no_config', default=False,
help='Do not load configuration from file. Default: false.')
parser.add_argument(
'-v', '--verbose-level', action='store', dest='level',
type=valid_level, default='ERROR', help='Level of verbosity.')
parser.add_argument(
'-V', '--version', action='version', version='archan %s' % __version__,
help='Show the current version of the program and exit.')
return parser | 0.000654 |
def _get_network(self, kind, router=True, vlans=True, vlan_ids=True):
"""Wrapper for getting details about networks.
:param string kind: network kind. Typically 'public' or 'private'
:param boolean router: flag to include router information
:param boolean vlans: flag to include vlan information
:param boolean vlan_ids: flag to include vlan_ids
"""
network = {}
macs = self.get('%s_mac' % kind)
network['mac_addresses'] = macs
if len(macs) == 0:
return network
if router:
network['router'] = self.get('router', macs[0])
if vlans:
network['vlans'] = self.get('vlans', macs[0])
if vlan_ids:
network['vlan_ids'] = self.get('vlan_ids', macs[0])
return network | 0.002384 |
def _wrap_result(name, data, sparse_index, fill_value, dtype=None):
"""
wrap op result to have correct dtype
"""
if name.startswith('__'):
# e.g. __eq__ --> eq
name = name[2:-2]
if name in ('eq', 'ne', 'lt', 'gt', 'le', 'ge'):
        dtype = bool
fill_value = lib.item_from_zerodim(fill_value)
if is_bool_dtype(dtype):
# fill_value may be np.bool_
fill_value = bool(fill_value)
return SparseArray(data,
sparse_index=sparse_index,
fill_value=fill_value,
dtype=dtype) | 0.001653 |
def dot_path(obj: t.Union[t.Dict, object],
path: str,
default: t.Any = None,
separator: str = '.'):
"""
Provides an access to elements of a mixed dict/object type by a delimiter-separated path.
::
class O1:
my_dict = {'a': {'b': 1}}
class O2:
def __init__(self):
self.nested = O1()
class O3:
final = O2()
o = O3()
assert utils.dot_path(o, 'final.nested.my_dict.a.b') == 1
.. testoutput::
True
:param obj: object or dict
:param path: path to value
:param default: default value if chain resolve failed
:param separator: ``.`` by default
:return: value or default
"""
path_items = path.split(separator)
val = obj
sentinel = object()
for item in path_items:
if isinstance(val, dict):
val = val.get(item, sentinel)
if val is sentinel:
return default
else:
val = getattr(val, item, sentinel)
if val is sentinel:
return default
return val | 0.001764 |
def get_outcome(self, outcome):
"""
Returns the details of the outcome with the given id.
:calls: `GET /api/v1/outcomes/:id \
<https://canvas.instructure.com/doc/api/outcomes.html#method.outcomes_api.show>`_
:param outcome: The outcome object or ID to return.
:type outcome: :class:`canvasapi.outcome.Outcome` or int
:returns: An Outcome object.
:rtype: :class:`canvasapi.outcome.Outcome`
"""
from canvasapi.outcome import Outcome
outcome_id = obj_or_id(outcome, "outcome", (Outcome,))
response = self.__requester.request(
'GET',
'outcomes/{}'.format(outcome_id)
)
return Outcome(self.__requester, response.json()) | 0.002653 |
def valid_ip_prefix(ip_prefix):
"""Perform a sanity check on ip_prefix.
Arguments:
ip_prefix (str): The IP-Prefix to validate
Returns:
True if ip_prefix is a valid IPv4 address with prefix length 32 or a
valid IPv6 address with prefix length 128, otherwise False
"""
try:
ip_prefix = ipaddress.ip_network(ip_prefix)
except ValueError:
return False
else:
        if ip_prefix.version == 4 and ip_prefix.prefixlen != 32:
            return False
        if ip_prefix.version == 6 and ip_prefix.prefixlen != 128:
return False
return True | 0.00158 |
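A few illustrative calls, assuming the function above is in scope:
assert valid_ip_prefix('192.0.2.1/32') is True      # IPv4 host route
assert valid_ip_prefix('2001:db8::1/128') is True    # IPv6 host route
assert valid_ip_prefix('192.0.2.0/33') is False      # invalid prefix length
assert valid_ip_prefix('not-an-ip') is False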
def onex(self):
"""
delete all X columns except the first one.
"""
xCols=[i for i in range(self.nCols) if self.colTypes[i]==3]
if len(xCols)>1:
for colI in xCols[1:][::-1]:
self.colDelete(colI) | 0.019157 |
def upload_to_s3(self, region='us-east-1'):
"""
        Uploads the vmdk file to aws s3
        :param region: aws region used for the s3 upload
:return:
"""
s3_import_cmd = "aws s3 cp {} s3://{} --profile '{}' --region {}".format(self.upload_file, self.bucket_name,
self.aws_project, region)
print "Uploading to bucket {} in s3 with the cmd: {}".format(self.bucket_name, s3_import_cmd)
# s3 upload puts DL progress to stderr
s3_upload = subprocess.Popen(shlex.split(s3_import_cmd), stderr=subprocess.PIPE)
while True:
progress = s3_upload.stderr.readline()
if progress == '' and s3_upload.poll() is not None:
break
if progress:
print (progress)
rc = s3_upload.poll()
if rc != 0:
            raise subprocess.CalledProcessError(rc, s3_import_cmd)
print "Upload completed successfully" | 0.007028 |
def customPRF512(key, amac, smac, anonce, snonce):
"""Source https://stackoverflow.com/questions/12018920/"""
A = b"Pairwise key expansion"
B = b"".join(sorted([amac, smac]) + sorted([anonce, snonce]))
blen = 64
i = 0
R = b''
while i <= ((blen * 8 + 159) / 160):
hmacsha1 = hmac.new(key, A + chb(0x00) + B + chb(i), hashlib.sha1)
i += 1
R = R + hmacsha1.digest()
return R[:blen] | 0.002299 |
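A usage sketch with placeholder handshake material; the function relies on Scapy's `chb` helper internally, and the byte values below are dummies, not a real PMK or nonces:
pmk    = b'\x00' * 32   # pairwise master key (placeholder)
amac   = b'\x02' * 6    # authenticator MAC address, raw bytes
smac   = b'\x01' * 6    # supplicant MAC address, raw bytes
anonce = b'\xaa' * 32   # authenticator nonce
snonce = b'\xbb' * 32   # supplicant nonce
ptk = customPRF512(pmk, amac, smac, anonce, snonce)   # 64-byte PTK
kck, kek, tk = ptk[:16], ptk[16:32], ptk[32:48]        # usual WPA PTK layout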
def binify_and_jsd(df1, df2, bins, pair=None):
"""Binify and calculate jensen-shannon divergence between two dataframes
Parameters
----------
df1, df2 : pandas.DataFrames
Dataframes to calculate JSD between columns of. Must have overlapping
column names
bins : array-like
Bins to use for transforming df{1,2} into probability distributions
pair : str, optional
Name of the pair to save as the name of the series
Returns
-------
divergence : pandas.Series
The Jensen-Shannon divergence between columns of df1, df2
"""
binned1 = binify(df1, bins=bins).dropna(how='all', axis=1)
binned2 = binify(df2, bins=bins).dropna(how='all', axis=1)
binned1, binned2 = binned1.align(binned2, axis=1, join='inner')
series = np.sqrt(jsd(binned1, binned2))
series.name = pair
return series | 0.001138 |
def bind(self, name, filterset):
""" attach filter to filterset
gives a name to use to extract arguments from querydict
"""
if self.name is not None:
name = self.name
self.field.bind(name, self) | 0.008097 |
def _copy_across(self, rel_path, cb=None):
"""If the upstream doesn't have the file, get it from the alternate and store it in the upstream"""
from . import copy_file_or_flo
if not self.upstream.has(rel_path):
if not self.alternate.has(rel_path):
return None
source = self.alternate.get_stream(rel_path)
sink = self.upstream.put_stream(rel_path, metadata=source.meta)
try:
copy_file_or_flo(source, sink, cb=cb)
except:
self.upstream.remove(rel_path, propagate=True)
raise
source.close()
sink.close() | 0.005891 |
def input_format(self, content_type):
"""Returns the set input_format handler for the given content_type"""
return getattr(self, '_input_format', {}).get(content_type, hug.defaults.input_format.get(content_type, None)) | 0.012821 |
def all(self, campaign_id, get_all=False, **queryparams):
"""
Get information about members who have unsubscribed from a specific
campaign.
:param campaign_id: The unique id for the campaign.
:type campaign_id: :py:class:`str`
:param get_all: Should the query get all results
:type get_all: :py:class:`bool`
:param queryparams: The query string parameters
queryparams['fields'] = []
queryparams['exclude_fields'] = []
queryparams['count'] = integer
queryparams['offset'] = integer
"""
self.campaign_id = campaign_id
self.subscriber_hash = None
if get_all:
return self._iterate(url=self._build_path(campaign_id, 'unsubscribed'), **queryparams)
else:
return self._mc_client._get(url=self._build_path(campaign_id, 'unsubscribed'), **queryparams) | 0.00443 |
def read_routes6():
"""Return a list of IPv6 routes than can be used by Scapy."""
# Call netstat to retrieve IPv6 routes
fd_netstat = os.popen("netstat -rn -f inet6")
# List interfaces IPv6 addresses
lifaddr = in6_getifaddr()
if not lifaddr:
return []
# Routes header information
got_header = False
mtu_present = False
prio_present = False
# Parse the routes
routes = []
for line in fd_netstat.readlines():
# Parse the routes header and try to identify extra columns
if not got_header:
if "Destination" == line[:11]:
got_header = True
mtu_present = "Mtu" in line
prio_present = "Prio" in line
continue
# Parse a route entry according to the operating system
splitted_line = line.split()
if OPENBSD or NETBSD:
index = 5 + mtu_present + prio_present
if len(splitted_line) < index:
warning("Not enough columns in route entry !")
continue
destination, next_hop, flags = splitted_line[:3]
dev = splitted_line[index]
else:
# FREEBSD or DARWIN
if len(splitted_line) < 4:
warning("Not enough columns in route entry !")
continue
destination, next_hop, flags, dev = splitted_line[:4]
# XXX: TODO: add metrics for unix.py (use -e option on netstat)
metric = 1
# Check flags
if "U" not in flags: # usable route
continue
if "R" in flags: # Host or net unreachable
continue
if "m" in flags: # multicast address
# Note: multicast routing is handled in Route6.route()
continue
# Replace link with the default route in next_hop
if "link" in next_hop:
next_hop = "::"
# Default prefix length
destination_plen = 128
# Extract network interface from the zone id
if '%' in destination:
destination, dev = destination.split('%')
if '/' in dev:
# Example: fe80::%lo0/64 ; dev = "lo0/64"
dev, destination_plen = dev.split('/')
if '%' in next_hop:
next_hop, dev = next_hop.split('%')
# Ensure that the next hop is a valid IPv6 address
if not in6_isvalid(next_hop):
# Note: the 'Gateway' column might contain a MAC address
next_hop = "::"
# Modify parsed routing entries
# Note: these rules are OS specific and may evolve over time
if destination == "default":
destination, destination_plen = "::", 0
elif '/' in destination:
# Example: fe80::/10
destination, destination_plen = destination.split('/')
if '/' in dev:
# Example: ff02::%lo0/32 ; dev = "lo0/32"
dev, destination_plen = dev.split('/')
# Check route entries parameters consistency
if not in6_isvalid(destination):
warning("Invalid destination IPv6 address in route entry !")
continue
try:
destination_plen = int(destination_plen)
except Exception:
warning("Invalid IPv6 prefix length in route entry !")
continue
if in6_ismlladdr(destination) or in6_ismnladdr(destination):
# Note: multicast routing is handled in Route6.route()
continue
if LOOPBACK_NAME in dev:
# Handle ::1 separately
cset = ["::1"]
next_hop = "::"
else:
# Get possible IPv6 source addresses
devaddrs = (x for x in lifaddr if x[2] == dev)
cset = construct_source_candidate_set(destination, destination_plen, devaddrs) # noqa: E501
if len(cset):
routes.append((destination, destination_plen, next_hop, dev, cset, metric)) # noqa: E501
fd_netstat.close()
return routes | 0.000248 |
def update(self, **args):
"""
Update the current :class:`InstanceResource`
"""
self_dict = self.to_dict()
if args:
self_dict = dict(list(self_dict.items()) + list(args.items()))
response = self.requester.put(
'/{endpoint}/{id}', endpoint=self.endpoint,
id=self.id, payload=self_dict
)
obj_json = response.json()
if 'version' in obj_json:
self.__dict__['version'] = obj_json['version']
return self | 0.003817 |
def update_transfer(
self,
nonce: Nonce,
balance_hash: BalanceHash,
additional_hash: AdditionalHash,
partner_signature: Signature,
signature: Signature,
block_identifier: BlockSpecification,
):
""" Updates the channel using the provided balance proof. """
self.token_network.update_transfer(
channel_identifier=self.channel_identifier,
partner=self.participant2,
balance_hash=balance_hash,
nonce=nonce,
additional_hash=additional_hash,
closing_signature=partner_signature,
non_closing_signature=signature,
given_block_identifier=block_identifier,
) | 0.003968 |
def save(self, filename, format=None, **kwargs):
""" Save the object to file given by filename.
"""
if format is None:
# try to derive protocol from file extension
format = format_from_extension(filename)
        with open(filename, 'wb') as fp:
self.save_to_file_object(fp, format, **kwargs) | 0.005682 |
def stresser(self, stress_rule='FSR'):
"""
Args:
:param stress_rule: Stress Rule, valid options:
'FSR': French Stress Rule, stress falls on the ultima, unless
it contains schwa (ends with e), in which case the penult is
stressed
'GSR': Germanic Stress Rule, stress falls on the first syllable
of the stemm. Note that the accuracy of the function directly
depends on that of the stemmer.
                'LSR': Latin Stress Rule, stress falls on the penult if it is
heavy, else, if it has more than two syllables on the
antepenult, else on the ultima.
Returns:
            list: A list containing the separate syllables, where the stressed
syllable is prefixed by ' . Monosyllabic words are left unchanged,
since stress indicates relative emphasis.
Examples:
>>> Word('beren').stresser(stress_rule = "FSR")
['ber', "'en"]
>>> Word('prendre').stresser(stress_rule = "FSR")
["'pren", 'dre']
>>> Word('yisterday').stresser(stress_rule = "GSR")
['yi', 'ster', "'day"]
>>> Word('day').stresser(stress_rule = "GSR")
['day']
>>> Word('mervelus').stresser(stress_rule = "LSR")
["'mer", 'vel', 'us']
>>> Word('verbum').stresser(stress_rule = "LSR")
['ver', "'bum"]
"""
# Syllabify word
if not self.syllabified:
self.syllabify()
# Check whether word is monosyllabic
if len(self.syllabified) == 1:
return self.syllabified
if stress_rule == 'FSR':
# Check whether ultima ends in e
if self.syllabified[-1][-1] == 'e':
return self.syllabified[:-2] + ['\'{0}'.format(self.syllabified[-2])] + self.syllabified[-1:]
else:
return self.syllabified[:-1] + ['\'{0}'.format(self.syllabified[-1])]
elif stress_rule == 'GSR':
# The word striped of suffixes
st_word = affix_stemmer([self.word], strip_suf=False)
affix = self.word[:len(self.word) - len(st_word)]
# Syllabify stripped word and affix
syl_word = Word(st_word).syllabify()
# Add stress
syl_word = ['\'{0}'.format(syl_word[0])] + syl_word[1:]
if affix:
affix = Word(affix).syllabify()
syl_word = affix + syl_word
return syl_word
elif stress_rule == 'LSR':
# Check whether penult is heavy (contains more than one mora)
if sum(map(lambda x: x in SHORT_VOWELS, self.syllabified[-1])) > 1:
return self.syllabified[:-2] + ['\'{0}'.format(self.syllabified[-2])] + self.syllabified[-1:]
elif len(self.syllabified) > 2:
return self.syllabified[:-3] + ['\'{0}'.format(self.syllabified[-3])] + self.syllabified[-2:]
else:
return self.syllabified[:-1] + ['\'{0}'.format(self.syllabified[-1])] | 0.002207 |
def load_conf(yml_file, conf={}):
"""
To load the config
:param yml_file: the config file path
:param conf: dict, to override global config
:return: dict
"""
with open(yml_file) as f:
        data = yaml.safe_load(f)
if conf:
data.update(conf)
return dictdot(data) | 0.003175 |
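A usage sketch, assuming `load_conf` and its `dictdot` helper are importable and that `dictdot` behaves like a plain dict for item access; the YAML content is made up:
import tempfile
with tempfile.NamedTemporaryFile('w', suffix='.yml', delete=False) as f:
    f.write('server:\n  host: localhost\n  port: 8080\n')
    path = f.name
conf = load_conf(path, conf={'debug': True})
print(conf['server']['port'])  # 8080
print(conf['debug'])           # True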
def paths(self):
"""
Sequence of closed paths, encoded by entity index.
Returns
---------
paths: (n,) sequence of (*,) int referencing self.entities
"""
paths = traversal.closed_paths(self.entities,
self.vertices)
return paths | 0.006061 |
def update_process_work_item_type_rule(self, process_rule, process_id, wit_ref_name, rule_id):
"""UpdateProcessWorkItemTypeRule.
[Preview API] Updates a rule in the work item type of the process.
:param :class:`<UpdateProcessRuleRequest> <azure.devops.v5_0.work_item_tracking_process.models.UpdateProcessRuleRequest>` process_rule:
:param str process_id: The ID of the process
:param str wit_ref_name: The reference name of the work item type
:param str rule_id: The ID of the rule
:rtype: :class:`<ProcessRule> <azure.devops.v5_0.work_item_tracking_process.models.ProcessRule>`
"""
route_values = {}
if process_id is not None:
route_values['processId'] = self._serialize.url('process_id', process_id, 'str')
if wit_ref_name is not None:
route_values['witRefName'] = self._serialize.url('wit_ref_name', wit_ref_name, 'str')
if rule_id is not None:
route_values['ruleId'] = self._serialize.url('rule_id', rule_id, 'str')
content = self._serialize.body(process_rule, 'UpdateProcessRuleRequest')
response = self._send(http_method='PUT',
location_id='76fe3432-d825-479d-a5f6-983bbb78b4f3',
version='5.0-preview.2',
route_values=route_values,
content=content)
return self._deserialize('ProcessRule', response) | 0.006757 |