code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def run_and_measure_payload(quil_program, qubits, trials, random_seed):
"""REST payload for :py:func:`ForestConnection._run_and_measure`"""
if not quil_program:
raise ValueError("You have attempted to run an empty program."
" Please provide gates or measure instructions to your program.")
if not isinstance(quil_program, Program):
raise TypeError("quil_program must be a Quil program object")
qubits = validate_qubit_list(qubits)
if not isinstance(trials, integer_types):
raise TypeError("trials must be an integer")
payload = {"type": TYPE_MULTISHOT_MEASURE,
"qubits": list(qubits),
"trials": trials,
"compiled-quil": quil_program.out()}
if random_seed is not None:
payload['rng-seed'] = random_seed
return payload | REST payload for :py:func:`ForestConnection._run_and_measure` | Below is the instruction that describes the task:
### Input:
REST payload for :py:func:`ForestConnection._run_and_measure`
### Response:
def run_and_measure_payload(quil_program, qubits, trials, random_seed):
"""REST payload for :py:func:`ForestConnection._run_and_measure`"""
if not quil_program:
raise ValueError("You have attempted to run an empty program."
" Please provide gates or measure instructions to your program.")
if not isinstance(quil_program, Program):
raise TypeError("quil_program must be a Quil program object")
qubits = validate_qubit_list(qubits)
if not isinstance(trials, integer_types):
raise TypeError("trials must be an integer")
payload = {"type": TYPE_MULTISHOT_MEASURE,
"qubits": list(qubits),
"trials": trials,
"compiled-quil": quil_program.out()}
if random_seed is not None:
payload['rng-seed'] = random_seed
return payload |
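For reference, a minimal sketch of the dict this helper produces, assuming a two-qubit Bell-state program run for 1000 trials with a fixed seed; the `"type"` value and the Quil text are assumptions for illustration, not taken from the source above.

```python
# Illustrative only: approximate shape of the payload returned above.
example_payload = {
    "type": "multishot-measure",   # assumed value of TYPE_MULTISHOT_MEASURE
    "qubits": [0, 1],
    "trials": 1000,
    "compiled-quil": "H 0\nCNOT 0 1\n",
    "rng-seed": 42,                # only present when random_seed is not None
}
```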
def _wrap(obj, wrapper=None, methods_to_add=(), name=None, skip=(), wrap_return_values=False, wrap_filenames=(),
filename=None, wrapped_name_func=None, wrapped=None):
"""
Wrap module, class, function or another variable recursively
:param Any obj: Object to wrap recursively
:param Optional[Callable] wrapper: Wrapper to wrap functions and methods in (accepts function as argument)
:param Collection[Callable] methods_to_add: Container of functions, which accept class as argument, and return \
tuple of method name and method to add to all classes
:param Optional[str] name: Name of module to wrap to (if `obj` is module)
:param Collection[Union[str, type, Any]] skip: Items to skip wrapping (if an item of the collection is a str, wrap \
will check the obj name; if an item of the collection is a type, wrap will check the obj type; otherwise wrap will \
check the item itself)
:param bool wrap_return_values: If True, wrap return values of callables (only types supported by the wrap \
function are supported)
:param Collection[str] wrap_filenames: Files to wrap
:param Optional[str] filename: Source file of `obj`
:param Optional[Callable[Any, str]] wrapped_name_func: Function that accepts `obj` as argument and returns the \
name of wrapped `obj` that will be written into wrapped `obj`
:param Any wrapped: Object to wrap to
:return: Wrapped `obj`
"""
# noinspection PyUnresolvedReferences
class ModuleProxy(types.ModuleType, Proxy):
# noinspection PyShadowingNames
def __init__(self, name, doc=None):
super().__init__(name=name, doc=doc)
try:
# Subclassing from obj to pass isinstance(some_object, obj) checks. If defining the class fails, it means that
# `obj` was not a class; in that case ClassProxy won't be used, so we can create a dummy class.
class ClassProxy(obj, Proxy):
@staticmethod
def __new__(cls, *args, **kwargs):
# noinspection PyUnresolvedReferences
original_obj_object = cls._original_obj(*args, **kwargs)
# noinspection PyArgumentList
result = _wrap(obj=original_obj_object,
wrapper=wrapper,
methods_to_add=methods_to_add,
name=name,
skip=skip,
wrap_return_values=wrap_return_values,
wrap_filenames=wrap_filenames,
filename=filename,
wrapped_name_func=wrapped_name_func)
return result
except TypeError:
class ClassProxy(Proxy):
pass
class ObjectProxy(Proxy):
pass
# noinspection PyShadowingNames
def get_name(*names):
name = None
for obj in names:
try:
name = obj.__name__
except AttributeError:
if isinstance(obj, str):
name = obj
if name is not None:
return name
return name
# noinspection PyShadowingNames
def make_key(obj, wrapper, methods_to_add, name, skip, wrap_return_values, wrap_filenames, filename,
wrapped_name_func):
try:
obj_key = 'hash', hash(obj)
except TypeError:
obj_key = 'id', id(obj)
return obj_key + (wrapper, methods_to_add, name, skip, wrap_return_values, wrap_filenames, filename,
wrapped_name_func)
# noinspection PyShadowingNames
def wrap_(obj, name, members, wrapped=None):
def get_obj_type():
if inspect.ismodule(object=obj):
result = ObjectType.MODULE
elif inspect.isclass(object=obj):
result = ObjectType.CLASS
elif (inspect.isbuiltin(object=obj) or
inspect.isfunction(object=obj) or
inspect.ismethod(object=obj) or
inspect.ismethoddescriptor(object=obj) or
isinstance(obj, MethodWrapper)):
result = ObjectType.FUNCTION_OR_METHOD
elif inspect.iscoroutine(object=obj):
result = ObjectType.COROUTINE
else:
result = ObjectType.OBJECT
return result
def create_proxy(proxy_type):
return {
ProxyType.MODULE: ModuleProxy(name=name),
ProxyType.CLASS: ClassProxy,
ProxyType.OBJECT: ObjectProxy(),
}[proxy_type]
def add_methods():
for method_to_add in methods_to_add:
method_name, method = method_to_add(wrapped)
if method is not None:
setattr(wrapped, method_name, method)
def set_original_obj():
with suppress(AttributeError):
what = type if obj_type == ObjectType.CLASS else object
what.__setattr__(wrapped, wrapped_name_func(obj), obj)
def need_to_wrap():
return is_magic_name(name=attr_name) and attr_name not in ['__class__', '__new__']
obj_type = get_obj_type()
if wrapped is None:
if obj_type in [ObjectType.MODULE, ObjectType.CLASS]:
wrapped = create_proxy(proxy_type=ProxyType.MODULE if inspect.ismodule(obj) else ProxyType.CLASS)
elif obj_type == ObjectType.FUNCTION_OR_METHOD:
wrapped = function_or_method_wrapper()
elif obj_type == ObjectType.COROUTINE:
wrapped = coroutine_wrapper()
else:
wrapped = create_proxy(proxy_type=ProxyType.OBJECT)
key = make_key(obj=obj,
wrapper=wrapper,
methods_to_add=methods_to_add,
name=name,
skip=skip,
wrap_return_values=wrap_return_values,
wrap_filenames=wrap_filenames,
filename=filename,
wrapped_name_func=wrapped_name_func)
_wrapped_objs[key] = wrapped
set_original_obj()
if obj_type in [ObjectType.FUNCTION_OR_METHOD, ObjectType.COROUTINE]:
return wrapped
add_methods()
if obj_type == ObjectType.CLASS:
for attr_name, attr_value in members:
if need_to_wrap():
raises_exception = (isinstance(attr_value, tuple) and
len(attr_value) > 0 and
attr_value[0] == RAISES_EXCEPTION)
if raises_exception and not obj_type == ObjectType.MODULE:
def raise_exception(self):
_ = self
raise attr_value[1]
attr_value = property(raise_exception)
with suppress(AttributeError, TypeError):
# noinspection PyArgumentList
attr_value_new = _wrap(obj=attr_value,
wrapper=wrapper,
methods_to_add=methods_to_add,
name=get_name(attr_value, attr_name),
skip=skip,
wrap_return_values=wrap_return_values,
wrap_filenames=wrap_filenames,
filename=get_obj_file(obj=attr_value) or filename,
wrapped_name_func=wrapped_name_func)
with suppress(Exception):
type.__setattr__(wrapped, attr_name, attr_value_new)
if obj_type != ObjectType.CLASS:
wrapped_class_name = get_name(obj.__class__)
# noinspection PyArgumentList
wrapped_class = _wrap(obj=obj.__class__,
wrapper=wrapper,
methods_to_add=methods_to_add,
name=wrapped_class_name,
skip=skip,
wrap_return_values=wrap_return_values,
wrap_filenames=wrap_filenames,
filename=get_obj_file(obj=obj.__class__) or filename,
wrapped_name_func=wrapped_name_func,
wrapped=wrapped.__class__)
object.__setattr__(wrapped, '__class__', wrapped_class)
return wrapped
def wrap_return_values_(result):
if wrap_return_values:
# noinspection PyArgumentList
result = _wrap(obj=result,
wrapper=wrapper,
methods_to_add=methods_to_add,
name=get_name(result, 'result'),
skip=skip,
wrap_return_values=wrap_return_values,
wrap_filenames=wrap_filenames,
filename=filename,
wrapped_name_func=wrapped_name_func)
return result
# noinspection PyShadowingNames
def is_magic_name(name):
return name.startswith('__') and name.endswith('__')
# noinspection PyShadowingNames
def is_magic(obj):
return is_magic_name(name=obj.__name__)
# noinspection PyShadowingNames
def is_coroutine_function(obj, wrapper):
return inspect.iscoroutinefunction(object=wrapper(obj)) and not is_magic(obj=obj)
# noinspection PyShadowingNames
def wrap_call_and_wrap_return_values(obj, wrapper):
if is_coroutine_function(obj=obj, wrapper=wrapper):
# noinspection PyShadowingNames
@wraps(obj)
async def wrapper(*args, **kwargs):
return wrap_return_values_(result=await obj(*args, **kwargs))
else:
# noinspection PyShadowingNames
@wraps(obj)
def wrapper(*args, **kwargs):
return wrap_return_values_(result=obj(*args, **kwargs))
return wrapper
def function_or_method_wrapper():
# noinspection PyShadowingNames
@wraps(obj)
def wrapped_obj(*args, **kwargs):
return wrapper(obj)(*args, **kwargs)
@wraps(obj)
def obj_with_original_obj_as_self(*args, **kwargs):
if len(args) > 0 and isinstance(args[0], Proxy):
# noinspection PyProtectedMember
args = (object.__getattribute__(args[0], '_original_obj'), ) + args[1:]
return obj(*args, **kwargs)
if wrapper is None:
result = obj
elif is_magic(obj=obj):
if obj.__name__ == '__getattribute__':
@wraps(obj)
def result(*args, **kwargs):
# If we are trying to access magic attribute, call obj with args[0]._original_obj as self,
# else call original __getattribute__ and wrap the result before returning it.
# noinspection PyShadowingNames
name = args[1]
attr_value = obj_with_original_obj_as_self(*args, **kwargs)
if is_magic_name(name=name):
return attr_value
else:
# noinspection PyShadowingNames,PyArgumentList
return _wrap(obj=attr_value,
wrapper=wrapper,
methods_to_add=methods_to_add,
name=name,
skip=skip,
wrap_return_values=wrap_return_values,
wrap_filenames=wrap_filenames,
filename=filename,
wrapped_name_func=wrapped_name_func)
else:
result = obj_with_original_obj_as_self
elif obj.__name__ == '__getattr__':
@wraps(obj)
def result(*args, **kwargs):
return wrapper(obj(*args, **kwargs))
else:
result = wrapped_obj
if wrap_return_values:
result = wrap_call_and_wrap_return_values(obj=result, wrapper=wrapper)
return result
def coroutine_wrapper():
@wraps(obj)
async def result(*args, **kwargs):
return await wrapper(obj)(*args, **kwargs)
if wrap_return_values:
result = wrap_call_and_wrap_return_values(obj=result, wrapper=wrapper)
return result
def is_in_skip():
result = False
for s in skip:
if isinstance(s, str):
if name == s:
result = True
elif isinstance(s, type):
if isinstance(obj, s):
result = True
else:
if obj == s:
result = True
return result
# noinspection PyShadowingNames
def get_obj_file(obj):
# noinspection PyShadowingNames
def _get_obj_file(obj):
try:
result = (obj.__file__
if hasattr(obj, '__file__') else
sys.modules[obj.__module__].__file__
if hasattr(obj, '__module__') else
None)
except (AttributeError, KeyError):
result = None
return result
result = _get_obj_file(obj=obj)
if result is None:
result = _get_obj_file(obj=type(obj))
return result
def get_obj_library_files():
obj_file = get_obj_file(obj=obj)
if obj_file is not None:
obj_file = Path(obj_file)
if obj_file.name == '__init__.py':
result = obj_file.parent.glob('**/*.py')
else:
result = [obj_file]
result = [str(obj_file) for obj_file in result]
else:
result = []
result = frozenset(result)
return result
methods_to_add = frozenset(methods_to_add)
skip = frozenset(skip)
wrap_filenames = frozenset(wrap_filenames)
if wrapped_name_func is None:
# noinspection PyShadowingNames
def wrapped_name_func(obj):
_ = obj
return '_original_obj'
name = get_name(name, obj)
if name is None:
raise ValueError("name was not passed and obj.__name__ not found")
key = make_key(obj=obj,
wrapper=wrapper,
methods_to_add=methods_to_add,
name=name,
skip=skip,
wrap_return_values=wrap_return_values,
wrap_filenames=wrap_filenames,
filename=filename,
wrapped_name_func=wrapped_name_func)
wrap_filenames = wrap_filenames or get_obj_library_files()
filename = get_obj_file(obj=obj) or filename
# noinspection PyUnusedLocal
members = []
with suppress(ModuleNotFoundError):
members = getmembers(object=obj)
try:
already_wrapped = key in _wrapped_objs
except TypeError:
already_wrapped = False
if filename not in wrap_filenames or is_in_skip():
wrapped = obj
elif already_wrapped:
wrapped = _wrapped_objs[key]
elif members:
wrapped = wrap_(obj=obj, name=name, members=members, wrapped=wrapped)
else:
wrapped = obj
_wrapped_objs[key] = wrapped
return wrapped | Wrap module, class, function or another variable recursively
:param Any obj: Object to wrap recursively
:param Optional[Callable] wrapper: Wrapper to wrap functions and methods in (accepts function as argument)
:param Collection[Callable] methods_to_add: Container of functions, which accept class as argument, and return \
tuple of method name and method to add to all classes
:param Optional[str] name: Name of module to wrap to (if `obj` is module)
:param Collection[Union[str, type, Any]] skip: Items to skip wrapping (if an item of the collection is a str, wrap \
will check the obj name; if an item of the collection is a type, wrap will check the obj type; otherwise wrap will \
check the item itself)
:param bool wrap_return_values: If True, wrap return values of callables (only types supported by the wrap \
function are supported)
:param Collection[str] wrap_filenames: Files to wrap
:param Optional[str] filename: Source file of `obj`
:param Optional[Callable[Any, str]] wrapped_name_func: Function that accepts `obj` as argument and returns the \
name of wrapped `obj` that will be written into wrapped `obj`
:param Any wrapped: Object to wrap to
:return: Wrapped `obj` | Below is the instruction that describes the task:
### Input:
Wrap module, class, function or another variable recursively
:param Any obj: Object to wrap recursively
:param Optional[Callable] wrapper: Wrapper to wrap functions and methods in (accepts function as argument)
:param Collection[Callable] methods_to_add: Container of functions, which accept class as argument, and return \
tuple of method name and method to add to all classes
:param Optional[str] name: Name of module to wrap to (if `obj` is module)
:param Collection[Union[str, type, Any]] skip: Items to skip wrapping (if an item of the collection is a str, wrap \
will check the obj name; if an item of the collection is a type, wrap will check the obj type; otherwise wrap will \
check the item itself)
:param bool wrap_return_values: If True, wrap return values of callables (only types supported by the wrap \
function are supported)
:param Collection[str] wrap_filenames: Files to wrap
:param Optional[str] filename: Source file of `obj`
:param Optional[Callable[Any, str]] wrapped_name_func: Function that accepts `obj` as argument and returns the \
name of wrapped `obj` that will be written into wrapped `obj`
:param Any wrapped: Object to wrap to
:return: Wrapped `obj`
### Response:
def _wrap(obj, wrapper=None, methods_to_add=(), name=None, skip=(), wrap_return_values=False, wrap_filenames=(),
filename=None, wrapped_name_func=None, wrapped=None):
"""
Wrap module, class, function or another variable recursively
:param Any obj: Object to wrap recursively
:param Optional[Callable] wrapper: Wrapper to wrap functions and methods in (accepts function as argument)
:param Collection[Callable] methods_to_add: Container of functions, which accept class as argument, and return \
tuple of method name and method to add to all classes
:param Optional[str] name: Name of module to wrap to (if `obj` is module)
:param Collection[Union[str, type, Any]] skip: Items to skip wrapping (if an item of the collection is a str, wrap \
will check the obj name; if an item of the collection is a type, wrap will check the obj type; otherwise wrap will \
check the item itself)
:param bool wrap_return_values: If True, wrap return values of callables (only types supported by the wrap \
function are supported)
:param Collection[str] wrap_filenames: Files to wrap
:param Optional[str] filename: Source file of `obj`
:param Optional[Callable[Any, str]] wrapped_name_func: Function that accepts `obj` as argument and returns the \
name of wrapped `obj` that will be written into wrapped `obj`
:param Any wrapped: Object to wrap to
:return: Wrapped `obj`
"""
# noinspection PyUnresolvedReferences
class ModuleProxy(types.ModuleType, Proxy):
# noinspection PyShadowingNames
def __init__(self, name, doc=None):
super().__init__(name=name, doc=doc)
try:
# Subclassing from obj to pass isinstance(some_object, obj) checks. If defining the class fails, it means that
# `obj` was not a class; in that case ClassProxy won't be used, so we can create a dummy class.
class ClassProxy(obj, Proxy):
@staticmethod
def __new__(cls, *args, **kwargs):
# noinspection PyUnresolvedReferences
original_obj_object = cls._original_obj(*args, **kwargs)
# noinspection PyArgumentList
result = _wrap(obj=original_obj_object,
wrapper=wrapper,
methods_to_add=methods_to_add,
name=name,
skip=skip,
wrap_return_values=wrap_return_values,
wrap_filenames=wrap_filenames,
filename=filename,
wrapped_name_func=wrapped_name_func)
return result
except TypeError:
class ClassProxy(Proxy):
pass
class ObjectProxy(Proxy):
pass
# noinspection PyShadowingNames
def get_name(*names):
name = None
for obj in names:
try:
name = obj.__name__
except AttributeError:
if isinstance(obj, str):
name = obj
if name is not None:
return name
return name
# noinspection PyShadowingNames
def make_key(obj, wrapper, methods_to_add, name, skip, wrap_return_values, wrap_filenames, filename,
wrapped_name_func):
try:
obj_key = 'hash', hash(obj)
except TypeError:
obj_key = 'id', id(obj)
return obj_key + (wrapper, methods_to_add, name, skip, wrap_return_values, wrap_filenames, filename,
wrapped_name_func)
# noinspection PyShadowingNames
def wrap_(obj, name, members, wrapped=None):
def get_obj_type():
if inspect.ismodule(object=obj):
result = ObjectType.MODULE
elif inspect.isclass(object=obj):
result = ObjectType.CLASS
elif (inspect.isbuiltin(object=obj) or
inspect.isfunction(object=obj) or
inspect.ismethod(object=obj) or
inspect.ismethoddescriptor(object=obj) or
isinstance(obj, MethodWrapper)):
result = ObjectType.FUNCTION_OR_METHOD
elif inspect.iscoroutine(object=obj):
result = ObjectType.COROUTINE
else:
result = ObjectType.OBJECT
return result
def create_proxy(proxy_type):
return {
ProxyType.MODULE: ModuleProxy(name=name),
ProxyType.CLASS: ClassProxy,
ProxyType.OBJECT: ObjectProxy(),
}[proxy_type]
def add_methods():
for method_to_add in methods_to_add:
method_name, method = method_to_add(wrapped)
if method is not None:
setattr(wrapped, method_name, method)
def set_original_obj():
with suppress(AttributeError):
what = type if obj_type == ObjectType.CLASS else object
what.__setattr__(wrapped, wrapped_name_func(obj), obj)
def need_to_wrap():
return is_magic_name(name=attr_name) and attr_name not in ['__class__', '__new__']
obj_type = get_obj_type()
if wrapped is None:
if obj_type in [ObjectType.MODULE, ObjectType.CLASS]:
wrapped = create_proxy(proxy_type=ProxyType.MODULE if inspect.ismodule(obj) else ProxyType.CLASS)
elif obj_type == ObjectType.FUNCTION_OR_METHOD:
wrapped = function_or_method_wrapper()
elif obj_type == ObjectType.COROUTINE:
wrapped = coroutine_wrapper()
else:
wrapped = create_proxy(proxy_type=ProxyType.OBJECT)
key = make_key(obj=obj,
wrapper=wrapper,
methods_to_add=methods_to_add,
name=name,
skip=skip,
wrap_return_values=wrap_return_values,
wrap_filenames=wrap_filenames,
filename=filename,
wrapped_name_func=wrapped_name_func)
_wrapped_objs[key] = wrapped
set_original_obj()
if obj_type in [ObjectType.FUNCTION_OR_METHOD, ObjectType.COROUTINE]:
return wrapped
add_methods()
if obj_type == ObjectType.CLASS:
for attr_name, attr_value in members:
if need_to_wrap():
raises_exception = (isinstance(attr_value, tuple) and
len(attr_value) > 0 and
attr_value[0] == RAISES_EXCEPTION)
if raises_exception and not obj_type == ObjectType.MODULE:
def raise_exception(self):
_ = self
raise attr_value[1]
attr_value = property(raise_exception)
with suppress(AttributeError, TypeError):
# noinspection PyArgumentList
attr_value_new = _wrap(obj=attr_value,
wrapper=wrapper,
methods_to_add=methods_to_add,
name=get_name(attr_value, attr_name),
skip=skip,
wrap_return_values=wrap_return_values,
wrap_filenames=wrap_filenames,
filename=get_obj_file(obj=attr_value) or filename,
wrapped_name_func=wrapped_name_func)
with suppress(Exception):
type.__setattr__(wrapped, attr_name, attr_value_new)
if obj_type != ObjectType.CLASS:
wrapped_class_name = get_name(obj.__class__)
# noinspection PyArgumentList
wrapped_class = _wrap(obj=obj.__class__,
wrapper=wrapper,
methods_to_add=methods_to_add,
name=wrapped_class_name,
skip=skip,
wrap_return_values=wrap_return_values,
wrap_filenames=wrap_filenames,
filename=get_obj_file(obj=obj.__class__) or filename,
wrapped_name_func=wrapped_name_func,
wrapped=wrapped.__class__)
object.__setattr__(wrapped, '__class__', wrapped_class)
return wrapped
def wrap_return_values_(result):
if wrap_return_values:
# noinspection PyArgumentList
result = _wrap(obj=result,
wrapper=wrapper,
methods_to_add=methods_to_add,
name=get_name(result, 'result'),
skip=skip,
wrap_return_values=wrap_return_values,
wrap_filenames=wrap_filenames,
filename=filename,
wrapped_name_func=wrapped_name_func)
return result
# noinspection PyShadowingNames
def is_magic_name(name):
return name.startswith('__') and name.endswith('__')
# noinspection PyShadowingNames
def is_magic(obj):
return is_magic_name(name=obj.__name__)
# noinspection PyShadowingNames
def is_coroutine_function(obj, wrapper):
return inspect.iscoroutinefunction(object=wrapper(obj)) and not is_magic(obj=obj)
# noinspection PyShadowingNames
def wrap_call_and_wrap_return_values(obj, wrapper):
if is_coroutine_function(obj=obj, wrapper=wrapper):
# noinspection PyShadowingNames
@wraps(obj)
async def wrapper(*args, **kwargs):
return wrap_return_values_(result=await obj(*args, **kwargs))
else:
# noinspection PyShadowingNames
@wraps(obj)
def wrapper(*args, **kwargs):
return wrap_return_values_(result=obj(*args, **kwargs))
return wrapper
def function_or_method_wrapper():
# noinspection PyShadowingNames
@wraps(obj)
def wrapped_obj(*args, **kwargs):
return wrapper(obj)(*args, **kwargs)
@wraps(obj)
def obj_with_original_obj_as_self(*args, **kwargs):
if len(args) > 0 and isinstance(args[0], Proxy):
# noinspection PyProtectedMember
args = (object.__getattribute__(args[0], '_original_obj'), ) + args[1:]
return obj(*args, **kwargs)
if wrapper is None:
result = obj
elif is_magic(obj=obj):
if obj.__name__ == '__getattribute__':
@wraps(obj)
def result(*args, **kwargs):
# If we are trying to access magic attribute, call obj with args[0]._original_obj as self,
# else call original __getattribute__ and wrap the result before returning it.
# noinspection PyShadowingNames
name = args[1]
attr_value = obj_with_original_obj_as_self(*args, **kwargs)
if is_magic_name(name=name):
return attr_value
else:
# noinspection PyShadowingNames,PyArgumentList
return _wrap(obj=attr_value,
wrapper=wrapper,
methods_to_add=methods_to_add,
name=name,
skip=skip,
wrap_return_values=wrap_return_values,
wrap_filenames=wrap_filenames,
filename=filename,
wrapped_name_func=wrapped_name_func)
else:
result = obj_with_original_obj_as_self
elif obj.__name__ == '__getattr__':
@wraps(obj)
def result(*args, **kwargs):
return wrapper(obj(*args, **kwargs))
else:
result = wrapped_obj
if wrap_return_values:
result = wrap_call_and_wrap_return_values(obj=result, wrapper=wrapper)
return result
def coroutine_wrapper():
@wraps(obj)
async def result(*args, **kwargs):
return await wrapper(obj)(*args, **kwargs)
if wrap_return_values:
result = wrap_call_and_wrap_return_values(obj=result, wrapper=wrapper)
return result
def is_in_skip():
result = False
for s in skip:
if isinstance(s, str):
if name == s:
result = True
elif isinstance(s, type):
if isinstance(obj, s):
result = True
else:
if obj == s:
result = True
return result
# noinspection PyShadowingNames
def get_obj_file(obj):
# noinspection PyShadowingNames
def _get_obj_file(obj):
try:
result = (obj.__file__
if hasattr(obj, '__file__') else
sys.modules[obj.__module__].__file__
if hasattr(obj, '__module__') else
None)
except (AttributeError, KeyError):
result = None
return result
result = _get_obj_file(obj=obj)
if result is None:
result = _get_obj_file(obj=type(obj))
return result
def get_obj_library_files():
obj_file = get_obj_file(obj=obj)
if obj_file is not None:
obj_file = Path(obj_file)
if obj_file.name == '__init__.py':
result = obj_file.parent.glob('**/*.py')
else:
result = [obj_file]
result = [str(obj_file) for obj_file in result]
else:
result = []
result = frozenset(result)
return result
methods_to_add = frozenset(methods_to_add)
skip = frozenset(skip)
wrap_filenames = frozenset(wrap_filenames)
if wrapped_name_func is None:
# noinspection PyShadowingNames
def wrapped_name_func(obj):
_ = obj
return '_original_obj'
name = get_name(name, obj)
if name is None:
raise ValueError("name was not passed and obj.__name__ not found")
key = make_key(obj=obj,
wrapper=wrapper,
methods_to_add=methods_to_add,
name=name,
skip=skip,
wrap_return_values=wrap_return_values,
wrap_filenames=wrap_filenames,
filename=filename,
wrapped_name_func=wrapped_name_func)
wrap_filenames = wrap_filenames or get_obj_library_files()
filename = get_obj_file(obj=obj) or filename
# noinspection PyUnusedLocal
members = []
with suppress(ModuleNotFoundError):
members = getmembers(object=obj)
try:
already_wrapped = key in _wrapped_objs
except TypeError:
already_wrapped = False
if filename not in wrap_filenames or is_in_skip():
wrapped = obj
elif already_wrapped:
wrapped = _wrapped_objs[key]
elif members:
wrapped = wrap_(obj=obj, name=name, members=members, wrapped=wrapped)
else:
wrapped = obj
_wrapped_objs[key] = wrapped
return wrapped |
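A minimal sketch of the kind of `wrapper` callable `_wrap` expects: a function that takes a callable and returns a replacement callable. The logging behaviour and the commented-out call are illustrative assumptions; `_wrap` itself also needs the module-level helpers it references (`Proxy`, `_wrapped_objs`, `ObjectType`, ...).

```python
import functools

def logging_wrapper(func):
    """Return a stand-in for `func` that prints each call before delegating."""
    @functools.wraps(func)
    def inner(*args, **kwargs):
        print("calling {!r}".format(getattr(func, "__name__", func)))
        return func(*args, **kwargs)
    return inner

# Hypothetical use: recursively proxy the math module so its calls are logged.
# import math
# wrapped_math = _wrap(obj=math, wrapper=logging_wrapper, name="math")
# wrapped_math.sqrt(4.0)   # prints "calling 'sqrt'" and returns 2.0
```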
def touch(self, conn, key, exptime):
"""The command is used to update the expiration time of
an existing item without fetching it.
:param key: ``bytes``, is the key to update expiration time
:param exptime: ``int``, is expiration time. This replaces the existing
expiration time.
:return: ``bool``, True in case of success.
"""
assert self._validate_key(key)
_cmd = b' '.join([b'touch', key, str(exptime).encode('utf-8')])
cmd = _cmd + b'\r\n'
resp = yield from self._execute_simple_command(conn, cmd)
if resp not in (const.TOUCHED, const.NOT_FOUND):
raise ClientException('Memcached touch failed', resp)
return resp == const.TOUCHED | The command is used to update the expiration time of
an existing item without fetching it.
:param key: ``bytes``, is the key to update expiration time
:param exptime: ``int``, is expiration time. This replaces the existing
expiration time.
:return: ``bool``, True in case of success. | Below is the instruction that describes the task:
### Input:
The command is used to update the expiration time of
an existing item without fetching it.
:param key: ``bytes``, is the key to update expiration time
:param exptime: ``int``, is expiration time. This replaces the existing
expiration time.
:return: ``bool``, True in case of success.
### Response:
def touch(self, conn, key, exptime):
"""The command is used to update the expiration time of
an existing item without fetching it.
:param key: ``bytes``, is the key to update expiration time
:param exptime: ``int``, is expiration time. This replaces the existing
expiration time.
:return: ``bool``, True in case of success.
"""
assert self._validate_key(key)
_cmd = b' '.join([b'touch', key, str(exptime).encode('utf-8')])
cmd = _cmd + b'\r\n'
resp = yield from self._execute_simple_command(conn, cmd)
if resp not in (const.TOUCHED, const.NOT_FOUND):
raise ClientException('Memcached touch failed', resp)
return resp == const.TOUCHED |
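A hedged usage sketch, assuming the method is exposed on an aiomcache-style client whose connection-acquiring decorator supplies `conn`; the key, value and TTLs below are made up.

```python
import asyncio

async def refresh_session(client):
    # Store a value for 60 s, then push its expiration out to 300 s without
    # re-fetching it; touch() returns True only if the key still existed.
    await client.set(b"session:42", b"payload", exptime=60)
    return await client.touch(b"session:42", 300)

# asyncio.run(refresh_session(client))  # client construction omitted here
```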
def get_build_configuration_by_name(name):
"""
Returns the build configuration matching the name
:param name: name of build configuration
:return: The matching build configuration, or None if no match found
"""
response = utils.checked_api_call(pnc_api.build_configs, 'get_all', q='name==' + name).content
if not response:
return None
return response[0] | Returns the build configuration matching the name
:param name: name of build configuration
:return: The matching build configuration, or None if no match found | Below is the instruction that describes the task:
### Input:
Returns the build configuration matching the name
:param name: name of build configuration
:return: The matching build configuration, or None if no match found
### Response:
def get_build_configuration_by_name(name):
"""
Returns the build configuration matching the name
:param name: name of build configuration
:return: The matching build configuration, or None if no match found
"""
response = utils.checked_api_call(pnc_api.build_configs, 'get_all', q='name==' + name).content
if not response:
return None
return response[0] |
def validate(self, cipher_text, max_timedelta=None):
"""
Will decrypt the url safe base64 encoded crypted str or bytes array.
Args:
cipher_text: the encrypted text
max_timedelta: maximum timedelta in seconds
Returns:
the original message list
"""
if isinstance(cipher_text, six.string_types):
cipher_text = cipher_text.encode()
cipher_text = base64.urlsafe_b64decode(cipher_text)
decrypted = self.encryption_suite.decrypt(cipher_text).decode().split(self.separator)
message_list, dt = decrypted[:-1], decrypted[-1]
dt = int(''.join(re.findall(r'\d+', dt)))
now = int(time.time())
if max_timedelta and max_timedelta < now - dt:
raise ValueError('Expired')
return message_list, dt | Will decrypt the url safe base64 encoded crypted str or bytes array.
Args:
cipher_text: the encrypted text
max_timedelta: maximum timedelta in seconds
Returns:
the original message list | Below is the instruction that describes the task:
### Input:
Will decrypt the url safe base64 encoded crypted str or bytes array.
Args:
cipher_text: the encrypted text
max_timedelta: maximum timedelta in seconds
Returns:
the original message list
### Response:
def validate(self, cipher_text, max_timedelta=None):
"""
Will decrypt the url safe base64 encoded crypted str or bytes array.
Args:
cipher_text: the encrypted text
max_timedelta: maximum timedelta in seconds
Returns:
the original message list
"""
if isinstance(cipher_text, six.string_types):
cipher_text = cipher_text.encode()
cipher_text = base64.urlsafe_b64decode(cipher_text)
decrypted = self.encryption_suite.decrypt(cipher_text).decode().split(self.separator)
message_list, dt = decrypted[:-1], decrypted[-1]
dt = int(''.join(re.findall(r'\d+', dt)))
now = int(time.time())
if max_timedelta and max_timedelta < now - dt:
raise ValueError('Expired')
return message_list, dt |
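The expiry test at the end reduces to plain timestamp arithmetic; a standalone sketch of that rule with made-up example values:

```python
import time

dt = 1714060800                # unix timestamp recovered from the token
max_timedelta = 3600           # accept tokens at most one hour old
expired = max_timedelta < int(time.time()) - dt
# validate() raises ValueError('Expired') exactly when `expired` is True.
```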
def place(self, part: str, val: Union['ApiNode', 'ApiEndpoint']):
"""place a leaf node"""
if part.startswith(':'):
if self.param and self.param != part:
err = """Cannot place param '{}' as '{self.param_name}' exists on node already!"""
raise ParamAlreadyExist(err.format(part, self=self))
self.param = val
self.param_name = part
return val
self.paths[part] = val
return val | place a leaf node | Below is the instruction that describes the task:
### Input:
place a leaf node
### Response:
def place(self, part: str, val: Union['ApiNode', 'ApiEndpoint']):
"""place a leaf node"""
if part.startswith(':'):
if self.param and self.param != part:
err = """Cannot place param '{}' as '{self.param_name}' exists on node already!"""
raise ParamAlreadyExist(err.format(part, self=self))
self.param = val
self.param_name = part
return val
self.paths[part] = val
return val |
def dump_state(self):
"""Dump the current state of this emulated object as a dictionary.
Returns:
dict: The current state of the object that could be passed to load_state.
"""
state = {}
state['tile_states'] = {}
for address, tile in self._tiles.items():
state['tile_states'][address] = tile.dump_state()
return state | Dump the current state of this emulated object as a dictionary.
Returns:
dict: The current state of the object that could be passed to load_state. | Below is the instruction that describes the task:
### Input:
Dump the current state of this emulated object as a dictionary.
Returns:
dict: The current state of the object that could be passed to load_state.
### Response:
def dump_state(self):
"""Dump the current state of this emulated object as a dictionary.
Returns:
dict: The current state of the object that could be passed to load_state.
"""
state = {}
state['tile_states'] = {}
for address, tile in self._tiles.items():
state['tile_states'][address] = tile.dump_state()
return state |
def event_list_to_event_roll(source_event_list, event_label_list=None, time_resolution=0.01):
"""Convert event list into event roll, binary activity matrix
Parameters
----------
source_event_list : list, shape=(n,)
A list containing event dicts
event_label_list : list, shape=(k,) or None
A list containing unique labels in alphabetical order
(Default value = None)
time_resolution : float > 0
Time resolution in seconds of the event roll
(Default value = 0.01)
Returns
-------
event_roll: np.ndarray, shape=(m,k)
Event roll
"""
if isinstance(source_event_list, dcase_util.containers.MetaDataContainer):
max_offset_value = source_event_list.max_offset
if event_label_list is None:
event_label_list = source_event_list.unique_event_labels
elif isinstance(source_event_list, list):
max_offset_value = event_list.max_event_offset(source_event_list)
if event_label_list is None:
event_label_list = event_list.unique_event_labels(source_event_list)
else:
raise ValueError('Unknown source_event_list type.')
# Initialize event roll
event_roll = numpy.zeros((int(math.ceil(max_offset_value * 1 / time_resolution)), len(event_label_list)))
# Fill-in event_roll
for event in source_event_list:
pos = event_label_list.index(event['event_label'])
if 'event_onset' in event and 'event_offset' in event:
event_onset = event['event_onset']
event_offset = event['event_offset']
elif 'onset' in event and 'offset' in event:
event_onset = event['onset']
event_offset = event['offset']
onset = int(math.floor(event_onset * 1 / float(time_resolution)))
offset = int(math.ceil(event_offset * 1 / float(time_resolution)))
event_roll[onset:offset, pos] = 1
return event_roll | Convert event list into event roll, binary activity matrix
Parameters
----------
source_event_list : list, shape=(n,)
A list containing event dicts
event_label_list : list, shape=(k,) or None
A list containing unique labels in alphabetical order
(Default value = None)
time_resolution : float > 0
Time resolution in seconds of the event roll
(Default value = 0.01)
Returns
-------
event_roll: np.ndarray, shape=(m,k)
Event roll | Below is the instruction that describes the task:
### Input:
Convert event list into event roll, binary activity matrix
Parameters
----------
source_event_list : list, shape=(n,)
A list containing event dicts
event_label_list : list, shape=(k,) or None
A list containing unique labels in alphabetical order
(Default value = None)
time_resolution : float > 0
Time resolution in seconds of the event roll
(Default value = 0.01)
Returns
-------
event_roll: np.ndarray, shape=(m,k)
Event roll
### Response:
def event_list_to_event_roll(source_event_list, event_label_list=None, time_resolution=0.01):
"""Convert event list into event roll, binary activity matrix
Parameters
----------
source_event_list : list, shape=(n,)
A list containing event dicts
event_label_list : list, shape=(k,) or None
A list containing unique labels in alphabetical order
(Default value = None)
time_resolution : float > 0
Time resolution in seconds of the event roll
(Default value = 0.01)
Returns
-------
event_roll: np.ndarray, shape=(m,k)
Event roll
"""
if isinstance(source_event_list, dcase_util.containers.MetaDataContainer):
max_offset_value = source_event_list.max_offset
if event_label_list is None:
event_label_list = source_event_list.unique_event_labels
elif isinstance(source_event_list, list):
max_offset_value = event_list.max_event_offset(source_event_list)
if event_label_list is None:
event_label_list = event_list.unique_event_labels(source_event_list)
else:
raise ValueError('Unknown source_event_list type.')
# Initialize event roll
event_roll = numpy.zeros((int(math.ceil(max_offset_value * 1 / time_resolution)), len(event_label_list)))
# Fill-in event_roll
for event in source_event_list:
pos = event_label_list.index(event['event_label'])
if 'event_onset' in event and 'event_offset' in event:
event_onset = event['event_onset']
event_offset = event['event_offset']
elif 'onset' in event and 'offset' in event:
event_onset = event['onset']
event_offset = event['offset']
onset = int(math.floor(event_onset * 1 / float(time_resolution)))
offset = int(math.ceil(event_offset * 1 / float(time_resolution)))
event_roll[onset:offset, pos] = 1
return event_roll |
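A hedged usage sketch: two annotated events at 10 ms resolution give a 300 x 2 binary roll (3.0 s maximum offset divided by 0.01). The plain-dict events rely on the `list` branch shown above; the call stays commented because it needs the surrounding module's helpers.

```python
events = [
    {"event_label": "speech", "onset": 0.0, "offset": 2.5},
    {"event_label": "car", "onset": 1.0, "offset": 3.0},
]
# roll = event_list_to_event_roll(events, event_label_list=["car", "speech"],
#                                 time_resolution=0.01)
# roll.shape == (300, 2)
# roll[100:300, 0] == 1   # "car" active from 1.0 s to 3.0 s
# roll[0:250, 1] == 1     # "speech" active from 0.0 s to 2.5 s
```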
def wait(object_ids, num_returns=1, timeout=None):
"""Return a list of IDs that are ready and a list of IDs that are not.
This method is identical to `ray.wait` except it adds support for tuples
and ndarrays.
Args:
object_ids (List[ObjectID], Tuple(ObjectID), np.array(ObjectID)):
List like of object IDs for objects that may or may not be ready.
Note that these IDs must be unique.
num_returns (int): The number of object IDs that should be returned.
timeout (float): The maximum amount of time in seconds to wait before
returning.
Returns:
A list of object IDs that are ready and a list of the remaining object
IDs.
"""
if isinstance(object_ids, (tuple, np.ndarray)):
return ray.wait(
list(object_ids), num_returns=num_returns, timeout=timeout)
return ray.wait(object_ids, num_returns=num_returns, timeout=timeout) | Return a list of IDs that are ready and a list of IDs that are not.
This method is identical to `ray.wait` except it adds support for tuples
and ndarrays.
Args:
object_ids (List[ObjectID], Tuple(ObjectID), np.array(ObjectID)):
List like of object IDs for objects that may or may not be ready.
Note that these IDs must be unique.
num_returns (int): The number of object IDs that should be returned.
timeout (float): The maximum amount of time in seconds to wait before
returning.
Returns:
A list of object IDs that are ready and a list of the remaining object
IDs. | Below is the instruction that describes the task:
### Input:
Return a list of IDs that are ready and a list of IDs that are not.
This method is identical to `ray.wait` except it adds support for tuples
and ndarrays.
Args:
object_ids (List[ObjectID], Tuple(ObjectID), np.array(ObjectID)):
List like of object IDs for objects that may or may not be ready.
Note that these IDs must be unique.
num_returns (int): The number of object IDs that should be returned.
timeout (float): The maximum amount of time in seconds to wait before
returning.
Returns:
A list of object IDs that are ready and a list of the remaining object
IDs.
### Response:
def wait(object_ids, num_returns=1, timeout=None):
"""Return a list of IDs that are ready and a list of IDs that are not.
This method is identical to `ray.wait` except it adds support for tuples
and ndarrays.
Args:
object_ids (List[ObjectID], Tuple(ObjectID), np.array(ObjectID)):
List like of object IDs for objects that may or may not be ready.
Note that these IDs must be unique.
num_returns (int): The number of object IDs that should be returned.
timeout (float): The maximum amount of time in seconds to wait before
returning.
Returns:
A list of object IDs that are ready and a list of the remaining object
IDs.
"""
if isinstance(object_ids, (tuple, np.ndarray)):
return ray.wait(
list(object_ids), num_returns=num_returns, timeout=timeout)
return ray.wait(object_ids, num_returns=num_returns, timeout=timeout) |
def get_html(self):
"""Bibliographic entry in html format."""
# Author links
au_link = ('<a href="https://www.scopus.com/authid/detail.url'
'?origin=AuthorProfile&authorId={0}">{1}</a>')
if len(self.authors) > 1:
authors = u', '.join([au_link.format(a.auid, a.given_name +
' ' + a.surname)
for a in self.authors[0:-1]])
authors += (u' and ' +
au_link.format(self.authors[-1].auid,
(str(self.authors[-1].given_name) +
' ' +
str(self.authors[-1].surname))))
else:
a = self.authors[0]
authors = au_link.format(a.auid, a.given_name + ' ' + a.surname)
title = u'<a href="{}">{}</a>'.format(self.scopus_link, self.title)
if self.volume and self.issueIdentifier:
volissue = u'<b>{}({})</b>'.format(self.volume, self.issueIdentifier)
elif self.volume:
volissue = u'<b>{}</b>'.format(self.volume)
else:
volissue = 'no volume'
jlink = '<a href="https://www.scopus.com/source/sourceInfo.url'\
'?sourceId={}">{}</a>'.format(
self.source_id, self.publicationName)
pages = _parse_pages(self, unicode=True)
s = "{auth}, {title}, {jour}, {volissue}, {pages}, ({year}).".format(
auth=authors, title=title, jour=jlink, volissue=volissue,
pages=pages, year=self.coverDate[:4])
if self.doi:
s += ' <a href="https://doi.org/{0}">doi:{0}</a>.'.format(self.doi)
return s | Bibliographic entry in html format. | Below is the instruction that describes the task:
### Input:
Bibliographic entry in html format.
### Response:
def get_html(self):
"""Bibliographic entry in html format."""
# Author links
au_link = ('<a href="https://www.scopus.com/authid/detail.url'
'?origin=AuthorProfile&authorId={0}">{1}</a>')
if len(self.authors) > 1:
authors = u', '.join([au_link.format(a.auid, a.given_name +
' ' + a.surname)
for a in self.authors[0:-1]])
authors += (u' and ' +
au_link.format(self.authors[-1].auid,
(str(self.authors[-1].given_name) +
' ' +
str(self.authors[-1].surname))))
else:
a = self.authors[0]
authors = au_link.format(a.auid, a.given_name + ' ' + a.surname)
title = u'<a href="{}">{}</a>'.format(self.scopus_link, self.title)
if self.volume and self.issueIdentifier:
volissue = u'<b>{}({})</b>'.format(self.volume, self.issueIdentifier)
elif self.volume:
volissue = u'<b>{}</b>'.format(self.volume)
else:
volissue = 'no volume'
jlink = '<a href="https://www.scopus.com/source/sourceInfo.url'\
'?sourceId={}">{}</a>'.format(
self.source_id, self.publicationName)
pages = _parse_pages(self, unicode=True)
s = "{auth}, {title}, {jour}, {volissue}, {pages}, ({year}).".format(
auth=authors, title=title, jour=jlink, volissue=volissue,
pages=pages, year=self.coverDate[:4])
if self.doi:
s += ' <a href="https://doi.org/{0}">doi:{0}</a>.'.format(self.doi)
return s |
def decode_hparams(overrides=""):
"""Hyperparameters for decoding."""
hp = hparam.HParams(
save_images=False,
log_results=True,
extra_length=100,
min_length_ratio=0.0,
batch_size=0,
beam_size=4,
alpha=0.6,
eos_penalty=0.0,
block_size=0,
guess_and_check_top_k=0,
guess_and_check_epsilon=-1,
insertion_parallel=False,
return_beams=False,
write_beam_scores=False,
max_input_size=-1,
identity_output=False,
num_samples=-1, # Number of examples to decode.
delimiter="\n",
decode_to_file="", # str. Prefix for filename to write decodings to.
decode_reference="", # str. Filename to read references from.
decode_in_memory=False,
# How much decode should wait for the next checkpoint
decode_timeout_mins=240,
summaries_log_dir="decode", # Directory to write hook summaries.
shards=1, # How many shards of data to decode (treating 1 as None).
shard_id=0, # Which shard are we decoding if more than 1 above.
shards_start_offset=0, # Number of the first shard to decode.
shard_google_format=False, # If True use Google shard naming format.
num_decodes=1, # Number of times to go over the dataset.
force_decode_length=False,
display_decoded_images=False,
# Multi-problem decoding task id.
multiproblem_task_id=-1,
# Used for video decoding.
frames_per_second=10,
skip_eos_postprocess=False,
# Creates a blue/red border covering border_percent of the frame.
border_percent=2,
# Maximum number of videos displayed.
# number of videos displayed = max_display_outputs * max_display_decodes
max_display_outputs=10,
max_display_decodes=5,
# Used in computation of VGG feature based video metrics.
# Set this to be the path to a trained VGG ckpt to output
# useful metrics.
vgg_ckpt_path="",
# Used for MLPerf compliance logging.
mlperf_decode_step=0.0,
mlperf_threshold=25.0,
mlperf_success=False)
hp.parse(overrides)
return hp | Hyperparameters for decoding. | Below is the instruction that describes the task:
### Input:
Hyperparameters for decoding.
### Response:
def decode_hparams(overrides=""):
"""Hyperparameters for decoding."""
hp = hparam.HParams(
save_images=False,
log_results=True,
extra_length=100,
min_length_ratio=0.0,
batch_size=0,
beam_size=4,
alpha=0.6,
eos_penalty=0.0,
block_size=0,
guess_and_check_top_k=0,
guess_and_check_epsilon=-1,
insertion_parallel=False,
return_beams=False,
write_beam_scores=False,
max_input_size=-1,
identity_output=False,
num_samples=-1, # Number of examples to decode.
delimiter="\n",
decode_to_file="", # str. Prefix for filename to write decodings to.
decode_reference="", # str. Filename to read references from.
decode_in_memory=False,
# How much decode should wait for the next checkpoint
decode_timeout_mins=240,
summaries_log_dir="decode", # Directory to write hook summaries.
shards=1, # How many shards of data to decode (treating 1 as None).
shard_id=0, # Which shard are we decoding if more than 1 above.
shards_start_offset=0, # Number of the first shard to decode.
shard_google_format=False, # If True use Google shard naming format.
num_decodes=1, # Number of times to go over the dataset.
force_decode_length=False,
display_decoded_images=False,
# Multi-problem decoding task id.
multiproblem_task_id=-1,
# Used for video decoding.
frames_per_second=10,
skip_eos_postprocess=False,
# Creates a blue/red border covering border_percent of the frame.
border_percent=2,
# Maximum number of videos displayed.
# number of videos displayed = max_display_outputs * max_display_decodes
max_display_outputs=10,
max_display_decodes=5,
# Used in computation of VGG feature based video metrics.
# Set this to be the path to a trained VGG ckpt to output
# useful metrics.
vgg_ckpt_path="",
# Used for MLPerf compliance logging.
mlperf_decode_step=0.0,
mlperf_threshold=25.0,
mlperf_success=False)
hp.parse(overrides)
return hp |
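A hedged usage sketch: `overrides` is a comma-separated `name=value` string handed to `HParams.parse`, so individual decode settings can be tuned without touching the defaults. The call stays commented because it needs the tensor2tensor `hparam` module.

```python
# dhp = decode_hparams("beam_size=8,alpha=0.8,return_beams=True")
# dhp.beam_size      # -> 8
# dhp.alpha          # -> 0.8
# dhp.return_beams   # -> True
```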
def set_write_buffer_limits(self, high=None, low=None):
"""
Called by the client protocol to set the high- and low-water
limits for write flow control.
These two values control when call the protocol's
``pause_writing()`` and ``resume_writing()`` methods are
called.
:param high: The high-water limit. Must be a non-negative
integer greater than or equal to ``low``, if both
are specified.
:param low: The low-water limit. Must be a non-negative
integer less than or equal to ``high``, if both
are specified. If only ``high`` is specified,
defaults to an implementation-specific value less
than or equal to ``high``.
"""
# Call set_write_buffer_limits() on the transport
self._transport.set_write_buffer_limits(high=high, low=low) | Called by the client protocol to set the high- and low-water
limits for write flow control.
These two values control when call the protocol's
``pause_writing()`` and ``resume_writing()`` methods are
called.
:param high: The high-water limit. Must be a non-negative
integer greater than or equal to ``low``, if both
are specified.
:param low: The low-water limit. Must be a non-negative
integer less than or equal to ``high``, if both
are specified. If only ``high`` is specified,
defaults to an implementation-specific value less
than or equal to ``high``. | Below is the instruction that describes the task:
### Input:
Called by the client protocol to set the high- and low-water
limits for write flow control.
These two values control when call the protocol's
``pause_writing()`` and ``resume_writing()`` methods are
called.
:param high: The high-water limit. Must be a non-negative
integer greater than or equal to ``low``, if both
are specified.
:param low: The low-water limit. Must be a non-negative
integer less than or equal to ``high``, if both
are specified. If only ``high`` is specified,
defaults to an implementation-specific value less
than or equal to ``high``.
### Response:
def set_write_buffer_limits(self, high=None, low=None):
"""
Called by the client protocol to set the high- and low-water
limits for write flow control.
These two values control when call the protocol's
``pause_writing()`` and ``resume_writing()`` methods are
called.
:param high: The high-water limit. Must be a non-negative
integer greater than or equal to ``low``, if both
are specified.
:param low: The low-water limit. Must be a non-negative
integer less than or equal to ``high``, if both
are specified. If only ``high`` is specified,
defaults to an implementation-specific value less
than or equal to ``high``.
"""
# Call set_write_buffer_limits() on the transport
self._transport.set_write_buffer_limits(high=high, low=low) |
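A minimal sketch of the protocol side of this contract, using the standard asyncio API: once the transport buffers more than `high` bytes it calls `pause_writing()`, and it calls `resume_writing()` after draining below `low`. The thresholds are illustrative.

```python
import asyncio

class BackpressureAware(asyncio.Protocol):
    def connection_made(self, transport):
        self.transport = transport
        self.can_write = True
        # Pause above 64 KiB of buffered data, resume below 16 KiB (example values).
        transport.set_write_buffer_limits(high=64 * 1024, low=16 * 1024)

    def pause_writing(self):
        self.can_write = False

    def resume_writing(self):
        self.can_write = True
```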
def getdata(inputfile, argnum=None, close=False):
"""
Get data from the .dat files
args:
inputfile: file
Input File
close: bool, default=False
Closes inputfile if True
inputfile (File): Input file
close (boolean): Closes inputfile if True (default: False)
returns:
dictionary:
data: list of parsed data
variables: dictionary of errors and other additional variables
"""
# get data and converts them to list
# outputtype - list, dict, all
output = []
add_data = {}
line_num = 0
for line in inputfile:
line_num += 1
if ("#" not in line) and (line != ""):
linesplit = line.split()
if argnum is not None and len(linesplit) != int(argnum):
raise ValueError(
"Line {:d} has {:d} arguments (need {:d})".format(
line_num, len(linesplit), argnum))
output.append(linesplit)
# additional float variable
if "#f" in line:
data = line.split()[1].split("=")
add_data[data[0]] = float(data[1])
# additional list float variable
if "#l" in line:
data = line.split()[1].split("=")
add_data[data[0]] = [float(e) for e in data[1].split(",")]
if close:
inputfile.close()
output = cleandata(output)
return {
"data": np.array(output),
"variables": add_data,
} | Get data from the .dat files
args:
inputfile: file
Input File
close: bool, default=False
Closes inputfile if True
inputfile (File): Input file
close (boolean): Closes inputfile if True (default: False)
returns:
dictionary:
data: list of parsed data
variables: dictionary of errors and other additional variables | Below is the instruction that describes the task:
### Input:
Get data from the .dat files
args:
inputfile: file
Input File
close: bool, default=False
Closes inputfile if True
inputfile (File): Input file
close (boolean): Closes inputfile if True (default: False)
returns:
dictionary:
data: list of parsed data
variables: dictionary of errors and other additional variables
### Response:
def getdata(inputfile, argnum=None, close=False):
"""
Get data from the .dat files
args:
inputfile: file
Input File
close: bool, default=False
Closes inputfile if True
inputfile (File): Input file
close (boolean): Closes inputfile if True (default: False)
returns:
dictionary:
data: list of parsed data
variables: dictionary of errors and other additional variables
"""
# get data and converts them to list
# outputtype - list, dict, all
output = []
add_data = {}
line_num = 0
for line in inputfile:
line_num += 1
if ("#" not in line) and (line != ""):
linesplit = line.split()
if argnum is not None and len(linesplit) != int(argnum):
raise ValueError(
"Line {:d} has {:d} arguments (need {:d})".format(
line_num, len(linesplit), argnum))
output.append(linesplit)
# additional float variable
if "#f" in line:
data = line.split()[1].split("=")
add_data[data[0]] = float(data[1])
# additional list float variable
if "#l" in line:
data = line.split()[1].split("=")
add_data[data[0]] = [float(e) for e in data[1].split(",")]
if close:
inputfile.close()
output = cleandata(output)
return {
"data": np.array(output),
"variables": add_data,
} |
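A hedged sketch of an input this parser accepts: whitespace-separated data rows plus `#f name=value` lines for scalar variables and `#l name=v1,v2,...` for list variables. The call is left commented because `cleandata` from the same module is not shown above, and the variable names are made up.

```python
from io import StringIO

sample = StringIO(
    "#f temperature=293.15\n"
    "#l gains=1.0,2.5,4.0\n"
    "1.0 2.0 3.0\n"
    "4.0 5.0 6.0\n"
)
# parsed = getdata(sample, argnum=3)
# parsed["variables"] == {"temperature": 293.15, "gains": [1.0, 2.5, 4.0]}
# parsed["data"] holds the two three-column rows (after cleandata post-processing)
```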
def remove_update_callback(self, group, name=None, cb=None):
"""Remove the supplied callback for a group or a group.name"""
if not cb:
return
if not name:
if group in self.group_update_callbacks:
self.group_update_callbacks[group].remove_callback(cb)
else:
paramname = '{}.{}'.format(group, name)
if paramname in self.param_update_callbacks:
self.param_update_callbacks[paramname].remove_callback(cb) | Remove the supplied callback for a group or a group.name | Below is the instruction that describes the task:
### Input:
Remove the supplied callback for a group or a group.name
### Response:
def remove_update_callback(self, group, name=None, cb=None):
"""Remove the supplied callback for a group or a group.name"""
if not cb:
return
if not name:
if group in self.group_update_callbacks:
self.group_update_callbacks[group].remove_callback(cb)
else:
paramname = '{}.{}'.format(group, name)
if paramname in self.param_update_callbacks:
self.param_update_callbacks[paramname].remove_callback(cb) |
def compute_rewards(self, scores):
"""Compute the velocity of thte k+1 most recent scores.
The velocity is the average distance between scores. Return a list with those k velocities
padded out with zeros so that the count remains the same.
"""
# take the k + 1 most recent scores so we can get k velocities
recent_scores = scores[:-self.k - 2:-1]
velocities = [recent_scores[i] - recent_scores[i + 1] for i in
range(len(recent_scores) - 1)]
# pad the list out with zeros, so the length of the list is
# maintained
zeros = (len(scores) - self.k) * [0]
return velocities + zeros | Compute the velocity of the k+1 most recent scores.
The velocity is the average distance between scores. Return a list with those k velocities
padded out with zeros so that the count remains the same. | Below is the instruction that describes the task:
### Input:
Compute the velocity of the k+1 most recent scores.
The velocity is the average distance between scores. Return a list with those k velocities
padded out with zeros so that the count remains the same.
### Response:
def compute_rewards(self, scores):
"""Compute the velocity of thte k+1 most recent scores.
The velocity is the average distance between scores. Return a list with those k velocities
padded out with zeros so that the count remains the same.
"""
# take the k + 1 most recent scores so we can get k velocities
recent_scores = scores[:-self.k - 2:-1]
velocities = [recent_scores[i] - recent_scores[i + 1] for i in
range(len(recent_scores) - 1)]
# pad the list out with zeros, so the length of the list is
# maintained
zeros = (len(scores) - self.k) * [0]
return velocities + zeros |
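A worked example of the arithmetic, assuming `self.k = 3`: the four most recent scores yield three velocities, and the result is padded with zeros back to the original length.

```python
scores = [10, 12, 15, 19, 24]
k = 3
recent = scores[:-k - 2:-1]        # [24, 19, 15, 12]: the k+1 newest, newest first
velocities = [recent[i] - recent[i + 1] for i in range(len(recent) - 1)]
rewards = velocities + (len(scores) - k) * [0]
assert rewards == [5, 4, 3, 0, 0]
```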
def checkfuncname(b, frame):
"""Check whether we should break here because of `b.funcname`."""
if not b.funcname:
# Breakpoint was set via line number.
if b.line != frame.f_lineno:
# Breakpoint was set at a line with a def statement and the function
# defined is called: don't break.
return False
return True
# Breakpoint set via function name.
if frame.f_code.co_name != b.funcname:
# It's not a function call, but rather execution of def statement.
return False
# We are in the right frame.
if not b.func_first_executable_line:
# The function is entered for the 1st time.
b.func_first_executable_line = frame.f_lineno
if b.func_first_executable_line != frame.f_lineno:
# But we are not at the first line number: don't break.
return False
return True | Check whether we should break here because of `b.funcname`. | Below is the instruction that describes the task:
### Input:
Check whether we should break here because of `b.funcname`.
### Response:
def checkfuncname(b, frame):
"""Check whether we should break here because of `b.funcname`."""
if not b.funcname:
# Breakpoint was set via line number.
if b.line != frame.f_lineno:
# Breakpoint was set at a line with a def statement and the function
# defined is called: don't break.
return False
return True
# Breakpoint set via function name.
if frame.f_code.co_name != b.funcname:
# It's not a function call, but rather execution of def statement.
return False
# We are in the right frame.
if not b.func_first_executable_line:
# The function is entered for the 1st time.
b.func_first_executable_line = frame.f_lineno
if b.func_first_executable_line != frame.f_lineno:
# But we are not at the first line number: don't break.
return False
return True |
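A small runnable sketch, using a `SimpleNamespace` as a stand-in for the breakpoint object and the current frame obtained via `sys._getframe()`; it hits the first-call branch, so `checkfuncname` returns True.

```python
import sys
from types import SimpleNamespace

def probe():
    frame = sys._getframe()   # the frame executing probe() right now
    b = SimpleNamespace(funcname="probe", line=None,
                        func_first_executable_line=None)
    return checkfuncname(b, frame)

# probe() returns True: func_first_executable_line starts unset, so checkfuncname
# records the current line number and the comparison then succeeds.
```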
def asinh(x, context=None):
"""
Return the inverse hyperbolic sine of x.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_asinh,
(BigFloat._implicit_convert(x),),
context,
) | Return the inverse hyperbolic sine of x. | Below is the instruction that describes the task:
### Input:
Return the inverse hyperbolic sine of x.
### Response:
def asinh(x, context=None):
"""
Return the inverse hyperbolic sine of x.
"""
return _apply_function_in_current_context(
BigFloat,
mpfr.mpfr_asinh,
(BigFloat._implicit_convert(x),),
context,
) |
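The MPFR routine wrapped here computes the inverse hyperbolic sine; as a quick cross-check of the identity it implements, the standard-library version can be compared against the closed form (this deliberately uses `math`, not the BigFloat wrapper above).

```python
import math

# asinh(x) == ln(x + sqrt(x**2 + 1)); spot-check at x = 1 in double precision.
x = 1.0
assert abs(math.asinh(x) - math.log(x + math.sqrt(x * x + 1.0))) < 1e-12
```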
def status():
'''Display the database migrations status'''
for plugin, package, filename in available_migrations():
migration = get_migration(plugin, filename)
if migration:
status = green(migration['date'].strftime(DATE_FORMAT))
else:
status = yellow('Not applied')
        log_status(plugin, filename, status) | Display the database migrations status | Below is the instruction that describes the task:
### Input:
Display the database migrations status
### Response:
def status():
'''Display the database migrations status'''
for plugin, package, filename in available_migrations():
migration = get_migration(plugin, filename)
if migration:
status = green(migration['date'].strftime(DATE_FORMAT))
else:
status = yellow('Not applied')
log_status(plugin, filename, status) |
def constructor(self, random, args):
"""Return a candidate solution for an ant colony optimization."""
self._use_ants = True
candidate = []
while len(candidate) < len(self.weights) - 1:
# Find feasible components
feasible_components = []
if len(candidate) == 0:
feasible_components = self.components
elif len(candidate) == len(self.weights) - 1:
first = candidate[0]
last = candidate[-1]
feasible_components = [c for c in self.components if c.element[0] == last.element[1] and c.element[1] == first.element[0]]
else:
last = candidate[-1]
already_visited = [c.element[0] for c in candidate]
already_visited.extend([c.element[1] for c in candidate])
already_visited = set(already_visited)
feasible_components = [c for c in self.components if c.element[0] == last.element[1] and c.element[1] not in already_visited]
if len(feasible_components) == 0:
candidate = []
else:
# Choose a feasible component
if random.random() <= self.bias:
next_component = max(feasible_components)
else:
next_component = selectors.fitness_proportionate_selection(random, feasible_components, {'num_selected': 1})[0]
candidate.append(next_component)
    return candidate | Return a candidate solution for an ant colony optimization. | Below is the instruction that describes the task:
### Input:
Return a candidate solution for an ant colony optimization.
### Response:
def constructor(self, random, args):
"""Return a candidate solution for an ant colony optimization."""
self._use_ants = True
candidate = []
while len(candidate) < len(self.weights) - 1:
# Find feasible components
feasible_components = []
if len(candidate) == 0:
feasible_components = self.components
elif len(candidate) == len(self.weights) - 1:
first = candidate[0]
last = candidate[-1]
feasible_components = [c for c in self.components if c.element[0] == last.element[1] and c.element[1] == first.element[0]]
else:
last = candidate[-1]
already_visited = [c.element[0] for c in candidate]
already_visited.extend([c.element[1] for c in candidate])
already_visited = set(already_visited)
feasible_components = [c for c in self.components if c.element[0] == last.element[1] and c.element[1] not in already_visited]
if len(feasible_components) == 0:
candidate = []
else:
# Choose a feasible component
if random.random() <= self.bias:
next_component = max(feasible_components)
else:
next_component = selectors.fitness_proportionate_selection(random, feasible_components, {'num_selected': 1})[0]
candidate.append(next_component)
return candidate |
def constructFiniteStateMachine(inputs, outputs, states, table, initial,
richInputs, inputContext, world,
logger=LOGGER):
"""
Construct a new finite state machine from a definition of its states.
@param inputs: Definitions of all input symbols the resulting machine will
need to handle, as a L{twisted.python.constants.Names} subclass.
@param outputs: Definitions of all output symbols the resulting machine is
allowed to emit, as a L{twisted.python.constants.Names} subclass.
@param states: Definitions of all possible states the resulting machine
will be capable of inhabiting, as a L{twisted.python.constants.Names}
subclass.
@param table: The state transition table, defining which output and next
state results from the receipt of any and all inputs in any and all
states.
@type table: L{TransitionTable}
@param initial: The state the machine will start in (one of the symbols
from C{states}).
@param richInputs: A L{list} of types which correspond to each of the input
symbols from C{inputs}.
@type richInputs: L{list} of L{IRichInput} I{providers}
@param inputContext: A L{dict} mapping output symbols to L{Interface}
subclasses describing the requirements of the inputs which lead to
them.
@param world: An object responsible for turning FSM outputs into observable
side-effects.
@type world: L{IOutputExecutor} provider
@param logger: The logger to which to write messages.
@type logger: L{eliot.ILogger} or L{NoneType} if there is no logger.
@return: An L{IFiniteStateMachine} provider
"""
table = table.table
_missingExtraCheck(
set(table.keys()), set(states.iterconstants()),
ExtraTransitionState, MissingTransitionState)
_missingExtraCheck(
set(i for s in table.values() for i in s), set(inputs.iterconstants()),
ExtraTransitionInput, MissingTransitionInput)
_missingExtraCheck(
set(output for s in table.values() for transition in s.values() for output in transition.output),
set(outputs.iterconstants()),
ExtraTransitionOutput, MissingTransitionOutput)
try:
_missingExtraCheck(
set(transition.nextState for s in table.values() for transition in s.values()),
set(states.iterconstants()),
ExtraTransitionNextState, MissingTransitionNextState)
except MissingTransitionNextState as e:
if e.args != ({initial},):
raise
if initial not in states.iterconstants():
raise InvalidInitialState(initial)
extraInputContext = set(inputContext) - set(outputs.iterconstants())
if extraInputContext:
raise ExtraInputContext(extraInputContext)
_checkConsistency(richInputs, table, inputContext)
fsm = _FiniteStateMachine(inputs, outputs, states, table, initial)
executor = IOutputExecutor(world)
interpreter = _FiniteStateInterpreter(
tuple(richInputs), inputContext, fsm, executor)
if logger is not None:
interpreter = FiniteStateLogger(
interpreter, logger, executor.identifier())
return interpreter | Construct a new finite state machine from a definition of its states.
@param inputs: Definitions of all input symbols the resulting machine will
need to handle, as a L{twisted.python.constants.Names} subclass.
@param outputs: Definitions of all output symbols the resulting machine is
allowed to emit, as a L{twisted.python.constants.Names} subclass.
@param states: Definitions of all possible states the resulting machine
will be capable of inhabiting, as a L{twisted.python.constants.Names}
subclass.
@param table: The state transition table, defining which output and next
state results from the receipt of any and all inputs in any and all
states.
@type table: L{TransitionTable}
@param initial: The state the machine will start in (one of the symbols
from C{states}).
@param richInputs: A L{list} of types which correspond to each of the input
symbols from C{inputs}.
@type richInputs: L{list} of L{IRichInput} I{providers}
@param inputContext: A L{dict} mapping output symbols to L{Interface}
subclasses describing the requirements of the inputs which lead to
them.
@param world: An object responsible for turning FSM outputs into observable
side-effects.
@type world: L{IOutputExecutor} provider
@param logger: The logger to which to write messages.
@type logger: L{eliot.ILogger} or L{NoneType} if there is no logger.
    @return: An L{IFiniteStateMachine} provider | Below is the instruction that describes the task:
### Input:
Construct a new finite state machine from a definition of its states.
@param inputs: Definitions of all input symbols the resulting machine will
need to handle, as a L{twisted.python.constants.Names} subclass.
@param outputs: Definitions of all output symbols the resulting machine is
allowed to emit, as a L{twisted.python.constants.Names} subclass.
@param states: Definitions of all possible states the resulting machine
will be capable of inhabiting, as a L{twisted.python.constants.Names}
subclass.
@param table: The state transition table, defining which output and next
state results from the receipt of any and all inputs in any and all
states.
@type table: L{TransitionTable}
@param initial: The state the machine will start in (one of the symbols
from C{states}).
@param richInputs: A L{list} of types which correspond to each of the input
symbols from C{inputs}.
@type richInputs: L{list} of L{IRichInput} I{providers}
@param inputContext: A L{dict} mapping output symbols to L{Interface}
subclasses describing the requirements of the inputs which lead to
them.
@param world: An object responsible for turning FSM outputs into observable
side-effects.
@type world: L{IOutputExecutor} provider
@param logger: The logger to which to write messages.
@type logger: L{eliot.ILogger} or L{NoneType} if there is no logger.
@return: An L{IFiniteStateMachine} provider
### Response:
def constructFiniteStateMachine(inputs, outputs, states, table, initial,
richInputs, inputContext, world,
logger=LOGGER):
"""
Construct a new finite state machine from a definition of its states.
@param inputs: Definitions of all input symbols the resulting machine will
need to handle, as a L{twisted.python.constants.Names} subclass.
@param outputs: Definitions of all output symbols the resulting machine is
allowed to emit, as a L{twisted.python.constants.Names} subclass.
@param states: Definitions of all possible states the resulting machine
will be capable of inhabiting, as a L{twisted.python.constants.Names}
subclass.
@param table: The state transition table, defining which output and next
state results from the receipt of any and all inputs in any and all
states.
@type table: L{TransitionTable}
@param initial: The state the machine will start in (one of the symbols
from C{states}).
@param richInputs: A L{list} of types which correspond to each of the input
symbols from C{inputs}.
@type richInputs: L{list} of L{IRichInput} I{providers}
@param inputContext: A L{dict} mapping output symbols to L{Interface}
subclasses describing the requirements of the inputs which lead to
them.
@param world: An object responsible for turning FSM outputs into observable
side-effects.
@type world: L{IOutputExecutor} provider
@param logger: The logger to which to write messages.
@type logger: L{eliot.ILogger} or L{NoneType} if there is no logger.
@return: An L{IFiniteStateMachine} provider
"""
table = table.table
_missingExtraCheck(
set(table.keys()), set(states.iterconstants()),
ExtraTransitionState, MissingTransitionState)
_missingExtraCheck(
set(i for s in table.values() for i in s), set(inputs.iterconstants()),
ExtraTransitionInput, MissingTransitionInput)
_missingExtraCheck(
set(output for s in table.values() for transition in s.values() for output in transition.output),
set(outputs.iterconstants()),
ExtraTransitionOutput, MissingTransitionOutput)
try:
_missingExtraCheck(
set(transition.nextState for s in table.values() for transition in s.values()),
set(states.iterconstants()),
ExtraTransitionNextState, MissingTransitionNextState)
except MissingTransitionNextState as e:
if e.args != ({initial},):
raise
if initial not in states.iterconstants():
raise InvalidInitialState(initial)
extraInputContext = set(inputContext) - set(outputs.iterconstants())
if extraInputContext:
raise ExtraInputContext(extraInputContext)
_checkConsistency(richInputs, table, inputContext)
fsm = _FiniteStateMachine(inputs, outputs, states, table, initial)
executor = IOutputExecutor(world)
interpreter = _FiniteStateInterpreter(
tuple(richInputs), inputContext, fsm, executor)
if logger is not None:
interpreter = FiniteStateLogger(
interpreter, logger, executor.identifier())
return interpreter |
def cancel_order(self, order_id):
"""
Send a request to cancel an order, return the response.
Arguments:
order_id - the order id to cancel
"""
request = '/v1/order/cancel'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce(),
'order_id': order_id
}
return requests.post(url, headers=self.prepare(params)) | Send a request to cancel an order, return the response.
Arguments:
    order_id - the order id to cancel | Below is the instruction that describes the task:
### Input:
Send a request to cancel an order, return the response.
Arguments:
order_id - the order id to cancel
### Response:
def cancel_order(self, order_id):
"""
Send a request to cancel an order, return the response.
Arguments:
order_id - the order id to cancel
"""
request = '/v1/order/cancel'
url = self.base_url + request
params = {
'request': request,
'nonce': self.get_nonce(),
'order_id': order_id
}
return requests.post(url, headers=self.prepare(params)) |
def _html_escape(string):
"""HTML escape all of these " & < >"""
html_codes = {
'"': '"',
'<': '<',
'>': '>',
}
# & must be handled first
    string = string.replace('&', '&amp;')
for char in html_codes:
string = string.replace(char, html_codes[char])
    return string | HTML escape all of these " & < > | Below is the instruction that describes the task:
### Input:
HTML escape all of these " & < >
### Response:
def _html_escape(string):
"""HTML escape all of these " & < >"""
html_codes = {
'"': '"',
'<': '<',
'>': '>',
}
# & must be handled first
    string = string.replace('&', '&amp;')
for char in html_codes:
string = string.replace(char, html_codes[char])
return string |
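For illustration, a quick hypothetical call of the escaping helper above:
escaped = _html_escape('<a href="x">Tom & Jerry</a>')
print(escaped)   # &lt;a href=&quot;x&quot;&gt;Tom &amp; Jerry&lt;/a&gt;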
def list_basebackups_http(self, arg):
"""List available basebackups from a HTTP source"""
self.storage = HTTPRestore(arg.host, arg.port, arg.site)
    self.storage.show_basebackup_list(verbose=arg.verbose) | List available basebackups from a HTTP source | Below is the instruction that describes the task:
### Input:
List available basebackups from a HTTP source
### Response:
def list_basebackups_http(self, arg):
"""List available basebackups from a HTTP source"""
self.storage = HTTPRestore(arg.host, arg.port, arg.site)
self.storage.show_basebackup_list(verbose=arg.verbose) |
def from_string(date_str):
"""
construction from the following string patterns
'%Y-%m-%d'
'%d.%m.%Y'
'%m/%d/%Y'
'%Y%m%d'
:param str date_str:
:return BusinessDate:
"""
if date_str.count('-'):
str_format = '%Y-%m-%d'
elif date_str.count('.'):
str_format = '%d.%m.%Y'
elif date_str.count('/'):
str_format = '%m/%d/%Y'
elif len(date_str) == 8:
str_format = '%Y%m%d'
elif len(date_str) == 4:
year = ord(date_str[0]) * 256 + ord(date_str[1])
month = ord(date_str[2])
day = ord(date_str[3])
return BusinessDate.from_ymd(year, month, day)
else:
msg = "the date string " + date_str + " has not the right format"
raise ValueError(msg)
d = datetime.strptime(date_str, str_format)
return BusinessDate.from_ymd(d.year, d.month, d.day) | construction from the following string patterns
'%Y-%m-%d'
'%d.%m.%Y'
'%m/%d/%Y'
'%Y%m%d'
:param str date_str:
    :return BusinessDate: | Below is the instruction that describes the task:
### Input:
construction from the following string patterns
'%Y-%m-%d'
'%d.%m.%Y'
'%m/%d/%Y'
'%Y%m%d'
:param str date_str:
:return BusinessDate:
### Response:
def from_string(date_str):
"""
construction from the following string patterns
'%Y-%m-%d'
'%d.%m.%Y'
'%m/%d/%Y'
'%Y%m%d'
:param str date_str:
:return BusinessDate:
"""
if date_str.count('-'):
str_format = '%Y-%m-%d'
elif date_str.count('.'):
str_format = '%d.%m.%Y'
elif date_str.count('/'):
str_format = '%m/%d/%Y'
elif len(date_str) == 8:
str_format = '%Y%m%d'
elif len(date_str) == 4:
year = ord(date_str[0]) * 256 + ord(date_str[1])
month = ord(date_str[2])
day = ord(date_str[3])
return BusinessDate.from_ymd(year, month, day)
else:
msg = "the date string " + date_str + " has not the right format"
raise ValueError(msg)
d = datetime.strptime(date_str, str_format)
return BusinessDate.from_ymd(d.year, d.month, d.day) |
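For illustration, the parser above accepts several spellings of the same date; these hypothetical calls all return the same BusinessDate:
BusinessDate.from_string('2017-12-31')   # ISO '%Y-%m-%d'
BusinessDate.from_string('31.12.2017')   # '%d.%m.%Y'
BusinessDate.from_string('12/31/2017')   # '%m/%d/%Y'
BusinessDate.from_string('20171231')     # compact '%Y%m%d'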
def _checkpath(self, path):
""" Checks that a given path is valid. If it's not, raises NotFoundException """
if path.startswith("/") or ".." in path or path.strip() != path:
        raise NotFoundException() | Checks that a given path is valid. If it's not, raises NotFoundException | Below is the instruction that describes the task:
### Input:
Checks that a given path is valid. If it's not, raises NotFoundException
### Response:
def _checkpath(self, path):
""" Checks that a given path is valid. If it's not, raises NotFoundException """
if path.startswith("/") or ".." in path or path.strip() != path:
raise NotFoundException() |
def filter_properties(self, model, context=None):
"""
Filter simple properties
Runs filters on simple properties changing them in place.
:param model: object or dict
:param context: object, dict or None
:return: None
"""
if model is None:
return
for property_name in self.properties:
prop = self.properties[property_name]
value = self.get(model, property_name)
if value is None:
continue
filtered_value = prop.filter(
value=value,
model=model,
context=context
)
if value != filtered_value: # unless changed!
self.set(model, property_name, filtered_value) | Filter simple properties
Runs filters on simple properties changing them in place.
:param model: object or dict
:param context: object, dict or None
    :return: None | Below is the instruction that describes the task:
### Input:
Filter simple properties
Runs filters on simple properties changing them in place.
:param model: object or dict
:param context: object, dict or None
:return: None
### Response:
def filter_properties(self, model, context=None):
"""
Filter simple properties
Runs filters on simple properties changing them in place.
:param model: object or dict
:param context: object, dict or None
:return: None
"""
if model is None:
return
for property_name in self.properties:
prop = self.properties[property_name]
value = self.get(model, property_name)
if value is None:
continue
filtered_value = prop.filter(
value=value,
model=model,
context=context
)
if value != filtered_value: # unless changed!
self.set(model, property_name, filtered_value) |
def FileEntryExistsByPathSpec(self, path_spec):
"""Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): a path specification.
Returns:
bool: True if the file entry exists, false otherwise.
"""
location = getattr(path_spec, 'location', None)
if location is None:
return False
is_device = False
if platform.system() == 'Windows':
# Note that os.path.exists() returns False for Windows device files so
# instead use libsmdev to do the check.
try:
is_device = pysmdev.check_device(location)
except IOError as exception:
# Since pysmdev will raise IOError when it has no access to the device
# we check if the exception message contains ' access denied ' and
# return true.
# Note that exception.message no longer works in Python 3.
exception_string = str(exception)
if not isinstance(exception_string, py2to3.UNICODE_TYPE):
exception_string = py2to3.UNICODE_TYPE(
exception_string, errors='replace')
if ' access denied ' in exception_string:
is_device = True
# Note that os.path.exists() returns False for broken symbolic links hence
# an additional check using os.path.islink() is necessary.
return is_device or os.path.exists(location) or os.path.islink(location) | Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): a path specification.
Returns:
      bool: True if the file entry exists, false otherwise. | Below is the instruction that describes the task:
### Input:
Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): a path specification.
Returns:
bool: True if the file entry exists, false otherwise.
### Response:
def FileEntryExistsByPathSpec(self, path_spec):
"""Determines if a file entry for a path specification exists.
Args:
path_spec (PathSpec): a path specification.
Returns:
bool: True if the file entry exists, false otherwise.
"""
location = getattr(path_spec, 'location', None)
if location is None:
return False
is_device = False
if platform.system() == 'Windows':
# Note that os.path.exists() returns False for Windows device files so
# instead use libsmdev to do the check.
try:
is_device = pysmdev.check_device(location)
except IOError as exception:
# Since pysmdev will raise IOError when it has no access to the device
# we check if the exception message contains ' access denied ' and
# return true.
# Note that exception.message no longer works in Python 3.
exception_string = str(exception)
if not isinstance(exception_string, py2to3.UNICODE_TYPE):
exception_string = py2to3.UNICODE_TYPE(
exception_string, errors='replace')
if ' access denied ' in exception_string:
is_device = True
# Note that os.path.exists() returns False for broken symbolic links hence
# an additional check using os.path.islink() is necessary.
return is_device or os.path.exists(location) or os.path.islink(location) |
def arrow_from_wid(self, wid):
"""Make a real portal and its arrow from a dummy arrow.
This doesn't handle touch events. It takes a widget as its
argument: the one the user has been dragging to indicate where
they want the arrow to go. Said widget ought to be invisible.
It checks if the dummy arrow connects two real spots first,
and does nothing if it doesn't.
"""
for spot in self.board.spotlayout.children:
if spot.collide_widget(wid):
whereto = spot
break
else:
return
self.board.arrowlayout.add_widget(
self.board.make_arrow(
self.board.character.new_portal(
self.board.grabbed.place.name,
whereto.place.name,
reciprocal=self.reciprocal_portal
)
)
) | Make a real portal and its arrow from a dummy arrow.
This doesn't handle touch events. It takes a widget as its
argument: the one the user has been dragging to indicate where
they want the arrow to go. Said widget ought to be invisible.
It checks if the dummy arrow connects two real spots first,
    and does nothing if it doesn't. | Below is the instruction that describes the task:
### Input:
Make a real portal and its arrow from a dummy arrow.
This doesn't handle touch events. It takes a widget as its
argument: the one the user has been dragging to indicate where
they want the arrow to go. Said widget ought to be invisible.
It checks if the dummy arrow connects two real spots first,
and does nothing if it doesn't.
### Response:
def arrow_from_wid(self, wid):
"""Make a real portal and its arrow from a dummy arrow.
This doesn't handle touch events. It takes a widget as its
argument: the one the user has been dragging to indicate where
they want the arrow to go. Said widget ought to be invisible.
It checks if the dummy arrow connects two real spots first,
and does nothing if it doesn't.
"""
for spot in self.board.spotlayout.children:
if spot.collide_widget(wid):
whereto = spot
break
else:
return
self.board.arrowlayout.add_widget(
self.board.make_arrow(
self.board.character.new_portal(
self.board.grabbed.place.name,
whereto.place.name,
reciprocal=self.reciprocal_portal
)
)
) |
def open( self ):
"""Open the database connection."""
if self._connection is None:
        self._connection = sqlite3.connect(self._dbfile) | Open the database connection. | Below is the instruction that describes the task:
### Input:
Open the database connection.
### Response:
def open( self ):
"""Open the database connection."""
if self._connection is None:
self._connection = sqlite3.connect(self._dbfile) |
def _send(self, key, value, metric_type):
"""Send the specified value to the statsd daemon via UDP without a
direct socket connection.
:param str key: The key name to send
:param int or float value: The value for the key
"""
try:
payload = self._build_payload(key, value, metric_type)
LOGGER.debug('Sending statsd payload: %r', payload)
self._socket.sendto(payload.encode('utf-8'), self._address)
except socket.error: # pragma: nocover
LOGGER.exception('Error sending statsd metric') | Send the specified value to the statsd daemon via UDP without a
direct socket connection.
:param str key: The key name to send
    :param int or float value: The value for the key | Below is the instruction that describes the task:
### Input:
Send the specified value to the statsd daemon via UDP without a
direct socket connection.
:param str key: The key name to send
:param int or float value: The value for the key
### Response:
def _send(self, key, value, metric_type):
"""Send the specified value to the statsd daemon via UDP without a
direct socket connection.
:param str key: The key name to send
:param int or float value: The value for the key
"""
try:
payload = self._build_payload(key, value, metric_type)
LOGGER.debug('Sending statsd payload: %r', payload)
self._socket.sendto(payload.encode('utf-8'), self._address)
except socket.error: # pragma: nocover
LOGGER.exception('Error sending statsd metric') |
def get_appliances(self, start=0, count=-1, filter='', fields='', query='', sort='', view=''):
"""
Gets a list of all the Image Streamer resources based on optional sorting and filtering, and constrained
by start and count parameters.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
fields:
Specifies which fields should be returned in the result set.
query:
A general query string to narrow the list of resources returned. The default
is no query - all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
view:
Return a specific subset of the attributes of the resource or collection, by
specifying the name of a predefined view. The default view is expand - show all
attributes of the resource and all elements of collections of resources.
Returns:
list: Image Streamer resources associated with the Deployment Servers.
"""
uri = self.URI + '/image-streamer-appliances'
return self._client.get_all(start, count, filter=filter, sort=sort, query=query, fields=fields, view=view,
uri=uri) | Gets a list of all the Image Streamer resources based on optional sorting and filtering, and constrained
by start and count parameters.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
fields:
Specifies which fields should be returned in the result set.
query:
A general query string to narrow the list of resources returned. The default
is no query - all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
view:
Return a specific subset of the attributes of the resource or collection, by
specifying the name of a predefined view. The default view is expand - show all
attributes of the resource and all elements of collections of resources.
Returns:
            list: Image Streamer resources associated with the Deployment Servers. | Below is the instruction that describes the task:
### Input:
Gets a list of all the Image Streamer resources based on optional sorting and filtering, and constrained
by start and count parameters.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
fields:
Specifies which fields should be returned in the result set.
query:
A general query string to narrow the list of resources returned. The default
is no query - all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
view:
Return a specific subset of the attributes of the resource or collection, by
specifying the name of a predefined view. The default view is expand - show all
attributes of the resource and all elements of collections of resources.
Returns:
list: Image Streamer resources associated with the Deployment Servers.
### Response:
def get_appliances(self, start=0, count=-1, filter='', fields='', query='', sort='', view=''):
"""
Gets a list of all the Image Streamer resources based on optional sorting and filtering, and constrained
by start and count parameters.
Args:
start:
The first item to return, using 0-based indexing.
If not specified, the default is 0 - start with the first available item.
count:
The number of resources to return. A count of -1 requests all items.
The actual number of items in the response might differ from the requested
count if the sum of start and count exceeds the total number of items.
filter (list or str):
A general filter/query string to narrow the list of items returned. The
default is no filter; all resources are returned.
fields:
Specifies which fields should be returned in the result set.
query:
A general query string to narrow the list of resources returned. The default
is no query - all resources are returned.
sort:
The sort order of the returned data set. By default, the sort order is based
on create time with the oldest entry first.
view:
Return a specific subset of the attributes of the resource or collection, by
specifying the name of a predefined view. The default view is expand - show all
attributes of the resource and all elements of collections of resources.
Returns:
list: Image Streamer resources associated with the Deployment Servers.
"""
uri = self.URI + '/image-streamer-appliances'
return self._client.get_all(start, count, filter=filter, sort=sort, query=query, fields=fields, view=view,
uri=uri) |
def getenv(name, **kwargs):
"""
Retrieves environment variable by name and casts the value to desired type.
If desired type is list or tuple - uses separator to split the value.
"""
default_value = kwargs.pop('default', None)
desired_type = kwargs.pop('type', str)
list_separator = kwargs.pop('separator', ',')
value = os.getenv(name, None)
if value is None:
if default_value is None:
return None
else:
return default_value
if desired_type is bool:
if value.lower() in ['false', '0']:
return False
else:
return bool(value)
if desired_type is list or desired_type is tuple:
value = value.split(list_separator)
return desired_type(value)
if desired_type is dict:
return dict(literal_eval(value))
return desired_type(value) | Retrieves environment variable by name and casts the value to desired type.
    If desired type is list or tuple - uses separator to split the value. | Below is the instruction that describes the task:
### Input:
Retrieves environment variable by name and casts the value to desired type.
If desired type is list or tuple - uses separator to split the value.
### Response:
def getenv(name, **kwargs):
"""
Retrieves environment variable by name and casts the value to desired type.
If desired type is list or tuple - uses separator to split the value.
"""
default_value = kwargs.pop('default', None)
desired_type = kwargs.pop('type', str)
list_separator = kwargs.pop('separator', ',')
value = os.getenv(name, None)
if value is None:
if default_value is None:
return None
else:
return default_value
if desired_type is bool:
if value.lower() in ['false', '0']:
return False
else:
return bool(value)
if desired_type is list or desired_type is tuple:
value = value.split(list_separator)
return desired_type(value)
if desired_type is dict:
return dict(literal_eval(value))
return desired_type(value) |
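For illustration, a hypothetical call site for the helper above, showing the type casting and the default fallback:
import os
os.environ['PORT'] = '8080'
os.environ['HOSTS'] = 'a.example.com;b.example.com'
getenv('PORT', type=int)                    # -> 8080
getenv('HOSTS', type=list, separator=';')   # -> ['a.example.com', 'b.example.com']
getenv('DEBUG', type=bool, default=False)   # -> False (variable unset, default returned)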
def _normalize_django_header_name(header):
"""Unmunge header names modified by Django."""
# Remove HTTP_ prefix.
new_header = header.rpartition('HTTP_')[2]
# Camel case and replace _ with -
new_header = '-'.join(
x.capitalize() for x in new_header.split('_'))
    return new_header | Unmunge header names modified by Django. | Below is the instruction that describes the task:
### Input:
Unmunge header names modified by Django.
### Response:
def _normalize_django_header_name(header):
"""Unmunge header names modified by Django."""
# Remove HTTP_ prefix.
new_header = header.rpartition('HTTP_')[2]
# Camel case and replace _ with -
new_header = '-'.join(
x.capitalize() for x in new_header.split('_'))
return new_header |
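For illustration, two hypothetical calls of the un-munging helper above:
_normalize_django_header_name('HTTP_X_FORWARDED_FOR')   # -> 'X-Forwarded-For'
_normalize_django_header_name('HTTP_CONTENT_TYPE')      # -> 'Content-Type'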
def authenticate(self,
connection_certificate=None,
connection_info=None,
request_credentials=None):
"""
Query the configured SLUGS service with the provided credentials.
Args:
connection_certificate (cryptography.x509.Certificate): An X.509
certificate object obtained from the connection being
authenticated. Required for SLUGS authentication.
connection_info (tuple): A tuple of information pertaining to the
connection being authenticated, including the source IP address
and a timestamp (e.g., ('127.0.0.1', 1519759267.467451)).
Optional, defaults to None. Ignored for SLUGS authentication.
request_credentials (list): A list of KMIP Credential structures
containing credential information to use for authentication.
Optional, defaults to None. Ignored for SLUGS authentication.
"""
if (self.users_url is None) or (self.groups_url is None):
raise exceptions.ConfigurationError(
"The SLUGS URL must be specified."
)
user_id = utils.get_client_identity_from_certificate(
connection_certificate
)
try:
response = requests.get(self.users_url.format(user_id))
except Exception:
raise exceptions.ConfigurationError(
"A connection could not be established using the SLUGS URL."
)
if response.status_code == 404:
raise exceptions.PermissionDenied(
"Unrecognized user ID: {}".format(user_id)
)
response = requests.get(self.groups_url.format(user_id))
if response.status_code == 404:
raise exceptions.PermissionDenied(
"Group information could not be retrieved for user ID: "
"{}".format(user_id)
)
return user_id, response.json().get('groups') | Query the configured SLUGS service with the provided credentials.
Args:
connection_certificate (cryptography.x509.Certificate): An X.509
certificate object obtained from the connection being
authenticated. Required for SLUGS authentication.
connection_info (tuple): A tuple of information pertaining to the
connection being authenticated, including the source IP address
and a timestamp (e.g., ('127.0.0.1', 1519759267.467451)).
Optional, defaults to None. Ignored for SLUGS authentication.
request_credentials (list): A list of KMIP Credential structures
containing credential information to use for authentication.
                Optional, defaults to None. Ignored for SLUGS authentication. | Below is the instruction that describes the task:
### Input:
Query the configured SLUGS service with the provided credentials.
Args:
connection_certificate (cryptography.x509.Certificate): An X.509
certificate object obtained from the connection being
authenticated. Required for SLUGS authentication.
connection_info (tuple): A tuple of information pertaining to the
connection being authenticated, including the source IP address
and a timestamp (e.g., ('127.0.0.1', 1519759267.467451)).
Optional, defaults to None. Ignored for SLUGS authentication.
request_credentials (list): A list of KMIP Credential structures
containing credential information to use for authentication.
Optional, defaults to None. Ignored for SLUGS authentication.
### Response:
def authenticate(self,
connection_certificate=None,
connection_info=None,
request_credentials=None):
"""
Query the configured SLUGS service with the provided credentials.
Args:
connection_certificate (cryptography.x509.Certificate): An X.509
certificate object obtained from the connection being
authenticated. Required for SLUGS authentication.
connection_info (tuple): A tuple of information pertaining to the
connection being authenticated, including the source IP address
and a timestamp (e.g., ('127.0.0.1', 1519759267.467451)).
Optional, defaults to None. Ignored for SLUGS authentication.
request_credentials (list): A list of KMIP Credential structures
containing credential information to use for authentication.
Optional, defaults to None. Ignored for SLUGS authentication.
"""
if (self.users_url is None) or (self.groups_url is None):
raise exceptions.ConfigurationError(
"The SLUGS URL must be specified."
)
user_id = utils.get_client_identity_from_certificate(
connection_certificate
)
try:
response = requests.get(self.users_url.format(user_id))
except Exception:
raise exceptions.ConfigurationError(
"A connection could not be established using the SLUGS URL."
)
if response.status_code == 404:
raise exceptions.PermissionDenied(
"Unrecognized user ID: {}".format(user_id)
)
response = requests.get(self.groups_url.format(user_id))
if response.status_code == 404:
raise exceptions.PermissionDenied(
"Group information could not be retrieved for user ID: "
"{}".format(user_id)
)
return user_id, response.json().get('groups') |
def request(device, response_queue, payload, timeout_s=None, poll=POLL_QUEUES):
'''
Send payload to serial device and wait for response.
Parameters
----------
device : serial.Serial
Serial instance.
response_queue : Queue.Queue
Queue to wait for response on.
payload : str or bytes
Payload to send.
timeout_s : float, optional
Maximum time to wait (in seconds) for response.
By default, block until response is ready.
poll : bool, optional
If ``True``, poll response queue in a busy loop until response is
ready (or timeout occurs).
Polling is much more processor intensive, but (at least on Windows)
results in faster response processing. On Windows, polling is
enabled by default.
'''
device.write(payload)
if poll:
# Polling enabled. Wait for response in busy loop.
start = dt.datetime.now()
while not response_queue.qsize():
if (dt.datetime.now() - start).total_seconds() > timeout_s:
raise queue.Empty('No response received.')
return response_queue.get()
else:
# Polling disabled. Use blocking `Queue.get()` method to wait for
# response.
return response_queue.get(timeout=timeout_s) | Send payload to serial device and wait for response.
Parameters
----------
device : serial.Serial
Serial instance.
response_queue : Queue.Queue
Queue to wait for response on.
payload : str or bytes
Payload to send.
timeout_s : float, optional
Maximum time to wait (in seconds) for response.
By default, block until response is ready.
poll : bool, optional
If ``True``, poll response queue in a busy loop until response is
ready (or timeout occurs).
Polling is much more processor intensive, but (at least on Windows)
results in faster response processing. On Windows, polling is
        enabled by default. | Below is the instruction that describes the task:
### Input:
Send payload to serial device and wait for response.
Parameters
----------
device : serial.Serial
Serial instance.
response_queue : Queue.Queue
Queue to wait for response on.
payload : str or bytes
Payload to send.
timeout_s : float, optional
Maximum time to wait (in seconds) for response.
By default, block until response is ready.
poll : bool, optional
If ``True``, poll response queue in a busy loop until response is
ready (or timeout occurs).
Polling is much more processor intensive, but (at least on Windows)
results in faster response processing. On Windows, polling is
enabled by default.
### Response:
def request(device, response_queue, payload, timeout_s=None, poll=POLL_QUEUES):
'''
Send payload to serial device and wait for response.
Parameters
----------
device : serial.Serial
Serial instance.
response_queue : Queue.Queue
Queue to wait for response on.
payload : str or bytes
Payload to send.
timeout_s : float, optional
Maximum time to wait (in seconds) for response.
By default, block until response is ready.
poll : bool, optional
If ``True``, poll response queue in a busy loop until response is
ready (or timeout occurs).
Polling is much more processor intensive, but (at least on Windows)
results in faster response processing. On Windows, polling is
enabled by default.
'''
device.write(payload)
if poll:
# Polling enabled. Wait for response in busy loop.
start = dt.datetime.now()
while not response_queue.qsize():
if (dt.datetime.now() - start).total_seconds() > timeout_s:
raise queue.Empty('No response received.')
return response_queue.get()
else:
# Polling disabled. Use blocking `Queue.get()` method to wait for
# response.
return response_queue.get(timeout=timeout_s) |
def diffsp(self, col: str, serie: "iterable", name: str="Diff"):
"""
Add a diff column in percentage from a serie. The serie is
an iterable of the same length than the dataframe
:param col: column to diff
:type col: str
:param serie: serie to diff from
:type serie: iterable
:param name: name of the diff col, defaults to "Diff"
:param name: str, optional
:example: ``ds.diffp("Col 1", [1, 1, 4], "New col")``
"""
try:
d = []
for i, row in self.df.iterrows():
v = (row[col]*100) / serie[i]
d.append(v)
self.df[name] = d
except Exception as e:
self.err(e, self._append, "Can not diff column from serie") | Add a diff column in percentage from a serie. The serie is
an iterable of the same length than the dataframe
:param col: column to diff
:type col: str
:param serie: serie to diff from
:type serie: iterable
:param name: name of the diff col, defaults to "Diff"
:param name: str, optional
    :example: ``ds.diffp("Col 1", [1, 1, 4], "New col")`` | Below is the instruction that describes the task:
### Input:
Add a diff column in percentage from a serie. The serie is
an iterable of the same length than the dataframe
:param col: column to diff
:type col: str
:param serie: serie to diff from
:type serie: iterable
:param name: name of the diff col, defaults to "Diff"
:param name: str, optional
:example: ``ds.diffp("Col 1", [1, 1, 4], "New col")``
### Response:
def diffsp(self, col: str, serie: "iterable", name: str="Diff"):
"""
Add a diff column in percentage from a serie. The serie is
an iterable of the same length than the dataframe
:param col: column to diff
:type col: str
:param serie: serie to diff from
:type serie: iterable
:param name: name of the diff col, defaults to "Diff"
:param name: str, optional
:example: ``ds.diffp("Col 1", [1, 1, 4], "New col")``
"""
try:
d = []
for i, row in self.df.iterrows():
v = (row[col]*100) / serie[i]
d.append(v)
self.df[name] = d
except Exception as e:
self.err(e, self._append, "Can not diff column from serie") |
def summary(self, background=False, sub_sketch_keys=None):
"""
Summary statistics that can be calculated with one pass over the SArray.
Returns a turicreate.Sketch object which can be further queried for many
descriptive statistics over this SArray. Many of the statistics are
approximate. See the :class:`~turicreate.Sketch` documentation for more
detail.
Parameters
----------
background : boolean, optional
If True, the sketch construction will return immediately and the
sketch will be constructed in the background. While this is going on,
the sketch can be queried incrementally, but at a performance penalty.
Defaults to False.
sub_sketch_keys : int | str | list of int | list of str, optional
For SArray of dict type, also constructs sketches for a given set of keys,
For SArray of array type, also constructs sketches for the given indexes.
The sub sketches may be queried using: :py:func:`~turicreate.Sketch.element_sub_sketch()`.
Defaults to None in which case no subsketches will be constructed.
Returns
-------
out : Sketch
Sketch object that contains descriptive statistics for this SArray.
Many of the statistics are approximate.
"""
from ..data_structures.sketch import Sketch
if (self.dtype == _Image):
raise TypeError("summary() is not supported for arrays of image type")
if (type(background) != bool):
raise TypeError("'background' parameter has to be a boolean value")
if (sub_sketch_keys is not None):
if (self.dtype != dict and self.dtype != array.array):
raise TypeError("sub_sketch_keys is only supported for SArray of dictionary or array type")
if not _is_non_string_iterable(sub_sketch_keys):
sub_sketch_keys = [sub_sketch_keys]
value_types = set([type(i) for i in sub_sketch_keys])
if (len(value_types) != 1):
raise ValueError("sub_sketch_keys member values need to have the same type.")
value_type = value_types.pop()
if (self.dtype == dict and value_type != str):
raise TypeError("Only string value(s) can be passed to sub_sketch_keys for SArray of dictionary type. "+
"For dictionary types, sketch summary is computed by casting keys to string values.")
if (self.dtype == array.array and value_type != int):
raise TypeError("Only int value(s) can be passed to sub_sketch_keys for SArray of array type")
else:
sub_sketch_keys = list()
return Sketch(self, background, sub_sketch_keys = sub_sketch_keys) | Summary statistics that can be calculated with one pass over the SArray.
Returns a turicreate.Sketch object which can be further queried for many
descriptive statistics over this SArray. Many of the statistics are
approximate. See the :class:`~turicreate.Sketch` documentation for more
detail.
Parameters
----------
background : boolean, optional
If True, the sketch construction will return immediately and the
sketch will be constructed in the background. While this is going on,
the sketch can be queried incrementally, but at a performance penalty.
Defaults to False.
sub_sketch_keys : int | str | list of int | list of str, optional
For SArray of dict type, also constructs sketches for a given set of keys,
For SArray of array type, also constructs sketches for the given indexes.
The sub sketches may be queried using: :py:func:`~turicreate.Sketch.element_sub_sketch()`.
Defaults to None in which case no subsketches will be constructed.
Returns
-------
out : Sketch
Sketch object that contains descriptive statistics for this SArray.
        Many of the statistics are approximate. | Below is the instruction that describes the task:
### Input:
Summary statistics that can be calculated with one pass over the SArray.
Returns a turicreate.Sketch object which can be further queried for many
descriptive statistics over this SArray. Many of the statistics are
approximate. See the :class:`~turicreate.Sketch` documentation for more
detail.
Parameters
----------
background : boolean, optional
If True, the sketch construction will return immediately and the
sketch will be constructed in the background. While this is going on,
the sketch can be queried incrementally, but at a performance penalty.
Defaults to False.
sub_sketch_keys : int | str | list of int | list of str, optional
For SArray of dict type, also constructs sketches for a given set of keys,
For SArray of array type, also constructs sketches for the given indexes.
The sub sketches may be queried using: :py:func:`~turicreate.Sketch.element_sub_sketch()`.
Defaults to None in which case no subsketches will be constructed.
Returns
-------
out : Sketch
Sketch object that contains descriptive statistics for this SArray.
Many of the statistics are approximate.
### Response:
def summary(self, background=False, sub_sketch_keys=None):
"""
Summary statistics that can be calculated with one pass over the SArray.
Returns a turicreate.Sketch object which can be further queried for many
descriptive statistics over this SArray. Many of the statistics are
approximate. See the :class:`~turicreate.Sketch` documentation for more
detail.
Parameters
----------
background : boolean, optional
If True, the sketch construction will return immediately and the
sketch will be constructed in the background. While this is going on,
the sketch can be queried incrementally, but at a performance penalty.
Defaults to False.
sub_sketch_keys : int | str | list of int | list of str, optional
For SArray of dict type, also constructs sketches for a given set of keys,
For SArray of array type, also constructs sketches for the given indexes.
The sub sketches may be queried using: :py:func:`~turicreate.Sketch.element_sub_sketch()`.
Defaults to None in which case no subsketches will be constructed.
Returns
-------
out : Sketch
Sketch object that contains descriptive statistics for this SArray.
Many of the statistics are approximate.
"""
from ..data_structures.sketch import Sketch
if (self.dtype == _Image):
raise TypeError("summary() is not supported for arrays of image type")
if (type(background) != bool):
raise TypeError("'background' parameter has to be a boolean value")
if (sub_sketch_keys is not None):
if (self.dtype != dict and self.dtype != array.array):
raise TypeError("sub_sketch_keys is only supported for SArray of dictionary or array type")
if not _is_non_string_iterable(sub_sketch_keys):
sub_sketch_keys = [sub_sketch_keys]
value_types = set([type(i) for i in sub_sketch_keys])
if (len(value_types) != 1):
raise ValueError("sub_sketch_keys member values need to have the same type.")
value_type = value_types.pop()
if (self.dtype == dict and value_type != str):
raise TypeError("Only string value(s) can be passed to sub_sketch_keys for SArray of dictionary type. "+
"For dictionary types, sketch summary is computed by casting keys to string values.")
if (self.dtype == array.array and value_type != int):
raise TypeError("Only int value(s) can be passed to sub_sketch_keys for SArray of array type")
else:
sub_sketch_keys = list()
return Sketch(self, background, sub_sketch_keys = sub_sketch_keys) |
def on_first_registration(self):
"""Action to be performed on first plugin registration"""
self.main.tabify_plugins(self.main.help, self)
    self.dockwidget.hide() | Action to be performed on first plugin registration | Below is the instruction that describes the task:
### Input:
Action to be performed on first plugin registration
### Response:
def on_first_registration(self):
"""Action to be performed on first plugin registration"""
self.main.tabify_plugins(self.main.help, self)
self.dockwidget.hide() |
def find_package_data(
where='.', package='',
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False,
):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
This function is by Ian Bicking.
"""
out = {}
stack = [(convert_path(where), '', package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package, False))
else:
stack.append((fn,
prefix + name + '/',
package,
only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix + name)
return out | Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
    This function is by Ian Bicking. | Below is the instruction that describes the task:
### Input:
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
This function is by Ian Bicking.
### Response:
def find_package_data(
where='.', package='',
exclude=standard_exclude,
exclude_directories=standard_exclude_directories,
only_in_packages=True,
show_ignored=False,
):
"""
Return a dictionary suitable for use in ``package_data``
in a distutils ``setup.py`` file.
The dictionary looks like::
{'package': [files]}
Where ``files`` is a list of all the files in that package that
don't match anything in ``exclude``.
If ``only_in_packages`` is true, then top-level directories that
are not packages won't be included (but directories under packages
will).
Directories matching any pattern in ``exclude_directories`` will
be ignored; by default directories with leading ``.``, ``CVS``,
and ``_darcs`` will be ignored.
If ``show_ignored`` is true, then all the files that aren't
included in package data are shown on stderr (for debugging
purposes).
Note patterns use wildcards, or can be exact paths (including
leading ``./``), and all searching is case-insensitive.
This function is by Ian Bicking.
"""
out = {}
stack = [(convert_path(where), '', package, only_in_packages)]
while stack:
where, prefix, package, only_in_packages = stack.pop(0)
for name in os.listdir(where):
fn = os.path.join(where, name)
if os.path.isdir(fn):
bad_name = False
for pattern in exclude_directories:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"Directory %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
if os.path.isfile(os.path.join(fn, '__init__.py')):
if not package:
new_package = name
else:
new_package = package + '.' + name
stack.append((fn, '', new_package, False))
else:
stack.append((fn,
prefix + name + '/',
package,
only_in_packages))
elif package or not only_in_packages:
# is a file
bad_name = False
for pattern in exclude:
if (fnmatchcase(name, pattern)
or fn.lower() == pattern.lower()):
bad_name = True
if show_ignored:
print >> sys.stderr, (
"File %s ignored by pattern %s"
% (fn, pattern))
break
if bad_name:
continue
out.setdefault(package, []).append(prefix + name)
return out |
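For illustration, a hedged sketch of how the helper above is typically wired into a setup.py; the project name and layout are made up:
from setuptools import setup, find_packages
setup(
    name='mypkg',
    packages=find_packages(),
    package_data=find_package_data(),   # e.g. {'mypkg': ['templates/base.html', ...]}
)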
def V_total(self):
'''
Compute the input voltage (i.e., ``V1``) based on the measured
high-voltage feedback values for ``V2``, using the high-voltage
transfer function.
See also
--------
:meth:`V_actuation` for diagram with ``V1`` and ``V2`` labelled.
'''
ind = mlab.find(self.hv_resistor >= 0)
V1 = np.empty(self.hv_resistor.shape)
V1.fill(np.nan)
V1[ind] = compute_from_transfer_function(self.calibration.hw_version
.major, 'V1',
V2=self.V_hv[ind], R1=10e6,
R2=self.calibration.R_hv
[self.hv_resistor[ind]],
C2=self.calibration.C_hv
[self.hv_resistor[ind]],
f=self.frequency)
# convert to masked array
V1 = np.ma.masked_invalid(pd.Series(V1, pd.to_datetime(self.time, unit='s')
).interpolate(method='time').values)
V1.fill_value = np.nan
V1.data[V1.mask] = V1.fill_value
return V1 | Compute the input voltage (i.e., ``V1``) based on the measured
high-voltage feedback values for ``V2``, using the high-voltage
transfer function.
See also
--------
    :meth:`V_actuation` for diagram with ``V1`` and ``V2`` labelled. | Below is the instruction that describes the task:
### Input:
Compute the input voltage (i.e., ``V1``) based on the measured
high-voltage feedback values for ``V2``, using the high-voltage
transfer function.
See also
--------
:meth:`V_actuation` for diagram with ``V1`` and ``V2`` labelled.
### Response:
def V_total(self):
'''
Compute the input voltage (i.e., ``V1``) based on the measured
high-voltage feedback values for ``V2``, using the high-voltage
transfer function.
See also
--------
:meth:`V_actuation` for diagram with ``V1`` and ``V2`` labelled.
'''
ind = mlab.find(self.hv_resistor >= 0)
V1 = np.empty(self.hv_resistor.shape)
V1.fill(np.nan)
V1[ind] = compute_from_transfer_function(self.calibration.hw_version
.major, 'V1',
V2=self.V_hv[ind], R1=10e6,
R2=self.calibration.R_hv
[self.hv_resistor[ind]],
C2=self.calibration.C_hv
[self.hv_resistor[ind]],
f=self.frequency)
# convert to masked array
V1 = np.ma.masked_invalid(pd.Series(V1, pd.to_datetime(self.time, unit='s')
).interpolate(method='time').values)
V1.fill_value = np.nan
V1.data[V1.mask] = V1.fill_value
return V1 |
def unpack_utf8(self):
"""Decode a utf-8 string encoded as described in MQTT Version
3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length
followed by a utf-8 encoded string.
Raises
------
UnderflowDecodeError
Raised when a read failed to extract enough bytes from the
underlying stream to decode the string.
DecodeError
When any code point in the utf-8 string is invalid.
Returns
-------
int
Number of bytes consumed.
str
A string utf-8 decoded from the underlying stream.
"""
num_bytes_consumed, s = decode_utf8(self.__f)
self.__num_bytes_consumed += num_bytes_consumed
return num_bytes_consumed, s | Decode a utf-8 string encoded as described in MQTT Version
3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length
followed by a utf-8 encoded string.
Raises
------
UnderflowDecodeError
Raised when a read failed to extract enough bytes from the
underlying stream to decode the string.
DecodeError
When any code point in the utf-8 string is invalid.
Returns
-------
int
Number of bytes consumed.
str
A string utf-8 decoded from the underlying stream. | Below is the instruction that describes the task:
### Input:
Decode a utf-8 string encoded as described in MQTT Version
3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length
followed by a utf-8 encoded string.
Raises
------
UnderflowDecodeError
Raised when a read failed to extract enough bytes from the
underlying stream to decode the string.
DecodeError
When any code point in the utf-8 string is invalid.
Returns
-------
int
Number of bytes consumed.
str
A string utf-8 decoded from the underlying stream.
### Response:
def unpack_utf8(self):
"""Decode a utf-8 string encoded as described in MQTT Version
3.1.1 section 1.5.3 line 177. This is a 16-bit unsigned length
followed by a utf-8 encoded string.
Raises
------
UnderflowDecodeError
Raised when a read failed to extract enough bytes from the
underlying stream to decode the string.
DecodeError
When any code point in the utf-8 string is invalid.
Returns
-------
int
Number of bytes consumed.
str
A string utf-8 decoded from the underlying stream.
"""
num_bytes_consumed, s = decode_utf8(self.__f)
self.__num_bytes_consumed += num_bytes_consumed
return num_bytes_consumed, s |
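The wire format being read is a 16-bit big-endian length prefix followed by that many utf-8 bytes. A self-contained sketch of a decoder with that behaviour (the ``decode_utf8`` helper in the original is assumed to work along these lines), run against an in-memory buffer:

import io
import struct

def decode_utf8_sketch(f):
    # Read the two-byte, big-endian length prefix.
    prefix = f.read(2)
    if len(prefix) < 2:
        raise ValueError('underflow while reading length prefix')
    (length,) = struct.unpack('>H', prefix)
    # Read exactly `length` bytes and decode them as utf-8.
    payload = f.read(length)
    if len(payload) < length:
        raise ValueError('underflow while reading string body')
    return 2 + length, payload.decode('utf-8')

buf = io.BytesIO(b'\x00\x05hello')
print(decode_utf8_sketch(buf))  # (7, 'hello')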
def parse_options(cls, options):
"""Parse plug-in specific options."""
cls.known_modules = {
project2module(k): v.split(",")
for k, v in [
x.split(":[")
for x in re.split(r"],?", options.known_modules)[:-1]
]
} | Parse plug-in specific options. | Below is the instruction that describes the task:
### Input:
Parse plug-in specific options.
### Response:
def parse_options(cls, options):
"""Parse plug-in specific options."""
cls.known_modules = {
project2module(k): v.split(",")
for k, v in [
x.split(":[")
for x in re.split(r"],?", options.known_modules)[:-1]
]
} |
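A worked example of the option string this comprehension expects, with ``project2module`` stubbed as a simple name normaliser (an assumption; the real helper may differ):

import re

def project2module(name):
    # Stand-in: assume project names map to module names by replacing dashes.
    return name.replace('-', '_')

known_modules_option = 'flake8-polyfill:[flake8.polyfill],pytest-cov:[pytest_cov,pytest_cov.plugin],'
known_modules = {
    project2module(k): v.split(',')
    for k, v in [
        x.split(':[')
        for x in re.split(r'],?', known_modules_option)[:-1]
    ]
}
print(known_modules)
# {'flake8_polyfill': ['flake8.polyfill'],
#  'pytest_cov': ['pytest_cov', 'pytest_cov.plugin']}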
def _prepare_data_dir(self, data):
"""Prepare destination directory where the data will live.
:param data: The :class:`~resolwe.flow.models.Data` object for
which to prepare the private execution directory.
:return: The prepared data directory path.
:rtype: str
"""
logger.debug(__("Preparing data directory for Data with id {}.", data.id))
with transaction.atomic():
# Create a temporary random location and then override it with data
# location id since object has to be created first.
# TODO Find a better solution, e.g. defer the database constraint.
temporary_location_string = uuid.uuid4().hex[:10]
data_location = DataLocation.objects.create(subpath=temporary_location_string)
data_location.subpath = str(data_location.id)
data_location.save()
data_location.data.add(data)
output_path = self._get_per_data_dir('DATA_DIR', data_location.subpath)
dir_mode = self.settings_actual.get('FLOW_EXECUTOR', {}).get('DATA_DIR_MODE', 0o755)
os.mkdir(output_path, mode=dir_mode)
# os.mkdir is not guaranteed to set the given mode
os.chmod(output_path, dir_mode)
return output_path | Prepare destination directory where the data will live.
:param data: The :class:`~resolwe.flow.models.Data` object for
which to prepare the private execution directory.
:return: The prepared data directory path.
:rtype: str | Below is the instruction that describes the task:
### Input:
Prepare destination directory where the data will live.
:param data: The :class:`~resolwe.flow.models.Data` object for
which to prepare the private execution directory.
:return: The prepared data directory path.
:rtype: str
### Response:
def _prepare_data_dir(self, data):
"""Prepare destination directory where the data will live.
:param data: The :class:`~resolwe.flow.models.Data` object for
which to prepare the private execution directory.
:return: The prepared data directory path.
:rtype: str
"""
logger.debug(__("Preparing data directory for Data with id {}.", data.id))
with transaction.atomic():
# Create a temporary random location and then override it with data
# location id since object has to be created first.
# TODO Find a better solution, e.g. defer the database constraint.
temporary_location_string = uuid.uuid4().hex[:10]
data_location = DataLocation.objects.create(subpath=temporary_location_string)
data_location.subpath = str(data_location.id)
data_location.save()
data_location.data.add(data)
output_path = self._get_per_data_dir('DATA_DIR', data_location.subpath)
dir_mode = self.settings_actual.get('FLOW_EXECUTOR', {}).get('DATA_DIR_MODE', 0o755)
os.mkdir(output_path, mode=dir_mode)
# os.mkdir is not guaranteed to set the given mode
os.chmod(output_path, dir_mode)
return output_path |
def feed(self, schemata: list) -> None:
"""
Take schemata from incoming list representation as schemata() returns, unless
cache already has schema for an incoming schema sequence number.
:param schemata: list of schema objects
"""
LOGGER.debug('SchemaCache.feed >>> schemata: %s', schemata)
for schema in schemata:
seq_no = schema['seqNo']
if self.contains(seq_no):
LOGGER.warning('Schema cache already has schema at seq no %s: skipping', seq_no)
else:
self[seq_no] = schema
LOGGER.info('Schema cache imported schema on id %s at seq no %s', schema['id'], seq_no)
LOGGER.debug('SchemaCache.feed <<<') | Take schemata from incoming list representation as schemata() returns, unless
cache already has schema for an incoming schema sequence number.
:param schemata: list of schema objects | Below is the instruction that describes the task:
### Input:
Take schemata from incoming list representation as schemata() returns, unless
cache already has schema for an incoming schema sequence number.
:param schemata: list of schema objects
### Response:
def feed(self, schemata: list) -> None:
"""
Take schemata from incoming list representation as schemata() returns, unless
cache already has schema for an incoming schema sequence number.
:param schemata: list of schema objects
"""
LOGGER.debug('SchemaCache.feed >>> schemata: %s', schemata)
for schema in schemata:
seq_no = schema['seqNo']
if self.contains(seq_no):
LOGGER.warning('Schema cache already has schema at seq no %s: skipping', seq_no)
else:
self[seq_no] = schema
LOGGER.info('Schema cache imported schema on id %s at seq no %s', schema['id'], seq_no)
LOGGER.debug('SchemaCache.feed <<<') |
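Usage sketch with illustrative ledger records; only the ``seqNo`` and ``id`` fields are read by the method, and ``cache`` stands for an existing instance of this class:

schemata = [
    {'seqNo': 15, 'id': 'V4SGRU86Z58d6TV7PBUe6f:2:drivers-licence:1.0'},
    {'seqNo': 22, 'id': 'V4SGRU86Z58d6TV7PBUe6f:2:passport:1.1'},
]
cache.feed(schemata)  # stores both records, keyed by sequence number
cache.feed(schemata)  # a second call only logs warnings and skips the duplicates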
def update(self, table, columns, values, where):
"""
Update the values of a particular row where a value is met.
:param table: table name
:param columns: column(s) to update
:param values: updated values
:param where: tuple, (where_column, where_value)
"""
# Unpack WHERE clause dictionary into tuple
where_col, where_val = where
# Create column string from list of values
cols = get_col_val_str(columns, query_type='update')
# Concatenate statement
statement = "UPDATE {0} SET {1} WHERE {2}='{3}'".format(wrap(table), cols, where_col, where_val)
# Execute statement
self._cursor.execute(statement, values)
self._printer('\tMySQL cols (' + str(len(values)) + ') successfully UPDATED') | Update the values of a particular row where a value is met.
:param table: table name
:param columns: column(s) to update
:param values: updated values
:param where: tuple, (where_column, where_value) | Below is the instruction that describes the task:
### Input:
Update the values of a particular row where a value is met.
:param table: table name
:param columns: column(s) to update
:param values: updated values
:param where: tuple, (where_column, where_value)
### Response:
def update(self, table, columns, values, where):
"""
Update the values of a particular row where a value is met.
:param table: table name
:param columns: column(s) to update
:param values: updated values
:param where: tuple, (where_column, where_value)
"""
# Unpack WHERE clause dictionary into tuple
where_col, where_val = where
# Create column string from list of values
cols = get_col_val_str(columns, query_type='update')
# Concatenate statement
statement = "UPDATE {0} SET {1} WHERE {2}='{3}'".format(wrap(table), cols, where_col, where_val)
# Execute statement
self._cursor.execute(statement, values)
self._printer('\tMySQL cols (' + str(len(values)) + ') successfully UPDATED') |
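A call sketch, assuming ``db`` is a connected instance of the wrapping class and a ``users`` table exists; table and column names are illustrative, and the exact quoting depends on the ``wrap`` and ``get_col_val_str`` helpers:

db.update(
    table='users',
    columns=['name', 'email'],
    values=('Alice', 'alice@example.com'),
    where=('id', 42),
)
# Roughly executes: UPDATE `users` SET name=%s, email=%s WHERE id='42'
# with ('Alice', 'alice@example.com') bound by the cursor.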
def _cnv_prioritize(data):
"""Perform confidence interval based prioritization for CNVs.
"""
supported = {"cnvkit": {"inputs": ["call_file", "segmetrics"], "fn": _cnvkit_prioritize}}
pcall = None
priority_files = None
for call in data.get("sv", []):
if call["variantcaller"] in supported:
priority_files = [call.get(x) for x in supported[call["variantcaller"]]["inputs"]]
priority_files = [x for x in priority_files if x is not None and utils.file_exists(x)]
if len(priority_files) == len(supported[call["variantcaller"]]["inputs"]):
pcall = call
break
prioritize_by = tz.get_in(["config", "algorithm", "svprioritize"], data)
if pcall and prioritize_by:
out_file = "%s-prioritize.tsv" % utils.splitext_plus(priority_files[0])[0]
gene_list = _find_gene_list_from_bed(prioritize_by, out_file, data)
if gene_list:
with open(gene_list) as in_handle:
genes = [x.strip() for x in in_handle]
args = [dd.get_sample_name(data), genes] + priority_files
df = supported[pcall["variantcaller"]]["fn"](*args)
with file_transaction(data, out_file) as tx_out_file:
df.to_csv(tx_out_file, sep="\t", index=False)
pcall["priority"] = out_file
return data | Perform confidence interval based prioritization for CNVs. | Below is the instruction that describes the task:
### Input:
Perform confidence interval based prioritization for CNVs.
### Response:
def _cnv_prioritize(data):
"""Perform confidence interval based prioritization for CNVs.
"""
supported = {"cnvkit": {"inputs": ["call_file", "segmetrics"], "fn": _cnvkit_prioritize}}
pcall = None
priority_files = None
for call in data.get("sv", []):
if call["variantcaller"] in supported:
priority_files = [call.get(x) for x in supported[call["variantcaller"]]["inputs"]]
priority_files = [x for x in priority_files if x is not None and utils.file_exists(x)]
if len(priority_files) == len(supported[call["variantcaller"]]["inputs"]):
pcall = call
break
prioritize_by = tz.get_in(["config", "algorithm", "svprioritize"], data)
if pcall and prioritize_by:
out_file = "%s-prioritize.tsv" % utils.splitext_plus(priority_files[0])[0]
gene_list = _find_gene_list_from_bed(prioritize_by, out_file, data)
if gene_list:
with open(gene_list) as in_handle:
genes = [x.strip() for x in in_handle]
args = [dd.get_sample_name(data), genes] + priority_files
df = supported[pcall["variantcaller"]]["fn"](*args)
with file_transaction(data, out_file) as tx_out_file:
df.to_csv(tx_out_file, sep="\t", index=False)
pcall["priority"] = out_file
return data |
def dot(matrix, vector, matrix_ty, vector_ty):
"""
Computes the dot product between a matrix and a vector.
Args:
matrix (WeldObject / Numpy.ndarray): 2-d input matrix
vector (WeldObject / Numpy.ndarray): 1-d input vector
ty (WeldType): Type of each element in the input matrix and vector
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
matrix_var = weld_obj.update(matrix)
if isinstance(matrix, WeldObject):
matrix_var = matrix.obj_id
weld_obj.dependencies[matrix_var] = matrix
vector_var = weld_obj.update(vector)
loopsize_annotation = ""
if isinstance(vector, WeldObject):
vector_var = vector.obj_id
weld_obj.dependencies[vector_var] = vector
if isinstance(vector, np.ndarray):
loopsize_annotation = "@(loopsize: %dL)" % len(vector)
weld_template = """
map(
%(matrix)s,
|row: vec[%(matrix_ty)s]|
result(
%(loopsize_annotation)s
for(
result(
%(loopsize_annotation)s
for(
zip(row, %(vector)s),
appender,
|b2, i2, e2: {%(matrix_ty)s, %(vector_ty)s}|
merge(b2, f64(e2.$0 * %(matrix_ty)s(e2.$1)))
)
),
merger[f64,+],
|b, i, e| merge(b, e)
)
)
)
"""
weld_obj.weld_code = weld_template % {"matrix": matrix_var,
"vector": vector_var,
"matrix_ty": matrix_ty,
"vector_ty": vector_ty,
"loopsize_annotation": loopsize_annotation}
return weld_obj | Computes the dot product between a matrix and a vector.
Args:
matrix (WeldObject / Numpy.ndarray): 2-d input matrix
vector (WeldObject / Numpy.ndarray): 1-d input vector
ty (WeldType): Type of each element in the input matrix and vector
Returns:
A WeldObject representing this computation | Below is the instruction that describes the task:
### Input:
Computes the dot product between a matrix and a vector.
Args:
matrix (WeldObject / Numpy.ndarray): 2-d input matrix
vector (WeldObject / Numpy.ndarray): 1-d input vector
ty (WeldType): Type of each element in the input matrix and vector
Returns:
A WeldObject representing this computation
### Response:
def dot(matrix, vector, matrix_ty, vector_ty):
"""
Computes the dot product between a matrix and a vector.
Args:
matrix (WeldObject / Numpy.ndarray): 2-d input matrix
vector (WeldObject / Numpy.ndarray): 1-d input vector
ty (WeldType): Type of each element in the input matrix and vector
Returns:
A WeldObject representing this computation
"""
weld_obj = WeldObject(encoder_, decoder_)
matrix_var = weld_obj.update(matrix)
if isinstance(matrix, WeldObject):
matrix_var = matrix.obj_id
weld_obj.dependencies[matrix_var] = matrix
vector_var = weld_obj.update(vector)
loopsize_annotation = ""
if isinstance(vector, WeldObject):
vector_var = vector.obj_id
weld_obj.dependencies[vector_var] = vector
if isinstance(vector, np.ndarray):
loopsize_annotation = "@(loopsize: %dL)" % len(vector)
weld_template = """
map(
%(matrix)s,
|row: vec[%(matrix_ty)s]|
result(
%(loopsize_annotation)s
for(
result(
%(loopsize_annotation)s
for(
zip(row, %(vector)s),
appender,
|b2, i2, e2: {%(matrix_ty)s, %(vector_ty)s}|
merge(b2, f64(e2.$0 * %(matrix_ty)s(e2.$1)))
)
),
merger[f64,+],
|b, i, e| merge(b, e)
)
)
)
"""
weld_obj.weld_code = weld_template % {"matrix": matrix_var,
"vector": vector_var,
"matrix_ty": matrix_ty,
"vector_ty": vector_ty,
"loopsize_annotation": loopsize_annotation}
return weld_obj |
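The Weld template above produces one ``f64`` per row: the sum of element-wise products of that row with the vector. A NumPy sketch of the same arithmetic, useful as a reference when validating results:

import numpy as np

matrix = np.array([[1.0, 2.0], [3.0, 4.0]])
vector = np.array([10.0, 100.0])

# One merger[f64,+] per row, i.e. an ordinary matrix-vector product.
reference = np.array([np.sum(row * vector) for row in matrix])
print(reference)           # [210. 430.]
print(matrix.dot(vector))  # same values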
def pin_post(self, post):
"""Pin post
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:returns: True if it is successful. False otherwise
"""
try:
cid = post['id']
except KeyError:
cid = post
params = {
"cid": cid,
}
return self._rpc.content_pin(params) | Pin post
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:returns: True if it is successful. False otherwise | Below is the instruction that describes the task:
### Input:
Pin post
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:returns: True if it is successful. False otherwise
### Response:
def pin_post(self, post):
"""Pin post
:type post: dict|str|int
:param post: Either the post dict returned by another API method, or
the `cid` field of that post.
:returns: True if it is successful. False otherwise
"""
try:
cid = post['id']
except KeyError:
cid = post
params = {
"cid": cid,
}
return self._rpc.content_pin(params) |
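Usage sketch, assuming ``network`` is an authenticated client object exposing this method; note that when a dict is passed, the method reads its ``id`` key:

post = {'id': 'hzb7gg2rq4k'}          # illustrative cid
ok = network.pin_post(post)           # pass the post dict ...
ok = network.pin_post('hzb7gg2rq4k')  # ... or just its cid
print(ok)  # True on success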
def _get(auth, path, fmt, autobox=True, params=None):
'''
Issue a GET request to the XNAT REST API and box the response content.
Example:
>>> import yaxil
>>> from yaxil import Format
>>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
>>> yaxil.get(auth, '/data/experiments', Format.JSON)
:param auth: XNAT authentication
:type auth: :mod:`yaxil.XnatAuth`
:param path: API URL path
:type path: str
:param fmt: API result format
:type fmt: :mod:`yaxil.Format`
:param autobox: Autobox response content into an appropriate reader or other data structure
:type autobox: bool
:param params: Additional query parameters
:type params: dict
:returns: Tuple of (URL, :mod:`dict` | :mod:`xml.etree.ElementTree` | :mod:`csv.reader` | :mod:`str`)
:rtype: tuple
'''
if not params:
params = {}
url = "%s/%s" % (auth.url.rstrip('/'), path.lstrip('/'))
params["format"] = fmt
logger.debug("issuing http request %s", url)
logger.debug("query parameters %s", params)
r = requests.get(url, params=params, auth=(auth.username, auth.password), verify=CHECK_CERTIFICATE)
if r.status_code != requests.codes.ok:
raise RestApiError("response not ok (%s) from %s" % (r.status_code, r.url))
if not r.content:
raise RestApiError("response is empty from %s" % r.url)
if autobox:
return r.url,_autobox(r.text, fmt)
else:
return r.url,r.content | Issue a GET request to the XNAT REST API and box the response content.
Example:
>>> import yaxil
>>> from yaxil import Format
>>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
>>> yaxil.get(auth, '/data/experiments', Format.JSON)
:param auth: XNAT authentication
:type auth: :mod:`yaxil.XnatAuth`
:param path: API URL path
:type path: str
:param fmt: API result format
:type fmt: :mod:`yaxil.Format`
:param autobox: Autobox response content into an appropriate reader or other data structure
:type autobox: bool
:param params: Additional query parameters
:type params: dict
:returns: Tuple of (URL, :mod:`dict` | :mod:`xml.etree.ElementTree` | :mod:`csv.reader` | :mod:`str`)
:rtype: tuple | Below is the instruction that describes the task:
### Input:
Issue a GET request to the XNAT REST API and box the response content.
Example:
>>> import yaxil
>>> from yaxil import Format
>>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
>>> yaxil.get(auth, '/data/experiments', Format.JSON)
:param auth: XNAT authentication
:type auth: :mod:`yaxil.XnatAuth`
:param path: API URL path
:type path: str
:param fmt: API result format
:type fmt: :mod:`yaxil.Format`
:param autobox: Autobox response content into an appropriate reader or other data structure
:type autobox: bool
:param params: Additional query parameters
:type params: dict
:returns: Tuple of (URL, :mod:`dict` | :mod:`xml.etree.ElementTree` | :mod:`csv.reader` | :mod:`str`)
:rtype: tuple
### Response:
def _get(auth, path, fmt, autobox=True, params=None):
'''
Issue a GET request to the XNAT REST API and box the response content.
Example:
>>> import yaxil
>>> from yaxil import Format
>>> auth = yaxil.XnatAuth(url='...', username='...', password='...')
>>> yaxil.get(auth, '/data/experiments', Format.JSON)
:param auth: XNAT authentication
:type auth: :mod:`yaxil.XnatAuth`
:param path: API URL path
:type path: str
:param fmt: API result format
:type fmt: :mod:`yaxil.Format`
:param autobox: Autobox response content into an appropriate reader or other data structure
:type autobox: bool
:param params: Additional query parameters
:type params: dict
:returns: Tuple of (URL, :mod:`dict` | :mod:`xml.etree.ElementTree` | :mod:`csv.reader` | :mod:`str`)
:rtype: tuple
'''
if not params:
params = {}
url = "%s/%s" % (auth.url.rstrip('/'), path.lstrip('/'))
params["format"] = fmt
logger.debug("issuing http request %s", url)
logger.debug("query parameters %s", params)
r = requests.get(url, params=params, auth=(auth.username, auth.password), verify=CHECK_CERTIFICATE)
if r.status_code != requests.codes.ok:
raise RestApiError("response not ok (%s) from %s" % (r.status_code, r.url))
if not r.content:
raise RestApiError("response is empty from %s" % r.url)
if autobox:
return r.url,_autobox(r.text, fmt)
else:
return r.url,r.content |
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Index or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[0] != iNaT:
return self._box_func(i8[0])
if self.hasnans:
if skipna:
min_stamp = self[~self._isnan].asi8.min()
else:
return self._na_value
else:
min_stamp = i8.min()
return self._box_func(min_stamp)
except ValueError:
return self._na_value | Return the minimum value of the Index or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Series.min : Return the minimum value in a Series. | Below is the instruction that describes the task:
### Input:
Return the minimum value of the Index or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Series.min : Return the minimum value in a Series.
### Response:
def min(self, axis=None, skipna=True, *args, **kwargs):
"""
Return the minimum value of the Index or minimum along
an axis.
See Also
--------
numpy.ndarray.min
Series.min : Return the minimum value in a Series.
"""
nv.validate_min(args, kwargs)
nv.validate_minmax_axis(axis)
if not len(self):
return self._na_value
i8 = self.asi8
try:
# quick check
if len(i8) and self.is_monotonic:
if i8[0] != iNaT:
return self._box_func(i8[0])
if self.hasnans:
if skipna:
min_stamp = self[~self._isnan].asi8.min()
else:
return self._na_value
else:
min_stamp = i8.min()
return self._box_func(min_stamp)
except ValueError:
return self._na_value |
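Behaviour sketch on a ``DatetimeIndex`` (illustrative dates), covering the NaT-skipping and empty-index branches walked above:

import pandas as pd

idx = pd.DatetimeIndex(['2021-03-02', pd.NaT, '2021-01-15'])
print(idx.min())                   # 2021-01-15 00:00:00 (NaT is skipped)
print(idx.min(skipna=False))       # NaT, because the index has a missing value
print(pd.DatetimeIndex([]).min())  # NaT for an empty index (the _na_value branch)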
def parse_mail_date(datestr):
'''Helper method used by :meth:`Message.from_email_message` to
convert dates from rfc822 format to iso 8601.
:param datestr: string containing a date in rfc822 format
:returns: string with date in iso 8601 format
'''
time_tuple = email.utils.parsedate_tz(datestr)
if time_tuple is None:
return datestr
dt = datetime.datetime.fromtimestamp(email.utils.mktime_tz(time_tuple))
return dt.isoformat() | Helper method used by :meth:`Message.from_email_message` to
convert dates from rfc822 format to iso 8601.
:param datestr: string containing a date in rfc822 format
:returns: string with date in iso 8601 format | Below is the instruction that describes the task:
### Input:
Helper method used by :meth:`Message.from_email_message` to
convert dates from rfc822 format to iso 8601.
:param datestr: string containing a date in rfc822 format
:returns: string with date in iso 8601 format
### Response:
def parse_mail_date(datestr):
'''Helper method used by :meth:`Message.from_email_message` to
convert dates from rfc822 format to iso 8601.
:param datestr: string containing a date in rfc822 format
:returns: string with date in iso 8601 format
'''
time_tuple = email.utils.parsedate_tz(datestr)
if time_tuple is None:
return datestr
dt = datetime.datetime.fromtimestamp(email.utils.mktime_tz(time_tuple))
return dt.isoformat() |
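A worked conversion; since ``fromtimestamp`` without a tz argument returns local time, the exact ISO string depends on the machine's timezone (the value shown assumes UTC):

import email.utils
import datetime

datestr = 'Mon, 12 Oct 2015 09:30:00 -0400'
time_tuple = email.utils.parsedate_tz(datestr)
dt = datetime.datetime.fromtimestamp(email.utils.mktime_tz(time_tuple))
print(dt.isoformat())  # '2015-10-12T13:30:00' on a UTC machine

# Anything parsedate_tz cannot handle is passed through unchanged:
print(parse_mail_date('not a date'))  # 'not a date'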
def left(X, i):
"""Compute the orthogonal matrix Q_{\leq i} as defined in [1]."""
if i < 0:
return np.ones([1, 1])
answ = np.ones([1, 1])
cores = tt.tensor.to_list(X)
for dim in xrange(i+1):
answ = np.tensordot(answ, cores[dim], 1)
answ = reshape(answ, (-1, X.r[i+1]))
return answ | Compute the orthogonal matrix Q_{\leq i} as defined in [1]. | Below is the instruction that describes the task:
### Input:
Compute the orthogonal matrix Q_{\leq i} as defined in [1].
### Response:
def left(X, i):
"""Compute the orthogonal matrix Q_{\leq i} as defined in [1]."""
if i < 0:
return np.ones([1, 1])
answ = np.ones([1, 1])
cores = tt.tensor.to_list(X)
for dim in xrange(i+1):
answ = np.tensordot(answ, cores[dim], 1)
answ = reshape(answ, (-1, X.r[i+1]))
return answ |
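A NumPy sketch of the same contraction on raw TT cores of shape ``(r_k, n_k, r_{k+1})``, without the ``tt`` package; the result stacks the first ``i+1`` modes into the rows of a matrix with ``r_{i+1}`` columns:

import numpy as np

def left_sketch(cores, i):
    if i < 0:
        return np.ones((1, 1))
    answ = np.ones((1, 1))
    for dim in range(i + 1):
        # Contract the trailing rank axis of answ with the leading rank axis of the core.
        answ = np.tensordot(answ, cores[dim], 1)
    return answ.reshape(-1, cores[i].shape[-1])

rng = np.random.default_rng(0)
ranks, modes = [1, 2, 3, 1], [4, 5, 6]
cores = [rng.standard_normal((ranks[k], modes[k], ranks[k + 1])) for k in range(3)]
print(left_sketch(cores, 1).shape)  # (20, 3), i.e. (n_0 * n_1, r_2)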
def applymap(self, func, **kwargs):
"""Return a new PRDD by applying a function to each element of each
pandas DataFrame."""
return self.from_rdd(
self._rdd.map(lambda data: data.applymap(func), **kwargs)) | Return a new PRDD by applying a function to each element of each
pandas DataFrame. | Below is the instruction that describes the task:
### Input:
Return a new PRDD by applying a function to each element of each
pandas DataFrame.
### Response:
def applymap(self, func, **kwargs):
"""Return a new PRDD by applying a function to each element of each
pandas DataFrame."""
return self.from_rdd(
self._rdd.map(lambda data: data.applymap(func), **kwargs)) |
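Usage sketch, assuming ``prdd`` is an existing PRDD whose per-partition pandas DataFrames hold numbers; the map is lazy and runs per partition when the RDD is evaluated:

# Square every element of every per-partition DataFrame.
squared = prdd.applymap(lambda x: x ** 2)
# Shift every element by a constant.
shifted = prdd.applymap(lambda x: x - 1.0)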
def render_sendmail(self):
"""
if we have smtp_host and smtp_from, configure sendmail plugin,
else remove it
"""
phase = 'exit_plugins'
plugin = 'sendmail'
if not self.dj.dock_json_has_plugin_conf(phase, plugin):
return
if self.spec.smtp_host.value and self.spec.smtp_from.value:
self.dj.dock_json_set_arg(phase, plugin, 'url',
self.spec.builder_openshift_url.value)
self.dj.dock_json_set_arg(phase, plugin, 'smtp_host',
self.spec.smtp_host.value)
self.dj.dock_json_set_arg(phase, plugin, 'from_address',
self.spec.smtp_from.value)
else:
logger.info("removing sendmail from request, "
"requires smtp_host and smtp_from")
self.dj.remove_plugin(phase, plugin)
return
if self.spec.kojihub.value and self.spec.kojiroot.value:
self.dj.dock_json_set_arg(phase, plugin,
'koji_hub', self.spec.kojihub.value)
self.dj.dock_json_set_arg(phase, plugin,
"koji_root", self.spec.kojiroot.value)
if self.spec.smtp_to_submitter.value:
self.dj.dock_json_set_arg(phase, plugin, 'to_koji_submitter',
self.spec.smtp_to_submitter.value)
if self.spec.smtp_to_pkgowner.value:
self.dj.dock_json_set_arg(phase, plugin, 'to_koji_pkgowner',
self.spec.smtp_to_pkgowner.value)
if self.spec.smtp_additional_addresses.value:
self.dj.dock_json_set_arg(phase, plugin, 'additional_addresses',
self.spec.smtp_additional_addresses.value)
if self.spec.smtp_error_addresses.value:
self.dj.dock_json_set_arg(phase, plugin,
'error_addresses', self.spec.smtp_error_addresses.value)
if self.spec.smtp_email_domain.value:
self.dj.dock_json_set_arg(phase, plugin,
'email_domain', self.spec.smtp_email_domain.value) | if we have smtp_host and smtp_from, configure sendmail plugin,
else remove it | Below is the instruction that describes the task:
### Input:
if we have smtp_host and smtp_from, configure sendmail plugin,
else remove it
### Response:
def render_sendmail(self):
"""
if we have smtp_host and smtp_from, configure sendmail plugin,
else remove it
"""
phase = 'exit_plugins'
plugin = 'sendmail'
if not self.dj.dock_json_has_plugin_conf(phase, plugin):
return
if self.spec.smtp_host.value and self.spec.smtp_from.value:
self.dj.dock_json_set_arg(phase, plugin, 'url',
self.spec.builder_openshift_url.value)
self.dj.dock_json_set_arg(phase, plugin, 'smtp_host',
self.spec.smtp_host.value)
self.dj.dock_json_set_arg(phase, plugin, 'from_address',
self.spec.smtp_from.value)
else:
logger.info("removing sendmail from request, "
"requires smtp_host and smtp_from")
self.dj.remove_plugin(phase, plugin)
return
if self.spec.kojihub.value and self.spec.kojiroot.value:
self.dj.dock_json_set_arg(phase, plugin,
'koji_hub', self.spec.kojihub.value)
self.dj.dock_json_set_arg(phase, plugin,
"koji_root", self.spec.kojiroot.value)
if self.spec.smtp_to_submitter.value:
self.dj.dock_json_set_arg(phase, plugin, 'to_koji_submitter',
self.spec.smtp_to_submitter.value)
if self.spec.smtp_to_pkgowner.value:
self.dj.dock_json_set_arg(phase, plugin, 'to_koji_pkgowner',
self.spec.smtp_to_pkgowner.value)
if self.spec.smtp_additional_addresses.value:
self.dj.dock_json_set_arg(phase, plugin, 'additional_addresses',
self.spec.smtp_additional_addresses.value)
if self.spec.smtp_error_addresses.value:
self.dj.dock_json_set_arg(phase, plugin,
'error_addresses', self.spec.smtp_error_addresses.value)
if self.spec.smtp_email_domain.value:
self.dj.dock_json_set_arg(phase, plugin,
'email_domain', self.spec.smtp_email_domain.value) |
def _generate_notebook_header(notebook_object, notebook_type, notebook_title="Notebook Title",
tags="tags", difficulty_stars=1,
notebook_description="Notebook Description"):
"""
Internal function that is used for generation of the generic notebooks header.
----------
Parameters
----------
notebook_object : notebook object
Object of "notebook" class where the header will be created.
notebook_type : str
Notebook type: - "Main_Files_Signal_Samples"
- "Main_Files_By_Category"
- "Main_Files_By_Difficulty"
- "Main_Files_By_Tag"
- "Acquire"
- "Open"
- "Visualise"
- "Process"
- "Detect"
- "Extract"
- "Train_and_Classify"
- "Explain"
notebook_title : None or str
The Notebook title should only be defined when 'notebook_type' is:
- "Acquire"
- "Open"
- "Visualise"
- "Process"
- "Detect"
- "Extract"
- "Train_and_Classify"
- "Explain"
tags : str
Sequence of tags that characterize the Notebook.
difficulty_stars : int
This input defines the difficulty level of the Notebook instructions.
notebook_description : str
An introductory text to present the Notebook and involve the reader.
"""
# ============================= Creation of Header ====================================
header_temp = HEADER_ALL_CATEGORIES.replace("header_image_color_i", "header_image_color_" +
str(NOTEBOOK_KEYS[notebook_type]))
header_temp = header_temp.replace("header_image_i", "header_image_" +
str(NOTEBOOK_KEYS[notebook_type]))
header_temp = header_temp.replace("Notebook Title", notebook_title)
notebook_object["cells"].append(nb.v4.new_markdown_cell(header_temp, **{"metadata": {"tags": ["intro_info_title"]}}))
# =============== Inclusion of the div with "Difficulty" and "Tags" ===================
tags_and_diff = HEADER_TAGS.replace('<td class="shield_right" id="tags">tags</td>',
'<td class="shield_right" id="tags">' + "☁".join(tags)
+ '</td>')
for star in range(1, 6):
if star <= difficulty_stars:
tags_and_diff = tags_and_diff.replace("fa fa-star " + str(star), "fa fa-star "
"checked")
else:
tags_and_diff = tags_and_diff.replace("fa fa-star " + str(star), "fa fa-star")
notebook_object["cells"].append(nb.v4.new_markdown_cell(tags_and_diff,
**{"metadata": {"tags": ["intro_info_tags"]}}))
# ================= Insertion of the div reserved to the Notebook Description ==================
notebook_object["cells"].append(nb.v4.new_markdown_cell(notebook_description,
**{"metadata":
{"tags": ["test"]}}))
notebook_object["cells"].append(nb.v4.new_markdown_cell(SEPARATOR))
# ======================= Insertion of a blank Markdown and Code cell ==========================
notebook_object["cells"].append(nb.v4.new_markdown_cell(MD_EXAMPLES))
notebook_object["cells"].append(nb.v4.new_code_cell(CODE_EXAMPLES)) | Internal function that is used for generation of the generic notebooks header.
----------
Parameters
----------
notebook_object : notebook object
Object of "notebook" class where the header will be created.
notebook_type : str
Notebook type: - "Main_Files_Signal_Samples"
- "Main_Files_By_Category"
- "Main_Files_By_Difficulty"
- "Main_Files_By_Tag"
- "Acquire"
- "Open"
- "Visualise"
- "Process"
- "Detect"
- "Extract"
- "Train_and_Classify"
- "Explain"
notebook_title : None or str
The Notebook title should only be defined when 'notebook_type' is:
- "Acquire"
- "Open"
- "Visualise"
- "Process"
- "Detect"
- "Extract"
- "Train_and_Classify"
- "Explain"
tags : str
Sequence of tags that characterize the Notebook.
difficulty_stars : int
This input defines the difficulty level of the Notebook instructions.
notebook_description : str
An introductory text to present the Notebook and involve the reader. | Below is the instruction that describes the task:
### Input:
Internal function that is used for generation of the generic notebooks header.
----------
Parameters
----------
notebook_object : notebook object
Object of "notebook" class where the header will be created.
notebook_type : str
Notebook type: - "Main_Files_Signal_Samples"
- "Main_Files_By_Category"
- "Main_Files_By_Difficulty"
- "Main_Files_By_Tag"
- "Acquire"
- "Open"
- "Visualise"
- "Process"
- "Detect"
- "Extract"
- "Train_and_Classify"
- "Explain"
notebook_title : None or str
The Notebook title should only be defined when 'notebook_type' is:
- "Acquire"
- "Open"
- "Visualise"
- "Process"
- "Detect"
- "Extract"
- "Train_and_Classify"
- "Explain"
tags : str
Sequence of tags that characterize the Notebook.
difficulty_stars : int
This input defines the difficulty level of the Notebook instructions.
notebook_description : str
An introductory text to present the Notebook and involve the reader.
### Response:
def _generate_notebook_header(notebook_object, notebook_type, notebook_title="Notebook Title",
tags="tags", difficulty_stars=1,
notebook_description="Notebook Description"):
"""
Internal function that is used for generation of the generic notebooks header.
----------
Parameters
----------
notebook_object : notebook object
Object of "notebook" class where the header will be created.
notebook_type : str
Notebook type: - "Main_Files_Signal_Samples"
- "Main_Files_By_Category"
- "Main_Files_By_Difficulty"
- "Main_Files_By_Tag"
- "Acquire"
- "Open"
- "Visualise"
- "Process"
- "Detect"
- "Extract"
- "Train_and_Classify"
- "Explain"
notebook_title : None or str
The Notebook title should only be defined when 'notebook_type' is:
- "Acquire"
- "Open"
- "Visualise"
- "Process"
- "Detect"
- "Extract"
- "Train_and_Classify"
- "Explain"
tags : str
Sequence of tags that characterize the Notebook.
difficulty_stars : int
This input defines the difficulty level of the Notebook instructions.
notebook_description : str
An introductory text to present the Notebook and involve the reader.
"""
# ============================= Creation of Header ====================================
header_temp = HEADER_ALL_CATEGORIES.replace("header_image_color_i", "header_image_color_" +
str(NOTEBOOK_KEYS[notebook_type]))
header_temp = header_temp.replace("header_image_i", "header_image_" +
str(NOTEBOOK_KEYS[notebook_type]))
header_temp = header_temp.replace("Notebook Title", notebook_title)
notebook_object["cells"].append(nb.v4.new_markdown_cell(header_temp, **{"metadata": {"tags": ["intro_info_title"]}}))
# =============== Inclusion of the div with "Difficulty" and "Tags" ===================
tags_and_diff = HEADER_TAGS.replace('<td class="shield_right" id="tags">tags</td>',
'<td class="shield_right" id="tags">' + "☁".join(tags)
+ '</td>')
for star in range(1, 6):
if star <= difficulty_stars:
tags_and_diff = tags_and_diff.replace("fa fa-star " + str(star), "fa fa-star "
"checked")
else:
tags_and_diff = tags_and_diff.replace("fa fa-star " + str(star), "fa fa-star")
notebook_object["cells"].append(nb.v4.new_markdown_cell(tags_and_diff,
**{"metadata": {"tags": ["intro_info_tags"]}}))
# ================= Insertion of the div reserved to the Notebook Description ==================
notebook_object["cells"].append(nb.v4.new_markdown_cell(notebook_description,
**{"metadata":
{"tags": ["test"]}}))
notebook_object["cells"].append(nb.v4.new_markdown_cell(SEPARATOR))
# ======================= Insertion of a blank Markdown and Code cell ==========================
notebook_object["cells"].append(nb.v4.new_markdown_cell(MD_EXAMPLES))
notebook_object["cells"].append(nb.v4.new_code_cell(CODE_EXAMPLES)) |
def _progress_update(self, numerator_increment, stage=0, show_eta=True, **kw):
""" Updates the progress. Will update progress bars or other progress output.
Parameters
----------
numerator : int
numerator of partial work done already in current stage
stage : int, nonnegative, default=0
Current stage of the algorithm, 0 or greater
"""
if not self.show_progress:
return
self.__check_stage_registered(stage)
if not self._prog_rep_progressbars[stage]:
return
pg = self._prog_rep_progressbars[stage]
pg.update(int(numerator_increment)) | Updates the progress. Will update progress bars or other progress output.
Parameters
----------
numerator : int
numerator of partial work done already in current stage
stage : int, nonnegative, default=0
Current stage of the algorithm, 0 or greater | Below is the instruction that describes the task:
### Input:
Updates the progress. Will update progress bars or other progress output.
Parameters
----------
numerator : int
numerator of partial work done already in current stage
stage : int, nonnegative, default=0
Current stage of the algorithm, 0 or greater
### Response:
def _progress_update(self, numerator_increment, stage=0, show_eta=True, **kw):
""" Updates the progress. Will update progress bars or other progress output.
Parameters
----------
numerator : int
numerator of partial work done already in current stage
stage : int, nonnegative, default=0
Current stage of the algorithm, 0 or greater
"""
if not self.show_progress:
return
self.__check_stage_registered(stage)
if not self._prog_rep_progressbars[stage]:
return
pg = self._prog_rep_progressbars[stage]
pg.update(int(numerator_increment)) |
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the appropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l) | Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the appropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view. | Below is the instruction that describes the task:
### Input:
Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the appropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
### Response:
def draw_edges(self):
"""Basic edge routing applied only for edges with dummy points.
Enhanced edge routing can be performed by using the appropriate
*route_with_xxx* functions from :ref:routing_ in the edges' view.
"""
for e in self.g.E():
if hasattr(e,'view'):
l=[]
r0,r1 = None,None
if e in self.ctrls:
D = self.ctrls[e]
r0,r1 = self.grx[e.v[0]].rank,self.grx[e.v[1]].rank
if r0<r1:
ranks = xrange(r0+1,r1)
else:
ranks = xrange(r0-1,r1,-1)
l = [D[r].view.xy for r in ranks]
l.insert(0,e.v[0].view.xy)
l.append(e.v[1].view.xy)
try:
self.route_edge(e,l)
except AttributeError:
pass
e.view.setpath(l) |
def iterate_specific_packet_range():
"""Count the number of packets in a specific range."""
now = datetime.utcnow()
start = now - timedelta(hours=1)
total = 0
for packet in archive.list_packets(start=start, stop=now):
total += 1
# print(packet)
print('Found', total, 'packets in range') | Count the number of packets in a specific range. | Below is the instruction that describes the task:
### Input:
Count the number of packets in a specific range.
### Response:
def iterate_specific_packet_range():
"""Count the number of packets in a specific range."""
now = datetime.utcnow()
start = now - timedelta(hours=1)
total = 0
for packet in archive.list_packets(start=start, stop=now):
total += 1
# print(packet)
print('Found', total, 'packets in range') |
def _print_summary_map(strm, result_map, ftype):
"""Print summary of certain result map."""
if len(result_map) == 0:
return 0
npass = len([x for k, x in result_map.iteritems() if len(x) == 0])
strm.write('=====%d/%d %s files passed check=====\n' % (npass, len(result_map), ftype))
for fname, emap in result_map.iteritems():
if len(emap) == 0:
continue
strm.write('%s: %d Errors of %d Categories map=%s\n' % (
fname, sum(emap.values()), len(emap), str(emap)))
return len(result_map) - npass | Print summary of certain result map. | Below is the instruction that describes the task:
### Input:
Print summary of certain result map.
### Response:
def _print_summary_map(strm, result_map, ftype):
"""Print summary of certain result map."""
if len(result_map) == 0:
return 0
npass = len([x for k, x in result_map.iteritems() if len(x) == 0])
strm.write('=====%d/%d %s files passed check=====\n' % (npass, len(result_map), ftype))
for fname, emap in result_map.iteritems():
if len(emap) == 0:
continue
strm.write('%s: %d Errors of %d Categories map=%s\n' % (
fname, sum(emap.values()), len(emap), str(emap)))
return len(result_map) - npass |
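A worked call with an illustrative result map (the helper uses ``iteritems``, so it runs as-is only under Python 2); the return value is the number of files that failed the check:

import sys

# filename -> {error_category: count}; an empty dict means the file passed.
result_map = {
    'src/io.cc': {},
    'src/net.cc': {'whitespace': 3, 'build': 1},
}
nfail = _print_summary_map(sys.stderr, result_map, 'cpp')
# stderr: =====1/2 cpp files passed check=====
#         src/net.cc: 4 Errors of 2 Categories map={'whitespace': 3, 'build': 1}
print(nfail)  # 1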
def csrf_token():
"""
Get csrf token or create new one
"""
from uliweb import request, settings
from uliweb.utils.common import safe_str
v = {}
token_name = settings.CSRF.cookie_token_name
if not request.session.deleted and request.session.get(token_name):
v = request.session[token_name]
if time.time() >= v['created_time'] + v['expiry_time']:
v = {}
else:
v['created_time'] = time.time()
if not v:
token = request.cookies.get(token_name)
if not token:
token = uuid.uuid4().get_hex()
v = {'token':token, 'expiry_time':settings.CSRF.timeout, 'created_time':time.time()}
if not request.session.deleted:
request.session[token_name] = v
return safe_str(v['token']) | Get csrf token or create new one | Below is the instruction that describes the task:
### Input:
Get csrf token or create new one
### Response:
def csrf_token():
"""
Get csrf token or create new one
"""
from uliweb import request, settings
from uliweb.utils.common import safe_str
v = {}
token_name = settings.CSRF.cookie_token_name
if not request.session.deleted and request.session.get(token_name):
v = request.session[token_name]
if time.time() >= v['created_time'] + v['expiry_time']:
v = {}
else:
v['created_time'] = time.time()
if not v:
token = request.cookies.get(token_name)
if not token:
token = uuid.uuid4().get_hex()
v = {'token':token, 'expiry_time':settings.CSRF.timeout, 'created_time':time.time()}
if not request.session.deleted:
request.session[token_name] = v
return safe_str(v['token']) |
def import_txt(filename, **kwargs):
"""Import Syscal measurements from a text file, exported as 'Spreadsheet'.
Parameters
----------
filename: string
input filename
x0: float, optional
position of first electrode. If not given, then use the smallest
x-position in the data as the first electrode.
spacing: float
electrode spacing. This is important if not all electrodes are used in
a given measurement setup. If not given, then the smallest distance
between electrodes is assumed to be the electrode spacing. Naturally,
this requires measurements (or injections) with subsequent electrodes.
reciprocals: int, optional
if provided, then assume that this is a reciprocal measurements where
only the electrode cables were switched. The provided number N is
treated as the maximum electrode number, and denotations are renamed
according to the equation :math:`X_n = N - (X_a - 1)`
Returns
-------
data: :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes: :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography: None
No topography information is contained in the text files, so we always
return None
Notes
-----
* TODO: we could try to infer electrode spacing from the file itself
"""
# read in text file into a buffer
with open(filename, 'r') as fid:
text = fid.read()
strings_to_replace = {
'Mixed / non conventional': 'Mixed/non-conventional',
'Date': 'Date Time AM-PM',
}
for key in strings_to_replace.keys():
text = text.replace(key, strings_to_replace[key])
buffer = StringIO(text)
# read data file
data_raw = pd.read_csv(
buffer,
# sep='\t',
delim_whitespace=True,
)
# clean up column names
data_raw.columns = [x.strip() for x in data_raw.columns.tolist()]
# generate electrode positions
data = _convert_coords_to_abmn_X(
data_raw[['Spa.1', 'Spa.2', 'Spa.3', 'Spa.4']],
**kwargs
)
# [mV] / [mA]
data['r'] = data_raw['Vp'] / data_raw['In']
data['Vmn'] = data_raw['Vp']
data['Iab'] = data_raw['In']
# rename electrode denotations
rec_max = kwargs.get('reciprocals', None)
if rec_max is not None:
print('renumbering electrode numbers')
data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']]
return data, None, None | Import Syscal measurements from a text file, exported as 'Spreadsheet'.
Parameters
----------
filename: string
input filename
x0: float, optional
position of first electrode. If not given, then use the smallest
x-position in the data as the first electrode.
spacing: float
electrode spacing. This is important if not all electrodes are used in
a given measurement setup. If not given, then the smallest distance
between electrodes is assumed to be the electrode spacing. Naturally,
this requires measurements (or injections) with subsequent electrodes.
reciprocals: int, optional
if provided, then assume that this is a reciprocal measurements where
only the electrode cables were switched. The provided number N is
treated as the maximum electrode number, and denotations are renamed
according to the equation :math:`X_n = N - (X_a - 1)`
Returns
-------
data: :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes: :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography: None
No topography information is contained in the text files, so we always
return None
Notes
-----
* TODO: we could try to infer electrode spacing from the file itself | Below is the instruction that describes the task:
### Input:
Import Syscal measurements from a text file, exported as 'Spreadsheet'.
Parameters
----------
filename: string
input filename
x0: float, optional
position of first electrode. If not given, then use the smallest
x-position in the data as the first electrode.
spacing: float
electrode spacing. This is important if not all electrodes are used in
a given measurement setup. If not given, then the smallest distance
between electrodes is assumed to be the electrode spacing. Naturally,
this requires measurements (or injections) with subsequent electrodes.
reciprocals: int, optional
if provided, then assume that this is a reciprocal measurements where
only the electrode cables were switched. The provided number N is
treated as the maximum electrode number, and denotations are renamed
according to the equation :math:`X_n = N - (X_a - 1)`
Returns
-------
data: :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes: :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography: None
No topography information is contained in the text files, so we always
return None
Notes
-----
* TODO: we could try to infer electrode spacing from the file itself
### Response:
def import_txt(filename, **kwargs):
"""Import Syscal measurements from a text file, exported as 'Spreadsheet'.
Parameters
----------
filename: string
input filename
x0: float, optional
position of first electrode. If not given, then use the smallest
x-position in the data as the first electrode.
spacing: float
electrode spacing. This is important if not all electrodes are used in
a given measurement setup. If not given, then the smallest distance
between electrodes is assumed to be the electrode spacing. Naturally,
this requires measurements (or injections) with subsequent electrodes.
reciprocals: int, optional
if provided, then assume that this is a reciprocal measurements where
only the electrode cables were switched. The provided number N is
treated as the maximum electrode number, and denotations are renamed
according to the equation :math:`X_n = N - (X_a - 1)`
Returns
-------
data: :py:class:`pandas.DataFrame`
Contains the measurement data
electrodes: :py:class:`pandas.DataFrame`
Contains electrode positions (None at the moment)
topography: None
No topography information is contained in the text files, so we always
return None
Notes
-----
* TODO: we could try to infer electrode spacing from the file itself
"""
# read in text file into a buffer
with open(filename, 'r') as fid:
text = fid.read()
strings_to_replace = {
'Mixed / non conventional': 'Mixed/non-conventional',
'Date': 'Date Time AM-PM',
}
for key in strings_to_replace.keys():
text = text.replace(key, strings_to_replace[key])
buffer = StringIO(text)
# read data file
data_raw = pd.read_csv(
buffer,
# sep='\t',
delim_whitespace=True,
)
# clean up column names
data_raw.columns = [x.strip() for x in data_raw.columns.tolist()]
# generate electrode positions
data = _convert_coords_to_abmn_X(
data_raw[['Spa.1', 'Spa.2', 'Spa.3', 'Spa.4']],
**kwargs
)
# [mV] / [mA]
data['r'] = data_raw['Vp'] / data_raw['In']
data['Vmn'] = data_raw['Vp']
data['Iab'] = data_raw['In']
# rename electrode denotations
rec_max = kwargs.get('reciprocals', None)
if rec_max is not None:
print('renumbering electrode numbers')
data[['a', 'b', 'm', 'n']] = rec_max + 1 - data[['a', 'b', 'm', 'n']]
return data, None, None |
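Usage sketch with a hypothetical Syscal 'Spreadsheet' text export; the filename, electrode spacing, and origin are assumptions passed through to the coordinate conversion:

data, electrodes, topography = import_txt(
    'syscal_profile.txt',  # placeholder filename
    x0=0.0,                # position of the first electrode
    spacing=1.0,           # electrode spacing in metres
)
print(data[['a', 'b', 'm', 'n', 'r', 'Vmn', 'Iab']].head())
# electrodes and topography are always None for this importer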
def align(self):
'''
Every step we have 3 choices:
1) Move pointer witness a --> omission
2) Move pointer witness b --> addition
3) Move pointer of both witness a/b --> match
Note: a replacement is omission followed by an addition or the other way around
Choice 1 and 2 are only possible if token a and b are not a match OR when tokens are repeated.
For now I ignore token repetition..
'''
# extract tokens from witness (note that this can be done in a streaming manner if desired)
tokens_a = self.witness_a.tokens()
tokens_b = self.witness_b.tokens()
# create virtual decision tree (nodes are created on demand)
# see above
# create start node
start = DecisionTreeNode(self)
# search the decision tree
result = self.tree.search(start)
print(result)
pass | Every step we have 3 choices:
1) Move pointer witness a --> omission
2) Move pointer witness b --> addition
3) Move pointer of both witness a/b --> match
Note: a replacement is omission followed by an addition or the other way around
Choice 1 and 2 are only possible if token a and b are not a match OR when tokens are repeated.
For now I ignore token repetition.. | Below is the instruction that describes the task:
### Input:
Every step we have 3 choices:
1) Move pointer witness a --> omission
2) Move pointer witness b --> addition
3) Move pointer of both witness a/b --> match
Note: a replacement is omission followed by an addition or the other way around
Choice 1 and 2 are only possible if token a and b are not a match OR when tokens are repeated.
For now I ignore token repetition..
### Response:
def align(self):
'''
Every step we have 3 choices:
1) Move pointer witness a --> omission
2) Move pointer witness b --> addition
3) Move pointer of both witness a/b --> match
Note: a replacement is omission followed by an addition or the other way around
Choice 1 and 2 are only possible if token a and b are not a match OR when tokens are repeated.
For now I ignore token repetition..
'''
# extract tokens from witness (note that this can be done in a streaming manner if desired)
tokens_a = self.witness_a.tokens()
tokens_b = self.witness_b.tokens()
# create virtual decision tree (nodes are created on demand)
# see above
# create start node
start = DecisionTreeNode(self)
# search the decision tree
result = self.tree.search(start)
print(result)
pass |
def file_change_rates(self, branch='master', limit=None, coverage=False, days=None, ignore_globs=None,
include_globs=None):
"""
This function will return a DataFrame containing some basic aggregations of the file change history data, and
optionally test coverage data from a coverage_data.py .coverage file. The aim here is to identify files in the
project which have abnormal edit rates, or the rate of changes without growing the file's size. If a file has
a high change rate and poor test coverage, then it is a great candidate for writing more tests.
:param branch: (optional, default=master) the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param coverage: (optional, default=False) a bool for whether or not to attempt to join in coverage data.
:param days: (optional, default=None) number of days to return if limit is None
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
"""
fch = self.file_change_history(
branch=branch,
limit=limit,
days=days,
ignore_globs=ignore_globs,
include_globs=include_globs
)
fch.reset_index(level=0, inplace=True)
if fch.shape[0] > 0:
file_history = fch.groupby('filename').agg(
{
'insertions': [np.sum, np.max, np.mean],
'deletions': [np.sum, np.max, np.mean],
'message': lambda x: ','.join(['"' + str(y) + '"' for y in x]),
'committer': lambda x: ','.join(['"' + str(y) + '"' for y in x]),
'author': lambda x: ','.join(['"' + str(y) + '"' for y in x]),
'date': [np.max, np.min]
}
)
file_history.columns = [' '.join(col).strip() for col in file_history.columns.values]
file_history = file_history.rename(columns={
'message <lambda>': 'messages',
'committer <lambda>': 'committers',
'insertions sum': 'total_insertions',
'insertions amax': 'max_insertions',
'insertions mean': 'mean_insertions',
'author <lambda>': 'authors',
'date amax': 'max_date',
'date amin': 'min_date',
'deletions sum': 'total_deletions',
'deletions amax': 'max_deletions',
'deletions mean': 'mean_deletions'
})
# get some building block values for later use
file_history['net_change'] = file_history['total_insertions'] - file_history['total_deletions']
file_history['abs_change'] = file_history['total_insertions'] + file_history['total_deletions']
file_history['delta_time'] = file_history['max_date'] - file_history['min_date']
try:
file_history['delta_days'] = file_history['delta_time'].map(
lambda x: np.ceil(x.seconds / (24 * 3600) + 0.01))
except AttributeError as e:
file_history['delta_days'] = file_history['delta_time'].map(
lambda x: np.ceil((float(x.total_seconds()) * 10e-6) / (24 * 3600) + 0.01))
# calculate metrics
file_history['net_rate_of_change'] = file_history['net_change'] / file_history['delta_days']
file_history['abs_rate_of_change'] = file_history['abs_change'] / file_history['delta_days']
file_history['edit_rate'] = file_history['abs_rate_of_change'] - file_history['net_rate_of_change']
file_history['unique_committers'] = file_history['committers'].map(lambda x: len(set(x.split(','))))
# reindex
file_history = file_history.reindex(
columns=['unique_committers', 'abs_rate_of_change', 'net_rate_of_change', 'net_change', 'abs_change',
'edit_rate'])
file_history.sort_values(by=['edit_rate'], inplace=True)
if coverage and self.has_coverage():
file_history = file_history.merge(self.coverage(), left_index=True, right_on='filename', how='outer')
file_history.set_index(keys=['filename'], drop=True, inplace=True)
else:
file_history = DataFrame(
columns=['unique_committers', 'abs_rate_of_change', 'net_rate_of_change', 'net_change', 'abs_change',
'edit_rate'])
return file_history | This function will return a DataFrame containing some basic aggregations of the file change history data, and
optionally test coverage data from a coverage_data.py .coverage file. The aim here is to identify files in the
project which have abnormal edit rates, or the rate of changes without growing the file's size. If a file has
a high change rate and poor test coverage, then it is a great candidate for writing more tests.
:param branch: (optional, default=master) the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param coverage: (optional, default=False) a bool for whether or not to attempt to join in coverage data.
:param days: (optional, default=None) number of days to return if limit is None
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame | Below is the instruction that describes the task:
### Input:
This function will return a DataFrame containing some basic aggregations of the file change history data, and
optionally test coverage data from a coverage_data.py .coverage file. The aim here is to identify files in the
project which have abnormal edit rates, or the rate of changes without growing the file's size. If a file has
a high change rate and poor test coverage, then it is a great candidate for writing more tests.
:param branch: (optional, default=master) the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param coverage: (optional, default=False) a bool for whether or not to attempt to join in coverage data.
:param days: (optional, default=None) number of days to return if limit is None
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
### Response:
def file_change_rates(self, branch='master', limit=None, coverage=False, days=None, ignore_globs=None,
include_globs=None):
"""
This function will return a DataFrame containing some basic aggregations of the file change history data, and
optionally test coverage data from a coverage_data.py .coverage file. The aim here is to identify files in the
project which have abnormal edit rates, or the rate of changes without growing the file's size. If a file has
a high change rate and poor test coverage, then it is a great candidate for writing more tests.
:param branch: (optional, default=master) the branch to return commits for
:param limit: (optional, default=None) a maximum number of commits to return, None for no limit
:param coverage: (optional, default=False) a bool for whether or not to attempt to join in coverage data.
:param days: (optional, default=None) number of days to return if limit is None
:param ignore_globs: (optional, default=None) a list of globs to ignore, default none excludes nothing
:param include_globs: (optional, default=None) a list of globs to include, default of None includes everything.
:return: DataFrame
"""
fch = self.file_change_history(
branch=branch,
limit=limit,
days=days,
ignore_globs=ignore_globs,
include_globs=include_globs
)
fch.reset_index(level=0, inplace=True)
if fch.shape[0] > 0:
file_history = fch.groupby('filename').agg(
{
'insertions': [np.sum, np.max, np.mean],
'deletions': [np.sum, np.max, np.mean],
'message': lambda x: ','.join(['"' + str(y) + '"' for y in x]),
'committer': lambda x: ','.join(['"' + str(y) + '"' for y in x]),
'author': lambda x: ','.join(['"' + str(y) + '"' for y in x]),
'date': [np.max, np.min]
}
)
file_history.columns = [' '.join(col).strip() for col in file_history.columns.values]
file_history = file_history.rename(columns={
'message <lambda>': 'messages',
'committer <lambda>': 'committers',
'insertions sum': 'total_insertions',
'insertions amax': 'max_insertions',
'insertions mean': 'mean_insertions',
'author <lambda>': 'authors',
'date amax': 'max_date',
'date amin': 'min_date',
'deletions sum': 'total_deletions',
'deletions amax': 'max_deletions',
'deletions mean': 'mean_deletions'
})
# get some building block values for later use
file_history['net_change'] = file_history['total_insertions'] - file_history['total_deletions']
file_history['abs_change'] = file_history['total_insertions'] + file_history['total_deletions']
file_history['delta_time'] = file_history['max_date'] - file_history['min_date']
try:
file_history['delta_days'] = file_history['delta_time'].map(
lambda x: np.ceil(x.seconds / (24 * 3600) + 0.01))
except AttributeError as e:
file_history['delta_days'] = file_history['delta_time'].map(
lambda x: np.ceil((float(x.total_seconds()) * 10e-6) / (24 * 3600) + 0.01))
# calculate metrics
file_history['net_rate_of_change'] = file_history['net_change'] / file_history['delta_days']
file_history['abs_rate_of_change'] = file_history['abs_change'] / file_history['delta_days']
file_history['edit_rate'] = file_history['abs_rate_of_change'] - file_history['net_rate_of_change']
file_history['unique_committers'] = file_history['committers'].map(lambda x: len(set(x.split(','))))
# reindex
file_history = file_history.reindex(
columns=['unique_committers', 'abs_rate_of_change', 'net_rate_of_change', 'net_change', 'abs_change',
'edit_rate'])
file_history.sort_values(by=['edit_rate'], inplace=True)
if coverage and self.has_coverage():
file_history = file_history.merge(self.coverage(), left_index=True, right_on='filename', how='outer')
file_history.set_index(keys=['filename'], drop=True, inplace=True)
else:
file_history = DataFrame(
columns=['unique_committers', 'abs_rate_of_change', 'net_rate_of_change', 'net_change', 'abs_change',
'edit_rate'])
return file_history |
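As a quick illustration of the edit_rate metric computed above, here is a minimal, runnable sketch; the toy DataFrame and its numbers are invented for the example and only mirror the column names built by file_change_rates.

import pandas as pd

# Toy per-file aggregates standing in for the grouped change history.
toy = pd.DataFrame({
    'total_insertions': [120, 40],
    'total_deletions': [100, 5],
    'delta_days': [10.0, 20.0],
}, index=['churny.py', 'stable.py'])

toy['net_change'] = toy['total_insertions'] - toy['total_deletions']
toy['abs_change'] = toy['total_insertions'] + toy['total_deletions']
toy['net_rate_of_change'] = toy['net_change'] / toy['delta_days']
toy['abs_rate_of_change'] = toy['abs_change'] / toy['delta_days']
# edit_rate is large when a file is edited heavily without growing much.
toy['edit_rate'] = toy['abs_rate_of_change'] - toy['net_rate_of_change']
print(toy[['edit_rate']].sort_values(by='edit_rate'))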
def sparse_to_unmasked_sparse(self):
"""The 1D index mappings between the masked sparse-grid and unmasked sparse grid."""
return mapping_util.sparse_to_unmasked_sparse_from_mask_and_pixel_centres(
total_sparse_pixels=self.total_sparse_pixels, mask=self.regular_grid.mask,
unmasked_sparse_grid_pixel_centres=self.unmasked_sparse_grid_pixel_centres).astype('int') | The 1D index mappings between the masked sparse-grid and unmasked sparse grid. | Below is the instruction that describes the task:
### Input:
The 1D index mappings between the masked sparse-grid and unmasked sparse grid.
### Response:
def sparse_to_unmasked_sparse(self):
"""The 1D index mappings between the masked sparse-grid and unmasked sparse grid."""
return mapping_util.sparse_to_unmasked_sparse_from_mask_and_pixel_centres(
total_sparse_pixels=self.total_sparse_pixels, mask=self.regular_grid.mask,
unmasked_sparse_grid_pixel_centres=self.unmasked_sparse_grid_pixel_centres).astype('int') |
def remove_all_handler(self):
"""
Unlink the file handler association.
"""
for handler in self.logger.handlers[:]:
self.logger.removeHandler(handler)
self._handler_cache.append(handler) | Unlink the file handler association. | Below is the instruction that describes the task:
### Input:
Unlink the file handler association.
### Response:
def remove_all_handler(self):
"""
Unlink the file handler association.
"""
for handler in self.logger.handlers[:]:
self.logger.removeHandler(handler)
self._handler_cache.append(handler) |
def commercial_domains():
# type: () -> set
""" Return list of commercial email domains, which means:
- domain is not public
- domain is not university
- it is not personal (more than 1 person using this domain)
>>> "google.com" in commercial_domains()
True
>>> "microsoft.com" in commercial_domains()
True
>>> "isri.cs.cmu.edu" in commercial_domains() # university department
False
>>> "jaraco.com" in commercial_domains() # personal
False
"""
dus = domain_user_stats()
es = "test@" + pd.Series(dus.index, index=dus.index)
return set(
dus[~is_public_bulk(es) & ~is_university_bulk(es) & (dus > 1)].index) | Return list of commercial email domains, which means:
- domain is not public
- domain is not university
- it is not personal (more than 1 person using this domain)
>>> "google.com" in commercial_domains()
True
>>> "microsoft.com" in commercial_domains()
True
>>> "isri.cs.cmu.edu" in commercial_domains() # university department
False
>>> "jaraco.com" in commercial_domains() # personal
False | Below is the instruction that describes the task:
### Input:
Return list of commercial email domains, which means:
- domain is not public
- domain is not university
- it is not personal (more than 1 person using this domain)
>>> "google.com" in commercial_domains()
True
>>> "microsoft.com" in commercial_domains()
True
>>> "isri.cs.cmu.edu" in commercial_domains() # university department
False
>>> "jaraco.com" in commercial_domains() # personal
False
### Response:
def commercial_domains():
# type: () -> set
""" Return list of commercial email domains, which means:
- domain is not public
- domain is not university
- it is not personal (more than 1 person using this domain)
>>> "google.com" in commercial_domains()
True
>>> "microsoft.com" in commercial_domains()
True
>>> "isri.cs.cmu.edu" in commercial_domains() # university department
False
>>> "jaraco.com" in commercial_domains() # personal
False
"""
dus = domain_user_stats()
es = "test@" + pd.Series(dus.index, index=dus.index)
return set(
dus[~is_public_bulk(es) & ~is_university_bulk(es) & (dus > 1)].index) |
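A minimal sketch of the same filtering logic with toy data; is_public_bulk and is_university_bulk below are simplified stand-ins for the module's real helpers, and the domain lists and counts are invented.

import pandas as pd

PUBLIC = {'gmail.com'}            # assumed public-provider list, illustration only
UNIVERSITY = {'isri.cs.cmu.edu'}  # assumed university-domain list, illustration only

def is_public_bulk(emails):
    return emails.str.split('@').str[-1].isin(PUBLIC)

def is_university_bulk(emails):
    return emails.str.split('@').str[-1].isin(UNIVERSITY)

# domain -> number of distinct users observed for that domain
dus = pd.Series({'google.com': 5, 'gmail.com': 9, 'isri.cs.cmu.edu': 3, 'jaraco.com': 1})
es = "test@" + pd.Series(dus.index, index=dus.index)
print(set(dus[~is_public_bulk(es) & ~is_university_bulk(es) & (dus > 1)].index))
# {'google.com'}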
def update_serviceprofile(self, host_id, vlan_id):
"""Top level method to update Service Profiles on UCS Manager.
Calls all the methods responsible for the individual tasks that
ultimately result in a vlan_id getting programed on a server's
ethernet ports and the Fabric Interconnect's network ports.
"""
ucsm_ip = self.get_ucsm_ip_for_host(host_id)
if not ucsm_ip:
LOG.info('UCS Manager network driver does not have UCSM IP '
'for Host_id %s', str(host_id))
return False
service_profile = self.ucsm_sp_dict.get((ucsm_ip, host_id))
if service_profile:
LOG.debug('UCS Manager network driver Service Profile : %s',
service_profile)
else:
LOG.info('UCS Manager network driver does not support '
'Host_id %s', host_id)
return False
with self.ucsm_connect_disconnect(ucsm_ip) as handle:
# Create Vlan Profile
if not self._create_vlanprofile(handle, vlan_id, ucsm_ip):
LOG.error('UCS Manager network driver failed to create '
'Vlan Profile for vlan %s', str(vlan_id))
return False
# Update Service Profile
if not self._update_service_profile(handle,
service_profile,
vlan_id,
ucsm_ip):
LOG.error('UCS Manager network driver failed to update '
'Service Profile %(service_profile)s in UCSM '
'%(ucsm_ip)s',
{'service_profile': service_profile, 'ucsm_ip': ucsm_ip})
return False
return True | Top level method to update Service Profiles on UCS Manager.
Calls all the methods responsible for the individual tasks that
ultimately result in a vlan_id getting programmed on a server's
ethernet ports and the Fabric Interconnect's network ports. | Below is the instruction that describes the task:
### Input:
Top level method to update Service Profiles on UCS Manager.
Calls all the methods responsible for the individual tasks that
ultimately result in a vlan_id getting programmed on a server's
ethernet ports and the Fabric Interconnect's network ports.
### Response:
def update_serviceprofile(self, host_id, vlan_id):
"""Top level method to update Service Profiles on UCS Manager.
Calls all the methods responsible for the individual tasks that
ultimately result in a vlan_id getting programmed on a server's
ethernet ports and the Fabric Interconnect's network ports.
"""
ucsm_ip = self.get_ucsm_ip_for_host(host_id)
if not ucsm_ip:
LOG.info('UCS Manager network driver does not have UCSM IP '
'for Host_id %s', str(host_id))
return False
service_profile = self.ucsm_sp_dict.get((ucsm_ip, host_id))
if service_profile:
LOG.debug('UCS Manager network driver Service Profile : %s',
service_profile)
else:
LOG.info('UCS Manager network driver does not support '
'Host_id %s', host_id)
return False
with self.ucsm_connect_disconnect(ucsm_ip) as handle:
# Create Vlan Profile
if not self._create_vlanprofile(handle, vlan_id, ucsm_ip):
LOG.error('UCS Manager network driver failed to create '
'Vlan Profile for vlan %s', str(vlan_id))
return False
# Update Service Profile
if not self._update_service_profile(handle,
service_profile,
vlan_id,
ucsm_ip):
LOG.error('UCS Manager network driver failed to update '
'Service Profile %(service_profile)s in UCSM '
'%(ucsm_ip)s',
{'service_profile': service_profile, 'ucsm_ip': ucsm_ip})
return False
return True |
def group_dict(items, keyfunc):
"""Return a list defaultdict with ``items`` grouped by ``keyfunc``.
>>> sorted(group_dict('eggs', lambda x: x).items())
[('e', ['e']), ('g', ['g', 'g']), ('s', ['s'])]
"""
result = collections.defaultdict(list)
for i in items:
key = keyfunc(i)
result[key].append(i)
return result | Return a list defaultdict with ``items`` grouped by ``keyfunc``.
>>> sorted(group_dict('eggs', lambda x: x).items())
[('e', ['e']), ('g', ['g', 'g']), ('s', ['s'])] | Below is the instruction that describes the task:
### Input:
Return a list defaultdict with ``items`` grouped by ``keyfunc``.
>>> sorted(group_dict('eggs', lambda x: x).items())
[('e', ['e']), ('g', ['g', 'g']), ('s', ['s'])]
### Response:
def group_dict(items, keyfunc):
"""Return a list defaultdict with ``items`` grouped by ``keyfunc``.
>>> sorted(group_dict('eggs', lambda x: x).items())
[('e', ['e']), ('g', ['g', 'g']), ('s', ['s'])]
"""
result = collections.defaultdict(list)
for i in items:
key = keyfunc(i)
result[key].append(i)
return result |
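A short usage example of the group_dict helper defined above (assumed to be in scope), grouping the same items by two different key functions:

words = ['ant', 'bee', 'cow', 'bat']
by_length = group_dict(words, keyfunc=len)
by_first = group_dict(words, keyfunc=lambda w: w[0])
print(dict(by_length))  # {3: ['ant', 'bee', 'cow', 'bat']}
print(dict(by_first))   # {'a': ['ant'], 'b': ['bee', 'bat'], 'c': ['cow']}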
def qteNextWindow(self):
"""
Return next window in cyclic order.
|Args|
* **None**
|Returns|
* **QtmacsWindow**: the next window in the Qtmacs internal
window list.
|Raises|
* **None**
"""
# Get the currently active window.
win = self.qteActiveWindow()
if win in self._qteWindowList:
# Find the index of the window in the window list and
# cyclically move to the next element in this list to find
# the next window object.
idx = self._qteWindowList.index(win)
idx = (idx + 1) % len(self._qteWindowList)
return self._qteWindowList[idx]
else:
msg = 'qteNextWindow method found a non-existing window.'
self.qteLogger.warning(msg)
return None | Return next window in cyclic order.
|Args|
* **None**
|Returns|
* **QtmacsWindow**: the next window in the Qtmacs internal
window list.
|Raises|
* **None** | Below is the instruction that describes the task:
### Input:
Return next window in cyclic order.
|Args|
* **None**
|Returns|
* **QtmacsWindow**: the next window in the Qtmacs internal
window list.
|Raises|
* **None**
### Response:
def qteNextWindow(self):
"""
Return next window in cyclic order.
|Args|
* **None**
|Returns|
* **QtmacsWindow**: the next window in the Qtmacs internal
window list.
|Raises|
* **None**
"""
# Get the currently active window.
win = self.qteActiveWindow()
if win in self._qteWindowList:
# Find the index of the window in the window list and
# cyclically move to the next element in this list to find
# the next window object.
idx = self._qteWindowList.index(win)
idx = (idx + 1) % len(self._qteWindowList)
return self._qteWindowList[idx]
else:
msg = 'qteNextWindow method found a non-existing window.'
self.qteLogger.warning(msg)
return None |
def _write_var_data_nonsparse(self, f, zVar, var, dataType, numElems,
recVary, compression, blockingfactor, indata):
'''
Creates VVRs and the corresponding VXRs full of "indata" data.
If there is no compression, creates exactly one VXR and VVR
If there is compression
Parameters:
f : file
The open CDF file
zVar : bool
True if this is z variable data
var : str
The name of the variable
dataType : int
the CDF variable type
numElems : int
number of elements in each record
recVary : bool
True if each record is unique
compression : int
The amount of compression
blockingfactor: int
The size (in number of records) of a VVR data block
indata : varies
the data to write, should be a numpy or byte array
Returns:
recs : int
The number of records
'''
numValues = self._num_values(zVar, var)
dataTypeSize = CDF._datatype_size(dataType, numElems)
if (isinstance(indata, dict)):
indata = indata['Data']
# Deal with EPOCH16 data types
if (dataType == CDF.CDF_EPOCH16):
epoch16 = []
if isinstance(indata, (list, tuple, np.ndarray)):
adata = indata[0]
if (isinstance(adata, complex)):
recs = len(indata)
for x in range(0, recs):
epoch16.append(indata[x].real)
epoch16.append(indata[x].imag)
indata = epoch16
else:
if (isinstance(indata, complex)):
epoch16.append(indata.real)
epoch16.append(indata.imag)
indata = epoch16
# Convert to byte stream
recs, data = self._convert_data(dataType, numElems, numValues, indata)
if not recVary:
recs = 1
if zVar:
vdr_offset = self.zvarsinfo[var][1]
else:
vdr_offset = self.rvarsinfo[var][1]
usedEntries = 0
editedVDR = False
numVXRs = 0
if compression > 0:
default_blockingfactor = math.ceil(CDF.BLOCKING_BYTES/(numValues * dataTypeSize))
# If the given blocking factor is too small, use the default one
# Will re-adjust if the records are less than this computed BF.
if (blockingfactor < default_blockingfactor):
blockingfactor = default_blockingfactor
if (blockingfactor == 0):
blockingfactor = 1
# set blocking factor
if (recs < blockingfactor):
blockingfactor = recs
blocks = math.ceil(recs / blockingfactor)
nEntries = CDF.NUM_VXR_ENTRIES
VXRhead = None
# Loop through blocks, create VVRs/CVVRs
for x in range(0, blocks):
startrec = x * blockingfactor
startloc = startrec * numValues * dataTypeSize
endrec = (x + 1) * blockingfactor - 1
if (endrec > (recs-1)):
endrec = recs - 1
endloc = (endrec + 1) * numValues * dataTypeSize
if (endloc > len(data)):
endrec = recs - 1
endloc = len(data)
bdata = data[startloc:endloc]
cdata = gzip.compress(bdata, compression)
if (len(cdata) < len(bdata)):
if not editedVDR:
f.seek(vdr_offset+44, 0)
# VDR's Flags
flags = int.from_bytes(f.read(4), 'big', signed=True)
flags = CDF._set_bit(flags, 2)
self._update_offset_value(f, vdr_offset+44, 4, flags)
f.seek(vdr_offset+80, 0)
# VDR's BlockingFactor
self._update_offset_value(f, vdr_offset+80, 4,
blockingfactor)
editedVDR = True
n1offset = self._write_cvvr(f, cdata)
else:
# Not worth compressing
n1offset = self._write_vvr(f, bdata)
if (x == 0):
# Create a VXR
VXRoffset = self._write_vxr(f)
VXRhead = VXRoffset
numVXRs = 1
self._update_vdr_vxrheadtail(f, vdr_offset, VXRoffset)
if (usedEntries < nEntries):
# Use the existing VXR
usedEntries = self._use_vxrentry(f, VXRoffset, startrec,
endrec, n1offset)
else:
# Create a new VXR and an upper level VXR, if needed.
# Two levels of VXRs are the maximum, which is simpler
# to implement.
savedVXRoffset = VXRoffset
VXRoffset = self._write_vxr(f)
numVXRs += 1
usedEntries = self._use_vxrentry(f, VXRoffset, startrec,
endrec, n1offset)
# Edit the VXRnext field of the previous VXR
self._update_offset_value(f, savedVXRoffset+12, 8, VXRoffset)
# Edit the VXRtail of the VDR
self._update_offset_value(f, vdr_offset+36, 8, VXRoffset)
# After we're done with the blocks, check the way
# we have VXRs set up
if (numVXRs > CDF.NUM_VXRlvl_ENTRIES):
newvxrhead, newvxrtail = self._add_vxr_levels_r(f, VXRhead,
numVXRs)
self._update_offset_value(f, vdr_offset+28, 8, newvxrhead)
self._update_offset_value(f, vdr_offset+36, 8, newvxrtail)
else:
# Create one VVR and VXR, with one VXR entry
offset = self._write_vvr(f, data)
VXRoffset = self._write_vxr(f)
usedEntries = self._use_vxrentry(f, VXRoffset, 0, recs-1, offset)
self._update_vdr_vxrheadtail(f, vdr_offset, VXRoffset)
# VDR's MaxRec
self._update_offset_value(f, vdr_offset+24, 4, recs-1)
return (recs-1) | Creates VVRs and the corresponding VXRs full of "indata" data.
If there is no compression, creates exactly one VXR and VVR
If there is compression
Parameters:
f : file
The open CDF file
zVar : bool
True if this is z variable data
var : str
The name of the variable
dataType : int
the CDF variable type
numElems : int
number of elements in each record
recVary : bool
True if each record is unique
compression : int
The amount of compression
blockingfactor: int
The size (in number of records) of a VVR data block
indata : varies
the data to write, should be a numpy or byte array
Returns:
recs : int
The number of records | Below is the instruction that describes the task:
### Input:
Creates VVRs and the corresponding VXRs full of "indata" data.
If there is no compression, creates exactly one VXR and VVR
If there is compression
Parameters:
f : file
The open CDF file
zVar : bool
True if this is z variable data
var : str
The name of the variable
dataType : int
the CDF variable type
numElems : int
number of elements in each record
recVary : bool
True if each record is unique
compression : int
The amount of compression
blockingfactor: int
The size (in number of records) of a VVR data block
indata : varies
the data to write, should be a numpy or byte array
Returns:
recs : int
The number of records
### Response:
def _write_var_data_nonsparse(self, f, zVar, var, dataType, numElems,
recVary, compression, blockingfactor, indata):
'''
Creates VVRs and the corresponding VXRs full of "indata" data.
If there is no compression, creates exactly one VXR and VVR
If there is compression
Parameters:
f : file
The open CDF file
zVar : bool
True if this is z variable data
var : str
The name of the variable
dataType : int
the CDF variable type
numElems : int
number of elements in each record
recVary : bool
True if each record is unique
compression : int
The amount of compression
blockingfactor: int
The size (in number of records) of a VVR data block
indata : varies
the data to write, should be a numpy or byte array
Returns:
recs : int
The number of records
'''
numValues = self._num_values(zVar, var)
dataTypeSize = CDF._datatype_size(dataType, numElems)
if (isinstance(indata, dict)):
indata = indata['Data']
# Deal with EPOCH16 data types
if (dataType == CDF.CDF_EPOCH16):
epoch16 = []
if isinstance(indata, (list, tuple, np.ndarray)):
adata = indata[0]
if (isinstance(adata, complex)):
recs = len(indata)
for x in range(0, recs):
epoch16.append(indata[x].real)
epoch16.append(indata[x].imag)
indata = epoch16
else:
if (isinstance(indata, complex)):
epoch16.append(indata.real)
epoch16.append(indata.imag)
indata = epoch16
# Convert to byte stream
recs, data = self._convert_data(dataType, numElems, numValues, indata)
if not recVary:
recs = 1
if zVar:
vdr_offset = self.zvarsinfo[var][1]
else:
vdr_offset = self.rvarsinfo[var][1]
usedEntries = 0
editedVDR = False
numVXRs = 0
if compression > 0:
default_blockingfactor = math.ceil(CDF.BLOCKING_BYTES/(numValues * dataTypeSize))
# If the given blocking factor is too small, use the default one
# Will re-adjust if the records are less than this computed BF.
if (blockingfactor < default_blockingfactor):
blockingfactor = default_blockingfactor
if (blockingfactor == 0):
blockingfactor = 1
# set blocking factor
if (recs < blockingfactor):
blockingfactor = recs
blocks = math.ceil(recs / blockingfactor)
nEntries = CDF.NUM_VXR_ENTRIES
VXRhead = None
# Loop through blocks, create VVRs/CVVRs
for x in range(0, blocks):
startrec = x * blockingfactor
startloc = startrec * numValues * dataTypeSize
endrec = (x + 1) * blockingfactor - 1
if (endrec > (recs-1)):
endrec = recs - 1
endloc = (endrec + 1) * numValues * dataTypeSize
if (endloc > len(data)):
endrec = recs - 1
endloc = len(data)
bdata = data[startloc:endloc]
cdata = gzip.compress(bdata, compression)
if (len(cdata) < len(bdata)):
if not editedVDR:
f.seek(vdr_offset+44, 0)
# VDR's Flags
flags = int.from_bytes(f.read(4), 'big', signed=True)
flags = CDF._set_bit(flags, 2)
self._update_offset_value(f, vdr_offset+44, 4, flags)
f.seek(vdr_offset+80, 0)
# VDR's BlockingFactor
self._update_offset_value(f, vdr_offset+80, 4,
blockingfactor)
editedVDR = True
n1offset = self._write_cvvr(f, cdata)
else:
# Not worth compressing
n1offset = self._write_vvr(f, bdata)
if (x == 0):
# Create a VXR
VXRoffset = self._write_vxr(f)
VXRhead = VXRoffset
numVXRs = 1
self._update_vdr_vxrheadtail(f, vdr_offset, VXRoffset)
if (usedEntries < nEntries):
# Use the existing VXR
usedEntries = self._use_vxrentry(f, VXRoffset, startrec,
endrec, n1offset)
else:
# Create a new VXR and an upper level VXR, if needed.
# Two levels of VXRs are the maximum, which is simpler
# to implement.
savedVXRoffset = VXRoffset
VXRoffset = self._write_vxr(f)
numVXRs += 1
usedEntries = self._use_vxrentry(f, VXRoffset, startrec,
endrec, n1offset)
# Edit the VXRnext field of the previous VXR
self._update_offset_value(f, savedVXRoffset+12, 8, VXRoffset)
# Edit the VXRtail of the VDR
self._update_offset_value(f, vdr_offset+36, 8, VXRoffset)
# After we're done with the blocks, check the way
# we have VXRs set up
if (numVXRs > CDF.NUM_VXRlvl_ENTRIES):
newvxrhead, newvxrtail = self._add_vxr_levels_r(f, VXRhead,
numVXRs)
self._update_offset_value(f, vdr_offset+28, 8, newvxrhead)
self._update_offset_value(f, vdr_offset+36, 8, newvxrtail)
else:
# Create one VVR and VXR, with one VXR entry
offset = self._write_vvr(f, data)
VXRoffset = self._write_vxr(f)
usedEntries = self._use_vxrentry(f, VXRoffset, 0, recs-1, offset)
self._update_vdr_vxrheadtail(f, vdr_offset, VXRoffset)
# VDR's MaxRec
self._update_offset_value(f, vdr_offset+24, 4, recs-1)
return (recs-1) |
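To make the blocking logic above easier to follow, here is a self-contained sketch of how the compression branch walks records in (startrec, endrec) windows; the CDF-specific VVR/VXR writing is deliberately omitted and the numbers are arbitrary.

import math

def split_into_blocks(recs, blockingfactor):
    """Yield (startrec, endrec) pairs the way the compression loop above does."""
    blockingfactor = max(1, min(blockingfactor, recs))
    blocks = math.ceil(recs / blockingfactor)
    for x in range(blocks):
        startrec = x * blockingfactor
        endrec = min((x + 1) * blockingfactor - 1, recs - 1)
        yield startrec, endrec

print(list(split_into_blocks(recs=10, blockingfactor=4)))
# [(0, 3), (4, 7), (8, 9)]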
def get_object_data(self, ref):
""" As get_object_header, but returns object data as well
:return: (hexsha, type_string, size_as_int,data_string)
:note: not threadsafe"""
hexsha, typename, size, stream = self.stream_object_data(ref)
data = stream.read(size)
del(stream)
return (hexsha, typename, size, data) | As get_object_header, but returns object data as well
:return: (hexsha, type_string, size_as_int,data_string)
:note: not threadsafe | Below is the instruction that describes the task:
### Input:
As get_object_header, but returns object data as well
:return: (hexsha, type_string, size_as_int,data_string)
:note: not threadsafe
### Response:
def get_object_data(self, ref):
""" As get_object_header, but returns object data as well
:return: (hexsha, type_string, size_as_int,data_string)
:note: not threadsafe"""
hexsha, typename, size, stream = self.stream_object_data(ref)
data = stream.read(size)
del(stream)
return (hexsha, typename, size, data) |
def cmd_exec(self, cmd):
"""
Execute commands in automate namespace
"""
if not cmd:
return
ns = self.cmd_namespace
import copy
rval = True
nscopy = copy.copy(ns)
try:
r = eval(cmd, ns)
if isinstance(r, SystemObject) and not r.system:
r.setup_system(self)
if callable(r):
r = r()
cmd += "()"
self.logger.info("Eval: %s", cmd)
self.logger.info("Result: %s", r)
except SyntaxError:
r = {}
try:
exec (cmd, ns)
self.logger.info("Exec: %s", cmd)
except ExitException:
raise
except Exception as e:
self.logger.info("Failed to exec cmd %s: %s.", cmd, e)
rval = False
for key, value in list(ns.items()):
if key not in nscopy or not value is nscopy[key]:
if key in self.namespace:
del self.namespace[key]
self.namespace[key] = value
r[key] = value
self.logger.info("Set items in namespace: %s", r)
except ExitException:
raise
except Exception as e:
self.logger.info("Failed to eval cmd %s: %s", cmd, e)
return False
return rval | Execute commands in automate namespace | Below is the the instruction that describes the task:
### Input:
Execute commands in automate namespace
### Response:
def cmd_exec(self, cmd):
"""
Execute commands in automate namespace
"""
if not cmd:
return
ns = self.cmd_namespace
import copy
rval = True
nscopy = copy.copy(ns)
try:
r = eval(cmd, ns)
if isinstance(r, SystemObject) and not r.system:
r.setup_system(self)
if callable(r):
r = r()
cmd += "()"
self.logger.info("Eval: %s", cmd)
self.logger.info("Result: %s", r)
except SyntaxError:
r = {}
try:
exec (cmd, ns)
self.logger.info("Exec: %s", cmd)
except ExitException:
raise
except Exception as e:
self.logger.info("Failed to exec cmd %s: %s.", cmd, e)
rval = False
for key, value in list(ns.items()):
if key not in nscopy or not value is nscopy[key]:
if key in self.namespace:
del self.namespace[key]
self.namespace[key] = value
r[key] = value
self.logger.info("Set items in namespace: %s", r)
except ExitException:
raise
except Exception as e:
self.logger.info("Failed to eval cmd %s: %s", cmd, e)
return False
return rval |
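The core control flow of cmd_exec is an eval-first, exec-on-SyntaxError fallback; the stripped-down sketch below shows just that pattern, without the Automate-specific namespace bookkeeping and logging.

def run_in_namespace(cmd, ns):
    """Evaluate cmd as an expression if possible, otherwise execute it as statements."""
    try:
        return eval(cmd, ns)   # expressions such as '1 + 1' or 'foo()' return a value
    except SyntaxError:
        exec(cmd, ns)          # statements such as 'x = 2' or 'import os' return nothing
        return None

ns = {}
print(run_in_namespace('1 + 1', ns))  # 2
run_in_namespace('x = 2 * 21', ns)
print(ns['x'])                        # 42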
def _join(self, rows):
'''
Join multiple rows worth of data into a single result.
'''
rval = {}
for row in rows:
if row:
for value,count in row.items():
rval[ value ] = count + rval.get(value,0)
return rval | Join multiple rows worth of data into a single result. | Below is the the instruction that describes the task:
### Input:
Join multiple rows worth of data into a single result.
### Response:
def _join(self, rows):
'''
Join multiple rows worth of data into a single result.
'''
rval = {}
for row in rows:
if row:
for value,count in row.items():
rval[ value ] = count + rval.get(value,0)
return rval |
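A small usage example of the merge performed by _join above, with plain dicts standing in for rows (a None entry shows the falsy-row guard):

def join_counts(rows):
    """Merge per-row {value: count} dicts into one combined count, as _join does."""
    rval = {}
    for row in rows:
        if row:
            for value, count in row.items():
                rval[value] = count + rval.get(value, 0)
    return rval

print(join_counts([{'a': 2, 'b': 1}, None, {'a': 3, 'c': 5}]))
# {'a': 5, 'b': 1, 'c': 5}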
def _stdout_raw(self, s):
"""Writes the string to stdout"""
print(s, end='', file=sys.stdout)
sys.stdout.flush() | Writes the string to stdout | Below is the the instruction that describes the task:
### Input:
Writes the string to stdout
### Response:
def _stdout_raw(self, s):
"""Writes the string to stdout"""
print(s, end='', file=sys.stdout)
sys.stdout.flush() |
def write_meta(self):
"""ucds, descriptions and units are written as attributes in the hdf5 file, instead of a separate file as
the default :func:`Dataset.write_meta`.
"""
with h5py.File(self.filename, "r+") as h5file_output:
h5table_root = h5file_output[self.h5table_root_name]
if self.description is not None:
h5table_root.attrs["description"] = self.description
h5columns = h5table_root if self._version == 1 else h5table_root['columns']
for column_name in self.columns.keys():
h5dataset = None
if column_name in h5columns:
h5dataset = h5columns[column_name]
else:
for group in h5columns.values():
if 'type' in group.attrs:
if group.attrs['type'] in ['csr_matrix']:
for name, column in group.items():
if name == column_name:
h5dataset = column
if h5dataset is None:
raise ValueError('column {} not found'.format(column_name))
for name, values in [("ucd", self.ucds), ("unit", self.units), ("description", self.descriptions)]:
if column_name in values:
value = ensure_string(values[column_name], cast=True)
h5dataset.attrs[name] = value
else:
if name in h5columns.attrs:
del h5dataset.attrs[name] | ucds, descriptions and units are written as attributes in the hdf5 file, instead of a separate file as
the default :func:`Dataset.write_meta`. | Below is the instruction that describes the task:
### Input:
ucds, descriptions and units are written as attributes in the hdf5 file, instead of a separate file as
the default :func:`Dataset.write_meta`.
### Response:
def write_meta(self):
"""ucds, descriptions and units are written as attributes in the hdf5 file, instead of a separate file as
the default :func:`Dataset.write_meta`.
"""
with h5py.File(self.filename, "r+") as h5file_output:
h5table_root = h5file_output[self.h5table_root_name]
if self.description is not None:
h5table_root.attrs["description"] = self.description
h5columns = h5table_root if self._version == 1 else h5table_root['columns']
for column_name in self.columns.keys():
h5dataset = None
if column_name in h5columns:
h5dataset = h5columns[column_name]
else:
for group in h5columns.values():
if 'type' in group.attrs:
if group.attrs['type'] in ['csr_matrix']:
for name, column in group.items():
if name == column_name:
h5dataset = column
if h5dataset is None:
raise ValueError('column {} not found'.format(column_name))
for name, values in [("ucd", self.ucds), ("unit", self.units), ("description", self.descriptions)]:
if column_name in values:
value = ensure_string(values[column_name], cast=True)
h5dataset.attrs[name] = value
else:
if name in h5columns.attrs:
del h5dataset.attrs[name] |
def disable_all_tokens(platform, user_id, on_error=None, on_success=None):
""" Disable ALL device tokens for the given user on the specified platform.
:param str platform: The platform on which to disable tokens. One of either
Google Cloud Messaging (outbound.GCM) or Apple Push Notification Service
(outbound.APNS).
:param str | number user_id: the id you use to identify a user. this should
be static for the lifetime of a user.
:param func on_error: An optional function to call in the event of an error.
on_error callback should take 2 parameters: `code` and `error`. `code` will be
one of outbound.ERROR_XXXXXX. `error` will be the corresponding message.
:param func on_success: An optional function to call if/when the API call succeeds.
on_success callback takes no parameters.
"""
__device_token(platform, False, user_id, all=True, on_error=on_error, on_success=on_success) | Disable ALL device tokens for the given user on the specified platform.
:param str platform: The platform on which to disable tokens. One of either
Google Cloud Messaging (outbound.GCM) or Apple Push Notification Service
(outbound.APNS).
:param str | number user_id: the id you use to identify a user. this should
be static for the lifetime of a user.
:param func on_error: An optional function to call in the event of an error.
on_error callback should take 2 parameters: `code` and `error`. `code` will be
one of outbound.ERROR_XXXXXX. `error` will be the corresponding message.
:param func on_success: An optional function to call if/when the API call succeeds.
on_success callback takes no parameters. | Below is the instruction that describes the task:
### Input:
Disable ALL device tokens for the given user on the specified platform.
:param str platform: The platform on which to disable tokens. One of either
Google Cloud Messaging (outbound.GCM) or Apple Push Notification Service
(outbound.APNS).
:param str | number user_id: the id you use to identify a user. this should
be static for the lifetime of a user.
:param func on_error: An optional function to call in the event of an error.
on_error callback should take 2 parameters: `code` and `error`. `code` will be
one of outbound.ERROR_XXXXXX. `error` will be the corresponding message.
:param func on_success: An optional function to call if/when the API call succeeds.
on_success callback takes no parameters.
### Response:
def disable_all_tokens(platform, user_id, on_error=None, on_success=None):
""" Disable ALL device tokens for the given user on the specified platform.
:param str platform: The platform on which to disable tokens. One of either
Google Cloud Messaging (outbound.GCM) or Apple Push Notification Service
(outbound.APNS).
:param str | number user_id: the id you use to identify a user. this should
be static for the lifetime of a user.
:param func on_error: An optional function to call in the event of an error.
on_error callback should take 2 parameters: `code` and `error`. `code` will be
one of outbound.ERROR_XXXXXX. `error` will be the corresponding message.
:param func on_success: An optional function to call if/when the API call succeeds.
on_success callback takes no parameters.
"""
__device_token(platform, False, user_id, all=True, on_error=on_error, on_success=on_success) |
def put(self,
body: Body,
priority: int = DEFAULT_PRIORITY,
delay: int = DEFAULT_DELAY,
ttr: int = DEFAULT_TTR) -> int:
"""Inserts a job into the currently used tube and returns the job ID.
:param body: The data representing the job.
:param priority: An integer between 0 and 4,294,967,295 where 0 is the
most urgent.
:param delay: The number of seconds to delay the job for.
:param ttr: The maximum number of seconds the job can be reserved for
before timing out.
"""
if isinstance(body, str):
if self.encoding is None:
raise TypeError("Unable to encode string with no encoding set")
body = body.encode(self.encoding)
cmd = b'put %d %d %d %d\r\n%b' % (priority, delay, ttr, len(body), body)
return self._int_cmd(cmd, b'INSERTED') | Inserts a job into the currently used tube and returns the job ID.
:param body: The data representing the job.
:param priority: An integer between 0 and 4,294,967,295 where 0 is the
most urgent.
:param delay: The number of seconds to delay the job for.
:param ttr: The maximum number of seconds the job can be reserved for
before timing out. | Below is the instruction that describes the task:
### Input:
Inserts a job into the currently used tube and returns the job ID.
:param body: The data representing the job.
:param priority: An integer between 0 and 4,294,967,295 where 0 is the
most urgent.
:param delay: The number of seconds to delay the job for.
:param ttr: The maximum number of seconds the job can be reserved for
before timing out.
### Response:
def put(self,
body: Body,
priority: int = DEFAULT_PRIORITY,
delay: int = DEFAULT_DELAY,
ttr: int = DEFAULT_TTR) -> int:
"""Inserts a job into the currently used tube and returns the job ID.
:param body: The data representing the job.
:param priority: An integer between 0 and 4,294,967,295 where 0 is the
most urgent.
:param delay: The number of seconds to delay the job for.
:param ttr: The maximum number of seconds the job can be reserved for
before timing out.
"""
if isinstance(body, str):
if self.encoding is None:
raise TypeError("Unable to encode string with no encoding set")
body = body.encode(self.encoding)
cmd = b'put %d %d %d %d\r\n%b' % (priority, delay, ttr, len(body), body)
return self._int_cmd(cmd, b'INSERTED') |
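A minimal sketch of how the put command bytes are assembled for a str body, mirroring the encoding and formatting above; no server connection is involved, and the default values below are illustrative rather than the client's actual constants.

DEFAULT_PRIORITY = 65536   # assumed value, for illustration only
DEFAULT_DELAY = 0
DEFAULT_TTR = 60

def build_put_command(body, priority=DEFAULT_PRIORITY, delay=DEFAULT_DELAY,
                      ttr=DEFAULT_TTR, encoding='utf-8'):
    """Return the raw bytes a 'put' would send, following the formatting used above."""
    if isinstance(body, str):
        if encoding is None:
            raise TypeError("Unable to encode string with no encoding set")
        body = body.encode(encoding)
    return b'put %d %d %d %d\r\n%b' % (priority, delay, ttr, len(body), body)

print(build_put_command('hello'))
# b'put 65536 0 60 5\r\nhello'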
def _wrap(value):
"""
Wraps the passed value in a Sequence if it is not a primitive. If it is a string
argument it is expanded to a list of characters.
>>> _wrap(1)
1
>>> _wrap("abc")
['a', 'b', 'c']
>>> type(_wrap([1, 2]))
functional.pipeline.Sequence
:param value: value to wrap
:return: wrapped or not wrapped value
"""
if is_primitive(value):
return value
if isinstance(value, (dict, set)) or is_namedtuple(value):
return value
elif isinstance(value, collections.Iterable):
try:
if type(value).__name__ == 'DataFrame':
import pandas
if isinstance(value, pandas.DataFrame):
return Sequence(value.values)
except ImportError: # pragma: no cover
pass
return Sequence(value)
else:
return value | Wraps the passed value in a Sequence if it is not a primitive. If it is a string
argument it is expanded to a list of characters.
>>> _wrap(1)
1
>>> _wrap("abc")
['a', 'b', 'c']
>>> type(_wrap([1, 2]))
functional.pipeline.Sequence
:param value: value to wrap
:return: wrapped or not wrapped value | Below is the instruction that describes the task:
### Input:
Wraps the passed value in a Sequence if it is not a primitive. If it is a string
argument it is expanded to a list of characters.
>>> _wrap(1)
1
>>> _wrap("abc")
['a', 'b', 'c']
>>> type(_wrap([1, 2]))
functional.pipeline.Sequence
:param value: value to wrap
:return: wrapped or not wrapped value
### Response:
def _wrap(value):
"""
Wraps the passed value in a Sequence if it is not a primitive. If it is a string
argument it is expanded to a list of characters.
>>> _wrap(1)
1
>>> _wrap("abc")
['a', 'b', 'c']
>>> type(_wrap([1, 2]))
functional.pipeline.Sequence
:param value: value to wrap
:return: wrapped or not wrapped value
"""
if is_primitive(value):
return value
if isinstance(value, (dict, set)) or is_namedtuple(value):
return value
elif isinstance(value, collections.Iterable):
try:
if type(value).__name__ == 'DataFrame':
import pandas
if isinstance(value, pandas.DataFrame):
return Sequence(value.values)
except ImportError: # pragma: no cover
pass
return Sequence(value)
else:
return value |
def set_mode_cb(self, mode, tf):
"""Called when one of the Move/Draw/Edit radio buttons is selected."""
if tf:
self.canvas.set_draw_mode(mode)
if mode == 'edit':
self.edit_select_mark()
return True | Called when one of the Move/Draw/Edit radio buttons is selected. | Below is the instruction that describes the task:
### Input:
Called when one of the Move/Draw/Edit radio buttons is selected.
### Response:
def set_mode_cb(self, mode, tf):
"""Called when one of the Move/Draw/Edit radio buttons is selected."""
if tf:
self.canvas.set_draw_mode(mode)
if mode == 'edit':
self.edit_select_mark()
return True |
def wquantiles(W, x, alphas=(0.25, 0.50, 0.75)):
"""Quantiles for weighted data.
Parameters
----------
W: (N,) ndarray
normalised weights (weights are >=0 and sum to one)
x: (N,) or (N,d) ndarray
data
alphas: list-like of size k (default: (0.25, 0.50, 0.75))
probabilities (between 0. and 1.)
Returns
-------
a (k,) or (d, k) ndarray containing the alpha-quantiles
"""
if len(x.shape) == 1:
return _wquantiles(W, x, alphas=alphas)
elif len(x.shape) == 2:
return np.array([_wquantiles(W, x[:, i], alphas=alphas)
for i in range(x.shape[1])]) | Quantiles for weighted data.
Parameters
----------
W: (N,) ndarray
normalised weights (weights are >=0 and sum to one)
x: (N,) or (N,d) ndarray
data
alphas: list-like of size k (default: (0.25, 0.50, 0.75))
probabilities (between 0. and 1.)
Returns
-------
a (k,) or (d, k) ndarray containing the alpha-quantiles | Below is the instruction that describes the task:
### Input:
Quantiles for weighted data.
Parameters
----------
W: (N,) ndarray
normalised weights (weights are >=0 and sum to one)
x: (N,) or (N,d) ndarray
data
alphas: list-like of size k (default: (0.25, 0.50, 0.75))
probabilities (between 0. and 1.)
Returns
-------
a (k,) or (d, k) ndarray containing the alpha-quantiles
### Response:
def wquantiles(W, x, alphas=(0.25, 0.50, 0.75)):
"""Quantiles for weighted data.
Parameters
----------
W: (N,) ndarray
normalised weights (weights are >=0 and sum to one)
x: (N,) or (N,d) ndarray
data
alphas: list-like of size k (default: (0.25, 0.50, 0.75))
probabilities (between 0. and 1.)
Returns
-------
a (k,) or (d, k) ndarray containing the alpha-quantiles
"""
if len(x.shape) == 1:
return _wquantiles(W, x, alphas=alphas)
elif len(x.shape) == 2:
return np.array([_wquantiles(W, x[:, i], alphas=alphas)
for i in range(x.shape[1])]) |
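For intuition, a self-contained sketch of one weighted quantile computed from the weighted empirical CDF, which is the kind of calculation the _wquantiles helper (not shown here) is assumed to perform; with equal, normalised weights it should land close to the unweighted quantile.

import numpy as np

def weighted_quantile(W, x, alpha):
    """Smallest x whose cumulative weight reaches alpha (inverse weighted empirical CDF)."""
    order = np.argsort(x)
    cumw = np.cumsum(W[order])
    return x[order][np.searchsorted(cumw, alpha)]

rng = np.random.default_rng(0)
x = rng.normal(size=1000)
W = np.full(1000, 1.0 / 1000)           # equal, normalised weights
print(weighted_quantile(W, x, 0.5))     # close to the unweighted median below
print(np.quantile(x, 0.5))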
def cli(env, identifier, wait):
"""Check if a virtual server is ready."""
vsi = SoftLayer.VSManager(env.client)
vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS')
ready = vsi.wait_for_ready(vs_id, wait)
if ready:
env.fout("READY")
else:
raise exceptions.CLIAbort("Instance %s not ready" % vs_id) | Check if a virtual server is ready. | Below is the instruction that describes the task:
### Input:
Check if a virtual server is ready.
### Response:
def cli(env, identifier, wait):
"""Check if a virtual server is ready."""
vsi = SoftLayer.VSManager(env.client)
vs_id = helpers.resolve_id(vsi.resolve_ids, identifier, 'VS')
ready = vsi.wait_for_ready(vs_id, wait)
if ready:
env.fout("READY")
else:
raise exceptions.CLIAbort("Instance %s not ready" % vs_id) |
def get(cls, pid_value, pid_type=None, **kwargs):
"""Get a persistent identifier for this provider.
:param pid_type: Persistent identifier type. (Default: configured
:attr:`invenio_pidstore.providers.base.BaseProvider.pid_type`)
:param pid_value: Persistent identifier value.
:param kwargs: See
:meth:`invenio_pidstore.providers.base.BaseProvider` required
initialization properties.
:returns: A :class:`invenio_pidstore.providers.base.BaseProvider`
instance.
"""
return cls(
PersistentIdentifier.get(pid_type or cls.pid_type, pid_value,
pid_provider=cls.pid_provider),
**kwargs) | Get a persistent identifier for this provider.
:param pid_type: Persistent identifier type. (Default: configured
:attr:`invenio_pidstore.providers.base.BaseProvider.pid_type`)
:param pid_value: Persistent identifier value.
:param kwargs: See
:meth:`invenio_pidstore.providers.base.BaseProvider` required
initialization properties.
:returns: A :class:`invenio_pidstore.providers.base.BaseProvider`
instance. | Below is the instruction that describes the task:
### Input:
Get a persistent identifier for this provider.
:param pid_type: Persistent identifier type. (Default: configured
:attr:`invenio_pidstore.providers.base.BaseProvider.pid_type`)
:param pid_value: Persistent identifier value.
:param kwargs: See
:meth:`invenio_pidstore.providers.base.BaseProvider` required
initialization properties.
:returns: A :class:`invenio_pidstore.providers.base.BaseProvider`
instance.
### Response:
def get(cls, pid_value, pid_type=None, **kwargs):
"""Get a persistent identifier for this provider.
:param pid_type: Persistent identifier type. (Default: configured
:attr:`invenio_pidstore.providers.base.BaseProvider.pid_type`)
:param pid_value: Persistent identifier value.
:param kwargs: See
:meth:`invenio_pidstore.providers.base.BaseProvider` required
initialization properties.
:returns: A :class:`invenio_pidstore.providers.base.BaseProvider`
instance.
"""
return cls(
PersistentIdentifier.get(pid_type or cls.pid_type, pid_value,
pid_provider=cls.pid_provider),
**kwargs) |
def parse(self, m, prefix=None):
"""Parse messages sent by the 'buildbot-cvs-mail' program.
"""
# The mail is sent from the person doing the checkin. Assume that the
# local username is enough to identify them (this assumes a one-server
# cvs-over-rsh environment rather than the server-dirs-shared-over-NFS
# model)
name, addr = parseaddr(m["from"])
if not addr:
# no From means this message isn't from buildbot-cvs-mail
return None
at = addr.find("@")
if at == -1:
author = addr # might still be useful
else:
author = addr[:at]
author = util.bytes2unicode(author, encoding="ascii")
# CVS accepts RFC822 dates. buildbot-cvs-mail adds the date as
# part of the mail header, so use that.
# This assumes cvs is being accessed via ssh or pserver, so the time
# will be the CVS server's time.
# calculate a "revision" based on that timestamp, or the current time
# if we're unable to parse the date.
log.msg('Processing CVS mail')
dateTuple = parsedate_tz(m["date"])
if dateTuple is None:
when = util.now()
else:
when = mktime_tz(dateTuple)
theTime = datetime.datetime.utcfromtimestamp(float(when))
rev = theTime.strftime('%Y-%m-%d %H:%M:%S')
catRE = re.compile(r'^Category:\s*(\S.*)')
cvsRE = re.compile(r'^CVSROOT:\s*(\S.*)')
cvsmodeRE = re.compile(r'^Cvsmode:\s*(\S.*)')
filesRE = re.compile(r'^Files:\s*(\S.*)')
modRE = re.compile(r'^Module:\s*(\S.*)')
pathRE = re.compile(r'^Path:\s*(\S.*)')
projRE = re.compile(r'^Project:\s*(\S.*)')
singleFileRE = re.compile(r'(.*) (NONE|\d(\.|\d)+) (NONE|\d(\.|\d)+)')
tagRE = re.compile(r'^\s+Tag:\s*(\S.*)')
updateRE = re.compile(r'^Update of:\s*(\S.*)')
comments = ""
branch = None
cvsroot = None
fileList = None
files = []
isdir = 0
path = None
project = None
lines = list(body_line_iterator(m))
while lines:
line = lines.pop(0)
m = catRE.match(line)
if m:
category = m.group(1)
continue
m = cvsRE.match(line)
if m:
cvsroot = m.group(1)
continue
m = cvsmodeRE.match(line)
if m:
cvsmode = m.group(1)
continue
m = filesRE.match(line)
if m:
fileList = m.group(1)
continue
m = modRE.match(line)
if m:
# We don't actually use this
# module = m.group(1)
continue
m = pathRE.match(line)
if m:
path = m.group(1)
continue
m = projRE.match(line)
if m:
project = m.group(1)
continue
m = tagRE.match(line)
if m:
branch = m.group(1)
continue
m = updateRE.match(line)
if m:
# We don't actually use this
# updateof = m.group(1)
continue
if line == "Log Message:\n":
break
# CVS 1.11 lists files as:
# repo/path file,old-version,new-version file2,old-version,new-version
# Version 1.12 lists files as:
# file1 old-version new-version file2 old-version new-version
#
# files consists of tuples of 'file-name old-version new-version'
# The versions are either dotted-decimal version numbers, ie 1.1
# or NONE. New files are of the form 'NONE NUMBER', while removed
# files are 'NUMBER NONE'. 'NONE' is a literal string
# Parsing this instead of files list in 'Added File:' etc
# makes it possible to handle files with embedded spaces, though
# it could fail if the filename was 'bad 1.1 1.2'
# For cvs version 1.11, we expect
# my_module new_file.c,NONE,1.1
# my_module removed.txt,1.2,NONE
# my_module modified_file.c,1.1,1.2
# While cvs version 1.12 gives us
# new_file.c NONE 1.1
# removed.txt 1.2 NONE
# modified_file.c 1.1,1.2
if fileList is None:
log.msg('CVSMaildirSource Mail with no files. Ignoring')
return None # We don't have any files. Email not from CVS
if cvsmode == '1.11':
# Please, no repo paths with spaces!
m = re.search('([^ ]*) ', fileList)
if m:
path = m.group(1)
else:
log.msg(
'CVSMaildirSource can\'t get path from file list. Ignoring mail')
return
fileList = fileList[len(path):].strip()
singleFileRE = re.compile(
r'(.+?),(NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+)),(NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+))(?: |$)')
elif cvsmode == '1.12':
singleFileRE = re.compile(
r'(.+?) (NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+)) (NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+))(?: |$)')
if path is None:
raise ValueError(
'CVSMaildirSource cvs 1.12 require path. Check cvs loginfo config')
else:
raise ValueError(
'Expected cvsmode 1.11 or 1.12. got: %s' % cvsmode)
log.msg("CVSMaildirSource processing filelist: %s" % fileList)
while(fileList):
m = singleFileRE.match(fileList)
if m:
curFile = path + '/' + m.group(1)
files.append(curFile)
fileList = fileList[m.end():]
else:
log.msg('CVSMaildirSource no files matched regex. Ignoring')
return None # bail - we couldn't parse the files that changed
# Now get comments
while lines:
line = lines.pop(0)
comments += line
comments = comments.rstrip() + "\n"
if comments == '\n':
comments = None
return ('cvs', dict(author=author, files=files, comments=comments,
isdir=isdir, when=when, branch=branch,
revision=rev, category=category,
repository=cvsroot, project=project,
properties=self.properties)) | Parse messages sent by the 'buildbot-cvs-mail' program. | Below is the instruction that describes the task:
### Input:
Parse messages sent by the 'buildbot-cvs-mail' program.
### Response:
def parse(self, m, prefix=None):
"""Parse messages sent by the 'buildbot-cvs-mail' program.
"""
# The mail is sent from the person doing the checkin. Assume that the
# local username is enough to identify them (this assumes a one-server
# cvs-over-rsh environment rather than the server-dirs-shared-over-NFS
# model)
name, addr = parseaddr(m["from"])
if not addr:
# no From means this message isn't from buildbot-cvs-mail
return None
at = addr.find("@")
if at == -1:
author = addr # might still be useful
else:
author = addr[:at]
author = util.bytes2unicode(author, encoding="ascii")
# CVS accepts RFC822 dates. buildbot-cvs-mail adds the date as
# part of the mail header, so use that.
# This assumes cvs is being accessed via ssh or pserver, so the time
# will be the CVS server's time.
# calculate a "revision" based on that timestamp, or the current time
# if we're unable to parse the date.
log.msg('Processing CVS mail')
dateTuple = parsedate_tz(m["date"])
if dateTuple is None:
when = util.now()
else:
when = mktime_tz(dateTuple)
theTime = datetime.datetime.utcfromtimestamp(float(when))
rev = theTime.strftime('%Y-%m-%d %H:%M:%S')
catRE = re.compile(r'^Category:\s*(\S.*)')
cvsRE = re.compile(r'^CVSROOT:\s*(\S.*)')
cvsmodeRE = re.compile(r'^Cvsmode:\s*(\S.*)')
filesRE = re.compile(r'^Files:\s*(\S.*)')
modRE = re.compile(r'^Module:\s*(\S.*)')
pathRE = re.compile(r'^Path:\s*(\S.*)')
projRE = re.compile(r'^Project:\s*(\S.*)')
singleFileRE = re.compile(r'(.*) (NONE|\d(\.|\d)+) (NONE|\d(\.|\d)+)')
tagRE = re.compile(r'^\s+Tag:\s*(\S.*)')
updateRE = re.compile(r'^Update of:\s*(\S.*)')
comments = ""
branch = None
cvsroot = None
fileList = None
files = []
isdir = 0
path = None
project = None
lines = list(body_line_iterator(m))
while lines:
line = lines.pop(0)
m = catRE.match(line)
if m:
category = m.group(1)
continue
m = cvsRE.match(line)
if m:
cvsroot = m.group(1)
continue
m = cvsmodeRE.match(line)
if m:
cvsmode = m.group(1)
continue
m = filesRE.match(line)
if m:
fileList = m.group(1)
continue
m = modRE.match(line)
if m:
# We don't actually use this
# module = m.group(1)
continue
m = pathRE.match(line)
if m:
path = m.group(1)
continue
m = projRE.match(line)
if m:
project = m.group(1)
continue
m = tagRE.match(line)
if m:
branch = m.group(1)
continue
m = updateRE.match(line)
if m:
# We don't actually use this
# updateof = m.group(1)
continue
if line == "Log Message:\n":
break
# CVS 1.11 lists files as:
# repo/path file,old-version,new-version file2,old-version,new-version
# Version 1.12 lists files as:
# file1 old-version new-version file2 old-version new-version
#
# files consists of tuples of 'file-name old-version new-version'
# The versions are either dotted-decimal version numbers, ie 1.1
# or NONE. New files are of the form 'NONE NUMBER', while removed
# files are 'NUMBER NONE'. 'NONE' is a literal string
# Parsing this instead of files list in 'Added File:' etc
# makes it possible to handle files with embedded spaces, though
# it could fail if the filename was 'bad 1.1 1.2'
# For cvs version 1.11, we expect
# my_module new_file.c,NONE,1.1
# my_module removed.txt,1.2,NONE
# my_module modified_file.c,1.1,1.2
# While cvs version 1.12 gives us
# new_file.c NONE 1.1
# removed.txt 1.2 NONE
# modified_file.c 1.1,1.2
if fileList is None:
log.msg('CVSMaildirSource Mail with no files. Ignoring')
return None # We don't have any files. Email not from CVS
if cvsmode == '1.11':
# Please, no repo paths with spaces!
m = re.search('([^ ]*) ', fileList)
if m:
path = m.group(1)
else:
log.msg(
'CVSMaildirSource can\'t get path from file list. Ignoring mail')
return
fileList = fileList[len(path):].strip()
singleFileRE = re.compile(
r'(.+?),(NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+)),(NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+))(?: |$)')
elif cvsmode == '1.12':
singleFileRE = re.compile(
r'(.+?) (NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+)) (NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+))(?: |$)')
if path is None:
raise ValueError(
'CVSMaildirSource cvs 1.12 require path. Check cvs loginfo config')
else:
raise ValueError(
'Expected cvsmode 1.11 or 1.12. got: %s' % cvsmode)
log.msg("CVSMaildirSource processing filelist: %s" % fileList)
while(fileList):
m = singleFileRE.match(fileList)
if m:
curFile = path + '/' + m.group(1)
files.append(curFile)
fileList = fileList[m.end():]
else:
log.msg('CVSMaildirSource no files matched regex. Ignoring')
return None # bail - we couldn't parse the files that changed
# Now get comments
while lines:
line = lines.pop(0)
comments += line
comments = comments.rstrip() + "\n"
if comments == '\n':
comments = None
return ('cvs', dict(author=author, files=files, comments=comments,
isdir=isdir, when=when, branch=branch,
revision=rev, category=category,
repository=cvsroot, project=project,
properties=self.properties)) |
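The trickiest part of the parser above is the file-list regex; the snippet below exercises the cvs 1.12 pattern from the code on a made-up "Files:" line to show how file names and version pairs are peeled off one match at a time.

import re

# Same pattern the parser compiles for cvsmode == '1.12'.
singleFileRE = re.compile(
    r'(.+?) (NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+)) (NONE|(?:\d+\.(?:\d+\.\d+\.)*\d+))(?: |$)')

fileList = 'new_file.c NONE 1.1 removed.txt 1.2 NONE modified_file.c 1.1 1.2'
path = 'my_module'   # in the real parser this comes from the mail's Path: header
files = []
while fileList:
    m = singleFileRE.match(fileList)
    if not m:
        break
    files.append(path + '/' + m.group(1))
    fileList = fileList[m.end():]
print(files)
# ['my_module/new_file.c', 'my_module/removed.txt', 'my_module/modified_file.c']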
def get_lambda_function(lambda_function, flags=FLAGS.ALL, **conn):
"""Fully describes a lambda function.
Args:
lambda_function: Name, ARN, or dictionary of lambda function. If dictionary, should likely be the return value from list_functions. At a minimum, must contain a key titled 'FunctionName'.
flags: Flags describing which sections should be included in the return value. Default ALL
Returns:
dictionary describing the requested lambda function.
"""
# Python 2 and 3 support:
try:
basestring
except NameError as _:
basestring = str
# If STR is passed in, determine if it's a name or ARN and build a dict.
if isinstance(lambda_function, basestring):
lambda_function_arn = ARN(lambda_function)
if lambda_function_arn.error:
lambda_function = dict(FunctionName=lambda_function)
else:
lambda_function = dict(FunctionName=lambda_function_arn.name, FunctionArn=lambda_function)
# If an ARN is available, override the account_number/region from the conn dict.
if 'FunctionArn' in lambda_function:
lambda_function_arn = ARN(lambda_function['FunctionArn'])
if not lambda_function_arn.error:
if lambda_function_arn.account_number:
conn['account_number'] = lambda_function_arn.account_number
if lambda_function_arn.region:
conn['region'] = lambda_function_arn.region
return registry.build_out(flags, start_with=lambda_function, pass_datastructure=True, **conn) | Fully describes a lambda function.
Args:
lambda_function: Name, ARN, or dictionary of lambda function. If dictionary, should likely be the return value from list_functions. At a minimum, must contain a key titled 'FunctionName'.
flags: Flags describing which sections should be included in the return value. Default ALL
Returns:
dictionary describing the requested lambda function. | Below is the instruction that describes the task:
### Input:
Fully describes a lambda function.
Args:
lambda_function: Name, ARN, or dictionary of lambda function. If dictionary, should likely be the return value from list_functions. At a minimum, must contain a key titled 'FunctionName'.
flags: Flags describing which sections should be included in the return value. Default ALL
Returns:
dictionary describing the requested lambda function.
### Response:
def get_lambda_function(lambda_function, flags=FLAGS.ALL, **conn):
"""Fully describes a lambda function.
Args:
lambda_function: Name, ARN, or dictionary of lambda function. If dictionary, should likely be the return value from list_functions. At a minimum, must contain a key titled 'FunctionName'.
flags: Flags describing which sections should be included in the return value. Default ALL
Returns:
dictionary describing the requested lambda function.
"""
# Python 2 and 3 support:
try:
basestring
except NameError as _:
basestring = str
# If STR is passed in, determine if it's a name or ARN and build a dict.
if isinstance(lambda_function, basestring):
lambda_function_arn = ARN(lambda_function)
if lambda_function_arn.error:
lambda_function = dict(FunctionName=lambda_function)
else:
lambda_function = dict(FunctionName=lambda_function_arn.name, FunctionArn=lambda_function)
# If an ARN is available, override the account_number/region from the conn dict.
if 'FunctionArn' in lambda_function:
lambda_function_arn = ARN(lambda_function['FunctionArn'])
if not lambda_function_arn.error:
if lambda_function_arn.account_number:
conn['account_number'] = lambda_function_arn.account_number
if lambda_function_arn.region:
conn['region'] = lambda_function_arn.region
return registry.build_out(flags, start_with=lambda_function, pass_datastructure=True, **conn) |
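A sketch of just the name-vs-ARN normalisation step at the top of get_lambda_function; FakeARN below is a deliberately simplified stand-in for the library's ARN helper, not its real implementation.

class FakeARN:
    """Toy ARN parser: accepts 'arn:...' strings with at least seven ':'-separated parts."""
    def __init__(self, candidate):
        parts = candidate.split(':')
        self.error = not (candidate.startswith('arn:') and len(parts) >= 7)
        self.name = parts[-1] if not self.error else None

def normalize(lambda_function):
    if isinstance(lambda_function, str):
        arn = FakeARN(lambda_function)
        if arn.error:
            return dict(FunctionName=lambda_function)
        return dict(FunctionName=arn.name, FunctionArn=lambda_function)
    return lambda_function

print(normalize('my-function'))
print(normalize('arn:aws:lambda:us-east-1:123456789012:function:my-function'))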
def label_image(
self,
parent,
basic_config,
feature,
image_classification_config=None,
bounding_poly_config=None,
polyline_config=None,
segmentation_config=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Starts a labeling task for image. The type of image labeling task is
configured by feature in the request.
Example:
>>> from google.cloud import datalabeling_v1beta1
>>> from google.cloud.datalabeling_v1beta1 import enums
>>>
>>> client = datalabeling_v1beta1.DataLabelingServiceClient()
>>>
>>> parent = client.dataset_path('[PROJECT]', '[DATASET]')
>>>
>>> # TODO: Initialize `basic_config`:
>>> basic_config = {}
>>>
>>> # TODO: Initialize `feature`:
>>> feature = enums.LabelImageRequest.Feature.FEATURE_UNSPECIFIED
>>>
>>> response = client.label_image(parent, basic_config, feature)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. Name of the dataset to request labeling task, format:
projects/{project\_id}/datasets/{dataset\_id}
basic_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig]): Required. Basic human annotation config.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig`
feature (~google.cloud.datalabeling_v1beta1.types.Feature): Required. The type of image labeling task.
image_classification_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.ImageClassificationConfig]): Configuration for image classification task. One of
image\_classification\_config, bounding\_poly\_config, polyline\_config
and segmentation\_config is required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.ImageClassificationConfig`
bounding_poly_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.BoundingPolyConfig]): Configuration for bounding box and bounding poly task. One of
image\_classification\_config, bounding\_poly\_config, polyline\_config
and segmentation\_config is required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.BoundingPolyConfig`
polyline_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.PolylineConfig]): Configuration for polyline task. One of image\_classification\_config,
bounding\_poly\_config, polyline\_config and segmentation\_config is
required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.PolylineConfig`
segmentation_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.SegmentationConfig]): Configuration for segmentation task. One of
image\_classification\_config, bounding\_poly\_config, polyline\_config
and segmentation\_config is required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.SegmentationConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.datalabeling_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "label_image" not in self._inner_api_calls:
self._inner_api_calls[
"label_image"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.label_image,
default_retry=self._method_configs["LabelImage"].retry,
default_timeout=self._method_configs["LabelImage"].timeout,
client_info=self._client_info,
)
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
google.api_core.protobuf_helpers.check_oneof(
image_classification_config=image_classification_config,
bounding_poly_config=bounding_poly_config,
polyline_config=polyline_config,
segmentation_config=segmentation_config,
)
request = data_labeling_service_pb2.LabelImageRequest(
parent=parent,
basic_config=basic_config,
feature=feature,
image_classification_config=image_classification_config,
bounding_poly_config=bounding_poly_config,
polyline_config=polyline_config,
segmentation_config=segmentation_config,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["label_image"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
dataset_pb2.AnnotatedDataset,
metadata_type=proto_operations_pb2.LabelOperationMetadata,
) | Starts a labeling task for image. The type of image labeling task is
configured by feature in the request.
Example:
>>> from google.cloud import datalabeling_v1beta1
>>> from google.cloud.datalabeling_v1beta1 import enums
>>>
>>> client = datalabeling_v1beta1.DataLabelingServiceClient()
>>>
>>> parent = client.dataset_path('[PROJECT]', '[DATASET]')
>>>
>>> # TODO: Initialize `basic_config`:
>>> basic_config = {}
>>>
>>> # TODO: Initialize `feature`:
>>> feature = enums.LabelImageRequest.Feature.FEATURE_UNSPECIFIED
>>>
>>> response = client.label_image(parent, basic_config, feature)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. Name of the dataset to request labeling task, format:
projects/{project\_id}/datasets/{dataset\_id}
basic_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig]): Required. Basic human annotation config.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig`
feature (~google.cloud.datalabeling_v1beta1.types.Feature): Required. The type of image labeling task.
image_classification_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.ImageClassificationConfig]): Configuration for image classification task. One of
image\_classification\_config, bounding\_poly\_config, polyline\_config
and segmentation\_config is required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.ImageClassificationConfig`
bounding_poly_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.BoundingPolyConfig]): Configuration for bounding box and bounding poly task. One of
image\_classification\_config, bounding\_poly\_config, polyline\_config
and segmentation\_config is required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.BoundingPolyConfig`
polyline_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.PolylineConfig]): Configuration for polyline task. One of image\_classification\_config,
bounding\_poly\_config, polyline\_config and segmentation\_config is
required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.PolylineConfig`
segmentation_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.SegmentationConfig]): Configuration for segmentation task. One of
image\_classification\_config, bounding\_poly\_config, polyline\_config
and segmentation\_config is required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.SegmentationConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.datalabeling_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. | Below is the instruction that describes the task:
### Input:
Starts a labeling task for image. The type of image labeling task is
configured by feature in the request.
Example:
>>> from google.cloud import datalabeling_v1beta1
>>> from google.cloud.datalabeling_v1beta1 import enums
>>>
>>> client = datalabeling_v1beta1.DataLabelingServiceClient()
>>>
>>> parent = client.dataset_path('[PROJECT]', '[DATASET]')
>>>
>>> # TODO: Initialize `basic_config`:
>>> basic_config = {}
>>>
>>> # TODO: Initialize `feature`:
>>> feature = enums.LabelImageRequest.Feature.FEATURE_UNSPECIFIED
>>>
>>> response = client.label_image(parent, basic_config, feature)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. Name of the dataset to request labeling task, format:
projects/{project\_id}/datasets/{dataset\_id}
basic_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig]): Required. Basic human annotation config.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig`
feature (~google.cloud.datalabeling_v1beta1.types.Feature): Required. The type of image labeling task.
image_classification_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.ImageClassificationConfig]): Configuration for image classification task. One of
image\_classification\_config, bounding\_poly\_config, polyline\_config
and segmentation\_config is required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.ImageClassificationConfig`
bounding_poly_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.BoundingPolyConfig]): Configuration for bounding box and bounding poly task. One of
image\_classification\_config, bounding\_poly\_config, polyline\_config
and segmentation\_config is required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.BoundingPolyConfig`
polyline_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.PolylineConfig]): Configuration for polyline task. One of image\_classification\_config,
bounding\_poly\_config, polyline\_config and segmentation\_config is
required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.PolylineConfig`
segmentation_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.SegmentationConfig]): Configuration for segmentation task. One of
image\_classification\_config, bounding\_poly\_config, polyline\_config
and segmentation\_config is required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.SegmentationConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.datalabeling_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
### Response:
def label_image(
self,
parent,
basic_config,
feature,
image_classification_config=None,
bounding_poly_config=None,
polyline_config=None,
segmentation_config=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Starts a labeling task for image. The type of image labeling task is
configured by feature in the request.
Example:
>>> from google.cloud import datalabeling_v1beta1
>>> from google.cloud.datalabeling_v1beta1 import enums
>>>
>>> client = datalabeling_v1beta1.DataLabelingServiceClient()
>>>
>>> parent = client.dataset_path('[PROJECT]', '[DATASET]')
>>>
>>> # TODO: Initialize `basic_config`:
>>> basic_config = {}
>>>
>>> # TODO: Initialize `feature`:
>>> feature = enums.LabelImageRequest.Feature.FEATURE_UNSPECIFIED
>>>
>>> response = client.label_image(parent, basic_config, feature)
>>>
>>> def callback(operation_future):
... # Handle result.
... result = operation_future.result()
>>>
>>> response.add_done_callback(callback)
>>>
>>> # Handle metadata.
>>> metadata = response.metadata()
Args:
parent (str): Required. Name of the dataset to request labeling task, format:
projects/{project\_id}/datasets/{dataset\_id}
basic_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig]): Required. Basic human annotation config.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.HumanAnnotationConfig`
feature (~google.cloud.datalabeling_v1beta1.types.Feature): Required. The type of image labeling task.
image_classification_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.ImageClassificationConfig]): Configuration for image classification task. One of
image\_classification\_config, bounding\_poly\_config, polyline\_config
and segmentation\_config is required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.ImageClassificationConfig`
bounding_poly_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.BoundingPolyConfig]): Configuration for bounding box and bounding poly task. One of
image\_classification\_config, bounding\_poly\_config, polyline\_config
and segmentation\_config is required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.BoundingPolyConfig`
polyline_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.PolylineConfig]): Configuration for polyline task. One of image\_classification\_config,
bounding\_poly\_config, polyline\_config and segmentation\_config is
required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.PolylineConfig`
segmentation_config (Union[dict, ~google.cloud.datalabeling_v1beta1.types.SegmentationConfig]): Configuration for segmentation task. One of
image\_classification\_config, bounding\_poly\_config, polyline\_config
and segmentation\_config is required.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.datalabeling_v1beta1.types.SegmentationConfig`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.datalabeling_v1beta1.types._OperationFuture` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "label_image" not in self._inner_api_calls:
self._inner_api_calls[
"label_image"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.label_image,
default_retry=self._method_configs["LabelImage"].retry,
default_timeout=self._method_configs["LabelImage"].timeout,
client_info=self._client_info,
)
# Sanity check: We have some fields which are mutually exclusive;
# raise ValueError if more than one is sent.
google.api_core.protobuf_helpers.check_oneof(
image_classification_config=image_classification_config,
bounding_poly_config=bounding_poly_config,
polyline_config=polyline_config,
segmentation_config=segmentation_config,
)
request = data_labeling_service_pb2.LabelImageRequest(
parent=parent,
basic_config=basic_config,
feature=feature,
image_classification_config=image_classification_config,
bounding_poly_config=bounding_poly_config,
polyline_config=polyline_config,
segmentation_config=segmentation_config,
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("parent", parent)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
operation = self._inner_api_calls["label_image"](
request, retry=retry, timeout=timeout, metadata=metadata
)
return google.api_core.operation.from_gapic(
operation,
self.transport._operations_client,
dataset_pb2.AnnotatedDataset,
metadata_type=proto_operations_pb2.LabelOperationMetadata,
) |
def comply(self, path):
"""Ensures the contents and the permissions of the file.
:param path: the path to correct
"""
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
self.pre_write()
render_and_write(self.template_dir, path, self.context())
utils.ensure_permissions(path, self.user, self.group, self.mode)
self.run_service_actions()
self.save_checksum(path)
self.post_write() | Ensures the contents and the permissions of the file.
:param path: the path to correct | Below is the instruction that describes the task:
### Input:
Ensures the contents and the permissions of the file.
:param path: the path to correct
### Response:
def comply(self, path):
"""Ensures the contents and the permissions of the file.
:param path: the path to correct
"""
dirname = os.path.dirname(path)
if not os.path.exists(dirname):
os.makedirs(dirname)
self.pre_write()
render_and_write(self.template_dir, path, self.context())
utils.ensure_permissions(path, self.user, self.group, self.mode)
self.run_service_actions()
self.save_checksum(path)
self.post_write() |
def confd_state_loaded_data_models_data_model_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
loaded_data_models = ET.SubElement(confd_state, "loaded-data-models")
data_model = ET.SubElement(loaded_data_models, "data-model")
name = ET.SubElement(data_model, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def confd_state_loaded_data_models_data_model_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state", xmlns="http://tail-f.com/yang/confd-monitoring")
loaded_data_models = ET.SubElement(confd_state, "loaded-data-models")
data_model = ET.SubElement(loaded_data_models, "data-model")
name = ET.SubElement(data_model, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
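For context, a self-contained sketch of the ElementTree pattern the generated method relies on; the element names mirror the code above, while the model name and the final print are illustrative only:

import xml.etree.ElementTree as ET

config = ET.Element("config")
confd_state = ET.SubElement(config, "confd-state",
                            xmlns="http://tail-f.com/yang/confd-monitoring")
loaded = ET.SubElement(confd_state, "loaded-data-models")
data_model = ET.SubElement(loaded, "data-model")
ET.SubElement(data_model, "name").text = "example-model"
# Serialize to inspect the XML that would be handed to the callback.
print(ET.tostring(config, encoding="unicode"))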
def legendLabel(self):
"""
Provide a textual description of the feature and its qualifiers to be
used as a label in a plot legend.
@return: A C{str} description of the feature.
"""
excludedQualifiers = set((
'codon_start', 'db_xref', 'protein_id', 'region_name',
'ribosomal_slippage', 'rpt_type', 'translation', 'transl_except',
'transl_table')
)
maxValueLength = 30
result = []
if self.feature.qualifiers:
for qualifier in sorted(self.feature.qualifiers.keys()):
if qualifier not in excludedQualifiers:
value = ', '.join(self.feature.qualifiers[qualifier])
if qualifier == 'site_type' and value == 'other':
continue
if len(value) > maxValueLength:
value = value[:maxValueLength - 3] + '...'
result.append('%s: %s' % (qualifier, value))
return '%d-%d %s%s.%s' % (
int(self.feature.location.start),
int(self.feature.location.end),
self.feature.type,
' (subfeature)' if self.subfeature else '',
' ' + ', '.join(result) if result else '') | Provide a textual description of the feature and its qualifiers to be
used as a label in a plot legend.
@return: A C{str} description of the feature. | Below is the instruction that describes the task:
### Input:
Provide a textual description of the feature and its qualifiers to be
used as a label in a plot legend.
@return: A C{str} description of the feature.
### Response:
def legendLabel(self):
"""
Provide a textual description of the feature and its qualifiers to be
used as a label in a plot legend.
@return: A C{str} description of the feature.
"""
excludedQualifiers = set((
'codon_start', 'db_xref', 'protein_id', 'region_name',
'ribosomal_slippage', 'rpt_type', 'translation', 'transl_except',
'transl_table')
)
maxValueLength = 30
result = []
if self.feature.qualifiers:
for qualifier in sorted(self.feature.qualifiers.keys()):
if qualifier not in excludedQualifiers:
value = ', '.join(self.feature.qualifiers[qualifier])
if qualifier == 'site_type' and value == 'other':
continue
if len(value) > maxValueLength:
value = value[:maxValueLength - 3] + '...'
result.append('%s: %s' % (qualifier, value))
return '%d-%d %s%s.%s' % (
int(self.feature.location.start),
int(self.feature.location.end),
self.feature.type,
' (subfeature)' if self.subfeature else '',
' ' + ', '.join(result) if result else '') |
def run(self, target, payload, instance=None, hook_id=None, **kwargs):
"""
target: the url to receive the payload.
payload: a python primitive data structure
instance: a possibly null "trigger" instance
hook: the defining Hook object (useful for removing)
"""
self.post_data(target, payload, hook_id) | target: the url to receive the payload.
payload: a python primitive data structure
instance: a possibly null "trigger" instance
hook: the defining Hook object (useful for removing) | Below is the instruction that describes the task:
### Input:
target: the url to receive the payload.
payload: a python primitive data structure
instance: a possibly null "trigger" instance
hook: the defining Hook object (useful for removing)
### Response:
def run(self, target, payload, instance=None, hook_id=None, **kwargs):
"""
target: the url to receive the payload.
payload: a python primitive data structure
instance: a possibly null "trigger" instance
hook: the defining Hook object (useful for removing)
"""
self.post_data(target, payload, hook_id) |
def create_action(parent, text, shortcut=None, icon=None, tip=None,
toggled=None, triggered=None, data=None, menurole=None,
context=Qt.WindowShortcut):
"""Create a QAction"""
action = SpyderAction(text, parent)
if triggered is not None:
action.triggered.connect(triggered)
if toggled is not None:
action.toggled.connect(toggled)
action.setCheckable(True)
if icon is not None:
if is_text_string(icon):
icon = get_icon(icon)
action.setIcon(icon)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if data is not None:
action.setData(to_qvariant(data))
if menurole is not None:
action.setMenuRole(menurole)
# Workaround for Mac because setting context=Qt.WidgetShortcut
# there doesn't have any effect
if sys.platform == 'darwin':
action._shown_shortcut = None
if context == Qt.WidgetShortcut:
if shortcut is not None:
action._shown_shortcut = shortcut
else:
# This is going to be filled by
# main.register_shortcut
action._shown_shortcut = 'missing'
else:
if shortcut is not None:
action.setShortcut(shortcut)
action.setShortcutContext(context)
else:
if shortcut is not None:
action.setShortcut(shortcut)
action.setShortcutContext(context)
return action | Create a QAction | Below is the instruction that describes the task:
### Input:
Create a QAction
### Response:
def create_action(parent, text, shortcut=None, icon=None, tip=None,
toggled=None, triggered=None, data=None, menurole=None,
context=Qt.WindowShortcut):
"""Create a QAction"""
action = SpyderAction(text, parent)
if triggered is not None:
action.triggered.connect(triggered)
if toggled is not None:
action.toggled.connect(toggled)
action.setCheckable(True)
if icon is not None:
if is_text_string(icon):
icon = get_icon(icon)
action.setIcon(icon)
if tip is not None:
action.setToolTip(tip)
action.setStatusTip(tip)
if data is not None:
action.setData(to_qvariant(data))
if menurole is not None:
action.setMenuRole(menurole)
# Workaround for Mac because setting context=Qt.WidgetShortcut
# there doesn't have any effect
if sys.platform == 'darwin':
action._shown_shortcut = None
if context == Qt.WidgetShortcut:
if shortcut is not None:
action._shown_shortcut = shortcut
else:
# This is going to be filled by
# main.register_shortcut
action._shown_shortcut = 'missing'
else:
if shortcut is not None:
action.setShortcut(shortcut)
action.setShortcutContext(context)
else:
if shortcut is not None:
action.setShortcut(shortcut)
action.setShortcutContext(context)
return action |
def reinstantiate_endpoints(self, endpoint=None):
"""
This will re-instantiate the endpoints with the connection this time
:param endpoint: Endpoint object to instantiate the sub endpoint in.
:return: None
"""
endpoint = endpoint or self
for k, v in endpoint.__class__.__dict__.items():
if isinstance(v, Endpoint):
setattr(endpoint, k, v.__class__(self))
elif inspect.isclass(v) and issubclass(v, Endpoint):
setattr(endpoint, k, v(self)) | This will re-instantiate the endpoints with the connection this time
:param endpoint: Endpoint object to instantiate the sub endpoint in.
:return: None | Below is the instruction that describes the task:
### Input:
This will re-instantiate the endpoints with the connection this time
:param endpoint: Endpoint object to instantiate the sub endpoint in.
:return: None
### Response:
def reinstantiate_endpoints(self, endpoint=None):
"""
This will re-instantiate the endpoints with the connection this time
:param endpoint: Endpoint object to instantiate the sub endpoint in.
:return: None
"""
endpoint = endpoint or self
for k, v in endpoint.__class__.__dict__.items():
if isinstance(v, Endpoint):
setattr(endpoint, k, v.__class__(self))
elif inspect.isclass(v) and issubclass(v, Endpoint):
setattr(endpoint, k, v(self)) |
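A small self-contained sketch of the introspection idiom above, using dummy Endpoint/Connection classes (illustrative names, not from the library): class attributes that are Endpoint instances or Endpoint subclasses are rebuilt with the live connection object.

import inspect

class Endpoint:
    def __init__(self, connection=None):
        self.connection = connection

class Users(Endpoint):
    pass

class Connection:
    users = Users()  # declared on the class without a connection

    def reinstantiate_endpoints(self):
        for name, value in self.__class__.__dict__.items():
            if isinstance(value, Endpoint):
                setattr(self, name, value.__class__(self))
            elif inspect.isclass(value) and issubclass(value, Endpoint):
                setattr(self, name, value(self))

conn = Connection()
conn.reinstantiate_endpoints()
print(conn.users.connection is conn)  # True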
def create(resource_path, previous_version=None, package='perch.migrations'):
"""Create a new migration"""
pkg, obj = resource_path.rsplit('.', 1)
module = importlib.import_module(pkg)
resource = getattr(module, obj)
version = uuid4().hex
target_module = importlib.import_module(package)
target_dir = os.path.dirname(target_module.__file__)
target_file = os.path.join(target_dir, resource.resource_type + '_' + version + '.py')
with open(target_file, 'w') as f:
f.write(MIGRATION_TEMPLATE.format(
resource_path=resource_path,
resource_type=resource.resource_type,
version=version,
previous_version=previous_version or '',
))
return target_file | Create a new migration | Below is the instruction that describes the task:
### Input:
Create a new migration
### Response:
def create(resource_path, previous_version=None, package='perch.migrations'):
"""Create a new migration"""
pkg, obj = resource_path.rsplit('.', 1)
module = importlib.import_module(pkg)
resource = getattr(module, obj)
version = uuid4().hex
target_module = importlib.import_module(package)
target_dir = os.path.dirname(target_module.__file__)
target_file = os.path.join(target_dir, resource.resource_type + '_' + version + '.py')
with open(target_file, 'w') as f:
f.write(MIGRATION_TEMPLATE.format(
resource_path=resource_path,
resource_type=resource.resource_type,
version=version,
previous_version=previous_version or '',
))
return target_file |
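The dotted-path resolution at the top of create() ('pkg.Obj' split with rsplit, then imported) is a common importlib idiom; a self-contained illustration using a standard-library path:

import importlib

def resolve(dotted_path):
    # Split "package.module.attr" into module path and attribute name.
    module_path, attr = dotted_path.rsplit(".", 1)
    return getattr(importlib.import_module(module_path), attr)

print(resolve("os.path.join"))  # <function join ...>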
def plot_axes(self):
"""axes"""
span = np.linspace(-1.0, 1.0, 2)
self.axes.plot(span, 0 * span, zs=0, zdir='z', label='X',
lw=self.frame_width, color=self.frame_color)
self.axes.plot(0 * span, span, zs=0, zdir='z', label='Y',
lw=self.frame_width, color=self.frame_color)
self.axes.plot(0 * span, span, zs=0, zdir='y', label='Z',
lw=self.frame_width, color=self.frame_color) | axes | Below is the instruction that describes the task:
### Input:
axes
### Response:
def plot_axes(self):
"""axes"""
span = np.linspace(-1.0, 1.0, 2)
self.axes.plot(span, 0 * span, zs=0, zdir='z', label='X',
lw=self.frame_width, color=self.frame_color)
self.axes.plot(0 * span, span, zs=0, zdir='z', label='Y',
lw=self.frame_width, color=self.frame_color)
self.axes.plot(0 * span, span, zs=0, zdir='y', label='Z',
lw=self.frame_width, color=self.frame_color) |
def read(self):
''' Read tagged doc from multiple files (sents, tokens, concepts, links, tags) '''
warnings.warn("Document.read() is deprecated and will be removed in near future.", DeprecationWarning)
with TxtReader.from_doc(self) as reader:
reader.read(self)
return self | Read tagged doc from multiple files (sents, tokens, concepts, links, tags) | Below is the instruction that describes the task:
### Input:
Read tagged doc from multiple files (sents, tokens, concepts, links, tags)
### Response:
def read(self):
''' Read tagged doc from multiple files (sents, tokens, concepts, links, tags) '''
warnings.warn("Document.read() is deprecated and will be removed in near future.", DeprecationWarning)
with TxtReader.from_doc(self) as reader:
reader.read(self)
return self |
def import_doc(self, file_uris, docsearch, current_doc=None):
"""
Import the specified PDF files
"""
doc = None
docs = []
pages = []
file_uris = [self.fs.safe(uri) for uri in file_uris]
imported = []
for file_uri in file_uris:
logger.info("Importing PDF from '%s'" % (file_uri))
idx = 0
for child in self.fs.recurse(file_uri):
gc.collect()
if not self.check_file_type(child):
continue
h = PdfDoc.hash_file(self.fs, child)
if docsearch.is_hash_in_index(h):
logger.info(
"Document %s already found in the index. Skipped",
child
)
continue
imported.append(child)
doc = PdfDoc(self.fs, docsearch.rootdir)
error = doc.import_pdf(child)
if error:
continue
docs.append(doc)
pages += [p for p in doc.pages]
idx += 1
return ImportResult(
imported_file_uris=imported,
select_doc=doc, new_docs=docs,
new_docs_pages=pages,
stats={
_("PDF"): len(docs),
_("Document(s)"): len(docs),
_("Page(s)"): sum([d.nb_pages for d in docs]),
},
) | Import the specified PDF files | Below is the instruction that describes the task:
### Input:
Import the specified PDF files
### Response:
def import_doc(self, file_uris, docsearch, current_doc=None):
"""
Import the specified PDF files
"""
doc = None
docs = []
pages = []
file_uris = [self.fs.safe(uri) for uri in file_uris]
imported = []
for file_uri in file_uris:
logger.info("Importing PDF from '%s'" % (file_uri))
idx = 0
for child in self.fs.recurse(file_uri):
gc.collect()
if not self.check_file_type(child):
continue
h = PdfDoc.hash_file(self.fs, child)
if docsearch.is_hash_in_index(h):
logger.info(
"Document %s already found in the index. Skipped",
child
)
continue
imported.append(child)
doc = PdfDoc(self.fs, docsearch.rootdir)
error = doc.import_pdf(child)
if error:
continue
docs.append(doc)
pages += [p for p in doc.pages]
idx += 1
return ImportResult(
imported_file_uris=imported,
select_doc=doc, new_docs=docs,
new_docs_pages=pages,
stats={
_("PDF"): len(docs),
_("Document(s)"): len(docs),
_("Page(s)"): sum([d.nb_pages for d in docs]),
},
) |
def read(self, symbol, as_of=None, date_range=None, from_version=None, allow_secondary=None, **kwargs):
"""
Read data for the named symbol. Returns a VersionedItem object with
a data and metadata element (as passed into write).
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or `int` or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
date_range: `arctic.date.DateRange`
DateRange to read data for. Applies to Pandas data, with a DateTime index
returns only the part of the data that falls in the DateRange.
allow_secondary : `bool` or `None`
Override the default behavior for allowing reads from secondary members of a cluster:
`None` : use the settings from the top-level `Arctic` object used to query this version store.
`True` : allow reads from secondary members
`False` : only allow reads from primary members
Returns
-------
VersionedItem namedtuple which contains a .data and .metadata element
"""
try:
read_preference = self._read_preference(allow_secondary)
_version = self._read_metadata(symbol, as_of=as_of, read_preference=read_preference)
return self._do_read(symbol, _version, from_version,
date_range=date_range, read_preference=read_preference, **kwargs)
except (OperationFailure, AutoReconnect) as e:
# Log the exception so we know how often this is happening
log_exception('read', e, 1)
# If we've failed to read from the secondary, then it's possible the
# secondary has lagged. In this case direct the query to the primary.
_version = mongo_retry(self._read_metadata)(symbol, as_of=as_of,
read_preference=ReadPreference.PRIMARY)
return self._do_read_retry(symbol, _version, from_version,
date_range=date_range,
read_preference=ReadPreference.PRIMARY,
**kwargs)
except Exception as e:
log_exception('read', e, 1)
raise | Read data for the named symbol. Returns a VersionedItem object with
a data and metadata element (as passed into write).
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or `int` or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
date_range: `arctic.date.DateRange`
DateRange to read data for. Applies to Pandas data, with a DateTime index
returns only the part of the data that falls in the DateRange.
allow_secondary : `bool` or `None`
Override the default behavior for allowing reads from secondary members of a cluster:
`None` : use the settings from the top-level `Arctic` object used to query this version store.
`True` : allow reads from secondary members
`False` : only allow reads from primary members
Returns
-------
VersionedItem namedtuple which contains a .data and .metadata element | Below is the instruction that describes the task:
### Input:
Read data for the named symbol. Returns a VersionedItem object with
a data and metadata element (as passed into write).
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or `int` or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
date_range: `arctic.date.DateRange`
DateRange to read data for. Applies to Pandas data, with a DateTime index
returns only the part of the data that falls in the DateRange.
allow_secondary : `bool` or `None`
Override the default behavior for allowing reads from secondary members of a cluster:
`None` : use the settings from the top-level `Arctic` object used to query this version store.
`True` : allow reads from secondary members
`False` : only allow reads from primary members
Returns
-------
VersionedItem namedtuple which contains a .data and .metadata element
### Response:
def read(self, symbol, as_of=None, date_range=None, from_version=None, allow_secondary=None, **kwargs):
"""
Read data for the named symbol. Returns a VersionedItem object with
a data and metadata element (as passed into write).
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `str` or `int` or `datetime.datetime`
Return the data as it was as_of the point in time.
`int` : specific version number
`str` : snapshot name which contains the version
`datetime.datetime` : the version of the data that existed as_of the requested point in time
date_range: `arctic.date.DateRange`
DateRange to read data for. Applies to Pandas data, with a DateTime index
returns only the part of the data that falls in the DateRange.
allow_secondary : `bool` or `None`
Override the default behavior for allowing reads from secondary members of a cluster:
`None` : use the settings from the top-level `Arctic` object used to query this version store.
`True` : allow reads from secondary members
`False` : only allow reads from primary members
Returns
-------
VersionedItem namedtuple which contains a .data and .metadata element
"""
try:
read_preference = self._read_preference(allow_secondary)
_version = self._read_metadata(symbol, as_of=as_of, read_preference=read_preference)
return self._do_read(symbol, _version, from_version,
date_range=date_range, read_preference=read_preference, **kwargs)
except (OperationFailure, AutoReconnect) as e:
# Log the exception so we know how often this is happening
log_exception('read', e, 1)
# If we've failed to read from the secondary, then it's possible the
# secondary has lagged. In this case direct the query to the primary.
_version = mongo_retry(self._read_metadata)(symbol, as_of=as_of,
read_preference=ReadPreference.PRIMARY)
return self._do_read_retry(symbol, _version, from_version,
date_range=date_range,
read_preference=ReadPreference.PRIMARY,
**kwargs)
except Exception as e:
log_exception('read', e, 1)
raise |
def parse_bool(value):
""" Convert a string to a boolean
>>> parse_bool(None)
False
>>> parse_bool("true")
True
>>> parse_bool("TRUE")
True
>>> parse_bool("yes")
True
>>> parse_bool("1")
True
>>> parse_bool("false")
False
>>> parse_bool("sqdf")
False
>>> parse_bool(False)
False
>>> parse_bool(True)
True
"""
if value is None:
return False
if value is True or value is False:
return value
boolean = str(value).strip().lower()
return boolean in ['true', 'yes', 'on', '1'] | Convert a string to a boolean
>>> parse_bool(None)
False
>>> parse_bool("true")
True
>>> parse_bool("TRUE")
True
>>> parse_bool("yes")
True
>>> parse_bool("1")
True
>>> parse_bool("false")
False
>>> parse_bool("sqdf")
False
>>> parse_bool(False)
False
>>> parse_bool(True)
True | Below is the instruction that describes the task:
### Input:
Convert a string to a boolean
>>> parse_bool(None)
False
>>> parse_bool("true")
True
>>> parse_bool("TRUE")
True
>>> parse_bool("yes")
True
>>> parse_bool("1")
True
>>> parse_bool("false")
False
>>> parse_bool("sqdf")
False
>>> parse_bool(False)
False
>>> parse_bool(True)
True
### Response:
def parse_bool(value):
""" Convert a string to a boolean
>>> parse_bool(None)
False
>>> parse_bool("true")
True
>>> parse_bool("TRUE")
True
>>> parse_bool("yes")
True
>>> parse_bool("1")
True
>>> parse_bool("false")
False
>>> parse_bool("sqdf")
False
>>> parse_bool(False)
False
>>> parse_bool(True)
True
"""
if value is None:
return False
if value is True or value is False:
return value
boolean = str(value).strip().lower()
return boolean in ['true', 'yes', 'on', '1'] |
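An illustrative use of such a helper for environment-variable parsing (the variable names are hypothetical, and parse_bool above is assumed to be in scope):

import os

DEBUG = parse_bool(os.environ.get("DEBUG"))             # unset -> False
VERBOSE = parse_bool(os.environ.get("VERBOSE", "yes"))  # "yes" -> True
print(DEBUG, VERBOSE)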
def get_fwhm_tag(expnum, ccd, prefix=None, version='p'):
"""
Get the FWHM from the VOSpace annotation.
@param expnum:
@param ccd:
@param prefix:
@param version:
@return:
"""
uri = get_uri(expnum, ccd, version, ext='fwhm', prefix=prefix)
if uri not in fwhm:
key = "fwhm_{:1s}{:02d}".format(version, int(ccd))
fwhm[uri] = get_tag(expnum, key)
return fwhm[uri] | Get the FWHM from the VOSpace annotation.
@param expnum:
@param ccd:
@param prefix:
@param version:
@return: | Below is the instruction that describes the task:
### Input:
Get the FWHM from the VOSpace annotation.
@param expnum:
@param ccd:
@param prefix:
@param version:
@return:
### Response:
def get_fwhm_tag(expnum, ccd, prefix=None, version='p'):
"""
Get the FWHM from the VOSpace annotation.
@param expnum:
@param ccd:
@param prefix:
@param version:
@return:
"""
uri = get_uri(expnum, ccd, version, ext='fwhm', prefix=prefix)
if uri not in fwhm:
key = "fwhm_{:1s}{:02d}".format(version, int(ccd))
fwhm[uri] = get_tag(expnum, key)
return fwhm[uri] |
def _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=True, return_untouched=False):
"""
Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT)
INSERT INTO table_name (field1, field2)
VALUES (1, 'two')
ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2;
"""
model = queryset.model
# Use all fields except pk unless the uniqueness constraint is the pk field
all_fields = [
field for field in model._meta.fields
if field.column != model._meta.pk.name or not field.auto_created
]
all_field_names = [field.column for field in all_fields]
returning = returning if returning is not True else [f.column for f in model._meta.fields]
all_field_names_sql = ', '.join([_quote(field) for field in all_field_names])
# Convert field names to db column names
unique_fields = [
model._meta.get_field(unique_field)
for unique_field in unique_fields
]
update_fields = [
model._meta.get_field(update_field)
for update_field in update_fields
]
unique_field_names_sql = ', '.join([
_quote(field.column) for field in unique_fields
])
update_fields_sql = ', '.join([
'{0} = EXCLUDED.{0}'.format(_quote(field.column))
for field in update_fields
])
row_values, sql_args = _get_values_for_rows(model_objs, all_fields)
return_sql = 'RETURNING ' + _get_return_fields_sql(returning, return_status=True) if returning else ''
ignore_duplicates_sql = ''
if ignore_duplicate_updates:
ignore_duplicates_sql = (
' WHERE ({update_fields_sql}) IS DISTINCT FROM ({excluded_update_fields_sql}) '
).format(
update_fields_sql=', '.join(
'{0}.{1}'.format(model._meta.db_table, _quote(field.column))
for field in update_fields
),
excluded_update_fields_sql=', '.join(
'EXCLUDED.' + _quote(field.column)
for field in update_fields
)
)
on_conflict = (
'DO UPDATE SET {0} {1}'.format(update_fields_sql, ignore_duplicates_sql) if update_fields else 'DO NOTHING'
)
if return_untouched:
row_values_sql = ', '.join([
'(\'{0}\', {1})'.format(i, row_value[1:-1])
for i, row_value in enumerate(row_values)
])
sql = (
' WITH input_rows("temp_id_", {all_field_names_sql}) AS ('
' VALUES {row_values_sql}'
' ), ins AS ( '
' INSERT INTO {table_name} ({all_field_names_sql})'
' SELECT {all_field_names_sql} FROM input_rows ORDER BY temp_id_'
' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
' )'
' SELECT DISTINCT ON ({table_pk_name}) * FROM ('
' SELECT status_, {return_fields_sql}'
' FROM ins'
' UNION ALL'
' SELECT \'n\' AS status_, {aliased_return_fields_sql}'
' FROM input_rows'
' JOIN {table_name} c USING ({unique_field_names_sql})'
' ) as results'
' ORDER BY results."{table_pk_name}", CASE WHEN(status_ = \'n\') THEN 1 ELSE 0 END;'
).format(
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
table_name=model._meta.db_table,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql,
table_pk_name=model._meta.pk.name,
return_fields_sql=_get_return_fields_sql(returning),
aliased_return_fields_sql=_get_return_fields_sql(returning, alias='c')
)
else:
row_values_sql = ', '.join(row_values)
sql = (
' INSERT INTO {table_name} ({all_field_names_sql})'
' VALUES {row_values_sql}'
' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
).format(
table_name=model._meta.db_table,
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql
)
return sql, sql_args | Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT)
INSERT INTO table_name (field1, field2)
VALUES (1, 'two')
ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2; | Below is the instruction that describes the task:
### Input:
Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT)
INSERT INTO table_name (field1, field2)
VALUES (1, 'two')
ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2;
### Response:
def _get_upsert_sql(queryset, model_objs, unique_fields, update_fields, returning,
ignore_duplicate_updates=True, return_untouched=False):
"""
Generates the postgres specific sql necessary to perform an upsert (ON CONFLICT)
INSERT INTO table_name (field1, field2)
VALUES (1, 'two')
ON CONFLICT (unique_field) DO UPDATE SET field2 = EXCLUDED.field2;
"""
model = queryset.model
# Use all fields except pk unless the uniqueness constraint is the pk field
all_fields = [
field for field in model._meta.fields
if field.column != model._meta.pk.name or not field.auto_created
]
all_field_names = [field.column for field in all_fields]
returning = returning if returning is not True else [f.column for f in model._meta.fields]
all_field_names_sql = ', '.join([_quote(field) for field in all_field_names])
# Convert field names to db column names
unique_fields = [
model._meta.get_field(unique_field)
for unique_field in unique_fields
]
update_fields = [
model._meta.get_field(update_field)
for update_field in update_fields
]
unique_field_names_sql = ', '.join([
_quote(field.column) for field in unique_fields
])
update_fields_sql = ', '.join([
'{0} = EXCLUDED.{0}'.format(_quote(field.column))
for field in update_fields
])
row_values, sql_args = _get_values_for_rows(model_objs, all_fields)
return_sql = 'RETURNING ' + _get_return_fields_sql(returning, return_status=True) if returning else ''
ignore_duplicates_sql = ''
if ignore_duplicate_updates:
ignore_duplicates_sql = (
' WHERE ({update_fields_sql}) IS DISTINCT FROM ({excluded_update_fields_sql}) '
).format(
update_fields_sql=', '.join(
'{0}.{1}'.format(model._meta.db_table, _quote(field.column))
for field in update_fields
),
excluded_update_fields_sql=', '.join(
'EXCLUDED.' + _quote(field.column)
for field in update_fields
)
)
on_conflict = (
'DO UPDATE SET {0} {1}'.format(update_fields_sql, ignore_duplicates_sql) if update_fields else 'DO NOTHING'
)
if return_untouched:
row_values_sql = ', '.join([
'(\'{0}\', {1})'.format(i, row_value[1:-1])
for i, row_value in enumerate(row_values)
])
sql = (
' WITH input_rows("temp_id_", {all_field_names_sql}) AS ('
' VALUES {row_values_sql}'
' ), ins AS ( '
' INSERT INTO {table_name} ({all_field_names_sql})'
' SELECT {all_field_names_sql} FROM input_rows ORDER BY temp_id_'
' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
' )'
' SELECT DISTINCT ON ({table_pk_name}) * FROM ('
' SELECT status_, {return_fields_sql}'
' FROM ins'
' UNION ALL'
' SELECT \'n\' AS status_, {aliased_return_fields_sql}'
' FROM input_rows'
' JOIN {table_name} c USING ({unique_field_names_sql})'
' ) as results'
' ORDER BY results."{table_pk_name}", CASE WHEN(status_ = \'n\') THEN 1 ELSE 0 END;'
).format(
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
table_name=model._meta.db_table,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql,
table_pk_name=model._meta.pk.name,
return_fields_sql=_get_return_fields_sql(returning),
aliased_return_fields_sql=_get_return_fields_sql(returning, alias='c')
)
else:
row_values_sql = ', '.join(row_values)
sql = (
' INSERT INTO {table_name} ({all_field_names_sql})'
' VALUES {row_values_sql}'
' ON CONFLICT ({unique_field_names_sql}) {on_conflict} {return_sql}'
).format(
table_name=model._meta.db_table,
all_field_names_sql=all_field_names_sql,
row_values_sql=row_values_sql,
unique_field_names_sql=unique_field_names_sql,
on_conflict=on_conflict,
return_sql=return_sql
)
return sql, sql_args |
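The statement assembled above is PostgreSQL's INSERT ... ON CONFLICT upsert. A standalone demonstration of the same pattern, using sqlite3 purely for illustration (SQLite 3.24+ accepts a compatible ON CONFLICT ... DO UPDATE clause; the table and columns here are made up):

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE account (id INTEGER PRIMARY KEY, name TEXT UNIQUE, balance INTEGER)")
upsert = (
    "INSERT INTO account (name, balance) VALUES (?, ?) "
    "ON CONFLICT (name) DO UPDATE SET balance = excluded.balance"
)
conn.execute(upsert, ("alice", 10))
conn.execute(upsert, ("alice", 25))  # conflicting name updates the row instead of failing
print(conn.execute("SELECT name, balance FROM account").fetchall())  # [('alice', 25)]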
def _writeReplacementFiles(self, session, directory, name):
"""
Write the replacement files
"""
if self.replaceParamFile:
self.replaceParamFile.write(session=session, directory=directory,
name=name)
if self.replaceValFile:
self.replaceValFile.write(session=session, directory=directory,
name=name) | Write the replacement files | Below is the the instruction that describes the task:
### Input:
Write the replacement files
### Response:
def _writeReplacementFiles(self, session, directory, name):
"""
Write the replacement files
"""
if self.replaceParamFile:
self.replaceParamFile.write(session=session, directory=directory,
name=name)
if self.replaceValFile:
self.replaceValFile.write(session=session, directory=directory,
name=name) |