code (string lengths 75–104k) | docstring (string lengths 1–46.9k) | text (string lengths 164–112k) |
---|---|---|
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value) | Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64 | Below is the instruction that describes the task:
### Input:
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
### Response:
def unstack(self, level=-1, fill_value=None):
"""
Pivot a level of the (necessarily hierarchical) index labels, returning
a DataFrame having a new level of column labels whose inner-most level
consists of the pivoted index labels.
If the index is not a MultiIndex, the output will be a Series
(the analogue of stack when the columns are not a MultiIndex).
The level involved will automatically get sorted.
Parameters
----------
level : int, string, or list of these, default -1 (last level)
Level(s) of index to unstack, can pass level name
fill_value : replace NaN with this value if the unstack produces
missing values
.. versionadded:: 0.18.0
Returns
-------
Series or DataFrame
See Also
--------
DataFrame.pivot : Pivot a table based on column values.
DataFrame.stack : Pivot a level of the column labels (inverse operation
from `unstack`).
Examples
--------
>>> index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'),
... ('two', 'a'), ('two', 'b')])
>>> s = pd.Series(np.arange(1.0, 5.0), index=index)
>>> s
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
>>> s.unstack(level=-1)
a b
one 1.0 2.0
two 3.0 4.0
>>> s.unstack(level=0)
one two
a 1.0 3.0
b 2.0 4.0
>>> df = s.unstack(level=0)
>>> df.unstack()
one a 1.0
b 2.0
two a 3.0
b 4.0
dtype: float64
"""
from pandas.core.reshape.reshape import unstack
return unstack(self, level, fill_value) |
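The docstring above documents `fill_value` but never exemplifies it; here is a minimal sketch (with a deliberately missing `('two', 'b')` label combination, assumed only for illustration) of how it replaces the NaN that unstacking would otherwise introduce:

```python
import pandas as pd

# ('two', 'b') is absent, so unstacking leaves a hole in that cell.
index = pd.MultiIndex.from_tuples([('one', 'a'), ('one', 'b'), ('two', 'a')])
s = pd.Series([1.0, 2.0, 3.0], index=index)

print(s.unstack(level=-1))                 # NaN at row 'two', column 'b'
print(s.unstack(level=-1, fill_value=0))   # 0 written in place of that NaN
```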
def headers(self):
''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. '''
self.__dict__['headers'] = hdict = HeaderDict()
hdict.dict = self._headers
return hdict | An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. | Below is the instruction that describes the task:
### Input:
An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers.
### Response:
def headers(self):
''' An instance of :class:`HeaderDict`, a case-insensitive dict-like
view on the response headers. '''
self.__dict__['headers'] = hdict = HeaderDict()
hdict.dict = self._headers
return hdict |
def get_devices(self, hid_filter = None):
"""Filter a HID device list by current object parameters. Devices
must match the all of the filtering parameters
"""
if not hid_filter: #empty list or called without any parameters
if type(hid_filter) == type(None):
#request to query connected devices
hid_filter = find_all_hid_devices()
else:
return hid_filter
#initially all accepted
results = {}.fromkeys(hid_filter)
#the filter parameters
validating_attributes = list(self.filter_params.keys())
#first filter out restricted access devices
if not len(results):
return {}
for device in list(results.keys()):
if not device.is_active():
del results[device]
if not len(results):
return {}
#filter out
for item in validating_attributes:
if item.endswith("_includes"):
item = item[:-len("_includes")]
elif item.endswith("_mask"):
item = item[:-len("_mask")]
elif item +"_mask" in self.filter_params or item + "_includes" \
in self.filter_params:
continue # value mask or string search is being queried
elif item not in HidDevice.filter_attributes:
continue # field does not exist sys.error.write(...)
#start filtering out
for device in list(results.keys()):
if not hasattr(device, item):
del results[device]
elif item + "_mask" in validating_attributes:
#masked value
if getattr(device, item) & self.filter_params[item + \
"_mask"] != self.filter_params[item] \
& self.filter_params[item + "_mask"]:
del results[device]
elif item + "_includes" in validating_attributes:
#subset item
if self.filter_params[item + "_includes"] not in \
getattr(device, item):
del results[device]
else:
#plain comparison
if getattr(device, item) != self.filter_params[item]:
del results[device]
#
return list(results.keys()) | Filter a HID device list by current object parameters. Devices
must match the all of the filtering parameters | Below is the instruction that describes the task:
### Input:
Filter a HID device list by current object parameters. Devices
must match the all of the filtering parameters
### Response:
def get_devices(self, hid_filter = None):
"""Filter a HID device list by current object parameters. Devices
must match the all of the filtering parameters
"""
if not hid_filter: #empty list or called without any parameters
if type(hid_filter) == type(None):
#request to query connected devices
hid_filter = find_all_hid_devices()
else:
return hid_filter
#initially all accepted
results = {}.fromkeys(hid_filter)
#the filter parameters
validating_attributes = list(self.filter_params.keys())
#first filter out restricted access devices
if not len(results):
return {}
for device in list(results.keys()):
if not device.is_active():
del results[device]
if not len(results):
return {}
#filter out
for item in validating_attributes:
if item.endswith("_includes"):
item = item[:-len("_includes")]
elif item.endswith("_mask"):
item = item[:-len("_mask")]
elif item +"_mask" in self.filter_params or item + "_includes" \
in self.filter_params:
continue # value mask or string search is being queried
elif item not in HidDevice.filter_attributes:
continue # field does not exist sys.error.write(...)
#start filtering out
for device in list(results.keys()):
if not hasattr(device, item):
del results[device]
elif item + "_mask" in validating_attributes:
#masked value
if getattr(device, item) & self.filter_params[item + \
"_mask"] != self.filter_params[item] \
& self.filter_params[item + "_mask"]:
del results[device]
elif item + "_includes" in validating_attributes:
#subset item
if self.filter_params[item + "_includes"] not in \
getattr(device, item):
del results[device]
else:
#plain comparison
if getattr(device, item) != self.filter_params[item]:
del results[device]
#
return list(results.keys()) |
def feed(self, can):
"""Attempt to feed an incoming CAN frame into the state machine"""
if not isinstance(can, CAN):
raise Scapy_Exception("argument is not a CAN frame")
identifier = can.identifier
data = bytes(can.data)
if len(data) > 1 and self.use_ext_addr is not True:
self._try_feed(identifier, None, data)
if len(data) > 2 and self.use_ext_addr is not False:
ea = six.indexbytes(data, 0)
self._try_feed(identifier, ea, data[1:]) | Attempt to feed an incoming CAN frame into the state machine | Below is the instruction that describes the task:
### Input:
Attempt to feed an incoming CAN frame into the state machine
### Response:
def feed(self, can):
"""Attempt to feed an incoming CAN frame into the state machine"""
if not isinstance(can, CAN):
raise Scapy_Exception("argument is not a CAN frame")
identifier = can.identifier
data = bytes(can.data)
if len(data) > 1 and self.use_ext_addr is not True:
self._try_feed(identifier, None, data)
if len(data) > 2 and self.use_ext_addr is not False:
ea = six.indexbytes(data, 0)
self._try_feed(identifier, ea, data[1:]) |
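For context, a small sketch of the extended-addressing split performed by the last branch: the first data byte is read as a candidate extended address and the remainder is handed to the state machine as the ISO-TP payload. The byte values below are made up for illustration.

```python
import six

data = bytes([0xF1, 0x02, 0xAB, 0xCD])   # hypothetical CAN data field
ext_addr = six.indexbytes(data, 0)       # 0xF1, the candidate extended address
payload = data[1:]                       # what _try_feed receives alongside ext_addr
print(hex(ext_addr), payload)
```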
def loadtxt_str(path:PathOrStr)->np.ndarray:
"Return `ndarray` of `str` of lines of text from `path`."
with open(path, 'r') as f: lines = f.readlines()
return np.array([l.strip() for l in lines]) | Return `ndarray` of `str` of lines of text from `path`. | Below is the instruction that describes the task:
### Input:
Return `ndarray` of `str` of lines of text from `path`.
### Response:
def loadtxt_str(path:PathOrStr)->np.ndarray:
"Return `ndarray` of `str` of lines of text from `path`."
with open(path, 'r') as f: lines = f.readlines()
return np.array([l.strip() for l in lines]) |
def create_record_task(self, frame_parameters: dict=None, channels_enabled: typing.List[bool]=None) -> RecordTask:
"""Create a record task for this hardware source.
.. versionadded:: 1.0
:param frame_parameters: The frame parameters for the record. Pass None for defaults.
:type frame_parameters: :py:class:`FrameParameters`
:param channels_enabled: The enabled channels for the record. Pass None for defaults.
:type channels_enabled: List of booleans.
:return: The :py:class:`RecordTask` object.
:rtype: :py:class:`RecordTask`
Callers should call close on the returned task when finished.
See :py:class:`RecordTask` for examples of how to use.
"""
return RecordTask(self.__hardware_source, frame_parameters, channels_enabled) | Create a record task for this hardware source.
.. versionadded:: 1.0
:param frame_parameters: The frame parameters for the record. Pass None for defaults.
:type frame_parameters: :py:class:`FrameParameters`
:param channels_enabled: The enabled channels for the record. Pass None for defaults.
:type channels_enabled: List of booleans.
:return: The :py:class:`RecordTask` object.
:rtype: :py:class:`RecordTask`
Callers should call close on the returned task when finished.
See :py:class:`RecordTask` for examples of how to use. | Below is the instruction that describes the task:
### Input:
Create a record task for this hardware source.
.. versionadded:: 1.0
:param frame_parameters: The frame parameters for the record. Pass None for defaults.
:type frame_parameters: :py:class:`FrameParameters`
:param channels_enabled: The enabled channels for the record. Pass None for defaults.
:type channels_enabled: List of booleans.
:return: The :py:class:`RecordTask` object.
:rtype: :py:class:`RecordTask`
Callers should call close on the returned task when finished.
See :py:class:`RecordTask` for examples of how to use.
### Response:
def create_record_task(self, frame_parameters: dict=None, channels_enabled: typing.List[bool]=None) -> RecordTask:
"""Create a record task for this hardware source.
.. versionadded:: 1.0
:param frame_parameters: The frame parameters for the record. Pass None for defaults.
:type frame_parameters: :py:class:`FrameParameters`
:param channels_enabled: The enabled channels for the record. Pass None for defaults.
:type channels_enabled: List of booleans.
:return: The :py:class:`RecordTask` object.
:rtype: :py:class:`RecordTask`
Callers should call close on the returned task when finished.
See :py:class:`RecordTask` for examples of how to use.
"""
return RecordTask(self.__hardware_source, frame_parameters, channels_enabled) |
def sys_lseek(self, fd, offset, whence):
"""
lseek - reposition read/write file offset
The lseek() function repositions the file offset of the open file description associated
with the file descriptor fd to the argument offset according to the directive whence
:param fd: a valid file descriptor
:param offset: the offset in bytes
:param whence: SEEK_SET: The file offset is set to offset bytes.
SEEK_CUR: The file offset is set to its current location plus offset bytes.
SEEK_END: The file offset is set to the size of the file plus offset bytes.
:return: offset from file beginning, or EBADF (fd is not a valid file descriptor or is not open)
"""
signed_offset = self._to_signed_dword(offset)
try:
return self._get_fd(fd).seek(signed_offset, whence)
except FdError as e:
logger.info(("LSEEK: Not valid file descriptor on lseek."
"Fd not seekable. Returning EBADF"))
return -e.err | lseek - reposition read/write file offset
The lseek() function repositions the file offset of the open file description associated
with the file descriptor fd to the argument offset according to the directive whence
:param fd: a valid file descriptor
:param offset: the offset in bytes
:param whence: SEEK_SET: The file offset is set to offset bytes.
SEEK_CUR: The file offset is set to its current location plus offset bytes.
SEEK_END: The file offset is set to the size of the file plus offset bytes.
:return: offset from file beginning, or EBADF (fd is not a valid file descriptor or is not open) | Below is the instruction that describes the task:
### Input:
lseek - reposition read/write file offset
The lseek() function repositions the file offset of the open file description associated
with the file descriptor fd to the argument offset according to the directive whence
:param fd: a valid file descriptor
:param offset: the offset in bytes
:param whence: SEEK_SET: The file offset is set to offset bytes.
SEEK_CUR: The file offset is set to its current location plus offset bytes.
SEEK_END: The file offset is set to the size of the file plus offset bytes.
:return: offset from file beginning, or EBADF (fd is not a valid file descriptor or is not open)
### Response:
def sys_lseek(self, fd, offset, whence):
"""
lseek - reposition read/write file offset
The lseek() function repositions the file offset of the open file description associated
with the file descriptor fd to the argument offset according to the directive whence
:param fd: a valid file descriptor
:param offset: the offset in bytes
:param whence: SEEK_SET: The file offset is set to offset bytes.
SEEK_CUR: The file offset is set to its current location plus offset bytes.
SEEK_END: The file offset is set to the size of the file plus offset bytes.
:return: offset from file beginning, or EBADF (fd is not a valid file descriptor or is not open)
"""
signed_offset = self._to_signed_dword(offset)
try:
return self._get_fd(fd).seek(signed_offset, whence)
except FdError as e:
logger.info(("LSEEK: Not valid file descriptor on lseek."
"Fd not seekable. Returning EBADF"))
return -e.err |
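The three `whence` modes correspond to the usual POSIX constants; a quick standalone illustration with the plain `os` module (not the emulated file descriptors above):

```python
import os

fd = os.open("demo.bin", os.O_RDWR | os.O_CREAT)
os.write(fd, b"0123456789")
print(os.lseek(fd, 2, os.SEEK_SET))   # 2: absolute offset from the beginning
print(os.lseek(fd, 3, os.SEEK_CUR))   # 5: relative to the current position
print(os.lseek(fd, 0, os.SEEK_END))   # 10: relative to the end, i.e. the file size
os.close(fd)
```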
def discrete_rainbow(N=7, cmap=cm.Set1, usepreset=True, shuffle=False, \
plot=False):
"""
Return a discrete colormap and the set of colors.
modified from
<http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations>
cmap: colormap instance, eg. cm.jet.
N: Number of colors.
Example
>>> x = resize(arange(100), (5,100))
>>> djet = cmap_discretize(cm.jet, 5)
>>> imshow(x, cmap=djet)
See available matplotlib colormaps at:
<http://dept.astro.lsa.umich.edu/~msshin/science/code/matplotlib_cm/>
If N>20 the sampled colors might not be very distinctive.
If you want to error and try anyway, set usepreset=False
"""
import random
from scipy import interpolate
if usepreset:
if 0 < N <= 5:
cmap = cm.gist_rainbow
elif N <= 20:
cmap = cm.Set1
else:
sys.exit(discrete_rainbow.__doc__)
cdict = cmap._segmentdata.copy()
# N colors
colors_i = np.linspace(0,1.,N)
# N+1 indices
indices = np.linspace(0,1.,N+1)
rgbs = []
for key in ('red','green','blue'):
# Find the N colors
D = np.array(cdict[key])
I = interpolate.interp1d(D[:,0], D[:,1])
colors = I(colors_i)
rgbs.append(colors)
# Place these colors at the correct indices.
A = np.zeros((N+1,3), float)
A[:,0] = indices
A[1:,1] = colors
A[:-1,2] = colors
# Create a tuple for the dictionary.
L = []
for l in A:
L.append(tuple(l))
cdict[key] = tuple(L)
palette = zip(*rgbs)
if shuffle:
random.shuffle(palette)
if plot:
print_colors(palette)
# Return (colormap object, RGB tuples)
return mpl.colors.LinearSegmentedColormap('colormap',cdict,1024), palette | Return a discrete colormap and the set of colors.
modified from
<http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations>
cmap: colormap instance, eg. cm.jet.
N: Number of colors.
Example
>>> x = resize(arange(100), (5,100))
>>> djet = cmap_discretize(cm.jet, 5)
>>> imshow(x, cmap=djet)
See available matplotlib colormaps at:
<http://dept.astro.lsa.umich.edu/~msshin/science/code/matplotlib_cm/>
If N>20 the sampled colors might not be very distinctive.
If you want to error and try anyway, set usepreset=False | Below is the instruction that describes the task:
### Input:
Return a discrete colormap and the set of colors.
modified from
<http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations>
cmap: colormap instance, eg. cm.jet.
N: Number of colors.
Example
>>> x = resize(arange(100), (5,100))
>>> djet = cmap_discretize(cm.jet, 5)
>>> imshow(x, cmap=djet)
See available matplotlib colormaps at:
<http://dept.astro.lsa.umich.edu/~msshin/science/code/matplotlib_cm/>
If N>20 the sampled colors might not be very distinctive.
If you want to error and try anyway, set usepreset=False
### Response:
def discrete_rainbow(N=7, cmap=cm.Set1, usepreset=True, shuffle=False, \
plot=False):
"""
Return a discrete colormap and the set of colors.
modified from
<http://www.scipy.org/Cookbook/Matplotlib/ColormapTransformations>
cmap: colormap instance, eg. cm.jet.
N: Number of colors.
Example
>>> x = resize(arange(100), (5,100))
>>> djet = cmap_discretize(cm.jet, 5)
>>> imshow(x, cmap=djet)
See available matplotlib colormaps at:
<http://dept.astro.lsa.umich.edu/~msshin/science/code/matplotlib_cm/>
If N>20 the sampled colors might not be very distinctive.
If you want to error and try anyway, set usepreset=False
"""
import random
from scipy import interpolate
if usepreset:
if 0 < N <= 5:
cmap = cm.gist_rainbow
elif N <= 20:
cmap = cm.Set1
else:
sys.exit(discrete_rainbow.__doc__)
cdict = cmap._segmentdata.copy()
# N colors
colors_i = np.linspace(0,1.,N)
# N+1 indices
indices = np.linspace(0,1.,N+1)
rgbs = []
for key in ('red','green','blue'):
# Find the N colors
D = np.array(cdict[key])
I = interpolate.interp1d(D[:,0], D[:,1])
colors = I(colors_i)
rgbs.append(colors)
# Place these colors at the correct indices.
A = np.zeros((N+1,3), float)
A[:,0] = indices
A[1:,1] = colors
A[:-1,2] = colors
# Create a tuple for the dictionary.
L = []
for l in A:
L.append(tuple(l))
cdict[key] = tuple(L)
palette = zip(*rgbs)
if shuffle:
random.shuffle(palette)
if plot:
print_colors(palette)
# Return (colormap object, RGB tuples)
return mpl.colors.LinearSegmentedColormap('colormap',cdict,1024), palette |
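Recent matplotlib can build a comparable discrete palette without touching `_segmentdata`; this is only a sketch of that shortcut, not a replacement for the function above:

```python
import matplotlib.pyplot as plt

N = 7
cmap = plt.get_cmap('Set1', N)          # colormap resampled to N entries
palette = [cmap(i) for i in range(N)]   # N RGBA tuples
print(palette[:2])
```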
def print_errors(function):
"""Prints the exceptions raised by the decorated function
without interfering. For debugging purpose."""
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except BaseException as e:
print ("Exception raise calling %s: %s"
% (reflect.canonical_name(function),
get_exception_message(e)))
raise
return wrapper | Prints the exceptions raised by the decorated function
without interfering. For debugging purpose. | Below is the instruction that describes the task:
### Input:
Prints the exceptions raised by the decorated function
without interfering. For debugging purpose.
### Response:
def print_errors(function):
"""Prints the exceptions raised by the decorated function
without interfering. For debugging purpose."""
def wrapper(*args, **kwargs):
try:
return function(*args, **kwargs)
except BaseException as e:
print ("Exception raise calling %s: %s"
% (reflect.canonical_name(function),
get_exception_message(e)))
raise
return wrapper |
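A self-contained variant of the same decorator pattern, using only the standard library in place of the project-specific `reflect.canonical_name` and `get_exception_message` helpers (their exact behaviour is assumed, not reproduced):

```python
import functools

def print_errors(function):
    """Print exceptions raised by the wrapped callable, then re-raise them."""
    @functools.wraps(function)               # preserve the wrapped function's name and docstring
    def wrapper(*args, **kwargs):
        try:
            return function(*args, **kwargs)
        except BaseException as e:
            print("Exception raised calling %s: %s" % (function.__qualname__, e))
            raise
    return wrapper

@print_errors
def divide(a, b):
    return a / b

# divide(1, 0) prints the ZeroDivisionError before propagating it.
```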
def selected(self, sel):
"""Called when this item has been selected (sel=True) OR deselected (sel=False)"""
ParameterItem.selected(self, sel)
if self.widget is None:
return
if sel and self.param.writable():
self.showEditor()
elif self.hideWidget:
self.hideEditor() | Called when this item has been selected (sel=True) OR deselected (sel=False) | Below is the instruction that describes the task:
### Input:
Called when this item has been selected (sel=True) OR deselected (sel=False)
### Response:
def selected(self, sel):
"""Called when this item has been selected (sel=True) OR deselected (sel=False)"""
ParameterItem.selected(self, sel)
if self.widget is None:
return
if sel and self.param.writable():
self.showEditor()
elif self.hideWidget:
self.hideEditor() |
def get_argument( # noqa: F811
self,
name: str,
default: Union[None, str, _ArgDefaultMarker] = _ARG_DEFAULT,
strip: bool = True,
) -> Optional[str]:
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the request more than once, we return the
last value.
This method searches both the query and body arguments.
"""
return self._get_argument(name, default, self.request.arguments, strip) | Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the request more than once, we return the
last value.
This method searches both the query and body arguments. | Below is the instruction that describes the task:
### Input:
Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the request more than once, we return the
last value.
This method searches both the query and body arguments.
### Response:
def get_argument( # noqa: F811
self,
name: str,
default: Union[None, str, _ArgDefaultMarker] = _ARG_DEFAULT,
strip: bool = True,
) -> Optional[str]:
"""Returns the value of the argument with the given name.
If default is not provided, the argument is considered to be
required, and we raise a `MissingArgumentError` if it is missing.
If the argument appears in the request more than once, we return the
last value.
This method searches both the query and body arguments.
"""
return self._get_argument(name, default, self.request.arguments, strip) |
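A typical call site, sketched as a minimal Tornado handler (the handler and argument names are purely illustrative):

```python
import tornado.web

class SearchHandler(tornado.web.RequestHandler):
    def get(self):
        q = self.get_argument("q")                      # required: missing -> MissingArgumentError (HTTP 400)
        page = self.get_argument("page", default="1")   # optional; last value wins for ?page=1&page=2
        self.write({"q": q, "page": page})
```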
def c_hashes(self):
"""Get the hashes of the module including functions and DLLs.
"""
if callable(self.opts.hash_func):
hashes = [
'# define {}{} {}\n'.format(
self.opts.prefix, name, self.opts.hash_func(name)
) for name, dummy_args in self.funcs
]
else:
hashes = [
make_c_str(self.opts.prefix + name, name)
for name, dummy_args in self.funcs
]
if self.name != 'kernel32':
hashes = [
make_c_str(self.opts.prefix + self.name, self.name)
] + hashes
return hashes | Get the hashes of the module including functions and DLLs. | Below is the instruction that describes the task:
### Input:
Get the hashes of the module including functions and DLLs.
### Response:
def c_hashes(self):
"""Get the hashes of the module including functions and DLLs.
"""
if callable(self.opts.hash_func):
hashes = [
'# define {}{} {}\n'.format(
self.opts.prefix, name, self.opts.hash_func(name)
) for name, dummy_args in self.funcs
]
else:
hashes = [
make_c_str(self.opts.prefix + name, name)
for name, dummy_args in self.funcs
]
if self.name != 'kernel32':
hashes = [
make_c_str(self.opts.prefix + self.name, self.name)
] + hashes
return hashes |
def t_name(self, s):
r'[A-Za-z_][A-Za-z_0-9]*'
if s in RESERVED_WORDS:
self.add_token(s.upper(), s)
else:
self.add_token('NAME', s) | r'[A-Za-z_][A-Za-z_0-9]* | Below is the instruction that describes the task:
### Input:
r'[A-Za-z_][A-Za-z_0-9]*
### Response:
def t_name(self, s):
r'[A-Za-z_][A-Za-z_0-9]*'
if s in RESERVED_WORDS:
self.add_token(s.upper(), s)
else:
self.add_token('NAME', s) |
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data) | Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status. | Below is the instruction that describes the task:
### Input:
Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
### Response:
def post_parse(self, uri, params={}, data={}):
'''Convenience method to call post() on an arbitrary URI and parse the response
into a JSON object. Raises an error on non-200 response status.
'''
return self._request_parse(self.post, uri, params, data) |
def search_command_record(
self,
after_context, before_context, context, context_type,
**kwds):
"""
Search command history.
:rtype: [CommandRecord]
"""
if after_context or before_context or context:
kwds['condition_as_column'] = True
limit = kwds['limit']
kwds['limit'] = -1
kwds['unique'] = False
kwds['sort_by'] = {
'session': ['session_start_time', 'start_time'],
'time': ['start_time'],
}[context_type]
if not kwds['reverse']:
# Default (reverse=False) means latest history comes first.
after_context, before_context = before_context, after_context
(sql, params, keys) = self._compile_sql_search_command_record(**kwds)
records = self._select_rows(CommandRecord, keys, sql, params)
# SOMEDAY: optimize context search; do not create CommandRecord
# object for all (including non-matching) records.
predicate = lambda r: r.condition
if context:
records = include_context(predicate, context, records)
elif before_context:
records = include_before(predicate, before_context, records)
elif after_context:
records = include_after(predicate, after_context, records)
if after_context or before_context or context and limit >= 0:
records = itertools.islice(records, limit)
# NOTE: as SQLite does not support row_number function, let's
# do the filtering at Python side when context modifier
# is given. This is *very* inefficient but at least it
# works..
return records | Search command history.
:rtype: [CommandRecord] | Below is the instruction that describes the task:
### Input:
Search command history.
:rtype: [CommandRecord]
### Response:
def search_command_record(
self,
after_context, before_context, context, context_type,
**kwds):
"""
Search command history.
:rtype: [CommandRecord]
"""
if after_context or before_context or context:
kwds['condition_as_column'] = True
limit = kwds['limit']
kwds['limit'] = -1
kwds['unique'] = False
kwds['sort_by'] = {
'session': ['session_start_time', 'start_time'],
'time': ['start_time'],
}[context_type]
if not kwds['reverse']:
# Default (reverse=False) means latest history comes first.
after_context, before_context = before_context, after_context
(sql, params, keys) = self._compile_sql_search_command_record(**kwds)
records = self._select_rows(CommandRecord, keys, sql, params)
# SOMEDAY: optimize context search; do not create CommandRecord
# object for all (including non-matching) records.
predicate = lambda r: r.condition
if context:
records = include_context(predicate, context, records)
elif before_context:
records = include_before(predicate, before_context, records)
elif after_context:
records = include_after(predicate, after_context, records)
if after_context or before_context or context and limit >= 0:
records = itertools.islice(records, limit)
# NOTE: as SQLite does not support row_number function, let's
# do the filtering at Python side when context modifier
# is given. This is *very* inefficient but at least it
# works..
return records |
def _compute_baseline_survival(self):
"""
Importantly, this agrees with what the KaplanMeierFitter produces. Ex:
Example
-------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter, KaplanMeierFitter
>>> rossi = load_rossi()
>>> kmf = KaplanMeierFitter()
>>> kmf.fit(rossi['week'], rossi['arrest'])
>>> rossi2 = rossi[['week', 'arrest']].copy()
>>> rossi2['var1'] = np.random.randn(432)
>>> cph = CoxPHFitter()
>>> cph.fit(rossi2, 'week', 'arrest')
>>> ax = cph.baseline_survival_.plot()
>>> kmf.plot(ax=ax)
"""
survival_df = np.exp(-self.baseline_cumulative_hazard_)
if self.strata is None:
survival_df.columns = ["baseline survival"]
return survival_df | Importantly, this agrees with what the KaplanMeierFitter produces. Ex:
Example
-------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter, KaplanMeierFitter
>>> rossi = load_rossi()
>>> kmf = KaplanMeierFitter()
>>> kmf.fit(rossi['week'], rossi['arrest'])
>>> rossi2 = rossi[['week', 'arrest']].copy()
>>> rossi2['var1'] = np.random.randn(432)
>>> cph = CoxPHFitter()
>>> cph.fit(rossi2, 'week', 'arrest')
>>> ax = cph.baseline_survival_.plot()
>>> kmf.plot(ax=ax) | Below is the instruction that describes the task:
### Input:
Importantly, this agrees with what the KaplanMeierFitter produces. Ex:
Example
-------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter, KaplanMeierFitter
>>> rossi = load_rossi()
>>> kmf = KaplanMeierFitter()
>>> kmf.fit(rossi['week'], rossi['arrest'])
>>> rossi2 = rossi[['week', 'arrest']].copy()
>>> rossi2['var1'] = np.random.randn(432)
>>> cph = CoxPHFitter()
>>> cph.fit(rossi2, 'week', 'arrest')
>>> ax = cph.baseline_survival_.plot()
>>> kmf.plot(ax=ax)
### Response:
def _compute_baseline_survival(self):
"""
Importantly, this agrees with what the KaplanMeierFitter produces. Ex:
Example
-------
>>> from lifelines.datasets import load_rossi
>>> from lifelines import CoxPHFitter, KaplanMeierFitter
>>> rossi = load_rossi()
>>> kmf = KaplanMeierFitter()
>>> kmf.fit(rossi['week'], rossi['arrest'])
>>> rossi2 = rossi[['week', 'arrest']].copy()
>>> rossi2['var1'] = np.random.randn(432)
>>> cph = CoxPHFitter()
>>> cph.fit(rossi2, 'week', 'arrest')
>>> ax = cph.baseline_survival_.plot()
>>> kmf.plot(ax=ax)
"""
survival_df = np.exp(-self.baseline_cumulative_hazard_)
if self.strata is None:
survival_df.columns = ["baseline survival"]
return survival_df |
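The identity behind the `np.exp(-...)` line, written out using the standard Cox proportional-hazards relations (general theory, not lifelines-specific notation):

\[
S_0(t) = \exp\{-H_0(t)\}, \qquad
S(t \mid x) = S_0(t)^{\exp(x^\top \beta)} = \exp\{-H_0(t)\, e^{x^\top \beta}\},
\]

so with all covariates at their baseline values the predicted survival reduces to \(S_0(t)\), which is why it tracks the Kaplan-Meier curve in the docstring example.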
def are_tokens_valid(self, tokens):
"""
Check if tokens are valid tokens for the locale.
:param tokens:
a list of string or unicode tokens.
:type tokens: list
:return: True if tokens are valid, False otherwise.
"""
match_relative_regex = self._get_match_relative_regex_cache()
for token in tokens:
if any([match_relative_regex.match(token),
token in self, token.isdigit()]):
continue
else:
return False
else:
return True | Check if tokens are valid tokens for the locale.
:param tokens:
a list of string or unicode tokens.
:type tokens: list
:return: True if tokens are valid, False otherwise. | Below is the instruction that describes the task:
### Input:
Check if tokens are valid tokens for the locale.
:param tokens:
a list of string or unicode tokens.
:type tokens: list
:return: True if tokens are valid, False otherwise.
### Response:
def are_tokens_valid(self, tokens):
"""
Check if tokens are valid tokens for the locale.
:param tokens:
a list of string or unicode tokens.
:type tokens: list
:return: True if tokens are valid, False otherwise.
"""
match_relative_regex = self._get_match_relative_regex_cache()
for token in tokens:
if any([match_relative_regex.match(token),
token in self, token.isdigit()]):
continue
else:
return False
else:
return True |
def crtGauss2D(varSizeX, varSizeY, varPosX, varPosY, varSd):
"""Create 2D Gaussian kernel.
Parameters
----------
varSizeX : int, positive
Width of the visual field.
varSizeY : int, positive
Height of the visual field..
varPosX : int, positive
X position of centre of 2D Gauss.
varPosY : int, positive
Y position of centre of 2D Gauss.
varSd : float, positive
Standard deviation of 2D Gauss.
Returns
-------
aryGauss : 2d numpy array, shape [varSizeX, varSizeY]
2d Gaussian.
Reference
---------
[1]
"""
varSizeX = int(varSizeX)
varSizeY = int(varSizeY)
# aryX and aryY are in reversed order, this seems to be necessary:
aryY, aryX = sp.mgrid[0:varSizeX,
0:varSizeY]
# The actual creation of the Gaussian array:
aryGauss = (
(np.square((aryX - varPosX)) + np.square((aryY - varPosY))) /
(2.0 * np.square(varSd))
)
aryGauss = np.exp(-aryGauss) / (2 * np.pi * np.square(varSd))
return aryGauss | Create 2D Gaussian kernel.
Parameters
----------
varSizeX : int, positive
Width of the visual field.
varSizeY : int, positive
Height of the visual field..
varPosX : int, positive
X position of centre of 2D Gauss.
varPosY : int, positive
Y position of centre of 2D Gauss.
varSd : float, positive
Standard deviation of 2D Gauss.
Returns
-------
aryGauss : 2d numpy array, shape [varSizeX, varSizeY]
2d Gaussian.
Reference
---------
[1] | Below is the instruction that describes the task:
### Input:
Create 2D Gaussian kernel.
Parameters
----------
varSizeX : int, positive
Width of the visual field.
varSizeY : int, positive
Height of the visual field..
varPosX : int, positive
X position of centre of 2D Gauss.
varPosY : int, positive
Y position of centre of 2D Gauss.
varSd : float, positive
Standard deviation of 2D Gauss.
Returns
-------
aryGauss : 2d numpy array, shape [varSizeX, varSizeY]
2d Gaussian.
Reference
---------
[1]
### Response:
def crtGauss2D(varSizeX, varSizeY, varPosX, varPosY, varSd):
"""Create 2D Gaussian kernel.
Parameters
----------
varSizeX : int, positive
Width of the visual field.
varSizeY : int, positive
Height of the visual field..
varPosX : int, positive
X position of centre of 2D Gauss.
varPosY : int, positive
Y position of centre of 2D Gauss.
varSd : float, positive
Standard deviation of 2D Gauss.
Returns
-------
aryGauss : 2d numpy array, shape [varSizeX, varSizeY]
2d Gaussian.
Reference
---------
[1]
"""
varSizeX = int(varSizeX)
varSizeY = int(varSizeY)
# aryX and aryY are in reversed order, this seems to be necessary:
aryY, aryX = sp.mgrid[0:varSizeX,
0:varSizeY]
# The actual creation of the Gaussian array:
aryGauss = (
(np.square((aryX - varPosX)) + np.square((aryY - varPosY))) /
(2.0 * np.square(varSd))
)
aryGauss = np.exp(-aryGauss) / (2 * np.pi * np.square(varSd))
return aryGauss |
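Written out, the array computation above evaluates the isotropic 2D Gaussian

\[
G(x, y) = \frac{1}{2\pi\sigma^{2}}
\exp\!\left(-\frac{(x - x_0)^2 + (y - y_0)^2}{2\sigma^{2}}\right),
\]

with \((x_0, y_0)\) given by `varPosX`, `varPosY` and \(\sigma\) by `varSd`.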
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx) | Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4. | Below is the instruction that describes the task:
### Input:
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
### Response:
def freqItems(self, cols, support=None):
"""
Finding frequent items for columns, possibly with false positives. Using the
frequent element count algorithm described in
"https://doi.org/10.1145/762471.762473, proposed by Karp, Schenker, and Papadimitriou".
:func:`DataFrame.freqItems` and :func:`DataFrameStatFunctions.freqItems` are aliases.
.. note:: This function is meant for exploratory data analysis, as we make no
guarantee about the backward compatibility of the schema of the resulting DataFrame.
:param cols: Names of the columns to calculate frequent items for as a list or tuple of
strings.
:param support: The frequency with which to consider an item 'frequent'. Default is 1%.
The support must be greater than 1e-4.
"""
if isinstance(cols, tuple):
cols = list(cols)
if not isinstance(cols, list):
raise ValueError("cols must be a list or tuple of column names as strings.")
if not support:
support = 0.01
return DataFrame(self._jdf.stat().freqItems(_to_seq(self._sc, cols), support), self.sql_ctx) |
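A short PySpark sketch of the call (the column names and toy data are made up):

```python
from pyspark.sql import SparkSession

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([(1, 11), (1, 12), (2, 13), (1, 11)], ["a", "b"])

# Items occurring in roughly half the rows or more, per column; the result is a
# single row with array columns named a_freqItems and b_freqItems.
df.freqItems(["a", "b"], support=0.5).show(truncate=False)
```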
def cnfwAlpha(self, R, Rs, rho0, r_core, ax_x, ax_y):
"""
deflection angel of NFW profile along the projection to coordinate axis
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
:param r200: radius of (sub)halo
:type r200: float>0
:param axis: projection to either x- or y-axis
:type axis: same as R
:return: Epsilon(R) projected density at radius R
"""
if isinstance(R, int) or isinstance(R, float):
R = max(R, 0.00001)
else:
R[R <= 0.00001] = 0.00001
x = R / Rs
b = r_core * Rs ** -1
b = max(b, 0.000001)
gx = self._G(x, b)
a = 4*rho0*Rs*gx/x**2
return a * ax_x, a * ax_y | deflection angel of NFW profile along the projection to coordinate axis
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
:param r200: radius of (sub)halo
:type r200: float>0
:param axis: projection to either x- or y-axis
:type axis: same as R
:return: Epsilon(R) projected density at radius R | Below is the instruction that describes the task:
### Input:
deflection angel of NFW profile along the projection to coordinate axis
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
:param r200: radius of (sub)halo
:type r200: float>0
:param axis: projection to either x- or y-axis
:type axis: same as R
:return: Epsilon(R) projected density at radius R
### Response:
def cnfwAlpha(self, R, Rs, rho0, r_core, ax_x, ax_y):
"""
deflection angel of NFW profile along the projection to coordinate axis
:param R: radius of interest
:type R: float/numpy array
:param Rs: scale radius
:type Rs: float
:param rho0: density normalization (characteristic density)
:type rho0: float
:param r200: radius of (sub)halo
:type r200: float>0
:param axis: projection to either x- or y-axis
:type axis: same as R
:return: Epsilon(R) projected density at radius R
"""
if isinstance(R, int) or isinstance(R, float):
R = max(R, 0.00001)
else:
R[R <= 0.00001] = 0.00001
x = R / Rs
b = r_core * Rs ** -1
b = max(b, 0.000001)
gx = self._G(x, b)
a = 4*rho0*Rs*gx/x**2
return a * ax_x, a * ax_y |
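Restated compactly, with \(g(x, b)\) standing for the profile integral implemented by `self._G` (the symbols below only rename quantities already present in the code):

\[
x = \frac{R}{R_s}, \qquad b = \frac{r_{\rm core}}{R_s}, \qquad
(\alpha_x, \alpha_y) = \frac{4 \rho_0 R_s\, g(x, b)}{x^{2}}\,\bigl(x_{\rm axis},\, y_{\rm axis}\bigr).
\]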
async def get_property(self, command):
"""Get property state from device."""
_LOGGER.debug("Getting property %s", command)
if self.__checkLock():
return BUSY
timeout = self.__get_timeout(command)
response = await self.send_request(
timeout=timeout,
params=EPSON_KEY_COMMANDS[command],
type='json_query')
if not response:
return False
try:
return response['projector']['feature']['reply']
except KeyError:
return BUSY | Get property state from device. | Below is the instruction that describes the task:
### Input:
Get property state from device.
### Response:
async def get_property(self, command):
"""Get property state from device."""
_LOGGER.debug("Getting property %s", command)
if self.__checkLock():
return BUSY
timeout = self.__get_timeout(command)
response = await self.send_request(
timeout=timeout,
params=EPSON_KEY_COMMANDS[command],
type='json_query')
if not response:
return False
try:
return response['projector']['feature']['reply']
except KeyError:
return BUSY |
def reset(self, rabaClass, namespace = None) :
"""rabaClass can either be a raba class of a string of a raba class name. In the latter case you must provide the namespace argument.
If it's a Raba Class the argument is ignored. If you fear cicular imports use strings"""
if type(rabaClass) is types.StringType :
self._raba_namespace = namespace
self.con = stp.RabaConnection(self._raba_namespace)
self.rabaClass = self.con.getClass(rabaClass)
else :
self.rabaClass = rabaClass
self._raba_namespace = self.rabaClass._raba_namespace
self.con = stp.RabaConnection(self._raba_namespace)
self.filters = []
self.tables = set()
#self.fctPattern = re.compile("\s*([^\s]+)\s*\(\s*([^\s]+)\s*\)\s*([=><])\s*([^\s]+)\s*")
self.fieldPattern = re.compile("\s*([^\s\(\)]+)\s*([=><]|([L|l][I|i][K|k][E|e]))\s*(.+)")
self.operators = set(['LIKE', '=', '<', '>', '=', '>=', '<=', '<>', '!=', 'IS']) | rabaClass can either be a raba class of a string of a raba class name. In the latter case you must provide the namespace argument.
If it's a Raba Class the argument is ignored. If you fear cicular imports use strings | Below is the instruction that describes the task:
### Input:
rabaClass can either be a raba class of a string of a raba class name. In the latter case you must provide the namespace argument.
If it's a Raba Class the argument is ignored. If you fear cicular imports use strings
### Response:
def reset(self, rabaClass, namespace = None) :
"""rabaClass can either be a raba class of a string of a raba class name. In the latter case you must provide the namespace argument.
If it's a Raba Class the argument is ignored. If you fear cicular imports use strings"""
if type(rabaClass) is types.StringType :
self._raba_namespace = namespace
self.con = stp.RabaConnection(self._raba_namespace)
self.rabaClass = self.con.getClass(rabaClass)
else :
self.rabaClass = rabaClass
self._raba_namespace = self.rabaClass._raba_namespace
self.con = stp.RabaConnection(self._raba_namespace)
self.filters = []
self.tables = set()
#self.fctPattern = re.compile("\s*([^\s]+)\s*\(\s*([^\s]+)\s*\)\s*([=><])\s*([^\s]+)\s*")
self.fieldPattern = re.compile("\s*([^\s\(\)]+)\s*([=><]|([L|l][I|i][K|k][E|e]))\s*(.+)")
self.operators = set(['LIKE', '=', '<', '>', '=', '>=', '<=', '<>', '!=', 'IS']) |
def relevent_issue(issue, after):
"""Returns True iff this issue is something we should show in the changelog."""
return (closed_issue(issue, after) and
issue_completed(issue) and
issue_section(issue)) | Returns True iff this issue is something we should show in the changelog. | Below is the instruction that describes the task:
### Input:
Returns True iff this issue is something we should show in the changelog.
### Response:
def relevent_issue(issue, after):
"""Returns True iff this issue is something we should show in the changelog."""
return (closed_issue(issue, after) and
issue_completed(issue) and
issue_section(issue)) |
def __parseResponseServer(self):
"""Parses the response of the server.
Exception
---------
A Sitools2Exception is raised when the server does not send back a success."""
self.__logger.debug(Sitools2Abstract.getBaseUrl(self) + SITools2Instance.PROJECTS_URI)
result = Util.retrieveJsonResponseFromServer(Sitools2Abstract.getBaseUrl(self) + SITools2Instance.PROJECTS_URI)
isSuccess = result['success']
if isSuccess:
data = result['data']
self.__logger.debug(data)
for i, dataItem in enumerate(data):
project = Project(Sitools2Abstract.getBaseUrl(self), dataItem)
self.__projects.append(project)
else:
raise Sitools2Exception("Error when loading the server response") | Parses the response of the server.
Exception
---------
A Sitools2Exception is raised when the server does not send back a success. | Below is the instruction that describes the task:
### Input:
Parses the response of the server.
Exception
---------
A Sitools2Exception is raised when the server does not send back a success.
### Response:
def __parseResponseServer(self):
"""Parses the response of the server.
Exception
---------
A Sitools2Exception is raised when the server does not send back a success."""
self.__logger.debug(Sitools2Abstract.getBaseUrl(self) + SITools2Instance.PROJECTS_URI)
result = Util.retrieveJsonResponseFromServer(Sitools2Abstract.getBaseUrl(self) + SITools2Instance.PROJECTS_URI)
isSuccess = result['success']
if isSuccess:
data = result['data']
self.__logger.debug(data)
for i, dataItem in enumerate(data):
project = Project(Sitools2Abstract.getBaseUrl(self), dataItem)
self.__projects.append(project)
else:
raise Sitools2Exception("Error when loading the server response") |
def load(self, file_name):
"""Load a raw data-file
Args:
file_name (path)
Returns:
loaded test
"""
new_rundata = self.loader(file_name)
new_rundata = self.inspect(new_rundata)
return new_rundata | Load a raw data-file
Args:
file_name (path)
Returns:
loaded test | Below is the instruction that describes the task:
### Input:
Load a raw data-file
Args:
file_name (path)
Returns:
loaded test
### Response:
def load(self, file_name):
"""Load a raw data-file
Args:
file_name (path)
Returns:
loaded test
"""
new_rundata = self.loader(file_name)
new_rundata = self.inspect(new_rundata)
return new_rundata |
def permute_graph(G, order):
'''Reorder the graph's vertices, returning a copy of the input graph.
order : integer array-like, some permutation of range(G.num_vertices()).
'''
adj = G.matrix('dense')
adj = adj[np.ix_(order, order)]
return Graph.from_adj_matrix(adj) | Reorder the graph's vertices, returning a copy of the input graph.
order : integer array-like, some permutation of range(G.num_vertices()). | Below is the instruction that describes the task:
### Input:
Reorder the graph's vertices, returning a copy of the input graph.
order : integer array-like, some permutation of range(G.num_vertices()).
### Response:
def permute_graph(G, order):
'''Reorder the graph's vertices, returning a copy of the input graph.
order : integer array-like, some permutation of range(G.num_vertices()).
'''
adj = G.matrix('dense')
adj = adj[np.ix_(order, order)]
return Graph.from_adj_matrix(adj) |
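The `np.ix_` call is what keeps rows and columns aligned during the reordering; a tiny standalone demonstration on a 3-vertex adjacency matrix:

```python
import numpy as np

adj = np.array([[0, 1, 0],
                [1, 0, 1],
                [0, 1, 0]])
order = [2, 0, 1]
# np.ix_ builds an open mesh, so both axes are permuted consistently.
print(adj[np.ix_(order, order)])
```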
def _init_create_child(self):
"""
Initialize the base class :attr:`create_child` and
:attr:`create_child_args` according to whether we need a PTY or not.
"""
if self._requires_pty():
self.create_child = mitogen.parent.hybrid_tty_create_child
else:
self.create_child = mitogen.parent.create_child
self.create_child_args = {
'stderr_pipe': True,
} | Initialize the base class :attr:`create_child` and
:attr:`create_child_args` according to whether we need a PTY or not. | Below is the instruction that describes the task:
### Input:
Initialize the base class :attr:`create_child` and
:attr:`create_child_args` according to whether we need a PTY or not.
### Response:
def _init_create_child(self):
"""
Initialize the base class :attr:`create_child` and
:attr:`create_child_args` according to whether we need a PTY or not.
"""
if self._requires_pty():
self.create_child = mitogen.parent.hybrid_tty_create_child
else:
self.create_child = mitogen.parent.create_child
self.create_child_args = {
'stderr_pipe': True,
} |
def mass_3d(self, r, rho0, gamma):
"""
mass enclosed a 3d sphere or radius r
:param r:
:param a:
:param s:
:return:
"""
mass_3d = 4 * np.pi * rho0 /(-gamma + 3) * r ** (-gamma + 3)
return mass_3d | mass enclosed a 3d sphere or radius r
:param r:
:param a:
:param s:
:return: | Below is the instruction that describes the task:
### Input:
mass enclosed a 3d sphere or radius r
:param r:
:param a:
:param s:
:return:
### Response:
def mass_3d(self, r, rho0, gamma):
"""
mass enclosed a 3d sphere or radius r
:param r:
:param a:
:param s:
:return:
"""
mass_3d = 4 * np.pi * rho0 /(-gamma + 3) * r ** (-gamma + 3)
return mass_3d |
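The returned expression is the shell integral of a single power-law density \(\rho(r) = \rho_0\, r^{-\gamma}\) (assumed from the normalization used above), valid for \(\gamma < 3\):

\[
M_{3D}(r) = \int_0^{r} 4\pi r'^{\,2}\, \rho_0\, r'^{-\gamma}\, dr'
          = \frac{4\pi \rho_0}{3-\gamma}\, r^{\,3-\gamma}.
\]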
def disassociate_hosting_device_with_config_agent(
self, client, config_agent_id, hosting_device_id):
"""Disassociates a hosting_device with a config agent."""
return client.delete((ConfigAgentHandlingHostingDevice.resource_path +
CFG_AGENT_HOSTING_DEVICES + "/%s") % (
config_agent_id, hosting_device_id)) | Disassociates a hosting_device with a config agent. | Below is the instruction that describes the task:
### Input:
Disassociates a hosting_device with a config agent.
### Response:
def disassociate_hosting_device_with_config_agent(
self, client, config_agent_id, hosting_device_id):
"""Disassociates a hosting_device with a config agent."""
return client.delete((ConfigAgentHandlingHostingDevice.resource_path +
CFG_AGENT_HOSTING_DEVICES + "/%s") % (
config_agent_id, hosting_device_id)) |
def _etree_py26_write(f, tree):
"""
Compatibility workaround for ElementTree shipped with py2.6
"""
f.write("<?xml version='1.0' encoding='utf-8'?>\n".encode('utf-8'))
if etree.VERSION[:3] == '1.2':
def fixtag(tag, namespaces):
if tag == XML_NS + 'lang':
return 'xml:lang', ""
if '}' in tag:
j = tag.index('}') + 1
tag = tag[j:]
xmlns = ''
if tag == 'feed':
xmlns = ('xmlns', str('http://www.w3.org/2005/Atom'))
namespaces['http://www.w3.org/2005/Atom'] = 'xmlns'
return tag, xmlns
else:
fixtag = etree.fixtag
old_fixtag = etree.fixtag
etree.fixtag = fixtag
try:
tree.write(f, encoding=str('utf-8'))
finally:
etree.fixtag = old_fixtag | Compatibility workaround for ElementTree shipped with py2.6 | Below is the instruction that describes the task:
### Input:
Compatibility workaround for ElementTree shipped with py2.6
### Response:
def _etree_py26_write(f, tree):
"""
Compatibility workaround for ElementTree shipped with py2.6
"""
f.write("<?xml version='1.0' encoding='utf-8'?>\n".encode('utf-8'))
if etree.VERSION[:3] == '1.2':
def fixtag(tag, namespaces):
if tag == XML_NS + 'lang':
return 'xml:lang', ""
if '}' in tag:
j = tag.index('}') + 1
tag = tag[j:]
xmlns = ''
if tag == 'feed':
xmlns = ('xmlns', str('http://www.w3.org/2005/Atom'))
namespaces['http://www.w3.org/2005/Atom'] = 'xmlns'
return tag, xmlns
else:
fixtag = etree.fixtag
old_fixtag = etree.fixtag
etree.fixtag = fixtag
try:
tree.write(f, encoding=str('utf-8'))
finally:
etree.fixtag = old_fixtag |
def fetch(self, url, open_graph=None, twitter_card=None, touch_icon=None,
favicon=None, all_images=None, parser=None, handle_file_content=None,
canonical=None):
"""Retrieves content from the specified url, parses it, and returns
a beautifully crafted dictionary of important information about that
web page.
Priority tree is as follows:
1. OEmbed
2. Open Graph
3. Twitter Card
4. Other meta content (i.e. description, keywords)
:param url: URL to send a GET request to
:param open_graph: (optional) If ``True``, filters web page content for Open Graph meta tags. The content of these properties have top priority on return values.
:type open_graph: bool
:param twitter_card: (optional) If ``True``, filters web page content for Twitter Card meta tags
:type twitter_card: bool
:param touch_icon: (optional) If ``True``, retrieves Apple touch icons and includes them in the response ``images`` array
:type touch_icon: bool
:param favicon: (optional) If ``True``, retrieves any favicon images and includes them in the response ``images`` array
:type favicon: bool
:param canonical: (optional) If ``True``, retrieves canonical url from meta tags. Default: False
:type canonical: bool
:param all_images: (optional) If ``True``, retrieves images inside web pages body and includes them in the response ``images`` array. Default: False
:type all_images: bool
:param parser: (optional) String reference for the parser that BeautifulSoup will use
:type parser: string
:param handle_file_content: (optional) If ``True``, lassie will return a generic response when a file is fetched. Default: False
:type handle_file_content: bool
"""
# Set params, method params have priority over class params
open_graph = merge_settings(open_graph, self.open_graph)
twitter_card = merge_settings(twitter_card, self.twitter_card)
touch_icon = merge_settings(touch_icon, self.touch_icon)
favicon = merge_settings(favicon, self.favicon)
canonical = merge_settings(canonical, self.canonical)
all_images = merge_settings(all_images, self.all_images)
parser = merge_settings(parser, self.parser)
handle_file_content = merge_settings(handle_file_content, self.handle_file_content)
data = {
'images': [],
'videos': [],
}
has_file_content = False
content_type = None
if handle_file_content:
headers, status_code = self._retrieve_headers(url)
content_type = headers.get('Content-Type')
has_file_content = content_type and not 'text/html' in content_type
if has_file_content and content_type:
has_image_content = content_type in IMAGE_MIMETYPES
if has_image_content:
parsed_url = urlparse(url)
data['title'] = basename(parsed_url.path.lstrip('/')) # TODO: if the url doesn't have an extension, maybe we should match it up to the mimetype and append an ext?
data['url'] = url
data['images'].append({
'type': 'body_image',
'src': url,
})
else:
try:
oembed_data, status_code = self._retrieve_oembed_data(url)
parse_oembed_data(oembed_data, data)
except LassieError:
oembed_data = None
html, status_code = self._retrieve_content(url)
if not html and not oembed_data:
raise LassieError('There was no content to parse.')
if '<html' not in html:
html = re.sub(r'(?:<!DOCTYPE(?:\s\w)?>(?:<head>)?)', '<!DOCTYPE html><html>', html)
soup = BeautifulSoup(clean_text(html), parser)
self._filter_amp_data(soup, data, url, all_images)
if open_graph:
self._filter_meta_data('open_graph', soup, data, url)
if twitter_card:
self._filter_meta_data('twitter_card', soup, data)
self._filter_meta_data('generic', soup, data)
if touch_icon:
self._filter_link_tag_data('touch_icon', soup, data, url)
if favicon:
self._filter_link_tag_data('favicon', soup, data, url)
if canonical:
self._filter_link_tag_data('canonical', soup, data, url)
if all_images:
# Maybe filter out 1x1, no "good" way to do this if image doesn't supply
# width/height.
self._find_all_images(soup, data, url)
# TODO: Find a good place for setting url, title and locale
if soup.html.get('lang'):
lang = soup.html.get('lang')
else:
lang = soup.html.get('xml:lang')
if lang and ('locale' not in data):
locale = normalize_locale(lang)
if locale:
data['locale'] = locale
data_url = data.get('url')
if not data_url or (data_url in url and len(data_url) < len(url)):
data['url'] = url
if ('title' not in data or not data.get('title')) and hasattr(soup.title, 'string'):
data['title'] = soup.title.string
data['status_code'] = status_code
return data | Retrieves content from the specified url, parses it, and returns
a beautifully crafted dictionary of important information about that
web page.
Priority tree is as follows:
1. OEmbed
2. Open Graph
3. Twitter Card
4. Other meta content (i.e. description, keywords)
:param url: URL to send a GET request to
:param open_graph: (optional) If ``True``, filters web page content for Open Graph meta tags. The content of these properties has top priority on return values.
:type open_graph: bool
:param twitter_card: (optional) If ``True``, filters web page content for Twitter Card meta tags
:type twitter_card: bool
:param touch_icon: (optional) If ``True``, retrieves Apple touch icons and includes them in the response ``images`` array
:type touch_icon: bool
:param favicon: (optional) If ``True``, retrieves any favicon images and includes them in the response ``images`` array
:type favicon: bool
:param canonical: (optional) If ``True``, retrieves canonical url from meta tags. Default: False
:type canonical: bool
:param all_images: (optional) If ``True``, retrieves images inside web pages body and includes them in the response ``images`` array. Default: False
:type all_images: bool
:param parser: (optional) String reference for the parser that BeautifulSoup will use
:type parser: string
:param handle_file_content: (optional) If ``True``, lassie will return a generic response when a file is fetched. Default: False
:type handle_file_content: bool | Below is the instruction that describes the task:
### Input:
Retrieves content from the specified url, parses it, and returns
a beautifully crafted dictionary of important information about that
web page.
Priority tree is as follows:
1. OEmbed
2. Open Graph
3. Twitter Card
4. Other meta content (i.e. description, keywords)
:param url: URL to send a GET request to
:param open_graph: (optional) If ``True``, filters web page content for Open Graph meta tags. The content of these properties has top priority on return values.
:type open_graph: bool
:param twitter_card: (optional) If ``True``, filters web page content for Twitter Card meta tags
:type twitter_card: bool
:param touch_icon: (optional) If ``True``, retrieves Apple touch icons and includes them in the response ``images`` array
:type touch_icon: bool
:param favicon: (optional) If ``True``, retrieves any favicon images and includes them in the response ``images`` array
:type favicon: bool
:param canonical: (optional) If ``True``, retrieves canonical url from meta tags. Default: False
:type canonical: bool
:param all_images: (optional) If ``True``, retrieves images inside web pages body and includes them in the response ``images`` array. Default: False
:type all_images: bool
:param parser: (optional) String reference for the parser that BeautifulSoup will use
:type parser: string
:param handle_file_content: (optional) If ``True``, lassie will return a generic response when a file is fetched. Default: False
:type handle_file_content: bool
### Response:
def fetch(self, url, open_graph=None, twitter_card=None, touch_icon=None,
favicon=None, all_images=None, parser=None, handle_file_content=None,
canonical=None):
"""Retrieves content from the specified url, parses it, and returns
a beautifully crafted dictionary of important information about that
web page.
Priority tree is as follows:
1. OEmbed
2. Open Graph
3. Twitter Card
4. Other meta content (i.e. description, keywords)
:param url: URL to send a GET request to
:param open_graph: (optional) If ``True``, filters web page content for Open Graph meta tags. The content of these properties has top priority on return values.
:type open_graph: bool
:param twitter_card: (optional) If ``True``, filters web page content for Twitter Card meta tags
:type twitter_card: bool
:param touch_icon: (optional) If ``True``, retrieves Apple touch icons and includes them in the response ``images`` array
:type touch_icon: bool
:param favicon: (optional) If ``True``, retrieves any favicon images and includes them in the response ``images`` array
:type favicon: bool
:param canonical: (optional) If ``True``, retrieves canonical url from meta tags. Default: False
:type canonical: bool
:param all_images: (optional) If ``True``, retrieves images inside web pages body and includes them in the response ``images`` array. Default: False
:type all_images: bool
:param parser: (optional) String reference for the parser that BeautifulSoup will use
:type parser: string
:param handle_file_content: (optional) If ``True``, lassie will return a generic response when a file is fetched. Default: False
:type handle_file_content: bool
"""
# Set params, method params have priority over class params
open_graph = merge_settings(open_graph, self.open_graph)
twitter_card = merge_settings(twitter_card, self.twitter_card)
touch_icon = merge_settings(touch_icon, self.touch_icon)
favicon = merge_settings(favicon, self.favicon)
canonical = merge_settings(canonical, self.canonical)
all_images = merge_settings(all_images, self.all_images)
parser = merge_settings(parser, self.parser)
handle_file_content = merge_settings(handle_file_content, self.handle_file_content)
data = {
'images': [],
'videos': [],
}
has_file_content = False
content_type = None
if handle_file_content:
headers, status_code = self._retrieve_headers(url)
content_type = headers.get('Content-Type')
has_file_content = content_type and not 'text/html' in content_type
if has_file_content and content_type:
has_image_content = content_type in IMAGE_MIMETYPES
if has_image_content:
parsed_url = urlparse(url)
data['title'] = basename(parsed_url.path.lstrip('/')) # TODO: if the url doesn't have an extension, maybe we should match it up to the mimetype and append an ext?
data['url'] = url
data['images'].append({
'type': 'body_image',
'src': url,
})
else:
try:
oembed_data, status_code = self._retrieve_oembed_data(url)
parse_oembed_data(oembed_data, data)
except LassieError:
oembed_data = None
html, status_code = self._retrieve_content(url)
if not html and not oembed_data:
raise LassieError('There was no content to parse.')
if '<html' not in html:
html = re.sub(r'(?:<!DOCTYPE(?:\s\w)?>(?:<head>)?)', '<!DOCTYPE html><html>', html)
soup = BeautifulSoup(clean_text(html), parser)
self._filter_amp_data(soup, data, url, all_images)
if open_graph:
self._filter_meta_data('open_graph', soup, data, url)
if twitter_card:
self._filter_meta_data('twitter_card', soup, data)
self._filter_meta_data('generic', soup, data)
if touch_icon:
self._filter_link_tag_data('touch_icon', soup, data, url)
if favicon:
self._filter_link_tag_data('favicon', soup, data, url)
if canonical:
self._filter_link_tag_data('canonical', soup, data, url)
if all_images:
# Maybe filter out 1x1, no "good" way to do this if image doesn't supply
# width/height.
self._find_all_images(soup, data, url)
# TODO: Find a good place for setting url, title and locale
if soup.html.get('lang'):
lang = soup.html.get('lang')
else:
lang = soup.html.get('xml:lang')
if lang and ('locale' not in data):
locale = normalize_locale(lang)
if locale:
data['locale'] = locale
data_url = data.get('url')
if not data_url or (data_url in url and len(data_url) < len(url)):
data['url'] = url
if ('title' not in data or not data.get('title')) and hasattr(soup.title, 'string'):
data['title'] = soup.title.string
data['status_code'] = status_code
return data |
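For orientation, a minimal usage sketch of the method above; the import path follows lassie's public interface, the URL is a placeholder, and the printed keys ('title', 'status_code', 'images') are the ones populated by the code itself.
from lassie import Lassie

l = Lassie()
l.open_graph = True    # prefer Open Graph tags when the page provides them
result = l.fetch('https://example.com/article')   # placeholder URL

print(result.get('title'))        # resolved via the priority tree above
print(result.get('status_code'))  # status of the underlying HTTP request
for image in result.get('images', []):
    print(image.get('type'), image.get('src'))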
def reject(self):
"""
Emits the rejected signal and closes the popup.
"""
self._result = 0
if not self.signalsBlocked():
self.rejected.emit()
if self.autoCloseOnReject():
self.close() | Emits the rejected signal and closes the popup. | Below is the instruction that describes the task:
### Input:
Emits the rejected signal and closes the popup.
### Response:
def reject(self):
"""
Emits the rejected signal and closes the popup.
"""
self._result = 0
if not self.signalsBlocked():
self.rejected.emit()
if self.autoCloseOnReject():
self.close() |
def get_recent_async(self, count, callback):
"""Similar to `get_recent` except instead of returning an iterable, passes each dict to the given function which
must accept a single argument. Returns the request.
`callback` (mandatory) (function) instead of returning an iterable, pass each dict (as described above) to the
given function which must accept a single argument. Nothing is returned.
"""
validate_nonnegative_int(count, 'count')
Validation.callable_check(callback, allow_none=True)
evt = self._client._request_sub_recent(self.subid, count=count)
self._client._add_recent_cb_for(evt, callback)
return evt | Similar to `get_recent` except instead of returning an iterable, passes each dict to the given function which
must accept a single argument. Returns the request.
`callback` (mandatory) (function) instead of returning an iterable, pass each dict (as described above) to the
given function which must accept a single argument. Nothing is returned. | Below is the instruction that describes the task:
### Input:
Similar to `get_recent` except instead of returning an iterable, passes each dict to the given function which
must accept a single argument. Returns the request.
`callback` (mandatory) (function) instead of returning an iterable, pass each dict (as described above) to the
given function which must accept a single argument. Nothing is returned.
### Response:
def get_recent_async(self, count, callback):
"""Similar to `get_recent` except instead of returning an iterable, passes each dict to the given function which
must accept a single argument. Returns the request.
`callback` (mandatory) (function) instead of returning an iterable, pass each dict (as described above) to the
given function which must accept a single argument. Nothing is returned.
"""
validate_nonnegative_int(count, 'count')
Validation.callable_check(callback, allow_none=True)
evt = self._client._request_sub_recent(self.subid, count=count)
self._client._add_recent_cb_for(evt, callback)
return evt |
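A brief, hedged sketch of the callback style this method expects; the `subscription` object and the shape of each sample are assumptions, since the surrounding client class is not shown here.
def on_sample(sample):
    # Must accept exactly one argument; called once per recent sample.
    print('recent sample:', sample)

# `subscription` stands in for the object that exposes get_recent_async().
request = subscription.get_recent_async(10, on_sample)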
def _sim_strlen(self, str_addr):
"""
Return the result of invoking the strlen simprocedure on `str_addr`.
"""
from .. import SIM_PROCEDURES
strlen = SIM_PROCEDURES['libc']['strlen']
return self.inline_call(strlen, str_addr).ret_expr | Return the result of invoking the strlen simprocedure on `str_addr`. | Below is the instruction that describes the task:
### Input:
Return the result of invoking the strlen simprocedure on `str_addr`.
### Response:
def _sim_strlen(self, str_addr):
"""
Return the result of invoking the strlen simprocedure on `str_addr`.
"""
from .. import SIM_PROCEDURES
strlen = SIM_PROCEDURES['libc']['strlen']
return self.inline_call(strlen, str_addr).ret_expr |
def recRemoveTreeFormating(element):
"""Removes whitespace characters, which are leftovers from previous xml
formatting.
:param element: an instance of lxml.etree._Element
str.strip() is applied to the "text" and the "tail" attribute of the
element and recursively to all child elements.
"""
children = element.getchildren()
if len(children) > 0:
for child in children:
recRemoveTreeFormating(child)
if element.text is not None:
if len(element.text.strip()) == 0:
element.text = None
else:
element.text = element.text.strip()
if element.tail is not None:
if len(element.tail.strip()) == 0:
element.tail = None
else:
element.tail = element.tail.strip() | Removes whitespace characters, which are leftovers from previous xml
formatting.
:param element: an instance of lxml.etree._Element
str.strip() is applied to the "text" and the "tail" attribute of the
element and recursively to all child elements. | Below is the instruction that describes the task:
### Input:
Removes whitespace characters, which are leftovers from previous xml
formatting.
:param element: an instance of lxml.etree._Element
str.strip() is applied to the "text" and the "tail" attribute of the
element and recursively to all child elements.
### Response:
def recRemoveTreeFormating(element):
"""Removes whitespace characters, which are leftovers from previous xml
formatting.
:param element: an instance of lxml.etree._Element
str.strip() is applied to the "text" and the "tail" attribute of the
element and recursively to all child elements.
"""
children = element.getchildren()
if len(children) > 0:
for child in children:
recRemoveTreeFormating(child)
if element.text is not None:
if len(element.text.strip()) == 0:
element.text = None
else:
element.text = element.text.strip()
if element.tail is not None:
if len(element.tail.strip()) == 0:
element.tail = None
else:
element.tail = element.tail.strip() |
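A self-contained example of the helper above on a pretty-printed fragment; it assumes only that lxml is installed and that recRemoveTreeFormating is in scope.
from lxml import etree

xml = '<config>\n    <item>  value  </item>\n    <empty>\n    </empty>\n</config>'
root = etree.fromstring(xml)
recRemoveTreeFormating(root)

# Indentation-only text/tail nodes are dropped and real text is stripped.
print(etree.tostring(root))  # b'<config><item>value</item><empty/></config>'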
def add_dispatcher(self, dsp, inputs, outputs, dsp_id=None,
input_domain=None, weight=None, inp_weight=None,
description=None, include_defaults=False,
await_domain=None, **kwargs):
"""
Add a single sub-dispatcher node to dispatcher.
:param dsp:
Child dispatcher that is added as sub-dispatcher node to the parent
dispatcher.
:type dsp: Dispatcher | dict[str, list]
:param inputs:
Inputs mapping. Data node ids from parent dispatcher to child
sub-dispatcher.
:type inputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param outputs:
Outputs mapping. Data node ids from child sub-dispatcher to parent
dispatcher.
:type outputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param dsp_id:
Sub-dispatcher node id.
If None will be assigned as <dsp.name>.
:type dsp_id: str, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes a dictionary with the inputs
of the sub-dispatcher node and returns True if input values satisfy
the domain, otherwise False.
.. note:: This function is invoked every time that a data node reaches
the sub-dispatcher node.
:type input_domain: (dict) -> bool, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the sub-dispatcher node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, int | float], optional
:param description:
Sub-dispatcher node's description.
:type description: str, optional
:param include_defaults:
If True the default values of the sub-dispatcher are added to the
current dispatcher.
:type include_defaults: bool, optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Sub-dispatcher node id.
:rtype: str
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`,
:func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Create a sub-dispatcher::
>>> sub_dsp = Dispatcher()
>>> sub_dsp.add_function('max', max, ['a', 'b'], ['c'])
'max'
Add the sub-dispatcher to the parent dispatcher::
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher', dsp=sub_dsp,
... inputs={'A': 'a', 'B': 'b'},
... outputs={'c': 'C'})
'Sub-Dispatcher'
Add a sub-dispatcher node with domain::
>>> def my_domain(kwargs):
... return kwargs['C'] > 3
...
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher with domain',
... dsp=sub_dsp, inputs={'C': 'a', 'D': 'b'},
... outputs={('c', 'b'): ('E', 'E1')},
... input_domain=my_domain)
'Sub-Dispatcher with domain'
"""
from .utils.blue import _init
dsp = _init(dsp)
if not isinstance(dsp, self.__class__):
kw = dsp
dsp = self.__class__(
name=dsp_id or 'unknown',
executor=self.executor
)
dsp.add_from_lists(**kw)
if not dsp_id: # Get the dsp id.
dsp_id = dsp.name or 'unknown'
if description is None: # Get description.
description = dsp.__doc__ or None
if not isinstance(inputs, dict): # Create the inputs dict.
inputs = kk_dict(*inputs)
if not isinstance(outputs, dict): # Create the outputs dict.
outputs = kk_dict(*outputs)
# Set zero as default input distances.
# noinspection PyTypeChecker
_weight_from = dict.fromkeys(inputs.keys(), 0.0)
_weight_from.update(inp_weight or {})
from .utils.alg import _nodes
# Return dispatcher node id.
dsp_id = self.add_function(
dsp_id, dsp, sorted(_nodes(inputs)),
sorted(_nodes(outputs.values())), input_domain, weight,
_weight_from, type='dispatcher', description=description,
wait_inputs=False, await_domain=await_domain, **kwargs
)
# Set proper inputs.
self.nodes[dsp_id]['inputs'] = inputs
# Set proper outputs.
self.nodes[dsp_id]['outputs'] = outputs
if SINK not in dsp.nodes and \
SINK in _nodes(inputs.values()).union(_nodes(outputs)):
dsp.add_data(SINK) # Add sink node.
# Import default values from sub-dispatcher.
if include_defaults:
dsp_dfl = dsp.default_values # Namespace shortcut.
remove = set() # Set of nodes to remove after the import.
# Set default values.
for k, v in inputs.items():
if isinstance(v, str):
if v in dsp_dfl:
self.set_default_value(k, **dsp_dfl.pop(v))
else:
if v[0] in dsp_dfl:
self.set_default_value(k, **dsp_dfl.pop(v[0]))
remove.update(v[1:])
# Remove default values.
for k in remove:
dsp_dfl.pop(k, None)
return dsp_id | Add a single sub-dispatcher node to dispatcher.
:param dsp:
Child dispatcher that is added as sub-dispatcher node to the parent
dispatcher.
:type dsp: Dispatcher | dict[str, list]
:param inputs:
Inputs mapping. Data node ids from parent dispatcher to child
sub-dispatcher.
:type inputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param outputs:
Outputs mapping. Data node ids from child sub-dispatcher to parent
dispatcher.
:type outputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param dsp_id:
Sub-dispatcher node id.
If None will be assigned as <dsp.name>.
:type dsp_id: str, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes a dictionary with the inputs
of the sub-dispatcher node and returns True if input values satisfy
the domain, otherwise False.
.. note:: This function is invoked every time that a data node reaches
the sub-dispatcher node.
:type input_domain: (dict) -> bool, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the sub-dispatcher node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, int | float], optional
:param description:
Sub-dispatcher node's description.
:type description: str, optional
:param include_defaults:
If True the default values of the sub-dispatcher are added to the
current dispatcher.
:type include_defaults: bool, optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Sub-dispatcher node id.
:rtype: str
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`,
:func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Create a sub-dispatcher::
>>> sub_dsp = Dispatcher()
>>> sub_dsp.add_function('max', max, ['a', 'b'], ['c'])
'max'
Add the sub-dispatcher to the parent dispatcher::
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher', dsp=sub_dsp,
... inputs={'A': 'a', 'B': 'b'},
... outputs={'c': 'C'})
'Sub-Dispatcher'
Add a sub-dispatcher node with domain::
>>> def my_domain(kwargs):
... return kwargs['C'] > 3
...
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher with domain',
... dsp=sub_dsp, inputs={'C': 'a', 'D': 'b'},
... outputs={('c', 'b'): ('E', 'E1')},
... input_domain=my_domain)
'Sub-Dispatcher with domain' | Below is the instruction that describes the task:
### Input:
Add a single sub-dispatcher node to dispatcher.
:param dsp:
Child dispatcher that is added as sub-dispatcher node to the parent
dispatcher.
:type dsp: Dispatcher | dict[str, list]
:param inputs:
Inputs mapping. Data node ids from parent dispatcher to child
sub-dispatcher.
:type inputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param outputs:
Outputs mapping. Data node ids from child sub-dispatcher to parent
dispatcher.
:type outputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param dsp_id:
Sub-dispatcher node id.
If None will be assigned as <dsp.name>.
:type dsp_id: str, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes a dictionary with the inputs
of the sub-dispatcher node and returns True if input values satisfy
the domain, otherwise False.
.. note:: This function is invoked every time that a data node reaches
the sub-dispatcher node.
:type input_domain: (dict) -> bool, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the sub-dispatcher node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, int | float], optional
:param description:
Sub-dispatcher node's description.
:type description: str, optional
:param include_defaults:
If True the default values of the sub-dispatcher are added to the
current dispatcher.
:type include_defaults: bool, optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Sub-dispatcher node id.
:rtype: str
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`,
:func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Create a sub-dispatcher::
>>> sub_dsp = Dispatcher()
>>> sub_dsp.add_function('max', max, ['a', 'b'], ['c'])
'max'
Add the sub-dispatcher to the parent dispatcher::
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher', dsp=sub_dsp,
... inputs={'A': 'a', 'B': 'b'},
... outputs={'c': 'C'})
'Sub-Dispatcher'
Add a sub-dispatcher node with domain::
>>> def my_domain(kwargs):
... return kwargs['C'] > 3
...
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher with domain',
... dsp=sub_dsp, inputs={'C': 'a', 'D': 'b'},
... outputs={('c', 'b'): ('E', 'E1')},
... input_domain=my_domain)
'Sub-Dispatcher with domain'
### Response:
def add_dispatcher(self, dsp, inputs, outputs, dsp_id=None,
input_domain=None, weight=None, inp_weight=None,
description=None, include_defaults=False,
await_domain=None, **kwargs):
"""
Add a single sub-dispatcher node to dispatcher.
:param dsp:
Child dispatcher that is added as sub-dispatcher node to the parent
dispatcher.
:type dsp: Dispatcher | dict[str, list]
:param inputs:
Inputs mapping. Data node ids from parent dispatcher to child
sub-dispatcher.
:type inputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param outputs:
Outputs mapping. Data node ids from child sub-dispatcher to parent
dispatcher.
:type outputs: dict[str, str | list[str]] | tuple[str] |
(str, ..., dict[str, str | list[str]])
:param dsp_id:
Sub-dispatcher node id.
If None will be assigned as <dsp.name>.
:type dsp_id: str, optional
:param input_domain:
A function that checks if input values satisfy the function domain.
This can be any function that takes a dictionary with the inputs
of the sub-dispatcher node and returns True if input values satisfy
the domain, otherwise False.
.. note:: This function is invoked every time that a data node reaches
the sub-dispatcher node.
:type input_domain: (dict) -> bool, optional
:param weight:
Node weight. It is a weight coefficient that is used by the dispatch
algorithm to estimate the minimum workflow.
:type weight: float, int, optional
:param inp_weight:
Edge weights from data nodes to the sub-dispatcher node.
It is a dictionary (key=data node id) with the weight coefficients
used by the dispatch algorithm to estimate the minimum workflow.
:type inp_weight: dict[str, int | float], optional
:param description:
Sub-dispatcher node's description.
:type description: str, optional
:param include_defaults:
If True the default values of the sub-dispatcher are added to the
current dispatcher.
:type include_defaults: bool, optional
:param await_domain:
If True the Dispatcher waits all input results before executing the
`input_domain` function. If a number is defined this is used as
`timeout` for `Future.result` method [default: True]. Note this is
used when asynchronous or parallel execution is enabled.
:type await_domain: bool|int|float, optional
:param kwargs:
Set additional node attributes using key=value.
:type kwargs: keyword arguments, optional
:return:
Sub-dispatcher node id.
:rtype: str
.. seealso:: :func:`add_data`, :func:`add_func`, :func:`add_function`,
:func:`add_from_lists`
**--------------------------------------------------------------------**
**Example**:
.. testsetup::
>>> dsp = Dispatcher(name='Dispatcher')
Create a sub-dispatcher::
>>> sub_dsp = Dispatcher()
>>> sub_dsp.add_function('max', max, ['a', 'b'], ['c'])
'max'
Add the sub-dispatcher to the parent dispatcher::
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher', dsp=sub_dsp,
... inputs={'A': 'a', 'B': 'b'},
... outputs={'c': 'C'})
'Sub-Dispatcher'
Add a sub-dispatcher node with domain::
>>> def my_domain(kwargs):
... return kwargs['C'] > 3
...
>>> dsp.add_dispatcher(dsp_id='Sub-Dispatcher with domain',
... dsp=sub_dsp, inputs={'C': 'a', 'D': 'b'},
... outputs={('c', 'b'): ('E', 'E1')},
... input_domain=my_domain)
'Sub-Dispatcher with domain'
"""
from .utils.blue import _init
dsp = _init(dsp)
if not isinstance(dsp, self.__class__):
kw = dsp
dsp = self.__class__(
name=dsp_id or 'unknown',
executor=self.executor
)
dsp.add_from_lists(**kw)
if not dsp_id: # Get the dsp id.
dsp_id = dsp.name or 'unknown'
if description is None: # Get description.
description = dsp.__doc__ or None
if not isinstance(inputs, dict): # Create the inputs dict.
inputs = kk_dict(*inputs)
if not isinstance(outputs, dict): # Create the outputs dict.
outputs = kk_dict(*outputs)
# Set zero as default input distances.
# noinspection PyTypeChecker
_weight_from = dict.fromkeys(inputs.keys(), 0.0)
_weight_from.update(inp_weight or {})
from .utils.alg import _nodes
# Return dispatcher node id.
dsp_id = self.add_function(
dsp_id, dsp, sorted(_nodes(inputs)),
sorted(_nodes(outputs.values())), input_domain, weight,
_weight_from, type='dispatcher', description=description,
wait_inputs=False, await_domain=await_domain, **kwargs
)
# Set proper inputs.
self.nodes[dsp_id]['inputs'] = inputs
# Set proper outputs.
self.nodes[dsp_id]['outputs'] = outputs
if SINK not in dsp.nodes and \
SINK in _nodes(inputs.values()).union(_nodes(outputs)):
dsp.add_data(SINK) # Add sink node.
# Import default values from sub-dispatcher.
if include_defaults:
dsp_dfl = dsp.default_values # Namespace shortcut.
remove = set() # Set of nodes to remove after the import.
# Set default values.
for k, v in inputs.items():
if isinstance(v, str):
if v in dsp_dfl:
self.set_default_value(k, **dsp_dfl.pop(v))
else:
if v[0] in dsp_dfl:
self.set_default_value(k, **dsp_dfl.pop(v[0]))
remove.update(v[1:])
# Remove default values.
for k in remove:
dsp_dfl.pop(k, None)
return dsp_id |
def get_table(self, dataset, table, project_id=None):
""" Retrieve a table if it exists, otherwise return an empty dict.
Parameters
----------
dataset : str
The dataset that the table is in
table : str
The name of the table
project_id: str, optional
The project that the table is in
Returns
-------
dict
Containing the table object if it exists, else empty
"""
project_id = self._get_project_id(project_id)
try:
table = self.bigquery.tables().get(
projectId=project_id, datasetId=dataset,
tableId=table).execute(num_retries=self.num_retries)
except HttpError:
table = {}
return table | Retrieve a table if it exists, otherwise return an empty dict.
Parameters
----------
dataset : str
The dataset that the table is in
table : str
The name of the table
project_id: str, optional
The project that the table is in
Returns
-------
dict
Containing the table object if it exists, else empty | Below is the instruction that describes the task:
### Input:
Retrieve a table if it exists, otherwise return an empty dict.
Parameters
----------
dataset : str
The dataset that the table is in
table : str
The name of the table
project_id: str, optional
The project that the table is in
Returns
-------
dict
Containing the table object if it exists, else empty
### Response:
def get_table(self, dataset, table, project_id=None):
""" Retrieve a table if it exists, otherwise return an empty dict.
Parameters
----------
dataset : str
The dataset that the table is in
table : str
The name of the table
project_id: str, optional
The project that the table is in
Returns
-------
dict
Containing the table object if it exists, else empty
"""
project_id = self._get_project_id(project_id)
try:
table = self.bigquery.tables().get(
projectId=project_id, datasetId=dataset,
tableId=table).execute(num_retries=self.num_retries)
except HttpError:
table = {}
return table |
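A short usage sketch; `client` stands for a constructed instance of the class above (authentication omitted), the dataset and table names are placeholders, and the 'id' field is assumed from the BigQuery table resource format.
table = client.get_table('analytics', 'events')
if table:
    print('table exists:', table.get('id'))
else:
    print('table not found; safe to create it')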
def validate_config_parameters(config_json, allowed_keys, allowed_types):
"""Validate parameters in config file."""
custom_fields = config_json.get(defs.PARAMETERS, [])
for field in custom_fields:
validate_field(field, allowed_keys, allowed_types)
default = field.get(defs.DEFAULT)
field_type = field.get(defs.TYPE)
if default:
validate_field_matches_type(field[defs.VALUE], default, field_type) | Validate parameters in config file. | Below is the instruction that describes the task:
### Input:
Validate parameters in config file.
### Response:
def validate_config_parameters(config_json, allowed_keys, allowed_types):
"""Validate parameters in config file."""
custom_fields = config_json.get(defs.PARAMETERS, [])
for field in custom_fields:
validate_field(field, allowed_keys, allowed_types)
default = field.get(defs.DEFAULT)
field_type = field.get(defs.TYPE)
if default:
validate_field_matches_type(field[defs.VALUE], default, field_type) |
def get_ntp_servers(self):
"""Return the NTP servers configured on the device."""
ntp_table = junos_views.junos_ntp_servers_config_table(self.device)
ntp_table.get()
ntp_servers = ntp_table.items()
if not ntp_servers:
return {}
return {napalm_base.helpers.ip(server[0]): {} for server in ntp_servers} | Return the NTP servers configured on the device. | Below is the instruction that describes the task:
### Input:
Return the NTP servers configured on the device.
### Response:
def get_ntp_servers(self):
"""Return the NTP servers configured on the device."""
ntp_table = junos_views.junos_ntp_servers_config_table(self.device)
ntp_table.get()
ntp_servers = ntp_table.items()
if not ntp_servers:
return {}
return {napalm_base.helpers.ip(server[0]): {} for server in ntp_servers} |
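For context, a hedged sketch of reaching this method through a NAPALM-style driver; the import path, driver name and device credentials are assumptions (the junos_views import above suggests a JunOS driver).
from napalm_base import get_network_driver

driver = get_network_driver('junos')
device = driver('192.0.2.10', 'admin', 'secret')   # placeholder credentials
device.open()
try:
    ntp = device.get_ntp_servers()   # e.g. {'10.0.0.1': {}, '10.0.0.2': {}}
    for server in ntp:
        print('ntp server:', server)
finally:
    device.close()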
def _mb_model(self, beta, mini_batch):
""" Creates the structure of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
lambda : np.array
Contains the values for the conditional volatility series
Y : np.array
Contains the length-adjusted time series (accounting for lags)
scores : np.array
Contains the score terms for the time series
"""
# Transform latent variables
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
rand_int = np.random.randint(low=0, high=self.data_length-mini_batch+1)
sample = np.arange(start=rand_int, stop=rand_int+mini_batch)
data = self.y[sample]
X = self.X[sample, :]
Y = data[self.max_lag:]
scores = np.zeros(Y.shape[0])
lmda = np.ones(Y.shape[0])
theta = np.ones(Y.shape[0])
# Loop over time series
for t in range(0,Y.shape[0]):
if t < self.max_lag:
lmda[t] = parm[-len(self.X_names)*2]/(1-np.sum(parm[:self.p]))
theta[t] = np.dot(self.X[t],parm[-len(self.X_names):])
else:
# Loop over GARCH terms
for p_term in range(0,self.p):
lmda[t] += parm[p_term]*lmda[t-p_term-1]
# Loop over Score terms
for q_term in range(0,self.q):
lmda[t] += parm[self.p+q_term]*scores[t-q_term-1]
if self.leverage is True:
lmda[t] += parm[-(len(self.X_names)*2)-3]*np.sign(-(Y[t-1]-theta[t-1]))*(scores[t-1]+1)
lmda[t] += np.dot(self.X[t],parm[-len(self.X_names)*2:-len(self.X_names)])
theta[t] = np.dot(self.X[t],parm[-len(self.X_names):]) + parm[-(len(self.X_names)*2)-1]*np.exp(lmda[t]/2.0)
scores[t] = (((parm[self.p+self.q]+1.0)*np.power(Y[t]-theta[t],2))/float(parm[self.p+self.q]*np.exp(lmda[t]) + np.power(Y[t]-theta[t],2))) - 1.0
return lmda, Y, scores, theta | Creates the structure of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
lambda : np.array
Contains the values for the conditional volatility series
Y : np.array
Contains the length-adjusted time series (accounting for lags)
scores : np.array
Contains the score terms for the time series | Below is the instruction that describes the task:
### Input:
Creates the structure of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
lambda : np.array
Contains the values for the conditional volatility series
Y : np.array
Contains the length-adjusted time series (accounting for lags)
scores : np.array
Contains the score terms for the time series
### Response:
def _mb_model(self, beta, mini_batch):
""" Creates the structure of the model
Parameters
----------
beta : np.array
Contains untransformed starting values for latent variables
Returns
----------
lambda : np.array
Contains the values for the conditional volatility series
Y : np.array
Contains the length-adjusted time series (accounting for lags)
scores : np.array
Contains the score terms for the time series
"""
# Transform latent variables
parm = np.array([self.latent_variables.z_list[k].prior.transform(beta[k]) for k in range(beta.shape[0])])
rand_int = np.random.randint(low=0, high=self.data_length-mini_batch+1)
sample = np.arange(start=rand_int, stop=rand_int+mini_batch)
data = self.y[sample]
X = self.X[sample, :]
Y = data[self.max_lag:]
scores = np.zeros(Y.shape[0])
lmda = np.ones(Y.shape[0])
theta = np.ones(Y.shape[0])
# Loop over time series
for t in range(0,Y.shape[0]):
if t < self.max_lag:
lmda[t] = parm[-len(self.X_names)*2]/(1-np.sum(parm[:self.p]))
theta[t] = np.dot(self.X[t],parm[-len(self.X_names):])
else:
# Loop over GARCH terms
for p_term in range(0,self.p):
lmda[t] += parm[p_term]*lmda[t-p_term-1]
# Loop over Score terms
for q_term in range(0,self.q):
lmda[t] += parm[self.p+q_term]*scores[t-q_term-1]
if self.leverage is True:
lmda[t] += parm[-(len(self.X_names)*2)-3]*np.sign(-(Y[t-1]-theta[t-1]))*(scores[t-1]+1)
lmda[t] += np.dot(self.X[t],parm[-len(self.X_names)*2:-len(self.X_names)])
theta[t] = np.dot(self.X[t],parm[-len(self.X_names):]) + parm[-(len(self.X_names)*2)-1]*np.exp(lmda[t]/2.0)
scores[t] = (((parm[self.p+self.q]+1.0)*np.power(Y[t]-theta[t],2))/float(parm[self.p+self.q]*np.exp(lmda[t]) + np.power(Y[t]-theta[t],2))) - 1.0
return lmda, Y, scores, theta |
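The score update just before the return is the densest step; isolated with hypothetical scalar names (nu corresponds to parm[self.p + self.q], lam to lmda[t]), it reads:
import numpy as np

def t_score(y_t, theta_t, lam, nu):
    # Student-t GAS score term, mirroring the expression used above.
    resid_sq = (y_t - theta_t) ** 2
    return ((nu + 1.0) * resid_sq) / (nu * np.exp(lam) + resid_sq) - 1.0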
def list_certificates(self):
"""Get the attributes of the current array certificate.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing all configured certificates.
:rtype: ResponseList
.. note::
Requires use of REST API 1.12 or later.
"""
# This call takes no parameters.
if self._rest_version >= LooseVersion("1.12"):
return self._request("GET", "cert")
else:
# If someone tries to call this against a too-early api version,
# do the best we can to provide expected behavior.
cert = self._request("GET", "cert")
out = ResponseList([cert])
out.headers = cert.headers
return out | Get the attributes of the current array certificate.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing all configured certificates.
:rtype: ResponseList
.. note::
Requires use of REST API 1.12 or later. | Below is the instruction that describes the task:
### Input:
Get the attributes of the current array certificate.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing all configured certificates.
:rtype: ResponseList
.. note::
Requires use of REST API 1.12 or later.
### Response:
def list_certificates(self):
"""Get the attributes of the current array certificate.
:param \*\*kwargs: See the REST API Guide on your array for the
documentation on the request:
**GET cert**
:type \*\*kwargs: optional
:returns: A list of dictionaries describing all configured certificates.
:rtype: ResponseList
.. note::
Requires use of REST API 1.12 or later.
"""
# This call takes no parameters.
if self._rest_version >= LooseVersion("1.12"):
return self._request("GET", "cert")
else:
# If someone tries to call this against a too-early api version,
# do the best we can to provide expected behavior.
cert = self._request("GET", "cert")
out = ResponseList([cert])
out.headers = cert.headers
return out |
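A hedged usage sketch against the purestorage REST client; the management hostname and API token are placeholders, and the constructor signature is assumed from that client's usual interface.
import purestorage

array = purestorage.FlashArray('flasharray.example.com', api_token='<token>')
for cert in array.list_certificates():
    print(cert)   # one dict per configured certificate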
def consume_message_with_notify(
notifier_uri_getter):
"""
Decorator for methods handling requests from RabbitMQ
This decorator builds on the :py:func:`consume_message` decorator. It extends
it with logic for notifying a client of the result of handling the
request.
The *notifier_uri_getter* argument must be a callable which accepts
*self* and returns the uri of the notifier service.
"""
def consume_message_with_notify_decorator(
method):
@consume_message
def wrapper(
self,
data):
notifier_uri = notifier_uri_getter(self)
client_id = data["client_id"]
# Forward the call to the method and notify the client of the
# result
try:
method(self, data)
notify_client(notifier_uri, client_id, 200)
except Exception as exception:
notify_client(notifier_uri, client_id, 400, str(exception))
raise
return wrapper
return consume_message_with_notify_decorator | Decorator for methods handling requests from RabbitMQ
This decorator builds on the :py:func:`consume_message` decorator. It extends
it with logic for notifying a client of the result of handling the
request.
The *notifier_uri_getter* argument must be a callable which accepts
*self* and returns the uri of the notifier service. | Below is the instruction that describes the task:
### Input:
Decorator for methods handling requests from RabbitMQ
This decorator builds on the :py:func:`consume_message` decorator. It extends
it with logic for notifying a client of the result of handling the
request.
The *notifier_uri_getter* argument must be a callable which accepts
*self* and returns the uri of the notifier service.
### Response:
def consume_message_with_notify(
notifier_uri_getter):
"""
Decorator for methods handling requests from RabbitMQ
This decorator builds on the :py:func:`consume_message` decorator. It extends
it with logic for notifying a client of the result of handling the
request.
The *notifier_uri_getter* argument must be a callable which accepts
*self* and returns the uri of the notifier service.
"""
def consume_message_with_notify_decorator(
method):
@consume_message
def wrapper(
self,
data):
notifier_uri = notifier_uri_getter(self)
client_id = data["client_id"]
# Forward the call to the method and notify the client of the
# result
try:
method(self, data)
notify_client(notifier_uri, client_id, 200)
except Exception as exception:
notify_client(notifier_uri, client_id, 400, str(exception))
raise
return wrapper
return consume_message_with_notify_decorator |
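A sketch of how the decorator might be applied; the consumer class, its notifier_uri attribute and the payload layout are invented for illustration, since the decorator itself only needs a callable returning the notifier URI and a message carrying a 'client_id'.
class OrderConsumer(object):
    def __init__(self, notifier_uri):
        self.notifier_uri = notifier_uri

    @consume_message_with_notify(lambda self: self.notifier_uri)
    def handle_create_order(self, data):
        # Any exception raised here is reported back to the client as a 400.
        if 'order' not in data:
            raise ValueError('missing order payload')
        print('creating order for client', data['client_id'])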
def preserve_order(self, value):
"""
Setter method for **self.__preserve_order** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("preserve_order", value)
self.__preserve_order = value | Setter method for **self.__preserve_order** attribute.
:param value: Attribute value.
:type value: bool | Below is the instruction that describes the task:
### Input:
Setter method for **self.__preserve_order** attribute.
:param value: Attribute value.
:type value: bool
### Response:
def preserve_order(self, value):
"""
Setter method for **self.__preserve_order** attribute.
:param value: Attribute value.
:type value: bool
"""
if value is not None:
assert type(value) is bool, "'{0}' attribute: '{1}' type is not 'bool'!".format("preserve_order", value)
self.__preserve_order = value |
def spell_check(request):
"""
Returns a HttpResponse that implements the TinyMCE spellchecker protocol.
"""
try:
if not enchant:
raise RuntimeError("install pyenchant for spellchecker functionality")
raw = force_text(request.body)
input = json.loads(raw)
id = input['id']
method = input['method']
params = input['params']
lang = params[0]
arg = params[1]
if not enchant.dict_exists(str(lang)):
raise RuntimeError("dictionary not found for language {!r}".format(lang))
checker = enchant.Dict(str(lang))
if method == 'checkWords':
result = [word for word in arg if word and not checker.check(word)]
elif method == 'getSuggestions':
result = checker.suggest(arg)
else:
raise RuntimeError("Unknown spellcheck method: {!r}".format(method))
output = {
'id': id,
'result': result,
'error': None,
}
except Exception:
logging.exception("Error running spellchecker")
return HttpResponse(_("Error running spellchecker"))
return HttpResponse(json.dumps(output),
content_type='application/json') | Returns a HttpResponse that implements the TinyMCE spellchecker protocol. | Below is the instruction that describes the task:
### Input:
Returns a HttpResponse that implements the TinyMCE spellchecker protocol.
### Response:
def spell_check(request):
"""
Returns a HttpResponse that implements the TinyMCE spellchecker protocol.
"""
try:
if not enchant:
raise RuntimeError("install pyenchant for spellchecker functionality")
raw = force_text(request.body)
input = json.loads(raw)
id = input['id']
method = input['method']
params = input['params']
lang = params[0]
arg = params[1]
if not enchant.dict_exists(str(lang)):
raise RuntimeError("dictionary not found for language {!r}".format(lang))
checker = enchant.Dict(str(lang))
if method == 'checkWords':
result = [word for word in arg if word and not checker.check(word)]
elif method == 'getSuggestions':
result = checker.suggest(arg)
else:
raise RuntimeError("Unknown spellcheck method: {!r}".format(method))
output = {
'id': id,
'result': result,
'error': None,
}
except Exception:
logging.exception("Error running spellchecker")
return HttpResponse(_("Error running spellchecker"))
return HttpResponse(json.dumps(output),
content_type='application/json') |
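For reference, a hedged sketch of the JSON payload the view above expects; the endpoint path depends on how the view is routed in urls.py, and the misspelled word is only an example.
import json
import requests

payload = {
    'id': 'c0',
    'method': 'getSuggestions',     # or 'checkWords' with a list of words
    'params': ['en_US', 'recieve'],
}
resp = requests.post('http://localhost:8000/tinymce/spellchecker/',
                     data=json.dumps(payload))
print(resp.json())  # {'id': 'c0', 'result': [...], 'error': None}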
def equinox(date, eop_correction=True, terms=106, kinematic=True):
"""Equinox equation in degrees
"""
epsilon_bar, delta_psi, delta_eps = _nutation(date, eop_correction, terms)
equin = delta_psi * 3600. * np.cos(np.deg2rad(epsilon_bar))
if date.d >= 50506 and kinematic:
# Starting 1992-02-27, we apply the effect of the moon
ttt = date.change_scale('TT').julian_century
om_m = 125.04455501 - (5 * 360. + 134.1361851) * ttt\
+ 0.0020756 * ttt ** 2 + 2.139e-6 * ttt ** 3
equin += 0.00264 * np.sin(np.deg2rad(om_m)) + 6.3e-5 * np.sin(np.deg2rad(2 * om_m))
# print("equinox = {}\n".format(equin / 3600))
return equin / 3600. | Equinox equation in degrees | Below is the instruction that describes the task:
### Input:
Equinox equation in degrees
### Response:
def equinox(date, eop_correction=True, terms=106, kinematic=True):
"""Equinox equation in degrees
"""
epsilon_bar, delta_psi, delta_eps = _nutation(date, eop_correction, terms)
equin = delta_psi * 3600. * np.cos(np.deg2rad(epsilon_bar))
if date.d >= 50506 and kinematic:
# Starting 1992-02-27, we apply the effect of the moon
ttt = date.change_scale('TT').julian_century
om_m = 125.04455501 - (5 * 360. + 134.1361851) * ttt\
+ 0.0020756 * ttt ** 2 + 2.139e-6 * ttt ** 3
equin += 0.00264 * np.sin(np.deg2rad(om_m)) + 6.3e-5 * np.sin(np.deg2rad(2 * om_m))
# print("equinox = {}\n".format(equin / 3600))
return equin / 3600. |
def get_ip_addresses(self, **kwargs):
"""
Get IP Addresses already set on an Interface.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet etc).
name (str): Name of interface id.
(For interface: 1/0/5, 1/0/10 etc).
version (int): 4 or 6 to represent IPv4 or IPv6 address
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
List of 0 or more IPs configured on the specified interface.
Raises:
KeyError: if `int_type` or `name` is not passed.
ValueError: if `int_type` or `name` are invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... int_type = 'tengigabitethernet'
... name = '225/0/4'
... ip_addr = '20.10.10.1/24'
... version = 4
... output = dev.interface.disable_switchport(inter_type=
... int_type, inter=name)
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr)
... result = dev.interface.get_ip_addresses(
... int_type=int_type, name=name, version=version)
... assert len(result) >= 1
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr, delete=True)
... ip_addr = 'fc00:1:3:1ad3:0:0:23:a/64'
... version = 6
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr)
... result = dev.interface.get_ip_addresses(
... int_type=int_type, name=name, version=version)
... assert len(result) >= 1
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr, delete=True)
"""
int_type = str(kwargs.pop('int_type').lower())
name = str(kwargs.pop('name'))
version = int(kwargs.pop('version'))
callback = kwargs.pop('callback', self._callback)
valid_int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet']
if int_type not in valid_int_types:
raise ValueError('int_type must be one of: %s' %
repr(valid_int_types))
method_name = None
method_class = self._interface
if version == 4:
method_name = 'interface_%s_ip_ip_config_address_' \
'address' % int_type
elif version == 6:
method_name = 'interface_%s_ipv6_ipv6_config_address_ipv6_' \
'address_address' % int_type
if not pynos.utilities.valid_interface(int_type, name):
raise ValueError('`name` must be in the format of x/y/z for '
'physical interfaces.')
ip_args = dict(name=name, address='')
ip_address_attr = getattr(method_class, method_name)
config = ip_address_attr(**ip_args)
output = callback(config, handler='get_config')
result = []
if version == 4:
for item in output.data.findall(
'.//{*}address/{*}address'):
result.append(item.text)
elif version == 6:
for item in output.data.findall(
'.//{*}address/{*}ipv6-address/{'
'*}address'):
result.append(item.text)
return result | Get IP Addresses already set on an Interface.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet etc).
name (str): Name of interface id.
(For interface: 1/0/5, 1/0/10 etc).
version (int): 4 or 6 to represent IPv4 or IPv6 address
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
List of 0 or more IPs configured on the specified interface.
Raises:
KeyError: if `int_type` or `name` is not passed.
ValueError: if `int_type` or `name` are invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... int_type = 'tengigabitethernet'
... name = '225/0/4'
... ip_addr = '20.10.10.1/24'
... version = 4
... output = dev.interface.disable_switchport(inter_type=
... int_type, inter=name)
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr)
... result = dev.interface.get_ip_addresses(
... int_type=int_type, name=name, version=version)
... assert len(result) >= 1
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr, delete=True)
... ip_addr = 'fc00:1:3:1ad3:0:0:23:a/64'
... version = 6
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr)
... result = dev.interface.get_ip_addresses(
... int_type=int_type, name=name, version=version)
... assert len(result) >= 1
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr, delete=True) | Below is the instruction that describes the task:
### Input:
Get IP Addresses already set on an Interface.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet etc).
name (str): Name of interface id.
(For interface: 1/0/5, 1/0/10 etc).
version (int): 4 or 6 to represent IPv4 or IPv6 address
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
List of 0 or more IPs configured on the specified interface.
Raises:
KeyError: if `int_type` or `name` is not passed.
ValueError: if `int_type` or `name` are invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... int_type = 'tengigabitethernet'
... name = '225/0/4'
... ip_addr = '20.10.10.1/24'
... version = 4
... output = dev.interface.disable_switchport(inter_type=
... int_type, inter=name)
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr)
... result = dev.interface.get_ip_addresses(
... int_type=int_type, name=name, version=version)
... assert len(result) >= 1
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr, delete=True)
... ip_addr = 'fc00:1:3:1ad3:0:0:23:a/64'
... version = 6
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr)
... result = dev.interface.get_ip_addresses(
... int_type=int_type, name=name, version=version)
... assert len(result) >= 1
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr, delete=True)
### Response:
def get_ip_addresses(self, **kwargs):
"""
Get IP Addresses already set on an Interface.
Args:
int_type (str): Type of interface. (gigabitethernet,
tengigabitethernet etc).
name (str): Name of interface id.
(For interface: 1/0/5, 1/0/10 etc).
version (int): 4 or 6 to represent IPv4 or IPv6 address
callback (function): A function executed upon completion of the
method. The only parameter passed to `callback` will be the
``ElementTree`` `config`.
Returns:
List of 0 or more IPs configured on the specified interface.
Raises:
KeyError: if `int_type` or `name` is not passed.
ValueError: if `int_type` or `name` are invalid.
Examples:
>>> import pynos.device
>>> switches = ['10.24.39.211', '10.24.39.203']
>>> auth = ('admin', 'password')
>>> for switch in switches:
... conn = (switch, '22')
... with pynos.device.Device(conn=conn, auth=auth) as dev:
... int_type = 'tengigabitethernet'
... name = '225/0/4'
... ip_addr = '20.10.10.1/24'
... version = 4
... output = dev.interface.disable_switchport(inter_type=
... int_type, inter=name)
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr)
... result = dev.interface.get_ip_addresses(
... int_type=int_type, name=name, version=version)
... assert len(result) >= 1
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr, delete=True)
... ip_addr = 'fc00:1:3:1ad3:0:0:23:a/64'
... version = 6
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr)
... result = dev.interface.get_ip_addresses(
... int_type=int_type, name=name, version=version)
... assert len(result) >= 1
... output = dev.interface.ip_address(int_type=int_type,
... name=name, ip_addr=ip_addr, delete=True)
"""
int_type = str(kwargs.pop('int_type').lower())
name = str(kwargs.pop('name'))
version = int(kwargs.pop('version'))
callback = kwargs.pop('callback', self._callback)
valid_int_types = ['gigabitethernet', 'tengigabitethernet',
'fortygigabitethernet', 'hundredgigabitethernet']
if int_type not in valid_int_types:
raise ValueError('int_type must be one of: %s' %
repr(valid_int_types))
method_name = None
method_class = self._interface
if version == 4:
method_name = 'interface_%s_ip_ip_config_address_' \
'address' % int_type
elif version == 6:
method_name = 'interface_%s_ipv6_ipv6_config_address_ipv6_' \
'address_address' % int_type
if not pynos.utilities.valid_interface(int_type, name):
raise ValueError('`name` must be in the format of x/y/z for '
'physical interfaces.')
ip_args = dict(name=name, address='')
ip_address_attr = getattr(method_class, method_name)
config = ip_address_attr(**ip_args)
output = callback(config, handler='get_config')
result = []
if version == 4:
for item in output.data.findall(
'.//{*}address/{*}address'):
result.append(item.text)
elif version == 6:
for item in output.data.findall(
'.//{*}address/{*}ipv6-address/{'
'*}address'):
result.append(item.text)
return result |
def get_wordpress(self, service_id, version_number, name):
"""Get information on a specific wordpress."""
content = self._fetch("/service/%s/version/%d/wordpress/%s" % (service_id, version_number, name))
return FastlyWordpress(self, content) | Get information on a specific wordpress. | Below is the instruction that describes the task:
### Input:
Get information on a specific wordpress.
### Response:
def get_wordpress(self, service_id, version_number, name):
"""Get information on a specific wordpress."""
content = self._fetch("/service/%s/version/%d/wordpress/%s" % (service_id, version_number, name))
return FastlyWordpress(self, content) |
def delete_event_subscription(self, url):
"""Deregister a callback URL as an event subscriber.
:param str url: callback URL
:returns: the deleted event subscription
:rtype: dict
"""
params = {'callbackUrl': url}
response = self._do_request('DELETE', '/v2/eventSubscriptions', params)
return response.json() | Deregister a callback URL as an event subscriber.
:param str url: callback URL
:returns: the deleted event subscription
:rtype: dict | Below is the instruction that describes the task:
### Input:
Deregister a callback URL as an event subscriber.
:param str url: callback URL
:returns: the deleted event subscription
:rtype: dict
### Response:
def delete_event_subscription(self, url):
"""Deregister a callback URL as an event subscriber.
:param str url: callback URL
:returns: the deleted event subscription
:rtype: dict
"""
params = {'callbackUrl': url}
response = self._do_request('DELETE', '/v2/eventSubscriptions', params)
return response.json() |
def count_rows(self, table, cols='*'):
"""Get the number of rows in a particular table."""
query = 'SELECT COUNT({0}) FROM {1}'.format(join_cols(cols), wrap(table))
result = self.fetch(query)
return result if result is not None else 0 | Get the number of rows in a particular table. | Below is the the instruction that describes the task:
### Input:
Get the number of rows in a particular table.
### Response:
def count_rows(self, table, cols='*'):
"""Get the number of rows in a particular table."""
query = 'SELECT COUNT({0}) FROM {1}'.format(join_cols(cols), wrap(table))
result = self.fetch(query)
return result if result is not None else 0 |
def c_var_decls(self):
"""Get the needed variable definitions."""
if self.opts.no_structs:
mod_decl = 'HMODULE {} = NULL;\n'.format(self.name)
return [mod_decl] + [
'{} *{} = NULL;\n'.format(
self._c_type_name(name), name
)
for name, dummy_args in self.funcs
]
if self.opts.windll:
return ''
return [
'{} _{} = {{ 0 }};\n'.format(
self._c_struct_names()[1], self.name
)
] | Get the needed variable definitions. | Below is the the instruction that describes the task:
### Input:
Get the needed variable definitions.
### Response:
def c_var_decls(self):
"""Get the needed variable definitions."""
if self.opts.no_structs:
mod_decl = 'HMODULE {} = NULL;\n'.format(self.name)
return [mod_decl] + [
'{} *{} = NULL;\n'.format(
self._c_type_name(name), name
)
for name, dummy_args in self.funcs
]
if self.opts.windll:
return ''
return [
'{} _{} = {{ 0 }};\n'.format(
self._c_struct_names()[1], self.name
)
] |
def like(self, **kwargs):
'''
When provided with keyword arguments of the form ``col=pattern``, this
will limit the entities returned to those that include the provided
pattern. Note that 'like' queries require that the ``prefix=True``
option must have been provided as part of the column definition.
Patterns allow for 4 wildcard characters, whose semantics are as
follows:
* *?* - will match 0 or 1 of any character
* *\** - will match 0 or more of any character
* *+* - will match 1 or more of any character
* *!* - will match exactly 1 of any character
As an example, imagine that you have enabled the required prefix
matching on your ``User.email`` column. And lets say that you want to
find everyone with an email address that contains the name 'frank'
before the ``@`` sign. You can use either of the following patterns
to discover those users.
* *\*frank\*@*
* *\*frank\*@*
.. note:: Like queries implicitly start at the beginning of strings
checked, so if you want to match a pattern that doesn't start at
the beginning of a string, you should prefix it with one of the
wildcard characters (like ``*`` as we did with the 'frank' pattern).
'''
new = []
for k, v in kwargs.items():
v = self._check(k, v, 'like')
new.append(Pattern(k, v))
return self.replace(filters=self._filters+tuple(new)) | When provided with keyword arguments of the form ``col=pattern``, this
will limit the entities returned to those that include the provided
pattern. Note that 'like' queries require that the ``prefix=True``
option must have been provided as part of the column definition.
Patterns allow for 4 wildcard characters, whose semantics are as
follows:
* *?* - will match 0 or 1 of any character
* *\** - will match 0 or more of any character
* *+* - will match 1 or more of any character
* *!* - will match exactly 1 of any character
As an example, imagine that you have enabled the required prefix
matching on your ``User.email`` column. And lets say that you want to
find everyone with an email address that contains the name 'frank'
before the ``@`` sign. You can use either of the following patterns
to discover those users.
* *\*frank\*@*
* *\*frank\*@*
.. note:: Like queries implicitly start at the beginning of strings
checked, so if you want to match a pattern that doesn't start at
the beginning of a string, you should prefix it with one of the
wildcard characters (like ``*`` as we did with the 'frank' pattern). | Below is the the instruction that describes the task:
### Input:
When provided with keyword arguments of the form ``col=pattern``, this
will limit the entities returned to those that include the provided
pattern. Note that 'like' queries require that the ``prefix=True``
option must have been provided as part of the column definition.
Patterns allow for 4 wildcard characters, whose semantics are as
follows:
* *?* - will match 0 or 1 of any character
* *\** - will match 0 or more of any character
* *+* - will match 1 or more of any character
* *!* - will match exactly 1 of any character
As an example, imagine that you have enabled the required prefix
matching on your ``User.email`` column. And lets say that you want to
find everyone with an email address that contains the name 'frank'
before the ``@`` sign. You can use either of the following patterns
to discover those users.
* *\*frank\*@*
* *\*frank\*@*
.. note:: Like queries implicitly start at the beginning of strings
checked, so if you want to match a pattern that doesn't start at
the beginning of a string, you should prefix it with one of the
wildcard characters (like ``*`` as we did with the 'frank' pattern).
### Response:
def like(self, **kwargs):
'''
When provided with keyword arguments of the form ``col=pattern``, this
will limit the entities returned to those that include the provided
pattern. Note that 'like' queries require that the ``prefix=True``
option must have been provided as part of the column definition.
Patterns allow for 4 wildcard characters, whose semantics are as
follows:
* *?* - will match 0 or 1 of any character
* *\** - will match 0 or more of any character
* *+* - will match 1 or more of any character
* *!* - will match exactly 1 of any character
As an example, imagine that you have enabled the required prefix
matching on your ``User.email`` column. And lets say that you want to
find everyone with an email address that contains the name 'frank'
before the ``@`` sign. You can use either of the following patterns
to discover those users.
* *\*frank\*@*
* *\*frank\*@*
.. note:: Like queries implicitly start at the beginning of strings
checked, so if you want to match a pattern that doesn't start at
the beginning of a string, you should prefix it with one of the
wildcard characters (like ``*`` as we did with the 'frank' pattern).
'''
new = []
for k, v in kwargs.items():
v = self._check(k, v, 'like')
new.append(Pattern(k, v))
return self.replace(filters=self._filters+tuple(new)) |
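
A minimal usage sketch of the pattern filter described above, assuming a hypothetical User model whose email column was declared with prefix=True; the model, column name, and .all() call are illustrative and not taken from the source:

# Hypothetical query; the email column must have been defined with prefix=True.
frank_users = User.query.like(email='*frank*@*').all()
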
def marshal(self, v):
"""
Turn this value into API format.
Do a reverse dictionary lookup on choices to find the original value. If
there are no keys or too many keys for now we raise a NotImplementedError
as marshal is not used anywhere currently. In the future we will want to
fail gracefully.
"""
if v:
orig = [i for i in self.choices if self.choices[i] == v]
if len(orig) == 1:
return orig[0]
elif len(orig) == 0:
# No such choice
raise NotImplementedError("No such reverse choice {0} for field {1}.".format(v, self))
else:
# Too many choices. We could return one possible choice (e.g. orig[0]).
raise NotImplementedError("Too many reverse choices {0} for value {1} for field {2}".format(orig, v, self)) | Turn this value into API format.
Do a reverse dictionary lookup on choices to find the original value. If
there are no keys or too many keys for now we raise a NotImplementedError
as marshal is not used anywhere currently. In the future we will want to
fail gracefully. | Below is the the instruction that describes the task:
### Input:
Turn this value into API format.
Do a reverse dictionary lookup on choices to find the original value. If
there are no keys or too many keys for now we raise a NotImplementedError
as marshal is not used anywhere currently. In the future we will want to
fail gracefully.
### Response:
def marshal(self, v):
"""
Turn this value into API format.
Do a reverse dictionary lookup on choices to find the original value. If
there are no keys or too many keys for now we raise a NotImplementedError
as marshal is not used anywhere currently. In the future we will want to
fail gracefully.
"""
if v:
orig = [i for i in self.choices if self.choices[i] == v]
if len(orig) == 1:
return orig[0]
elif len(orig) == 0:
# No such choice
raise NotImplementedError("No such reverse choice {0} for field {1}.".format(v, self))
else:
# Too many choices. We could return one possible choice (e.g. orig[0]).
raise NotImplementedError("Too many reverse choices {0} for value {1} for field {2}".format(orig, v, self)) |
def beacon(config):
'''
Read the log file and return match whole string
.. code-block:: yaml
beacons:
log:
- file: <path>
- tags:
<tag>:
regex: <pattern>
.. note::
regex matching is based on the `re`_ module
.. _re: https://docs.python.org/3.6/library/re.html#regular-expression-syntax
The defined tag is added to the beacon event tag.
This is not the tag in the log.
.. code-block:: yaml
beacons:
log:
- file: /var/log/messages #path to log.
- tags:
goodbye/world: # tag added to beacon event tag.
regex: .*good-bye.* # match good-bye string anywhere in the log entry.
'''
_config = {}
list(map(_config.update, config))
ret = []
if 'file' not in _config:
event = SKEL.copy()
event['tag'] = 'global'
event['error'] = 'file not defined in config'
ret.append(event)
return ret
with salt.utils.files.fopen(_config['file'], 'r') as fp_:
loc = __context__.get(LOC_KEY, 0)
if loc == 0:
fp_.seek(0, 2)
__context__[LOC_KEY] = fp_.tell()
return ret
fp_.seek(0, 2)
__context__[LOC_KEY] = fp_.tell()
fp_.seek(loc)
txt = fp_.read()
log.info('txt %s', txt)
d = {}
for tag in _config.get('tags', {}):
if 'regex' not in _config['tags'][tag]:
continue
if not _config['tags'][tag]['regex']:
continue
try:
d[tag] = re.compile(r'{0}'.format(_config['tags'][tag]['regex']))
except Exception as e:
event = SKEL.copy()
event['tag'] = tag
event['error'] = 'bad regex'
ret.append(event)
for line in txt.splitlines():
for tag, reg in d.items():
try:
m = reg.match(line)
if m:
event = SKEL.copy()
event['tag'] = tag
event['raw'] = line
event['match'] = 'yes'
ret.append(event)
except Exception:
event = SKEL.copy()
event['tag'] = tag
event['error'] = 'bad match'
ret.append(event)
return ret | Read the log file and return match whole string
.. code-block:: yaml
beacons:
log:
- file: <path>
- tags:
<tag>:
regex: <pattern>
.. note::
regex matching is based on the `re`_ module
.. _re: https://docs.python.org/3.6/library/re.html#regular-expression-syntax
The defined tag is added to the beacon event tag.
This is not the tag in the log.
.. code-block:: yaml
beacons:
log:
- file: /var/log/messages #path to log.
- tags:
goodbye/world: # tag added to beacon event tag.
regex: .*good-bye.* # match good-bye string anywhere in the log entry. | Below is the the instruction that describes the task:
### Input:
Read the log file and return match whole string
.. code-block:: yaml
beacons:
log:
- file: <path>
- tags:
<tag>:
regex: <pattern>
.. note::
regex matching is based on the `re`_ module
.. _re: https://docs.python.org/3.6/library/re.html#regular-expression-syntax
The defined tag is added to the beacon event tag.
This is not the tag in the log.
.. code-block:: yaml
beacons:
log:
- file: /var/log/messages #path to log.
- tags:
goodbye/world: # tag added to beacon event tag.
regex: .*good-bye.* # match good-bye string anywhere in the log entry.
### Response:
def beacon(config):
'''
Read the log file and return match whole string
.. code-block:: yaml
beacons:
log:
- file: <path>
- tags:
<tag>:
regex: <pattern>
.. note::
regex matching is based on the `re`_ module
.. _re: https://docs.python.org/3.6/library/re.html#regular-expression-syntax
The defined tag is added to the beacon event tag.
This is not the tag in the log.
.. code-block:: yaml
beacons:
log:
- file: /var/log/messages #path to log.
- tags:
goodbye/world: # tag added to beacon event tag.
regex: .*good-bye.* # match good-bye string anywhere in the log entry.
'''
_config = {}
list(map(_config.update, config))
ret = []
if 'file' not in _config:
event = SKEL.copy()
event['tag'] = 'global'
event['error'] = 'file not defined in config'
ret.append(event)
return ret
with salt.utils.files.fopen(_config['file'], 'r') as fp_:
loc = __context__.get(LOC_KEY, 0)
if loc == 0:
fp_.seek(0, 2)
__context__[LOC_KEY] = fp_.tell()
return ret
fp_.seek(0, 2)
__context__[LOC_KEY] = fp_.tell()
fp_.seek(loc)
txt = fp_.read()
log.info('txt %s', txt)
d = {}
for tag in _config.get('tags', {}):
if 'regex' not in _config['tags'][tag]:
continue
if not _config['tags'][tag]['regex']:
continue
try:
d[tag] = re.compile(r'{0}'.format(_config['tags'][tag]['regex']))
except Exception as e:
event = SKEL.copy()
event['tag'] = tag
event['error'] = 'bad regex'
ret.append(event)
for line in txt.splitlines():
for tag, reg in d.items():
try:
m = reg.match(line)
if m:
event = SKEL.copy()
event['tag'] = tag
event['raw'] = line
event['match'] = 'yes'
ret.append(event)
except Exception:
event = SKEL.copy()
event['tag'] = tag
event['error'] = 'bad match'
ret.append(event)
return ret |
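
A self-contained sketch of the tag/regex matching loop used by the beacon above, with a made-up tags config and log lines; the SKEL event template and the Salt file handling are omitted:

import re

tags = {'goodbye/world': {'regex': '.*good-bye.*'}}
compiled = {tag: re.compile(r'{0}'.format(cfg['regex']))
            for tag, cfg in tags.items() if cfg.get('regex')}

events = []
for line in ['hello world', 'saying good-bye now']:
    for tag, reg in compiled.items():
        if reg.match(line):
            events.append({'tag': tag, 'raw': line, 'match': 'yes'})

# events == [{'tag': 'goodbye/world', 'raw': 'saying good-bye now', 'match': 'yes'}]
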
def policy_assignments(self):
"""Instance depends on the API version:
* 2015-10-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2015_10_01_preview.operations.PolicyAssignmentsOperations>`
* 2016-04-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_04_01.operations.PolicyAssignmentsOperations>`
* 2016-12-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_12_01.operations.PolicyAssignmentsOperations>`
* 2017-06-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicyAssignmentsOperations>`
* 2018-03-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicyAssignmentsOperations>`
* 2018-05-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicyAssignmentsOperations>`
"""
api_version = self._get_api_version('policy_assignments')
if api_version == '2015-10-01-preview':
from .v2015_10_01_preview.operations import PolicyAssignmentsOperations as OperationClass
elif api_version == '2016-04-01':
from .v2016_04_01.operations import PolicyAssignmentsOperations as OperationClass
elif api_version == '2016-12-01':
from .v2016_12_01.operations import PolicyAssignmentsOperations as OperationClass
elif api_version == '2017-06-01-preview':
from .v2017_06_01_preview.operations import PolicyAssignmentsOperations as OperationClass
elif api_version == '2018-03-01':
from .v2018_03_01.operations import PolicyAssignmentsOperations as OperationClass
elif api_version == '2018-05-01':
from .v2018_05_01.operations import PolicyAssignmentsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | Instance depends on the API version:
* 2015-10-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2015_10_01_preview.operations.PolicyAssignmentsOperations>`
* 2016-04-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_04_01.operations.PolicyAssignmentsOperations>`
* 2016-12-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_12_01.operations.PolicyAssignmentsOperations>`
* 2017-06-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicyAssignmentsOperations>`
* 2018-03-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicyAssignmentsOperations>`
* 2018-05-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicyAssignmentsOperations>` | Below is the the instruction that describes the task:
### Input:
Instance depends on the API version:
* 2015-10-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2015_10_01_preview.operations.PolicyAssignmentsOperations>`
* 2016-04-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_04_01.operations.PolicyAssignmentsOperations>`
* 2016-12-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_12_01.operations.PolicyAssignmentsOperations>`
* 2017-06-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicyAssignmentsOperations>`
* 2018-03-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicyAssignmentsOperations>`
* 2018-05-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicyAssignmentsOperations>`
### Response:
def policy_assignments(self):
"""Instance depends on the API version:
* 2015-10-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2015_10_01_preview.operations.PolicyAssignmentsOperations>`
* 2016-04-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_04_01.operations.PolicyAssignmentsOperations>`
* 2016-12-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2016_12_01.operations.PolicyAssignmentsOperations>`
* 2017-06-01-preview: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2017_06_01_preview.operations.PolicyAssignmentsOperations>`
* 2018-03-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_03_01.operations.PolicyAssignmentsOperations>`
* 2018-05-01: :class:`PolicyAssignmentsOperations<azure.mgmt.resource.policy.v2018_05_01.operations.PolicyAssignmentsOperations>`
"""
api_version = self._get_api_version('policy_assignments')
if api_version == '2015-10-01-preview':
from .v2015_10_01_preview.operations import PolicyAssignmentsOperations as OperationClass
elif api_version == '2016-04-01':
from .v2016_04_01.operations import PolicyAssignmentsOperations as OperationClass
elif api_version == '2016-12-01':
from .v2016_12_01.operations import PolicyAssignmentsOperations as OperationClass
elif api_version == '2017-06-01-preview':
from .v2017_06_01_preview.operations import PolicyAssignmentsOperations as OperationClass
elif api_version == '2018-03-01':
from .v2018_03_01.operations import PolicyAssignmentsOperations as OperationClass
elif api_version == '2018-05-01':
from .v2018_05_01.operations import PolicyAssignmentsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) |
def info(self, token):
"""
Return token information.
:param token: A token
:return: dictionary with info about the token
"""
_res = dict(zip(['_id', 'type', 'sid', 'exp'],
self.split_token(token)))
if _res['type'] != self.type:
raise WrongTokenType(_res['type'])
else:
_res['handler'] = self
_res['black_listed'] = self.is_black_listed(token)
return _res | Return token information.
:param token: A token
:return: dictionary with info about the token | Below is the the instruction that describes the task:
### Input:
Return token information.
:param token: A token
:return: dictionary with info about the token
### Response:
def info(self, token):
"""
Return token information.
:param token: A token
:return: dictionary with info about the token
"""
_res = dict(zip(['_id', 'type', 'sid', 'exp'],
self.split_token(token)))
if _res['type'] != self.type:
raise WrongTokenType(_res['type'])
else:
_res['handler'] = self
_res['black_listed'] = self.is_black_listed(token)
return _res |
def receiveVolumeInfo(self, paths):
""" Return Context Manager for a file-like (stream) object to store volume info. """
path = self.selectReceivePath(paths)
path = path + Store.theInfoExtension
if self._skipDryRun(logger)("receive info in '%s'", path):
return None
return _Uploader(self.bucket, path, bufferSize=theInfoBufferSize) | Return Context Manager for a file-like (stream) object to store volume info. | Below is the the instruction that describes the task:
### Input:
Return Context Manager for a file-like (stream) object to store volume info.
### Response:
def receiveVolumeInfo(self, paths):
""" Return Context Manager for a file-like (stream) object to store volume info. """
path = self.selectReceivePath(paths)
path = path + Store.theInfoExtension
if self._skipDryRun(logger)("receive info in '%s'", path):
return None
return _Uploader(self.bucket, path, bufferSize=theInfoBufferSize) |
def xsl_withparam(self, name, value, parent):
"""Construct an XSLT 'with-param' element.
`parent` is this element's parent.
`name` is the parameter name.
`value` is the parameter value.
"""
res = ET.SubElement(parent, "with-param", name=name)
res.text = value
return res | Construct an XSLT 'with-param' element.
`parent` is this element's parent.
`name` is the parameter name.
`value` is the parameter value. | Below is the the instruction that describes the task:
### Input:
Construct an XSLT 'with-param' element.
`parent` is this element's parent.
`name` is the parameter name.
`value` is the parameter value.
### Response:
def xsl_withparam(self, name, value, parent):
"""Construct an XSLT 'with-param' element.
`parent` is this element's parent.
`name` is the parameter name.
`value` is the parameter value.
"""
res = ET.SubElement(parent, "with-param", name=name)
res.text = value
return res |
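
A quick standalone check of the element construction above; only xml.etree.ElementTree is needed, and the parent tag and values are made up:

import xml.etree.ElementTree as ET

parent = ET.Element("call-template", name="emit")
res = ET.SubElement(parent, "with-param", name="count")
res.text = "42"

print(ET.tostring(parent).decode())
# <call-template name="emit"><with-param name="count">42</with-param></call-template>
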
def launch():
"""Launch the experiment."""
exp = experiment(db.init_db(drop_all=False))
exp.log("Launching experiment...", "-----")
init_db()
exp.recruiter().open_recruitment(n=exp.initial_recruitment_size)
session_psiturk.commit()
session.commit()
return success_response(request_type="launch") | Launch the experiment. | Below is the the instruction that describes the task:
### Input:
Launch the experiment.
### Response:
def launch():
"""Launch the experiment."""
exp = experiment(db.init_db(drop_all=False))
exp.log("Launching experiment...", "-----")
init_db()
exp.recruiter().open_recruitment(n=exp.initial_recruitment_size)
session_psiturk.commit()
session.commit()
return success_response(request_type="launch") |
def index_dir(self, folder):
"""
Creates a nested dictionary that represents the folder structure of folder.
Also extracts meta data from all markdown posts and adds to the dictionary.
"""
folder_path = folder
print('Indexing folder: ' + folder_path)
nested_dir = {}
folder = folder_path.rstrip(os.sep)
start = folder.rfind(os.sep) + 1
for root, dirs, files in os.walk(folder):
folders = root[start:].split(os.sep)
# subdir = dict.fromkeys(files)
subdir = {}
for f in files:
# Create an entry for every markdown file
if os.path.splitext(f)[1] == '.md':
with open(os.path.abspath(os.path.join(root, f)), encoding='utf-8') as fp:
try:
_, meta = self.mrk.extract_meta(fp.read())
except:
print("Skipping indexing " + f +"; Could not parse metadata")
meta = {'title': f}
pass
# Value of the entry (the key) is it's metadata
subdir[f] = meta
parent = nested_dir
for fold in folders[:-1]:
parent = parent.get(fold)
# Attach the config of all children nodes onto the parent
parent[folders[-1]] = subdir
return nested_dir | Creates a nested dictionary that represents the folder structure of folder.
Also extracts meta data from all markdown posts and adds to the dictionary. | Below is the the instruction that describes the task:
### Input:
Creates a nested dictionary that represents the folder structure of folder.
Also extracts meta data from all markdown posts and adds to the dictionary.
### Response:
def index_dir(self, folder):
"""
Creates a nested dictionary that represents the folder structure of folder.
Also extracts meta data from all markdown posts and adds to the dictionary.
"""
folder_path = folder
print('Indexing folder: ' + folder_path)
nested_dir = {}
folder = folder_path.rstrip(os.sep)
start = folder.rfind(os.sep) + 1
for root, dirs, files in os.walk(folder):
folders = root[start:].split(os.sep)
# subdir = dict.fromkeys(files)
subdir = {}
for f in files:
# Create an entry for every markdown file
if os.path.splitext(f)[1] == '.md':
with open(os.path.abspath(os.path.join(root, f)), encoding='utf-8') as fp:
try:
_, meta = self.mrk.extract_meta(fp.read())
except:
print("Skipping indexing " + f +"; Could not parse metadata")
meta = {'title': f}
pass
# Value of the entry (the key) is it's metadata
subdir[f] = meta
parent = nested_dir
for fold in folders[:-1]:
parent = parent.get(fold)
# Attach the config of all children nodes onto the parent
parent[folders[-1]] = subdir
return nested_dir |
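
For a small content folder, the nested dictionary built above would look roughly like the literal below; the paths and titles are illustrative:

# posts/
#   intro.md            -> metadata {'title': 'Intro'}
#   guides/setup.md     -> metadata {'title': 'Setup'}
nested_dir = {
    'posts': {
        'intro.md': {'title': 'Intro'},
        'guides': {
            'setup.md': {'title': 'Setup'},
        },
    },
}
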
def function_trace(function_name):
"""
Wraps a chunk of code that we want to appear as a separate, explicit,
segment in our monitoring tools.
"""
if newrelic:
nr_transaction = newrelic.agent.current_transaction()
with newrelic.agent.FunctionTrace(nr_transaction, function_name):
yield
else:
yield | Wraps a chunk of code that we want to appear as a separate, explicit,
segment in our monitoring tools. | Below is the the instruction that describes the task:
### Input:
Wraps a chunk of code that we want to appear as a separate, explicit,
segment in our monitoring tools.
### Response:
def function_trace(function_name):
"""
Wraps a chunk of code that we want to appear as a separate, explicit,
segment in our monitoring tools.
"""
if newrelic:
nr_transaction = newrelic.agent.current_transaction()
with newrelic.agent.FunctionTrace(nr_transaction, function_name):
yield
else:
yield |
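
Assuming function_trace is wrapped with contextlib.contextmanager (the decorator is not visible in the snippet above), usage is a plain with-block; the segment name and the work being traced are hypothetical:

with function_trace('expensive_report_generation'):
    generate_report()   # hypothetical work that should appear as its own segment
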
def _init_item_marks(item_marks):
"""Initialize the makred item dict."""
if isinstance(item_marks, dict):
return item_marks
if item_marks:
        return {item_id:'>' for item_id in item_marks} | Initialize the marked item dict. | Below is the the instruction that describes the task:
### Input:
Initialize the marked item dict.
### Response:
def _init_item_marks(item_marks):
"""Initialize the makred item dict."""
if isinstance(item_marks, dict):
return item_marks
if item_marks:
return {item_id:'>' for item_id in item_marks} |
def backward_smoothing_pass(self,
filtered_means,
filtered_covs,
predicted_means,
predicted_covs):
"""Run the backward pass in Kalman smoother.
The backward smoothing is using Rauch, Tung and Striebel smoother as
as discussed in section 18.3.2 of Kevin P. Murphy, 2012, Machine Learning:
A Probabilistic Perspective, The MIT Press. The inputs are returned by
`forward_filter` function.
Args:
filtered_means: Means of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`.
filtered_covs: Covariances of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`batch_shape + [num_timesteps, latent_size, latent_size]`.
predicted_means: Means of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
predicted_covs: Covariances of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `batch_shape + [num_timesteps, latent_size,
latent_size]`.
Returns:
posterior_means: Means of the smoothed marginal distributions
p(z_t | x_{1:T}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`,
which is of the same shape as filtered_means.
posterior_covs: Covariances of the smoothed marginal distributions
p(z_t | x_{1:T}), as a Tensor of shape
`batch_shape + [num_timesteps, latent_size, latent_size]`.
which is of the same shape as filtered_covs.
"""
with tf.name_scope("backward_pass"):
filtered_means = tf.convert_to_tensor(
value=filtered_means, name="filtered_means")
filtered_covs = tf.convert_to_tensor(
value=filtered_covs, name="filtered_covs")
predicted_means = tf.convert_to_tensor(
value=predicted_means, name="predicted_means")
predicted_covs = tf.convert_to_tensor(
value=predicted_covs, name="predicted_covs")
# To scan over time dimension, we need to move 'num_timesteps' from the
# event shape to the initial dimension of the tensor.
filtered_means = distribution_util.move_dimension(filtered_means, -2, 0)
filtered_covs = distribution_util.move_dimension(filtered_covs, -3, 0)
predicted_means = distribution_util.move_dimension(predicted_means, -2, 0)
predicted_covs = distribution_util.move_dimension(predicted_covs, -3, 0)
# The means are assumed to be vectors. Adding a dummy index to
# ensure the `matmul` op working smoothly.
filtered_means = filtered_means[..., tf.newaxis]
predicted_means = predicted_means[..., tf.newaxis]
initial_backward_mean = predicted_means[-1, ...]
initial_backward_cov = predicted_covs[-1, ...]
num_timesteps = tf.shape(input=filtered_means)[0]
initial_state = BackwardPassState(
backward_mean=initial_backward_mean,
backward_cov=initial_backward_cov,
timestep=self.initial_step + num_timesteps - 1)
update_step_fn = build_backward_pass_step(
self.get_transition_matrix_for_timestep)
# For backward pass, it scans the `elems` from last to first.
posterior_states = tf.scan(update_step_fn,
elems=(filtered_means,
filtered_covs,
predicted_means,
predicted_covs),
initializer=initial_state,
reverse=True)
# Move the time dimension back into the event shape.
posterior_means = distribution_util.move_dimension(
posterior_states.backward_mean[..., 0], 0, -2)
posterior_covs = distribution_util.move_dimension(
posterior_states.backward_cov, 0, -3)
return (posterior_means, posterior_covs) | Run the backward pass in Kalman smoother.
The backward smoothing is using Rauch, Tung and Striebel smoother as
as discussed in section 18.3.2 of Kevin P. Murphy, 2012, Machine Learning:
A Probabilistic Perspective, The MIT Press. The inputs are returned by
`forward_filter` function.
Args:
filtered_means: Means of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`.
filtered_covs: Covariances of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`batch_shape + [num_timesteps, latent_size, latent_size]`.
predicted_means: Means of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
predicted_covs: Covariances of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `batch_shape + [num_timesteps, latent_size,
latent_size]`.
Returns:
posterior_means: Means of the smoothed marginal distributions
p(z_t | x_{1:T}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`,
which is of the same shape as filtered_means.
posterior_covs: Covariances of the smoothed marginal distributions
p(z_t | x_{1:T}), as a Tensor of shape
`batch_shape + [num_timesteps, latent_size, latent_size]`.
which is of the same shape as filtered_covs. | Below is the the instruction that describes the task:
### Input:
Run the backward pass in Kalman smoother.
The backward smoothing is using Rauch, Tung and Striebel smoother as
as discussed in section 18.3.2 of Kevin P. Murphy, 2012, Machine Learning:
A Probabilistic Perspective, The MIT Press. The inputs are returned by
`forward_filter` function.
Args:
filtered_means: Means of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`.
filtered_covs: Covariances of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`batch_shape + [num_timesteps, latent_size, latent_size]`.
predicted_means: Means of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
predicted_covs: Covariances of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `batch_shape + [num_timesteps, latent_size,
latent_size]`.
Returns:
posterior_means: Means of the smoothed marginal distributions
p(z_t | x_{1:T}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`,
which is of the same shape as filtered_means.
posterior_covs: Covariances of the smoothed marginal distributions
p(z_t | x_{1:T}), as a Tensor of shape
`batch_shape + [num_timesteps, latent_size, latent_size]`.
which is of the same shape as filtered_covs.
### Response:
def backward_smoothing_pass(self,
filtered_means,
filtered_covs,
predicted_means,
predicted_covs):
"""Run the backward pass in Kalman smoother.
The backward smoothing is using Rauch, Tung and Striebel smoother as
as discussed in section 18.3.2 of Kevin P. Murphy, 2012, Machine Learning:
A Probabilistic Perspective, The MIT Press. The inputs are returned by
`forward_filter` function.
Args:
filtered_means: Means of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`.
filtered_covs: Covariances of the per-timestep filtered marginal
distributions p(z_t | x_{:t}), as a Tensor of shape
`batch_shape + [num_timesteps, latent_size, latent_size]`.
predicted_means: Means of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `sample_shape(x) + batch_shape +
[num_timesteps, latent_size]`.
predicted_covs: Covariances of the per-timestep predictive
distributions over latent states, p(z_{t+1} | x_{:t}), as a
Tensor of shape `batch_shape + [num_timesteps, latent_size,
latent_size]`.
Returns:
posterior_means: Means of the smoothed marginal distributions
p(z_t | x_{1:T}), as a Tensor of shape
`sample_shape(x) + batch_shape + [num_timesteps, latent_size]`,
which is of the same shape as filtered_means.
posterior_covs: Covariances of the smoothed marginal distributions
p(z_t | x_{1:T}), as a Tensor of shape
`batch_shape + [num_timesteps, latent_size, latent_size]`.
which is of the same shape as filtered_covs.
"""
with tf.name_scope("backward_pass"):
filtered_means = tf.convert_to_tensor(
value=filtered_means, name="filtered_means")
filtered_covs = tf.convert_to_tensor(
value=filtered_covs, name="filtered_covs")
predicted_means = tf.convert_to_tensor(
value=predicted_means, name="predicted_means")
predicted_covs = tf.convert_to_tensor(
value=predicted_covs, name="predicted_covs")
# To scan over time dimension, we need to move 'num_timesteps' from the
# event shape to the initial dimension of the tensor.
filtered_means = distribution_util.move_dimension(filtered_means, -2, 0)
filtered_covs = distribution_util.move_dimension(filtered_covs, -3, 0)
predicted_means = distribution_util.move_dimension(predicted_means, -2, 0)
predicted_covs = distribution_util.move_dimension(predicted_covs, -3, 0)
# The means are assumed to be vectors. Adding a dummy index to
# ensure the `matmul` op working smoothly.
filtered_means = filtered_means[..., tf.newaxis]
predicted_means = predicted_means[..., tf.newaxis]
initial_backward_mean = predicted_means[-1, ...]
initial_backward_cov = predicted_covs[-1, ...]
num_timesteps = tf.shape(input=filtered_means)[0]
initial_state = BackwardPassState(
backward_mean=initial_backward_mean,
backward_cov=initial_backward_cov,
timestep=self.initial_step + num_timesteps - 1)
update_step_fn = build_backward_pass_step(
self.get_transition_matrix_for_timestep)
# For backward pass, it scans the `elems` from last to first.
posterior_states = tf.scan(update_step_fn,
elems=(filtered_means,
filtered_covs,
predicted_means,
predicted_covs),
initializer=initial_state,
reverse=True)
# Move the time dimension back into the event shape.
posterior_means = distribution_util.move_dimension(
posterior_states.backward_mean[..., 0], 0, -2)
posterior_covs = distribution_util.move_dimension(
posterior_states.backward_cov, 0, -3)
return (posterior_means, posterior_covs) |
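
In the notation of the docstring above, each backward scan step implements the standard Rauch-Tung-Striebel recursion (written here in textbook form, not copied from the source), where m_{t|t}, P_{t|t} are the filtered moments, m_{t+1|t}, P_{t+1|t} the predicted ones, and A_{t+1} the transition matrix:

G_t = P_{t|t} A_{t+1}^\top P_{t+1|t}^{-1}
m_{t|T} = m_{t|t} + G_t (m_{t+1|T} - m_{t+1|t})
P_{t|T} = P_{t|t} + G_t (P_{t+1|T} - P_{t+1|t}) G_t^\top
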
def out_name(stem, timestep=None):
"""Return StagPy out file name.
Args:
stem (str): short description of file content.
timestep (int): timestep if relevant.
Returns:
str: the output file name.
Other Parameters:
conf.core.outname (str): the generic name stem, defaults to
``'stagpy'``.
"""
if timestep is not None:
stem = (stem + INT_FMT).format(timestep)
return conf.core.outname + '_' + stem | Return StagPy out file name.
Args:
stem (str): short description of file content.
timestep (int): timestep if relevant.
Returns:
str: the output file name.
Other Parameters:
conf.core.outname (str): the generic name stem, defaults to
``'stagpy'``. | Below is the the instruction that describes the task:
### Input:
Return StagPy out file name.
Args:
stem (str): short description of file content.
timestep (int): timestep if relevant.
Returns:
str: the output file name.
Other Parameters:
conf.core.outname (str): the generic name stem, defaults to
``'stagpy'``.
### Response:
def out_name(stem, timestep=None):
"""Return StagPy out file name.
Args:
stem (str): short description of file content.
timestep (int): timestep if relevant.
Returns:
str: the output file name.
Other Parameters:
conf.core.outname (str): the generic name stem, defaults to
``'stagpy'``.
"""
if timestep is not None:
stem = (stem + INT_FMT).format(timestep)
return conf.core.outname + '_' + stem |
async def modify_tree(self, tree, modifications):
'''The modifications are a map of the form, {path: TreeEntry}. The tree
can be None to indicate an empty starting tree. The entries can be
either blobs or trees, or None to indicate a deletion. The return value
is either the hash of the resulting tree, or None if the resulting tree
is empty. Modifications in parent directories are done before
modifications in subdirectories below them, so for example you can
insert a tree at a given path and also insert more new stuff beneath
that path, without fear of overwriting the new stuff.'''
# Read the original contents of the base tree.
if tree is None:
entries = {}
else:
entries = await self.ls_tree(tree, '.')
# Separate the modifications into two groups, those that refer to
# entries at the base of this tree (e.g. 'foo'), and those that refer
# to entries in subtrees (e.g. 'foo/bar').
modifications_at_base = dict()
modifications_in_subtrees = collections.defaultdict(dict)
for path_str, entry in modifications.items():
# Canonicalize paths to get rid of duplicate/trailing slashes.
path = pathlib.PurePosixPath(path_str)
# Check for nonsense paths.
# TODO: Maybe stop recursive calls from repeating these checks.
if len(path.parts) == 0:
raise ModifyTreeError('Cannot modify an empty path.')
elif path.parts[0] == '/':
raise ModifyTreeError('Cannot modify an absolute path.')
elif '..' in path.parts:
raise ModifyTreeError('.. is not allowed in tree paths.')
if len(path.parts) == 1:
modifications_at_base[str(path)] = entry
else:
first_dir = path.parts[0]
rest = str(pathlib.PurePosixPath(*path.parts[1:]))
modifications_in_subtrees[first_dir][rest] = entry
# Insert or delete entries in the base tree. Note that this happens
# before any subtree operations.
for name, entry in modifications_at_base.items():
if entry is None:
entries.pop(name, None)
else:
entries[name] = entry
# Recurse to compute modified subtrees. Note how we handle deletions:
# If 'a' is a file, inserting a new file at 'a/b' will implicitly
# delete 'a', but trying to delete 'a/b' will be a no-op and will not
# delete 'a'.
empty_tree = (await self.get_empty_tree())
for name, sub_modifications in modifications_in_subtrees.items():
subtree_base = None
if name in entries and entries[name].type == TREE_TYPE:
subtree_base = entries[name].hash
new_subtree = await self.modify_tree(subtree_base,
sub_modifications)
if new_subtree != empty_tree:
entries[name] = TreeEntry(TREE_MODE, TREE_TYPE, new_subtree)
# Delete an empty tree if it was actually a tree to begin with.
elif name in entries and entries[name].type == TREE_TYPE:
del entries[name]
# Return the resulting tree, or None if empty.
if entries:
session = self.no_index_git_session()
tree = await session.make_tree_from_entries(entries)
return tree
else:
return empty_tree | The modifications are a map of the form, {path: TreeEntry}. The tree
can be None to indicate an empty starting tree. The entries can be
either blobs or trees, or None to indicate a deletion. The return value
is either the hash of the resulting tree, or None if the resulting tree
is empty. Modifications in parent directories are done before
modifications in subdirectories below them, so for example you can
insert a tree at a given path and also insert more new stuff beneath
that path, without fear of overwriting the new stuff. | Below is the the instruction that describes the task:
### Input:
The modifications are a map of the form, {path: TreeEntry}. The tree
can be None to indicate an empty starting tree. The entries can be
either blobs or trees, or None to indicate a deletion. The return value
is either the hash of the resulting tree, or None if the resulting tree
is empty. Modifications in parent directories are done before
modifications in subdirectories below them, so for example you can
insert a tree at a given path and also insert more new stuff beneath
that path, without fear of overwriting the new stuff.
### Response:
async def modify_tree(self, tree, modifications):
'''The modifications are a map of the form, {path: TreeEntry}. The tree
can be None to indicate an empty starting tree. The entries can be
either blobs or trees, or None to indicate a deletion. The return value
is either the hash of the resulting tree, or None if the resulting tree
is empty. Modifications in parent directories are done before
modifications in subdirectories below them, so for example you can
insert a tree at a given path and also insert more new stuff beneath
that path, without fear of overwriting the new stuff.'''
# Read the original contents of the base tree.
if tree is None:
entries = {}
else:
entries = await self.ls_tree(tree, '.')
# Separate the modifications into two groups, those that refer to
# entries at the base of this tree (e.g. 'foo'), and those that refer
# to entries in subtrees (e.g. 'foo/bar').
modifications_at_base = dict()
modifications_in_subtrees = collections.defaultdict(dict)
for path_str, entry in modifications.items():
# Canonicalize paths to get rid of duplicate/trailing slashes.
path = pathlib.PurePosixPath(path_str)
# Check for nonsense paths.
# TODO: Maybe stop recursive calls from repeating these checks.
if len(path.parts) == 0:
raise ModifyTreeError('Cannot modify an empty path.')
elif path.parts[0] == '/':
raise ModifyTreeError('Cannot modify an absolute path.')
elif '..' in path.parts:
raise ModifyTreeError('.. is not allowed in tree paths.')
if len(path.parts) == 1:
modifications_at_base[str(path)] = entry
else:
first_dir = path.parts[0]
rest = str(pathlib.PurePosixPath(*path.parts[1:]))
modifications_in_subtrees[first_dir][rest] = entry
# Insert or delete entries in the base tree. Note that this happens
# before any subtree operations.
for name, entry in modifications_at_base.items():
if entry is None:
entries.pop(name, None)
else:
entries[name] = entry
# Recurse to compute modified subtrees. Note how we handle deletions:
# If 'a' is a file, inserting a new file at 'a/b' will implicitly
# delete 'a', but trying to delete 'a/b' will be a no-op and will not
# delete 'a'.
empty_tree = (await self.get_empty_tree())
for name, sub_modifications in modifications_in_subtrees.items():
subtree_base = None
if name in entries and entries[name].type == TREE_TYPE:
subtree_base = entries[name].hash
new_subtree = await self.modify_tree(subtree_base,
sub_modifications)
if new_subtree != empty_tree:
entries[name] = TreeEntry(TREE_MODE, TREE_TYPE, new_subtree)
# Delete an empty tree if it was actually a tree to begin with.
elif name in entries and entries[name].type == TREE_TYPE:
del entries[name]
# Return the resulting tree, or None if empty.
if entries:
session = self.no_index_git_session()
tree = await session.make_tree_from_entries(entries)
return tree
else:
return empty_tree |
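
The modifications argument described above is a flat map from slash-separated paths to entries, with None marking deletions; a hypothetical call could look like the sketch below, where the TreeEntry values, hash, and object names are made up:

async def example(cache, base_tree):
    modifications = {
        'docs/readme.md': TreeEntry('100644', 'blob', 'abc123'),  # insert or replace a blob
        'old/notes.txt': None,                                    # delete this path if present
    }
    return await cache.modify_tree(base_tree, modifications)
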
def purge(self, doc_ids):
'''
a method to remove docs from the collection
:param doc_ids: string or list of strings with document ids to purge
:return: list of strings of doc ids purged
'''
# https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/document/post__db___purge
title = '%s.purge' % self.__class__.__name__
# ingest arguments
if isinstance(doc_ids, str):
doc_ids = [ doc_ids ]
# validate inputs
input_fields = {
'doc_ids': doc_ids
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = self.bucket_url + '/_purge'
json_body = {}
for doc in doc_ids:
json_body[doc] = [ "*" ]
# send request
response = requests.post(url, json=json_body)
# construct output from response
purged_list = []
purged_map = {}
response_details = response.json()
if 'purged' in response_details.keys():
purged_map = response_details['purged']
for key in purged_map.keys():
purged_list.append(key)
return purged_list | a method to remove docs from the collection
:param doc_ids: string or list of strings with document ids to purge
:return: list of strings of doc ids purged | Below is the the instruction that describes the task:
### Input:
a method to remove docs from the collection
:param doc_ids: string or list of strings with document ids to purge
:return: list of strings of doc ids purged
### Response:
def purge(self, doc_ids):
'''
a method to remove docs from the collection
:param doc_ids: string or list of strings with document ids to purge
:return: list of strings of doc ids purged
'''
# https://developer.couchbase.com/documentation/mobile/1.5/references/sync-gateway/admin-rest-api/index.html#/document/post__db___purge
title = '%s.purge' % self.__class__.__name__
# ingest arguments
if isinstance(doc_ids, str):
doc_ids = [ doc_ids ]
# validate inputs
input_fields = {
'doc_ids': doc_ids
}
for key, value in input_fields.items():
object_title = '%s(%s=%s)' % (title, key, str(value))
self.fields.validate(value, '.%s' % key, object_title)
# construct request fields
url = self.bucket_url + '/_purge'
json_body = {}
for doc in doc_ids:
json_body[doc] = [ "*" ]
# send request
response = requests.post(url, json=json_body)
# construct output from response
purged_list = []
purged_map = {}
response_details = response.json()
if 'purged' in response_details.keys():
purged_map = response_details['purged']
for key in purged_map.keys():
purged_list.append(key)
return purged_list |
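
The request body assembled above simply maps each doc id to the list ["*"]; for two documents (ids are illustrative) it would be built like this:

doc_ids = ['doc-001', 'doc-002']
json_body = {doc: ['*'] for doc in doc_ids}
# json_body == {'doc-001': ['*'], 'doc-002': ['*']}
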
def update(self, data, offset, is_last, buffer_index=0):
"""
Update the buffer at the given index.
Args:
data (np.ndarray): The frames.
offset (int): The index of the first frame in `data` within the sequence.
is_last (bool): Whether this is the last block of frames in the sequence.
buffer_index (int): The index of the buffer to update (< self.num_buffers).
"""
if buffer_index >= self.num_buffers:
raise ValueError('Expected buffer index < {} but got index {}.'.format(self.num_buffers, buffer_index))
if self.buffers[buffer_index] is not None and self.buffers[buffer_index].shape[0] > 0:
expected_next_frame = self.current_frame + self.buffers[buffer_index].shape[0]
if expected_next_frame != offset:
raise ValueError(
'There are missing frames. Last frame in buffer is {}. The passed frames start at {}.'.format(
expected_next_frame, offset))
self.buffers[buffer_index] = np.vstack([self.buffers[buffer_index], data])
else:
self.buffers[buffer_index] = data
self.buffers_full[buffer_index] = is_last | Update the buffer at the given index.
Args:
data (np.ndarray): The frames.
offset (int): The index of the first frame in `data` within the sequence.
is_last (bool): Whether this is the last block of frames in the sequence.
buffer_index (int): The index of the buffer to update (< self.num_buffers). | Below is the the instruction that describes the task:
### Input:
Update the buffer at the given index.
Args:
data (np.ndarray): The frames.
offset (int): The index of the first frame in `data` within the sequence.
is_last (bool): Whether this is the last block of frames in the sequence.
buffer_index (int): The index of the buffer to update (< self.num_buffers).
### Response:
def update(self, data, offset, is_last, buffer_index=0):
"""
Update the buffer at the given index.
Args:
data (np.ndarray): The frames.
offset (int): The index of the first frame in `data` within the sequence.
is_last (bool): Whether this is the last block of frames in the sequence.
buffer_index (int): The index of the buffer to update (< self.num_buffers).
"""
if buffer_index >= self.num_buffers:
raise ValueError('Expected buffer index < {} but got index {}.'.format(self.num_buffers, buffer_index))
if self.buffers[buffer_index] is not None and self.buffers[buffer_index].shape[0] > 0:
expected_next_frame = self.current_frame + self.buffers[buffer_index].shape[0]
if expected_next_frame != offset:
raise ValueError(
'There are missing frames. Last frame in buffer is {}. The passed frames start at {}.'.format(
expected_next_frame, offset))
self.buffers[buffer_index] = np.vstack([self.buffers[buffer_index], data])
else:
self.buffers[buffer_index] = data
self.buffers_full[buffer_index] = is_last |
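
A hedged usage sketch for the update method above, assuming a single buffer, that current_frame starts at 0, and a hypothetical constructor name:

import numpy as np

buf = BlockBuffer(num_buffers=1)                            # hypothetical class name
buf.update(np.zeros((100, 13)), offset=0, is_last=False)
buf.update(np.ones((50, 13)), offset=100, is_last=True)     # offset continues where the last block ended
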
def predict(fqdn, result, *argl, **argd):
"""Analyzes the result of a generic predict operation performed by
`sklearn`.
Args:
fqdn (str): full-qualified name of the method that was called.
result: result of calling the method with `fqdn`.
argl (tuple): positional arguments passed to the method call.
argd (dict): keyword arguments passed to the method call.
"""
#Check the arguments to see what kind of data we are working with, then
#choose the appropriate function below to return the analysis dictionary.
out = None
if len(argl) > 0:
machine = argl[0]
if isclassifier(machine):
out = classify_predict(fqdn, result, None, *argl, **argd)
elif isregressor(machine):
out = regress_predict(fqdn, result, None, *argl, **argd)
return out | Analyzes the result of a generic predict operation performed by
`sklearn`.
Args:
fqdn (str): full-qualified name of the method that was called.
result: result of calling the method with `fqdn`.
argl (tuple): positional arguments passed to the method call.
argd (dict): keyword arguments passed to the method call. | Below is the the instruction that describes the task:
### Input:
Analyzes the result of a generic predict operation performed by
`sklearn`.
Args:
fqdn (str): full-qualified name of the method that was called.
result: result of calling the method with `fqdn`.
argl (tuple): positional arguments passed to the method call.
argd (dict): keyword arguments passed to the method call.
### Response:
def predict(fqdn, result, *argl, **argd):
"""Analyzes the result of a generic predict operation performed by
`sklearn`.
Args:
fqdn (str): full-qualified name of the method that was called.
result: result of calling the method with `fqdn`.
argl (tuple): positional arguments passed to the method call.
argd (dict): keyword arguments passed to the method call.
"""
#Check the arguments to see what kind of data we are working with, then
#choose the appropriate function below to return the analysis dictionary.
out = None
if len(argl) > 0:
machine = argl[0]
if isclassifier(machine):
out = classify_predict(fqdn, result, None, *argl, **argd)
elif isregressor(machine):
out = regress_predict(fqdn, result, None, *argl, **argd)
return out |
def parse(self, rev_string):
"""
:param rev_string:
:type rev_string: str
"""
elements = rev_string.split(MESSAGE_LINE_SEPARATOR)
heading = elements[0]
heading_elements = heading.split(" ")
self.revision_id = heading_elements[2]
datetime_str = "{} {}".format(
heading_elements[0],
heading_elements[1]
)
self.release_date = datetime.datetime.strptime(
datetime_str,
DATETIME_FORMAT
)
self.description = elements[1]
self.message = elements[2] | :param rev_string:
:type rev_string: str | Below is the the instruction that describes the task:
### Input:
:param rev_string:
:type rev_string: str
### Response:
def parse(self, rev_string):
"""
:param rev_string:
:type rev_string: str
"""
elements = rev_string.split(MESSAGE_LINE_SEPARATOR)
heading = elements[0]
heading_elements = heading.split(" ")
self.revision_id = heading_elements[2]
datetime_str = "{} {}".format(
heading_elements[0],
heading_elements[1]
)
self.release_date = datetime.datetime.strptime(
datetime_str,
DATETIME_FORMAT
)
self.description = elements[1]
self.message = elements[2] |
def getBoneName(self, action, nBoneIndex, pchBoneName, unNameBufferSize):
"""Fills the given buffer with the name of the bone at the given index in the skeleton associated with the given action"""
fn = self.function_table.getBoneName
result = fn(action, nBoneIndex, pchBoneName, unNameBufferSize)
return result | Fills the given buffer with the name of the bone at the given index in the skeleton associated with the given action | Below is the the instruction that describes the task:
### Input:
Fills the given buffer with the name of the bone at the given index in the skeleton associated with the given action
### Response:
def getBoneName(self, action, nBoneIndex, pchBoneName, unNameBufferSize):
"""Fills the given buffer with the name of the bone at the given index in the skeleton associated with the given action"""
fn = self.function_table.getBoneName
result = fn(action, nBoneIndex, pchBoneName, unNameBufferSize)
return result |
def delete_snapshot(self, snapshot_id):
"""Remove a previously created snapshot."""
query = self.query_factory(
action="DeleteSnapshot", creds=self.creds, endpoint=self.endpoint,
other_params={"SnapshotId": snapshot_id})
d = query.submit()
return d.addCallback(self.parser.truth_return) | Remove a previously created snapshot. | Below is the the instruction that describes the task:
### Input:
Remove a previously created snapshot.
### Response:
def delete_snapshot(self, snapshot_id):
"""Remove a previously created snapshot."""
query = self.query_factory(
action="DeleteSnapshot", creds=self.creds, endpoint=self.endpoint,
other_params={"SnapshotId": snapshot_id})
d = query.submit()
return d.addCallback(self.parser.truth_return) |
def set_gcc():
"""Try to use GCC on OSX for OpenMP support."""
# For macports and homebrew
if 'darwin' in platform.platform().lower():
gcc = extract_gcc_binaries()
if gcc is not None:
os.environ["CC"] = gcc
os.environ["CXX"] = gcc
else:
global use_openmp
use_openmp = False
logging.warning('No GCC available. Install gcc from Homebrew '
'using brew install gcc.') | Try to use GCC on OSX for OpenMP support. | Below is the the instruction that describes the task:
### Input:
Try to use GCC on OSX for OpenMP support.
### Response:
def set_gcc():
"""Try to use GCC on OSX for OpenMP support."""
# For macports and homebrew
if 'darwin' in platform.platform().lower():
gcc = extract_gcc_binaries()
if gcc is not None:
os.environ["CC"] = gcc
os.environ["CXX"] = gcc
else:
global use_openmp
use_openmp = False
logging.warning('No GCC available. Install gcc from Homebrew '
'using brew install gcc.') |
def getSubstitutionElement(self, elt, ps):
'''if elt matches a member of the head substitutionGroup, return
the GED typecode representation of the member.
head -- ElementDeclaration typecode,
elt -- the DOM element being parsed
ps -- ParsedSoap instance
'''
nsuri,ncname = _get_element_nsuri_name(elt)
typecode = GED(nsuri,ncname)
if typecode is None:
return
try:
nsuri,ncname = typecode.substitutionGroup
except (AttributeError, TypeError):
return
if (ncname == self.pname) and (nsuri == self.nspname or
(not nsuri and not self.nspname)):
return typecode
return | if elt matches a member of the head substitutionGroup, return
the GED typecode representation of the member.
head -- ElementDeclaration typecode,
elt -- the DOM element being parsed
ps -- ParsedSoap instance | Below is the the instruction that describes the task:
### Input:
if elt matches a member of the head substitutionGroup, return
the GED typecode representation of the member.
head -- ElementDeclaration typecode,
elt -- the DOM element being parsed
ps -- ParsedSoap instance
### Response:
def getSubstitutionElement(self, elt, ps):
'''if elt matches a member of the head substitutionGroup, return
the GED typecode representation of the member.
head -- ElementDeclaration typecode,
elt -- the DOM element being parsed
ps -- ParsedSoap instance
'''
nsuri,ncname = _get_element_nsuri_name(elt)
typecode = GED(nsuri,ncname)
if typecode is None:
return
try:
nsuri,ncname = typecode.substitutionGroup
except (AttributeError, TypeError):
return
if (ncname == self.pname) and (nsuri == self.nspname or
(not nsuri and not self.nspname)):
return typecode
return |
def set_default_subparser(self, name, args=None):
"""default subparser selection. Call after setup, just before parse_args()
name: is the name of the subparser to call by default
args: if set is the argument list handed to parse_args()
, tested with 2.7, 3.2, 3.3, 3.4
it works with 2.6 assuming argparse is installed
"""
subparser_found = False
for arg in sys.argv[1:]:
if arg in ['-h', '--help']: # global help if no subparser
break
else:
for x in self._subparsers._actions:
if not isinstance(x, argparse._SubParsersAction):
continue
for sp_name in x._name_parser_map.keys():
if sp_name in sys.argv[1:]:
subparser_found = True
if not subparser_found:
# insert default in first position, this implies no
# global options without a sub_parsers specified
if args is None:
sys.argv.insert(1, name)
else:
args.insert(0, name) | default subparser selection. Call after setup, just before parse_args()
name: is the name of the subparser to call by default
args: if set is the argument list handed to parse_args()
, tested with 2.7, 3.2, 3.3, 3.4
it works with 2.6 assuming argparse is installed | Below is the the instruction that describes the task:
### Input:
default subparser selection. Call after setup, just before parse_args()
name: is the name of the subparser to call by default
args: if set is the argument list handed to parse_args()
, tested with 2.7, 3.2, 3.3, 3.4
it works with 2.6 assuming argparse is installed
### Response:
def set_default_subparser(self, name, args=None):
"""default subparser selection. Call after setup, just before parse_args()
name: is the name of the subparser to call by default
args: if set is the argument list handed to parse_args()
, tested with 2.7, 3.2, 3.3, 3.4
it works with 2.6 assuming argparse is installed
"""
subparser_found = False
for arg in sys.argv[1:]:
if arg in ['-h', '--help']: # global help if no subparser
break
else:
for x in self._subparsers._actions:
if not isinstance(x, argparse._SubParsersAction):
continue
for sp_name in x._name_parser_map.keys():
if sp_name in sys.argv[1:]:
subparser_found = True
if not subparser_found:
# insert default in first position, this implies no
# global options without a sub_parsers specified
if args is None:
sys.argv.insert(1, name)
else:
args.insert(0, name) |
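A self-contained argparse sketch of the same default-subparser idea, using made-up subcommand names; when no subcommand (and no help flag) appears on the command line, one is injected before parsing:

import argparse
import sys

parser = argparse.ArgumentParser()
sub = parser.add_subparsers(dest="command")
sub.add_parser("run")
sub.add_parser("status")

argv = sys.argv[1:]
if not any(a in ("run", "status", "-h", "--help") for a in argv):
    argv.insert(0, "run")  # "run" acts as the default subparser
print(parser.parse_args(argv).command)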
def start_daemon():
"""
Start a thread to continuously read log files and append lines in DB.
Work in progress. Currently the thread doesn't append anything,
it only print the information parsed from each line read.
Returns:
thread: the started thread.
"""
if RequestLog.daemon is None:
parser = get_nginx_parser()
RequestLog.daemon = RequestLog.ParseToDBThread(parser, daemon=True)
RequestLog.daemon.start()
return RequestLog.daemon | Start a thread to continuously read log files and append lines in DB.
Work in progress. Currently the thread doesn't append anything,
it only print the information parsed from each line read.
Returns:
thread: the started thread. | Below is the the instruction that describes the task:
### Input:
Start a thread to continuously read log files and append lines in DB.
Work in progress. Currently the thread doesn't append anything,
it only print the information parsed from each line read.
Returns:
thread: the started thread.
### Response:
def start_daemon():
"""
Start a thread to continuously read log files and append lines in DB.
Work in progress. Currently the thread doesn't append anything,
it only print the information parsed from each line read.
Returns:
thread: the started thread.
"""
if RequestLog.daemon is None:
parser = get_nginx_parser()
RequestLog.daemon = RequestLog.ParseToDBThread(parser, daemon=True)
RequestLog.daemon.start()
return RequestLog.daemon |
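The ParseToDBThread class is not shown here; a generic sketch of the same lazily-started singleton daemon pattern, with a plain threading.Thread standing in for it:

import threading
import time

_daemon = None

def _tail_logs_forever():
    while True:
        time.sleep(1)  # a real worker would read and parse log lines here

def start_daemon_once():
    global _daemon
    if _daemon is None:
        _daemon = threading.Thread(target=_tail_logs_forever, daemon=True)
        _daemon.start()
    return _daemon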
def conf(self, key):
'''get config'''
return self.__conf[key] if key in self.__conf else _YunpianConf.YP_CONF.get(key) | get config | Below is the the instruction that describes the task:
### Input:
get config
### Response:
def conf(self, key):
'''get config'''
return self.__conf[key] if key in self.__conf else _YunpianConf.YP_CONF.get(key) |
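The lookup falls back from the instance's own settings to class-level defaults. The same pattern written standalone (keys and values here are illustrative, not Yunpian's real defaults):

defaults = {"http.conn.timeout": "10", "yp.version": "v2"}
overrides = {"http.conn.timeout": "30"}

def conf(key):
    # Prefer the per-instance override, else fall back to the shared defaults.
    return overrides[key] if key in overrides else defaults.get(key)

print(conf("http.conn.timeout"), conf("yp.version"), conf("missing"))  # 30 v2 None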
def autoLayout(self, size=None):
"""
Updates the layout for the graphics within this scene.
"""
if size is None:
size = self._view.size()
self.setSceneRect(0, 0, size.width(), size.height())
for item in self.items():
if isinstance(item, XWalkthroughGraphic):
item.autoLayout(size) | Updates the layout for the graphics within this scene. | Below is the the instruction that describes the task:
### Input:
Updates the layout for the graphics within this scene.
### Response:
def autoLayout(self, size=None):
"""
Updates the layout for the graphics within this scene.
"""
if size is None:
size = self._view.size()
self.setSceneRect(0, 0, size.width(), size.height())
for item in self.items():
if isinstance(item, XWalkthroughGraphic):
item.autoLayout(size) |
def elemDump(self, f, cur):
"""Dump an XML/HTML node, recursive behaviour, children are
printed too. """
if cur is None: cur__o = None
else: cur__o = cur._o
libxml2mod.xmlElemDump(f, self._o, cur__o) | Dump an XML/HTML node, recursive behaviour, children are
printed too. | Below is the the instruction that describes the task:
### Input:
Dump an XML/HTML node, recursive behaviour, children are
printed too.
### Response:
def elemDump(self, f, cur):
"""Dump an XML/HTML node, recursive behaviour, children are
printed too. """
if cur is None: cur__o = None
else: cur__o = cur._o
libxml2mod.xmlElemDump(f, self._o, cur__o) |
def show_active(self, **kwargs):
"""Draws the network, highlighting active queues.
The colored vertices represent vertices that have at least one
queue on an in-edge that is active. Dark edges represent
queues that are active, light edges represent queues that are
inactive.
Parameters
----------
**kwargs
Any additional parameters to pass to :meth:`.draw`, and
:meth:`.QueueNetworkDiGraph.draw_graph`.
Notes
-----
Active queues are :class:`QueueServers<.QueueServer>` that
accept arrivals from outside the network. The colors are
defined by the class attribute ``colors``. The relevant keys
are ``vertex_active``, ``vertex_inactive``, ``edge_active``,
and ``edge_inactive``.
"""
g = self.g
for v in g.nodes():
self.g.set_vp(v, 'vertex_color', [0, 0, 0, 0.9])
is_active = False
my_iter = g.in_edges(v) if g.is_directed() else g.out_edges(v)
for e in my_iter:
ei = g.edge_index[e]
if self.edge2queue[ei]._active:
is_active = True
break
if is_active:
self.g.set_vp(v, 'vertex_fill_color', self.colors['vertex_active'])
else:
self.g.set_vp(v, 'vertex_fill_color', self.colors['vertex_inactive'])
for e in g.edges():
ei = g.edge_index[e]
if self.edge2queue[ei]._active:
self.g.set_ep(e, 'edge_color', self.colors['edge_active'])
else:
self.g.set_ep(e, 'edge_color', self.colors['edge_inactive'])
self.draw(update_colors=False, **kwargs)
self._update_all_colors() | Draws the network, highlighting active queues.
The colored vertices represent vertices that have at least one
queue on an in-edge that is active. Dark edges represent
queues that are active, light edges represent queues that are
inactive.
Parameters
----------
**kwargs
Any additional parameters to pass to :meth:`.draw`, and
:meth:`.QueueNetworkDiGraph.draw_graph`.
Notes
-----
Active queues are :class:`QueueServers<.QueueServer>` that
accept arrivals from outside the network. The colors are
defined by the class attribute ``colors``. The relevant keys
are ``vertex_active``, ``vertex_inactive``, ``edge_active``,
and ``edge_inactive``. | Below is the the instruction that describes the task:
### Input:
Draws the network, highlighting active queues.
The colored vertices represent vertices that have at least one
queue on an in-edge that is active. Dark edges represent
queues that are active, light edges represent queues that are
inactive.
Parameters
----------
**kwargs
Any additional parameters to pass to :meth:`.draw`, and
:meth:`.QueueNetworkDiGraph.draw_graph`.
Notes
-----
Active queues are :class:`QueueServers<.QueueServer>` that
accept arrivals from outside the network. The colors are
defined by the class attribute ``colors``. The relevant keys
are ``vertex_active``, ``vertex_inactive``, ``edge_active``,
and ``edge_inactive``.
### Response:
def show_active(self, **kwargs):
"""Draws the network, highlighting active queues.
The colored vertices represent vertices that have at least one
queue on an in-edge that is active. Dark edges represent
queues that are active, light edges represent queues that are
inactive.
Parameters
----------
**kwargs
Any additional parameters to pass to :meth:`.draw`, and
:meth:`.QueueNetworkDiGraph.draw_graph`.
Notes
-----
Active queues are :class:`QueueServers<.QueueServer>` that
accept arrivals from outside the network. The colors are
defined by the class attribute ``colors``. The relevant keys
are ``vertex_active``, ``vertex_inactive``, ``edge_active``,
and ``edge_inactive``.
"""
g = self.g
for v in g.nodes():
self.g.set_vp(v, 'vertex_color', [0, 0, 0, 0.9])
is_active = False
my_iter = g.in_edges(v) if g.is_directed() else g.out_edges(v)
for e in my_iter:
ei = g.edge_index[e]
if self.edge2queue[ei]._active:
is_active = True
break
if is_active:
self.g.set_vp(v, 'vertex_fill_color', self.colors['vertex_active'])
else:
self.g.set_vp(v, 'vertex_fill_color', self.colors['vertex_inactive'])
for e in g.edges():
ei = g.edge_index[e]
if self.edge2queue[ei]._active:
self.g.set_ep(e, 'edge_color', self.colors['edge_active'])
else:
self.g.set_ep(e, 'edge_color', self.colors['edge_inactive'])
self.draw(update_colors=False, **kwargs)
self._update_all_colors() |
def add_name(self, name_attr, space_attr, new_schema):
# type: (Text, Optional[Text], NamedSchema) -> Name
"""
Add a new schema object to the name set.
@arg name_attr: name value read in schema
@arg space_attr: namespace value read in schema.
@return: the Name that was just added.
"""
to_add = Name(name_attr, space_attr, self.default_namespace)
if to_add.fullname in VALID_TYPES:
fail_msg = '%s is a reserved type name.' % to_add.fullname
raise SchemaParseException(fail_msg)
elif to_add.fullname in self.names:
fail_msg = 'The name "%s" is already in use.' % to_add.fullname
raise SchemaParseException(fail_msg)
self.names[to_add.fullname] = new_schema
return to_add | Add a new schema object to the name set.
@arg name_attr: name value read in schema
@arg space_attr: namespace value read in schema.
@return: the Name that was just added. | Below is the the instruction that describes the task:
### Input:
Add a new schema object to the name set.
@arg name_attr: name value read in schema
@arg space_attr: namespace value read in schema.
@return: the Name that was just added.
### Response:
def add_name(self, name_attr, space_attr, new_schema):
# type: (Text, Optional[Text], NamedSchema) -> Name
"""
Add a new schema object to the name set.
@arg name_attr: name value read in schema
@arg space_attr: namespace value read in schema.
@return: the Name that was just added.
"""
to_add = Name(name_attr, space_attr, self.default_namespace)
if to_add.fullname in VALID_TYPES:
fail_msg = '%s is a reserved type name.' % to_add.fullname
raise SchemaParseException(fail_msg)
elif to_add.fullname in self.names:
fail_msg = 'The name "%s" is already in use.' % to_add.fullname
raise SchemaParseException(fail_msg)
self.names[to_add.fullname] = new_schema
return to_add |
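A standalone sketch of the reserved-name and duplicate-name guards that add_name applies; VALID_TYPES here is a small illustrative subset of Avro's reserved primitive names:

VALID_TYPES = {"null", "boolean", "int", "long", "float", "double", "bytes", "string"}
names = {}

def register(fullname, schema):
    if fullname in VALID_TYPES:
        raise ValueError("%s is a reserved type name." % fullname)
    if fullname in names:
        raise ValueError('The name "%s" is already in use.' % fullname)
    names[fullname] = schema
    return fullname

register("com.example.User", {"type": "record"})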
def descriptor(obj, path=''):
"""Return descriptor of given object.
If ``path`` is specified, only the content on that path is
returned.
"""
if isinstance(obj, dict):
# Current object is hydrated, so we need to get descriptor from
# dict representation.
desc = obj['__descriptor']
else:
desc = obj.descriptor
resp = dict_dot(desc, path)
if isinstance(resp, list) or isinstance(resp, dict):
return json.dumps(resp)
return resp | Return descriptor of given object.
If ``path`` is specified, only the content on that path is
returned. | Below is the the instruction that describes the task:
### Input:
Return descriptor of given object.
If ``path`` is specified, only the content on that path is
returned.
### Response:
def descriptor(obj, path=''):
"""Return descriptor of given object.
If ``path`` is specified, only the content on that path is
returned.
"""
if isinstance(obj, dict):
# Current object is hydrated, so we need to get descriptor from
# dict representation.
desc = obj['__descriptor']
else:
desc = obj.descriptor
resp = dict_dot(desc, path)
if isinstance(resp, list) or isinstance(resp, dict):
return json.dumps(resp)
return resp |
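dict_dot() is imported from elsewhere in that code base; a minimal dotted-path lookup with comparable behaviour, plus the JSON serialization applied to list/dict results, might look like this:

import json

def dict_dot(d, path):
    # Walk a nested dict by a dotted path; an empty path returns the whole dict.
    if not path:
        return d
    for part in path.split("."):
        d = d[part]
    return d

desc = {"sample": {"organism": "Homo sapiens", "tags": ["rna", "seq"]}}
print(dict_dot(desc, "sample.organism"))          # Homo sapiens
print(json.dumps(dict_dot(desc, "sample.tags")))  # ["rna", "seq"]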
async def get_conversations(self, service_url: str, continuation_token: str=None):
"""
Lists the Conversations in which this bot has participated for a given channel server. The channel server
returns results in pages and each page will include a `continuationToken` that can be used to fetch the next
page of results from the server.
:param service_url:
:param continuation_token:
:return:
"""
client = self.create_connector_client(service_url)
return await client.conversations.get_conversations(continuation_token) | Lists the Conversations in which this bot has participated for a given channel server. The channel server
returns results in pages and each page will include a `continuationToken` that can be used to fetch the next
page of results from the server.
:param service_url:
:param continuation_token:
:return: | Below is the the instruction that describes the task:
### Input:
Lists the Conversations in which this bot has participated for a given channel server. The channel server
returns results in pages and each page will include a `continuationToken` that can be used to fetch the next
page of results from the server.
:param service_url:
:param continuation_token:
:return:
### Response:
async def get_conversations(self, service_url: str, continuation_token: str=None):
"""
Lists the Conversations in which this bot has participated for a given channel server. The channel server
returns results in pages and each page will include a `continuationToken` that can be used to fetch the next
page of results from the server.
:param service_url:
:param continuation_token:
:return:
"""
client = self.create_connector_client(service_url)
return await client.conversations.get_conversations(continuation_token) |
def purge_files(self, exclude_files=["index.html", "error.html"]):
"""
To delete files that are in the manifest
:param excludes_files: list : files to not delete
:return:
"""
for chunk in utils.chunk_list(self._get_manifest_data(), 1000):
try:
self.s3.delete_objects(
Bucket=self.sitename,
Delete={
'Objects': [{"Key": f} for f in chunk
if f not in exclude_files]
}
)
except Exception as ex:
pass | To delete files that are in the manifest
:param excludes_files: list : files to not delete
:return: | Below is the the instruction that describes the task:
### Input:
To delete files that are in the manifest
:param excludes_files: list : files to not delete
:return:
### Response:
def purge_files(self, exclude_files=["index.html", "error.html"]):
"""
To delete files that are in the manifest
:param excludes_files: list : files to not delete
:return:
"""
for chunk in utils.chunk_list(self._get_manifest_data(), 1000):
try:
self.s3.delete_objects(
Bucket=self.sitename,
Delete={
'Objects': [{"Key": f} for f in chunk
if f not in exclude_files]
}
)
except Exception as ex:
pass |
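S3's DeleteObjects call accepts at most 1000 keys per request, which is why the manifest is chunked above. A hedged boto3 sketch of the same idea (bucket and key names are placeholders, and real AWS credentials are needed to run it):

import boto3

def delete_keys(bucket, keys, exclude=("index.html", "error.html")):
    s3 = boto3.client("s3")
    keys = [k for k in keys if k not in exclude]
    for i in range(0, len(keys), 1000):
        chunk = keys[i:i + 1000]
        if chunk:
            s3.delete_objects(
                Bucket=bucket,
                Delete={"Objects": [{"Key": k} for k in chunk]})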
def peptide(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps):
"""
Infer peptides and conduct error-rate estimation in different contexts.
"""
if outfile is None:
outfile = infile
else:
outfile = outfile
infer_peptides(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps) | Infer peptides and conduct error-rate estimation in different contexts. | Below is the the instruction that describes the task:
### Input:
Infer peptides and conduct error-rate estimation in different contexts.
### Response:
def peptide(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps):
"""
Infer peptides and conduct error-rate estimation in different contexts.
"""
if outfile is None:
outfile = infile
else:
outfile = outfile
infer_peptides(infile, outfile, context, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps) |
def get_subplot_at(self, row, column):
"""Return the subplot at row, column position.
:param row,column: specify the subplot.
"""
idx = row * self.columns + column
return self.subplots[idx] | Return the subplot at row, column position.
:param row,column: specify the subplot. | Below is the the instruction that describes the task:
### Input:
Return the subplot at row, column position.
:param row,column: specify the subplot.
### Response:
def get_subplot_at(self, row, column):
"""Return the subplot at row, column position.
:param row,column: specify the subplot.
"""
idx = row * self.columns + column
return self.subplots[idx] |
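The row-major index arithmetic, worked through on a 2x3 grid of placeholder subplot names:

columns = 3
subplots = ["sp%d" % i for i in range(2 * columns)]  # 2 rows x 3 columns
row, column = 1, 2
idx = row * columns + column   # 1 * 3 + 2 == 5
print(subplots[idx])           # sp5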
def randomize_colors(im, keep_vals=[0]):
r'''
Takes a greyscale image and randomly shuffles the greyscale values, so that
all voxels labeled X will be labelled Y, and all voxels labeled Y will be
labeled Z, where X, Y, Z and so on are randomly selected from the values
in the input image.
This function is useful for improving the visibility of images with
neighboring regions that are only incrementally different from each other,
such as that returned by `scipy.ndimage.label`.
Parameters
----------
im : array_like
An ND image of greyscale values.
keep_vals : array_like
Indicate which voxel values should NOT be altered. The default is
`[0]` which is useful for leaving the background of the image
untouched.
Returns
-------
image : ND-array
An image the same size and type as ``im`` but with the greyscale values
reassigned. The unique values in both the input and output images will
be identical.
Notes
-----
If the greyscale values in the input image are not contiguous then the
neither will they be in the output.
Examples
--------
>>> import porespy as ps
>>> import scipy as sp
>>> sp.random.seed(0)
>>> im = sp.random.randint(low=0, high=5, size=[4, 4])
>>> print(im)
[[4 0 3 3]
[3 1 3 2]
[4 0 0 4]
[2 1 0 1]]
>>> im_rand = ps.tools.randomize_colors(im)
>>> print(im_rand)
[[2 0 4 4]
[4 1 4 3]
[2 0 0 2]
[3 1 0 1]]
As can be seen, the 2's have become 3, 3's have become 4, and 4's have
become 2. 1's remained 1 by random accident. 0's remain zeros by default,
but this can be controlled using the `keep_vals` argument.
'''
im_flat = im.flatten()
keep_vals = sp.array(keep_vals)
swap_vals = ~sp.in1d(im_flat, keep_vals)
im_vals = sp.unique(im_flat[swap_vals])
new_vals = sp.random.permutation(im_vals)
im_map = sp.zeros(shape=[sp.amax(im_vals) + 1, ], dtype=int)
im_map[im_vals] = new_vals
im_new = im_map[im_flat]
im_new = sp.reshape(im_new, newshape=sp.shape(im))
return im_new | r'''
Takes a greyscale image and randomly shuffles the greyscale values, so that
all voxels labeled X will be labelled Y, and all voxels labeled Y will be
labeled Z, where X, Y, Z and so on are randomly selected from the values
in the input image.
This function is useful for improving the visibility of images with
neighboring regions that are only incrementally different from each other,
such as that returned by `scipy.ndimage.label`.
Parameters
----------
im : array_like
An ND image of greyscale values.
keep_vals : array_like
Indicate which voxel values should NOT be altered. The default is
`[0]` which is useful for leaving the background of the image
untouched.
Returns
-------
image : ND-array
An image the same size and type as ``im`` but with the greyscale values
reassigned. The unique values in both the input and output images will
be identical.
Notes
-----
If the greyscale values in the input image are not contiguous then the
neither will they be in the output.
Examples
--------
>>> import porespy as ps
>>> import scipy as sp
>>> sp.random.seed(0)
>>> im = sp.random.randint(low=0, high=5, size=[4, 4])
>>> print(im)
[[4 0 3 3]
[3 1 3 2]
[4 0 0 4]
[2 1 0 1]]
>>> im_rand = ps.tools.randomize_colors(im)
>>> print(im_rand)
[[2 0 4 4]
[4 1 4 3]
[2 0 0 2]
[3 1 0 1]]
As can be seen, the 2's have become 3, 3's have become 4, and 4's have
become 2. 1's remained 1 by random accident. 0's remain zeros by default,
but this can be controlled using the `keep_vals` argument. | Below is the the instruction that describes the task:
### Input:
r'''
Takes a greyscale image and randomly shuffles the greyscale values, so that
all voxels labeled X will be labelled Y, and all voxels labeled Y will be
labeled Z, where X, Y, Z and so on are randomly selected from the values
in the input image.
This function is useful for improving the visibility of images with
neighboring regions that are only incrementally different from each other,
such as that returned by `scipy.ndimage.label`.
Parameters
----------
im : array_like
An ND image of greyscale values.
keep_vals : array_like
Indicate which voxel values should NOT be altered. The default is
`[0]` which is useful for leaving the background of the image
untouched.
Returns
-------
image : ND-array
An image the same size and type as ``im`` but with the greyscale values
reassigned. The unique values in both the input and output images will
be identical.
Notes
-----
If the greyscale values in the input image are not contiguous then the
neither will they be in the output.
Examples
--------
>>> import porespy as ps
>>> import scipy as sp
>>> sp.random.seed(0)
>>> im = sp.random.randint(low=0, high=5, size=[4, 4])
>>> print(im)
[[4 0 3 3]
[3 1 3 2]
[4 0 0 4]
[2 1 0 1]]
>>> im_rand = ps.tools.randomize_colors(im)
>>> print(im_rand)
[[2 0 4 4]
[4 1 4 3]
[2 0 0 2]
[3 1 0 1]]
As can be seen, the 2's have become 3, 3's have become 4, and 4's have
become 2. 1's remained 1 by random accident. 0's remain zeros by default,
but this can be controlled using the `keep_vals` argument.
### Response:
def randomize_colors(im, keep_vals=[0]):
r'''
Takes a greyscale image and randomly shuffles the greyscale values, so that
all voxels labeled X will be labelled Y, and all voxels labeled Y will be
labeled Z, where X, Y, Z and so on are randomly selected from the values
in the input image.
This function is useful for improving the visibility of images with
neighboring regions that are only incrementally different from each other,
such as that returned by `scipy.ndimage.label`.
Parameters
----------
im : array_like
An ND image of greyscale values.
keep_vals : array_like
Indicate which voxel values should NOT be altered. The default is
`[0]` which is useful for leaving the background of the image
untouched.
Returns
-------
image : ND-array
An image the same size and type as ``im`` but with the greyscale values
reassigned. The unique values in both the input and output images will
be identical.
Notes
-----
If the greyscale values in the input image are not contiguous then the
neither will they be in the output.
Examples
--------
>>> import porespy as ps
>>> import scipy as sp
>>> sp.random.seed(0)
>>> im = sp.random.randint(low=0, high=5, size=[4, 4])
>>> print(im)
[[4 0 3 3]
[3 1 3 2]
[4 0 0 4]
[2 1 0 1]]
>>> im_rand = ps.tools.randomize_colors(im)
>>> print(im_rand)
[[2 0 4 4]
[4 1 4 3]
[2 0 0 2]
[3 1 0 1]]
As can be seen, the 2's have become 3, 3's have become 4, and 4's have
become 2. 1's remained 1 by random accident. 0's remain zeros by default,
but this can be controlled using the `keep_vals` argument.
'''
im_flat = im.flatten()
keep_vals = sp.array(keep_vals)
swap_vals = ~sp.in1d(im_flat, keep_vals)
im_vals = sp.unique(im_flat[swap_vals])
new_vals = sp.random.permutation(im_vals)
im_map = sp.zeros(shape=[sp.amax(im_vals) + 1, ], dtype=int)
im_map[im_vals] = new_vals
im_new = im_map[im_flat]
im_new = sp.reshape(im_new, newshape=sp.shape(im))
return im_new |
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
C_SITE = self.SITE_COEFFS[imt]
s_c, idx = self._get_site_classification(sites.vs30)
sa_rock = (self.get_magnitude_scaling_term(C, rup) +
self.get_sof_term(C, rup) +
self.get_depth_term(C, rup) +
self.get_distance_term(C, dists, rup))
sa_soil = self.add_site_amplification(C, C_SITE, sites,
sa_rock, idx, rup)
stddevs = self.get_stddevs(C, sites.vs30.shape, idx, stddev_types)
return sa_soil, stddevs | See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values. | Below is the the instruction that describes the task:
### Input:
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
### Response:
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
C_SITE = self.SITE_COEFFS[imt]
s_c, idx = self._get_site_classification(sites.vs30)
sa_rock = (self.get_magnitude_scaling_term(C, rup) +
self.get_sof_term(C, rup) +
self.get_depth_term(C, rup) +
self.get_distance_term(C, dists, rup))
sa_soil = self.add_site_amplification(C, C_SITE, sites,
sa_rock, idx, rup)
stddevs = self.get_stddevs(C, sites.vs30.shape, idx, stddev_types)
return sa_soil, stddevs |
def _simplify_doc(doc):
"""
Limit a document to just the three fields we should upload.
"""
# Mutate a copy of the document to fill in missing fields
doc = dict(doc)
if 'text' not in doc:
raise ValueError("The document {!r} has no text field".format(doc))
return {
'text': doc['text'],
'metadata': doc.get('metadata', []),
'title': doc.get('title', '')
} | Limit a document to just the three fields we should upload. | Below is the the instruction that describes the task:
### Input:
Limit a document to just the three fields we should upload.
### Response:
def _simplify_doc(doc):
"""
Limit a document to just the three fields we should upload.
"""
# Mutate a copy of the document to fill in missing fields
doc = dict(doc)
if 'text' not in doc:
raise ValueError("The document {!r} has no text field".format(doc))
return {
'text': doc['text'],
'metadata': doc.get('metadata', []),
'title': doc.get('title', '')
} |
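Assuming the function above is in scope, feeding it a document with extra fields keeps only the three allowed ones, and a document without 'text' raises ValueError:

doc = {"text": "hello", "title": "greeting", "language": "en"}
print(_simplify_doc(doc))
# {'text': 'hello', 'metadata': [], 'title': 'greeting'}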
def parse_at_element(
self,
element, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the primitive value at the XML element."""
if self._attribute:
parsed_value = self._parse_attribute(element, self._attribute, state)
else:
parsed_value = self._parser_func(element.text, state)
return _hooks_apply_after_parse(self._hooks, state, parsed_value) | Parse the primitive value at the XML element. | Below is the the instruction that describes the task:
### Input:
Parse the primitive value at the XML element.
### Response:
def parse_at_element(
self,
element, # type: ET.Element
state # type: _ProcessorState
):
# type: (...) -> Any
"""Parse the primitive value at the XML element."""
if self._attribute:
parsed_value = self._parse_attribute(element, self._attribute, state)
else:
parsed_value = self._parser_func(element.text, state)
return _hooks_apply_after_parse(self._hooks, state, parsed_value) |
def _dispatch(self, method, params):
"""Customise exception handling"""
self._count += 1
func = getattr(self, method)
try:
return func(*params)
except Exception as e:
traceback.print_exc()
raise e | Customise exception handling | Below is the the instruction that describes the task:
### Input:
Customise exception handling
### Response:
def _dispatch(self, method, params):
"""Customise exception handling"""
self._count += 1
func = getattr(self, method)
try:
return func(*params)
except Exception as e:
traceback.print_exc()
raise e |
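The same print-then-reraise idea can be grafted onto the standard library's XML-RPC server; a minimal stdlib sketch (the class name is made up):

import traceback
from xmlrpc.server import SimpleXMLRPCServer

class LoudXMLRPCServer(SimpleXMLRPCServer):
    def _dispatch(self, method, params):
        try:
            return super()._dispatch(method, params)
        except Exception:
            # Log the traceback server-side before the Fault goes back to the client.
            traceback.print_exc()
            raise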
def _scale_steps(self, duration, commands, *steps):
""" Scale steps
:param duration: Total time (in seconds)
:param commands: Number of commands to be executed.
:param steps: Steps for one or many properties to take.
:return: Steps scaled to time and total.
"""
factor = duration / ((self.wait * self.reps * commands) - \
(self.wait * self.reps * self._bridge.active))
steps = [math.ceil(factor * step) for step in steps]
if len(steps) == 1:
return steps[0]
else:
return steps | Scale steps
:param duration: Total time (in seconds)
:param commands: Number of commands to be executed.
:param steps: Steps for one or many properties to take.
:return: Steps scaled to time and total. | Below is the the instruction that describes the task:
### Input:
Scale steps
:param duration: Total time (in seconds)
:param commands: Number of commands to be executed.
:param steps: Steps for one or many properties to take.
:return: Steps scaled to time and total.
### Response:
def _scale_steps(self, duration, commands, *steps):
""" Scale steps
:param duration: Total time (in seconds)
:param commands: Number of commands to be executed.
:param steps: Steps for one or many properties to take.
:return: Steps scaled to time and total.
"""
factor = duration / ((self.wait * self.reps * commands) - \
(self.wait * self.reps * self._bridge.active))
steps = [math.ceil(factor * step) for step in steps]
if len(steps) == 1:
return steps[0]
else:
return steps |
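A worked example of the scaling formula with illustrative numbers (wait, reps and the active-bridge count are assumptions, not library defaults):

import math

wait, reps, active = 0.1, 1, 1
duration, commands = 30.0, 2
steps = (25, 100)

factor = duration / ((wait * reps * commands) - (wait * reps * active))
print(factor, [math.ceil(factor * s) for s in steps])  # 300.0 [7500, 30000]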
def result(self):
'''
The result from the executed task. Raises NotExecutedYet if not yet
executed.
'''
if self.cancelled or (self._fn is not None):
raise NotExecutedYet()
if self._fn_exc is not None:
six.reraise(*self._fn_exc)
else:
return self._fn_res | The result from the executed task. Raises NotExecutedYet if not yet
executed. | Below is the the instruction that describes the task:
### Input:
The result from the executed task. Raises NotExecutedYet if not yet
executed.
### Response:
def result(self):
'''
The result from the executed task. Raises NotExecutedYet if not yet
executed.
'''
if self.cancelled or (self._fn is not None):
raise NotExecutedYet()
if self._fn_exc is not None:
six.reraise(*self._fn_exc)
else:
return self._fn_res |
def loadFromURL(self, url, schema=None):
"""Return an XMLSchema instance loaded from the given url.
url -- URL to dereference
schema -- Optional XMLSchema instance.
"""
reader = self.__readerClass()
if self.__base_url:
url = basejoin(self.__base_url,url)
reader.loadFromURL(url)
schema = schema or XMLSchema()
schema.setBaseUrl(url)
schema.load(reader)
self.__setIncludes(schema)
self.__setImports(schema)
return schema | Return an XMLSchema instance loaded from the given url.
url -- URL to dereference
schema -- Optional XMLSchema instance. | Below is the the instruction that describes the task:
### Input:
Return an XMLSchema instance loaded from the given url.
url -- URL to dereference
schema -- Optional XMLSchema instance.
### Response:
def loadFromURL(self, url, schema=None):
"""Return an XMLSchema instance loaded from the given url.
url -- URL to dereference
schema -- Optional XMLSchema instance.
"""
reader = self.__readerClass()
if self.__base_url:
url = basejoin(self.__base_url,url)
reader.loadFromURL(url)
schema = schema or XMLSchema()
schema.setBaseUrl(url)
schema.load(reader)
self.__setIncludes(schema)
self.__setImports(schema)
return schema |
def set_tensor_symmetry_old(force_constants,
lattice, # column vectors
positions,
symmetry):
"""Full force constants are symmetrized using crystal symmetry.
This method extracts symmetrically equivalent sets of atomic pairs and
take sum of their force constants and average the sum.
Since get_force_constants_disps may include crystal symmetry, this method
is usually meaningless.
"""
rotations = symmetry.get_symmetry_operations()['rotations']
translations = symmetry.get_symmetry_operations()['translations']
symprec = symmetry.get_symmetry_tolerance()
fc_bak = force_constants.copy()
# Create mapping table between an atom and the symmetry operated atom
# map[ i, j ]
# i: atom index
# j: operation index
mapping = []
for pos_i in positions:
map_local = []
for rot, trans in zip(rotations, translations):
rot_pos = np.dot(pos_i, rot.T) + trans
for j, pos_j in enumerate(positions):
diff = pos_j - rot_pos
diff -= np.rint(diff)
diff = np.dot(diff, lattice.T)
if np.linalg.norm(diff) < symprec:
map_local.append(j)
break
mapping.append(map_local)
mapping = np.array(mapping)
# Look for the symmetrically equivalent force constant tensors
for i, pos_i in enumerate(positions):
for j, pos_j in enumerate(positions):
tmp_fc = np.zeros((3, 3), dtype='double')
for k, rot in enumerate(rotations):
cart_rot = similarity_transformation(lattice, rot)
# Reverse rotation of force constant is summed
tmp_fc += similarity_transformation(cart_rot.T,
fc_bak[mapping[i, k],
mapping[j, k]])
# Take average and set to new force cosntants
force_constants[i, j] = tmp_fc / len(rotations) | Full force constants are symmetrized using crystal symmetry.
This method extracts symmetrically equivalent sets of atomic pairs and
take sum of their force constants and average the sum.
Since get_force_constants_disps may include crystal symmetry, this method
is usually meaningless. | Below is the the instruction that describes the task:
### Input:
Full force constants are symmetrized using crystal symmetry.
This method extracts symmetrically equivalent sets of atomic pairs and
take sum of their force constants and average the sum.
Since get_force_constants_disps may include crystal symmetry, this method
is usually meaningless.
### Response:
def set_tensor_symmetry_old(force_constants,
lattice, # column vectors
positions,
symmetry):
"""Full force constants are symmetrized using crystal symmetry.
This method extracts symmetrically equivalent sets of atomic pairs and
take sum of their force constants and average the sum.
Since get_force_constants_disps may include crystal symmetry, this method
is usually meaningless.
"""
rotations = symmetry.get_symmetry_operations()['rotations']
translations = symmetry.get_symmetry_operations()['translations']
symprec = symmetry.get_symmetry_tolerance()
fc_bak = force_constants.copy()
# Create mapping table between an atom and the symmetry operated atom
# map[ i, j ]
# i: atom index
# j: operation index
mapping = []
for pos_i in positions:
map_local = []
for rot, trans in zip(rotations, translations):
rot_pos = np.dot(pos_i, rot.T) + trans
for j, pos_j in enumerate(positions):
diff = pos_j - rot_pos
diff -= np.rint(diff)
diff = np.dot(diff, lattice.T)
if np.linalg.norm(diff) < symprec:
map_local.append(j)
break
mapping.append(map_local)
mapping = np.array(mapping)
# Look for the symmetrically equivalent force constant tensors
for i, pos_i in enumerate(positions):
for j, pos_j in enumerate(positions):
tmp_fc = np.zeros((3, 3), dtype='double')
for k, rot in enumerate(rotations):
cart_rot = similarity_transformation(lattice, rot)
# Reverse rotation of force constant is summed
tmp_fc += similarity_transformation(cart_rot.T,
fc_bak[mapping[i, k],
mapping[j, k]])
# Take average and set to new force cosntants
force_constants[i, j] = tmp_fc / len(rotations) |
def unmount(self, force=None, auth_no_user_interaction=None):
"""Unmount filesystem."""
return self._M.Filesystem.Unmount(
'(a{sv})',
filter_opt({
'force': ('b', force),
'auth.no_user_interaction': ('b', auth_no_user_interaction),
})
) | Unmount filesystem. | Below is the the instruction that describes the task:
### Input:
Unmount filesystem.
### Response:
def unmount(self, force=None, auth_no_user_interaction=None):
"""Unmount filesystem."""
return self._M.Filesystem.Unmount(
'(a{sv})',
filter_opt({
'force': ('b', force),
'auth.no_user_interaction': ('b', auth_no_user_interaction),
})
) |
def keyboard(table, day=None):
"""Handler for showing the keyboard statistics page."""
cols, group = "realkey AS key, COUNT(*) AS count", "realkey"
where = (("day", day),) if day else ()
counts_display = counts = db.fetch(table, cols, where, group, "count DESC")
if "combos" == table:
counts_display = db.fetch(table, "key, COUNT(*) AS count", where,
"key", "count DESC")
events = db.fetch(table, where=where, order="stamp")
for e in events: e["dt"] = datetime.datetime.fromtimestamp(e["stamp"])
stats, collatedevents = stats_keyboard(events, table)
days, input = db.fetch("counts", order="day", type=table), "keyboard"
return bottle.template("heatmap.tpl", locals(), conf=conf) | Handler for showing the keyboard statistics page. | Below is the the instruction that describes the task:
### Input:
Handler for showing the keyboard statistics page.
### Response:
def keyboard(table, day=None):
"""Handler for showing the keyboard statistics page."""
cols, group = "realkey AS key, COUNT(*) AS count", "realkey"
where = (("day", day),) if day else ()
counts_display = counts = db.fetch(table, cols, where, group, "count DESC")
if "combos" == table:
counts_display = db.fetch(table, "key, COUNT(*) AS count", where,
"key", "count DESC")
events = db.fetch(table, where=where, order="stamp")
for e in events: e["dt"] = datetime.datetime.fromtimestamp(e["stamp"])
stats, collatedevents = stats_keyboard(events, table)
days, input = db.fetch("counts", order="day", type=table), "keyboard"
return bottle.template("heatmap.tpl", locals(), conf=conf) |
def mag_calibration(self):
"""Perform magnetometer calibration for current IMU."""
self.calibration_state = self.CAL_MAG
self.mag_dialog = SK8MagDialog(self.sk8.get_imu(self.spinIMU.value()), self)
if self.mag_dialog.exec_() == QDialog.Rejected:
return
self.calculate_mag_calibration(self.mag_dialog.samples) | Perform magnetometer calibration for current IMU. | Below is the the instruction that describes the task:
### Input:
Perform magnetometer calibration for current IMU.
### Response:
def mag_calibration(self):
"""Perform magnetometer calibration for current IMU."""
self.calibration_state = self.CAL_MAG
self.mag_dialog = SK8MagDialog(self.sk8.get_imu(self.spinIMU.value()), self)
if self.mag_dialog.exec_() == QDialog.Rejected:
return
self.calculate_mag_calibration(self.mag_dialog.samples) |
def print_result_from_timeit(stmt='pass', setup='pass', number=1000000):
"""
Clean function to know how much time took the execution of one statement
"""
units = ["s", "ms", "us", "ns"]
duration = timeit(stmt, setup, number=int(number))
avg_duration = duration / float(number)
thousands = int(math.floor(math.log(avg_duration, 1000)))
print("Total time: %fs. Average run: %.3f%s." % (
duration, avg_duration * (1000 ** -thousands), units[-thousands])) | Clean function to know how much time took the execution of one statement | Below is the the instruction that describes the task:
### Input:
Clean function to know how much time took the execution of one statement
### Response:
def print_result_from_timeit(stmt='pass', setup='pass', number=1000000):
"""
Clean function to know how much time took the execution of one statement
"""
units = ["s", "ms", "us", "ns"]
duration = timeit(stmt, setup, number=int(number))
avg_duration = duration / float(number)
thousands = int(math.floor(math.log(avg_duration, 1000)))
print("Total time: %fs. Average run: %.3f%s." % (
duration, avg_duration * (1000 ** -thousands), units[-thousands])) |
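Assuming the helper above is in scope, a call like the following reports the total and the per-run average in an auto-selected unit (the printed numbers will vary by machine):

print_result_from_timeit("sum(range(100))", number=10000)
# e.g.  Total time: 0.012345s. Average run: 1.235us.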
def deactivate_in_ec(self, ec_index):
'''Deactivate this component in an execution context.
@param ec_index The index of the execution context to deactivate in.
This index is into the total array of contexts, that
is both owned and participating contexts. If the value
of ec_index is greater than the length of
@ref owned_ecs, that length is subtracted from
ec_index and the result used as an index into
@ref participating_ecs.
'''
with self._mutex:
if ec_index >= len(self.owned_ecs):
ec_index -= len(self.owned_ecs)
if ec_index >= len(self.participating_ecs):
raise exceptions.BadECIndexError(ec_index)
ec = self.participating_ecs[ec_index]
else:
ec = self.owned_ecs[ec_index]
ec.deactivate_component(self._obj) | Deactivate this component in an execution context.
@param ec_index The index of the execution context to deactivate in.
This index is into the total array of contexts, that
is both owned and participating contexts. If the value
of ec_index is greater than the length of
@ref owned_ecs, that length is subtracted from
ec_index and the result used as an index into
@ref participating_ecs. | Below is the the instruction that describes the task:
### Input:
Deactivate this component in an execution context.
@param ec_index The index of the execution context to deactivate in.
This index is into the total array of contexts, that
is both owned and participating contexts. If the value
of ec_index is greater than the length of
@ref owned_ecs, that length is subtracted from
ec_index and the result used as an index into
@ref participating_ecs.
### Response:
def deactivate_in_ec(self, ec_index):
'''Deactivate this component in an execution context.
@param ec_index The index of the execution context to deactivate in.
This index is into the total array of contexts, that
is both owned and participating contexts. If the value
of ec_index is greater than the length of
@ref owned_ecs, that length is subtracted from
ec_index and the result used as an index into
@ref participating_ecs.
'''
with self._mutex:
if ec_index >= len(self.owned_ecs):
ec_index -= len(self.owned_ecs)
if ec_index >= len(self.participating_ecs):
raise exceptions.BadECIndexError(ec_index)
ec = self.participating_ecs[ec_index]
else:
ec = self.owned_ecs[ec_index]
ec.deactivate_component(self._obj) |
def add_layer3_cluster_interface(self, interface_id, cluster_virtual=None,
network_value=None, macaddress=None, nodes=None, cvi_mode='packetdispatch',
zone_ref=None, comment=None, **kw):
"""
Add cluster virtual interface. A "CVI" interface is used as a VIP
address for clustered engines. Providing 'nodes' will create the
node specific interfaces. You can also add a cluster address with only
a CVI, or only NDI's.
Add CVI only::
engine.physical_interface.add_cluster_virtual_interface(
interface_id=30,
cluster_virtual='30.30.30.1',
network_value='30.30.30.0/24',
macaddress='02:02:02:02:02:06')
Add NDI's only::
engine.physical_interface.add_cluster_virtual_interface(
interface_id=30,
nodes=nodes)
Add CVI and NDI's::
engine.physical_interface.add_cluster_virtual_interface(
cluster_virtual='5.5.5.1',
network_value='5.5.5.0/24',
macaddress='02:03:03:03:03:03',
nodes=[{'address':'5.5.5.2', 'network_value':'5.5.5.0/24', 'nodeid':1},
{'address':'5.5.5.3', 'network_value':'5.5.5.0/24', 'nodeid':2}])
.. versionchanged:: 0.6.1
Renamed from add_cluster_virtual_interface
:param str,int interface_id: physical interface identifier
:param str cluster_virtual: CVI address (VIP) for this interface
:param str network_value: network value for VIP; format: 10.10.10.0/24
:param str macaddress: mandatory mac address if cluster_virtual and
cluster_mask provided
:param list nodes: list of dictionary items identifying cluster nodes
:param str cvi_mode: packetdispatch is recommended setting
:param str zone_ref: zone reference, can be name, href or Zone
:param kw: key word arguments are valid NodeInterface sub-interface
settings passed in during create time. For example, 'backup_mgt=True'
to enable this interface as the management backup.
:raises EngineCommandFailed: failure creating interface
:return: None
"""
interfaces = [{'nodes': nodes if nodes else [],
'cluster_virtual': cluster_virtual, 'network_value': network_value}]
try:
interface = self._engine.interface.get(interface_id)
interface._add_interface(interface_id, interfaces=interfaces)
return interface.update()
except InterfaceNotFound:
interface = ClusterPhysicalInterface(
engine=self._engine,
interface_id=interface_id,
interfaces=interfaces,
cvi_mode=cvi_mode if macaddress else 'none',
macaddress=macaddress,
zone_ref=zone_ref, comment=comment, **kw)
return self._engine.add_interface(interface) | Add cluster virtual interface. A "CVI" interface is used as a VIP
address for clustered engines. Providing 'nodes' will create the
node specific interfaces. You can also add a cluster address with only
a CVI, or only NDI's.
Add CVI only::
engine.physical_interface.add_cluster_virtual_interface(
interface_id=30,
cluster_virtual='30.30.30.1',
network_value='30.30.30.0/24',
macaddress='02:02:02:02:02:06')
Add NDI's only::
engine.physical_interface.add_cluster_virtual_interface(
interface_id=30,
nodes=nodes)
Add CVI and NDI's::
engine.physical_interface.add_cluster_virtual_interface(
cluster_virtual='5.5.5.1',
network_value='5.5.5.0/24',
macaddress='02:03:03:03:03:03',
nodes=[{'address':'5.5.5.2', 'network_value':'5.5.5.0/24', 'nodeid':1},
{'address':'5.5.5.3', 'network_value':'5.5.5.0/24', 'nodeid':2}])
.. versionchanged:: 0.6.1
Renamed from add_cluster_virtual_interface
:param str,int interface_id: physical interface identifier
:param str cluster_virtual: CVI address (VIP) for this interface
:param str network_value: network value for VIP; format: 10.10.10.0/24
:param str macaddress: mandatory mac address if cluster_virtual and
cluster_mask provided
:param list nodes: list of dictionary items identifying cluster nodes
:param str cvi_mode: packetdispatch is recommended setting
:param str zone_ref: zone reference, can be name, href or Zone
:param kw: key word arguments are valid NodeInterface sub-interface
settings passed in during create time. For example, 'backup_mgt=True'
to enable this interface as the management backup.
:raises EngineCommandFailed: failure creating interface
:return: None | Below is the the instruction that describes the task:
### Input:
Add cluster virtual interface. A "CVI" interface is used as a VIP
address for clustered engines. Providing 'nodes' will create the
node specific interfaces. You can also add a cluster address with only
a CVI, or only NDI's.
Add CVI only::
engine.physical_interface.add_cluster_virtual_interface(
interface_id=30,
cluster_virtual='30.30.30.1',
network_value='30.30.30.0/24',
macaddress='02:02:02:02:02:06')
Add NDI's only::
engine.physical_interface.add_cluster_virtual_interface(
interface_id=30,
nodes=nodes)
Add CVI and NDI's::
engine.physical_interface.add_cluster_virtual_interface(
cluster_virtual='5.5.5.1',
network_value='5.5.5.0/24',
macaddress='02:03:03:03:03:03',
nodes=[{'address':'5.5.5.2', 'network_value':'5.5.5.0/24', 'nodeid':1},
{'address':'5.5.5.3', 'network_value':'5.5.5.0/24', 'nodeid':2}])
.. versionchanged:: 0.6.1
Renamed from add_cluster_virtual_interface
:param str,int interface_id: physical interface identifier
:param str cluster_virtual: CVI address (VIP) for this interface
:param str network_value: network value for VIP; format: 10.10.10.0/24
:param str macaddress: mandatory mac address if cluster_virtual and
cluster_mask provided
:param list nodes: list of dictionary items identifying cluster nodes
:param str cvi_mode: packetdispatch is recommended setting
:param str zone_ref: zone reference, can be name, href or Zone
:param kw: key word arguments are valid NodeInterface sub-interface
settings passed in during create time. For example, 'backup_mgt=True'
to enable this interface as the management backup.
:raises EngineCommandFailed: failure creating interface
:return: None
### Response:
def add_layer3_cluster_interface(self, interface_id, cluster_virtual=None,
network_value=None, macaddress=None, nodes=None, cvi_mode='packetdispatch',
zone_ref=None, comment=None, **kw):
"""
Add cluster virtual interface. A "CVI" interface is used as a VIP
address for clustered engines. Providing 'nodes' will create the
node specific interfaces. You can also add a cluster address with only
a CVI, or only NDI's.
Add CVI only::
engine.physical_interface.add_cluster_virtual_interface(
interface_id=30,
cluster_virtual='30.30.30.1',
network_value='30.30.30.0/24',
macaddress='02:02:02:02:02:06')
Add NDI's only::
engine.physical_interface.add_cluster_virtual_interface(
interface_id=30,
nodes=nodes)
Add CVI and NDI's::
engine.physical_interface.add_cluster_virtual_interface(
cluster_virtual='5.5.5.1',
network_value='5.5.5.0/24',
macaddress='02:03:03:03:03:03',
nodes=[{'address':'5.5.5.2', 'network_value':'5.5.5.0/24', 'nodeid':1},
{'address':'5.5.5.3', 'network_value':'5.5.5.0/24', 'nodeid':2}])
.. versionchanged:: 0.6.1
Renamed from add_cluster_virtual_interface
:param str,int interface_id: physical interface identifier
:param str cluster_virtual: CVI address (VIP) for this interface
:param str network_value: network value for VIP; format: 10.10.10.0/24
:param str macaddress: mandatory mac address if cluster_virtual and
cluster_mask provided
:param list nodes: list of dictionary items identifying cluster nodes
:param str cvi_mode: packetdispatch is recommended setting
:param str zone_ref: zone reference, can be name, href or Zone
:param kw: key word arguments are valid NodeInterface sub-interface
settings passed in during create time. For example, 'backup_mgt=True'
to enable this interface as the management backup.
:raises EngineCommandFailed: failure creating interface
:return: None
"""
interfaces = [{'nodes': nodes if nodes else [],
'cluster_virtual': cluster_virtual, 'network_value': network_value}]
try:
interface = self._engine.interface.get(interface_id)
interface._add_interface(interface_id, interfaces=interfaces)
return interface.update()
except InterfaceNotFound:
interface = ClusterPhysicalInterface(
engine=self._engine,
interface_id=interface_id,
interfaces=interfaces,
cvi_mode=cvi_mode if macaddress else 'none',
macaddress=macaddress,
zone_ref=zone_ref, comment=comment, **kw)
return self._engine.add_interface(interface) |
def _extract_battery_info_from_sys(self):
"""
Extract the percent charged, charging state, time remaining,
and capacity for a battery, using Linux's kernel /sys interface
Only available in kernel 2.6.24(?) and newer. Before kernel provided
a similar, yet incompatible interface in /proc
"""
if not os.listdir(self.sys_battery_path):
return []
def _parse_battery_info(sys_path):
"""
Extract battery information from uevent file, already convert to
int if necessary
"""
raw_values = {}
with open(os.path.join(sys_path, u"uevent")) as f:
for var in f.read().splitlines():
k, v = var.split("=")
try:
raw_values[k] = int(v)
except ValueError:
raw_values[k] = v
return raw_values
battery_list = []
for path in iglob(os.path.join(self.sys_battery_path, "BAT*")):
r = _parse_battery_info(path)
capacity = r.get(
"POWER_SUPPLY_ENERGY_FULL", r.get("POWER_SUPPLY_CHARGE_FULL")
)
present_rate = r.get(
"POWER_SUPPLY_POWER_NOW",
r.get("POWER_SUPPLY_CURRENT_NOW", r.get("POWER_SUPPLY_VOLTAGE_NOW")),
)
remaining_energy = r.get(
"POWER_SUPPLY_ENERGY_NOW", r.get("POWER_SUPPLY_CHARGE_NOW")
)
battery = {}
battery["capacity"] = capacity
battery["charging"] = "Charging" in r["POWER_SUPPLY_STATUS"]
battery["percent_charged"] = int(
math.floor(remaining_energy / capacity * 100)
)
try:
if battery["charging"]:
time_in_secs = (capacity - remaining_energy) / present_rate * 3600
else:
time_in_secs = remaining_energy / present_rate * 3600
battery["time_remaining"] = self._seconds_to_hms(time_in_secs)
except ZeroDivisionError:
# Battery is either full charged or is not discharging
battery["time_remaining"] = FULLY_CHARGED
battery_list.append(battery)
return battery_list | Extract the percent charged, charging state, time remaining,
and capacity for a battery, using Linux's kernel /sys interface
Only available in kernel 2.6.24(?) and newer. Before kernel provided
a similar, yet incompatible interface in /proc | Below is the the instruction that describes the task:
### Input:
Extract the percent charged, charging state, time remaining,
and capacity for a battery, using Linux's kernel /sys interface
Only available in kernel 2.6.24(?) and newer. Before kernel provided
a similar, yet incompatible interface in /proc
### Response:
def _extract_battery_info_from_sys(self):
"""
Extract the percent charged, charging state, time remaining,
and capacity for a battery, using Linux's kernel /sys interface
Only available in kernel 2.6.24(?) and newer. Before kernel provided
a similar, yet incompatible interface in /proc
"""
if not os.listdir(self.sys_battery_path):
return []
def _parse_battery_info(sys_path):
"""
Extract battery information from uevent file, already convert to
int if necessary
"""
raw_values = {}
with open(os.path.join(sys_path, u"uevent")) as f:
for var in f.read().splitlines():
k, v = var.split("=")
try:
raw_values[k] = int(v)
except ValueError:
raw_values[k] = v
return raw_values
battery_list = []
for path in iglob(os.path.join(self.sys_battery_path, "BAT*")):
r = _parse_battery_info(path)
capacity = r.get(
"POWER_SUPPLY_ENERGY_FULL", r.get("POWER_SUPPLY_CHARGE_FULL")
)
present_rate = r.get(
"POWER_SUPPLY_POWER_NOW",
r.get("POWER_SUPPLY_CURRENT_NOW", r.get("POWER_SUPPLY_VOLTAGE_NOW")),
)
remaining_energy = r.get(
"POWER_SUPPLY_ENERGY_NOW", r.get("POWER_SUPPLY_CHARGE_NOW")
)
battery = {}
battery["capacity"] = capacity
battery["charging"] = "Charging" in r["POWER_SUPPLY_STATUS"]
battery["percent_charged"] = int(
math.floor(remaining_energy / capacity * 100)
)
try:
if battery["charging"]:
time_in_secs = (capacity - remaining_energy) / present_rate * 3600
else:
time_in_secs = remaining_energy / present_rate * 3600
battery["time_remaining"] = self._seconds_to_hms(time_in_secs)
except ZeroDivisionError:
# Battery is either full charged or is not discharging
battery["time_remaining"] = FULLY_CHARGED
battery_list.append(battery)
return battery_list |
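A standalone sketch of the uevent parsing and arithmetic used above, fed a fabricated uevent body (field names follow the power_supply sysfs convention; the values are made up):

sample_uevent = """POWER_SUPPLY_STATUS=Discharging
POWER_SUPPLY_CHARGE_FULL=5000000
POWER_SUPPLY_CHARGE_NOW=2500000
POWER_SUPPLY_CURRENT_NOW=1000000"""

raw = {}
for line in sample_uevent.splitlines():
    k, v = line.split("=")
    try:
        raw[k] = int(v)
    except ValueError:
        raw[k] = v

percent = raw["POWER_SUPPLY_CHARGE_NOW"] * 100 // raw["POWER_SUPPLY_CHARGE_FULL"]
hours_left = raw["POWER_SUPPLY_CHARGE_NOW"] / raw["POWER_SUPPLY_CURRENT_NOW"]
print(percent, hours_left)  # 50 2.5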