code (stringlengths 75–104k) | docstring (stringlengths 1–46.9k) | text (stringlengths 164–112k) |
---|---|---|
def create_class(self, method):
"""
Build the estimator class.
Returns
-------
:return : string
The built class as string.
"""
self.__dict__.update(dict(method=method))
temp_class = self.temp('separated.class')
return temp_class.format(**self.__dict__) | Build the estimator class.
Returns
-------
:return : string
The built class as string. | Below is the instruction that describes the task:
### Input:
Build the estimator class.
Returns
-------
:return : string
The built class as string.
### Response:
def create_class(self, method):
"""
Build the estimator class.
Returns
-------
:return : string
The built class as string.
"""
self.__dict__.update(dict(method=method))
temp_class = self.temp('separated.class')
return temp_class.format(**self.__dict__) |
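The row above fills a named template with the object's attribute dictionary via `str.format(**self.__dict__)`. Below is a minimal, self-contained sketch of that pattern; the template text and attribute names are illustrative assumptions, not the library's actual `separated.class` template.

```python
class ClassBuilder:
    """Toy illustration of template-based code generation via str.format."""

    def __init__(self, class_name, estimator_type):
        self.class_name = class_name
        self.estimator_type = estimator_type

    def temp(self, name):
        # Stand-in for a template lookup; a real implementation would read
        # language-specific template files from disk.
        templates = {
            'separated.class': 'class {class_name} {{  // {estimator_type} ({method})\n}}'
        }
        return templates[name]

    def create_class(self, method):
        # Store the method name so the template can reference it by key.
        self.__dict__.update(dict(method=method))
        temp_class = self.temp('separated.class')
        # Every instance attribute becomes a named placeholder value.
        return temp_class.format(**self.__dict__)


print(ClassBuilder('MLPClassifier', 'classifier').create_class('predict'))
```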
def align(data, align='hyper', normalize=None, ndims=None, method=None,
format_data=True):
"""
Aligns a list of arrays
This function takes a list of high dimensional arrays and 'hyperaligns' them
to a 'common' space, or coordinate system following the approach outlined by
Haxby et al, 2011. Hyperalignment uses linear transformations (rotation,
reflection, translation, scaling) to register a group of arrays to a common
space. This can be useful when two or more datasets describe an identical
or similar system, but may not be in the same coordinate system. For example,
consider fMRI recordings (voxels by time) from the visual
cortex of a group of subjects watching the same movie: The brain responses
should be highly similar, but the coordinates may not be aligned.
Haxby JV, Guntupalli JS, Connolly AC, Halchenko YO, Conroy BR, Gobbini
MI, Hanke M, and Ramadge PJ (2011) A common, high-dimensional model of
the representational space in human ventral temporal cortex. Neuron 72,
404 -- 416. (used to implement hyperalignment, see https://github.com/PyMVPA/PyMVPA)
Brain Imaging Analysis Kit, http://brainiak.org. (used to implement Shared Response Model [SRM], see https://github.com/IntelPNI/brainiak)
Parameters
----------
data : numpy array, pandas df, or list of arrays/dfs
A list of Numpy arrays or Pandas Dataframes
align : str or dict
If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
hyperalignment. If 'SRM', alignment algorithm will be shared response
model. You can also pass a dictionary for finer control, where the 'model'
key is a string that specifies the model and the params key is a dictionary
of parameter values (default : 'hyper').
format_data : bool
Whether or not to first call the format_data function (default: True).
normalize : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
ndims : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
Returns
----------
aligned : list
An aligned list of numpy arrays
"""
# if model is None, just return data
if align is None:
return data
elif isinstance(align, dict):
if align['model'] is None:
return data
else:
if method is not None:
warnings.warn('The method argument will be deprecated. Please use align. See the API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.tools.align.html#hypertools.tools.align')
align = method
if align is True:
warnings.warn("Setting align=True will be deprecated. Please specify the \
type of alignment, i.e. align='hyper'. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.tools.align.html#hypertools.tools.align")
align = 'hyper'
# common format
if format_data:
data = formatter(data, ppca=True)
if len(data) == 1:
warnings.warn('Data in list of length 1 can not be aligned. '
'Skipping the alignment.')
if data[0].shape[1] >= data[0].shape[0]:
warnings.warn('The number of features exceeds number of samples. This can lead \
to overfitting. We recommend reducing the dimensionality to be \
less than the number of samples prior to hyperalignment.')
if (align == 'hyper') or (method == 'hyper'):
##STEP 0: STANDARDIZE SIZE AND SHAPE##
sizes_0 = [x.shape[0] for x in data]
sizes_1 = [x.shape[1] for x in data]
#find the smallest number of rows
R = min(sizes_0)
C = max(sizes_1)
m = [np.empty((R,C), dtype=np.ndarray)] * len(data)
for idx,x in enumerate(data):
y = x[0:R,:]
missing = C - y.shape[1]
add = np.zeros((y.shape[0], missing))
y = np.append(y, add, axis=1)
m[idx]=y
##STEP 1: TEMPLATE##
for x in range(0, len(m)):
if x==0:
template = np.copy(m[x])
else:
next = procrustes(m[x], template / (x + 1))
template += next
template /= len(m)
##STEP 2: NEW COMMON TEMPLATE##
#align each subj to the template from STEP 1
template2 = np.zeros(template.shape)
for x in range(0, len(m)):
next = procrustes(m[x], template)
template2 += next
template2 /= len(m)
#STEP 3 (below): ALIGN TO NEW TEMPLATE
aligned = [np.zeros(template2.shape)] * len(m)
for x in range(0, len(m)):
next = procrustes(m[x], template2)
aligned[x] = next
return aligned
elif (align == 'SRM') or (method == 'SRM'):
data = [i.T for i in data]
srm = SRM(features=np.min([i.shape[0] for i in data]))
fit = srm.fit(data)
return [i.T for i in srm.transform(data)] | Aligns a list of arrays
This function takes a list of high dimensional arrays and 'hyperaligns' them
to a 'common' space, or coordinate system following the approach outlined by
Haxby et al, 2011. Hyperalignment uses linear transformations (rotation,
reflection, translation, scaling) to register a group of arrays to a common
space. This can be useful when two or more datasets describe an identical
or similar system, but may not be in the same coordinate system. For example,
consider fMRI recordings (voxels by time) from the visual
cortex of a group of subjects watching the same movie: The brain responses
should be highly similar, but the coordinates may not be aligned.
Haxby JV, Guntupalli JS, Connolly AC, Halchenko YO, Conroy BR, Gobbini
MI, Hanke M, and Ramadge PJ (2011) A common, high-dimensional model of
the representational space in human ventral temporal cortex. Neuron 72,
404 -- 416. (used to implement hyperalignment, see https://github.com/PyMVPA/PyMVPA)
Brain Imaging Analysis Kit, http://brainiak.org. (used to implement Shared Response Model [SRM], see https://github.com/IntelPNI/brainiak)
Parameters
----------
data : numpy array, pandas df, or list of arrays/dfs
A list of Numpy arrays or Pandas Dataframes
align : str or dict
If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
hyperalignment. If 'SRM', alignment algorithm will be shared response
model. You can also pass a dictionary for finer control, where the 'model'
key is a string that specifies the model and the params key is a dictionary
of parameter values (default : 'hyper').
format_data : bool
Whether or not to first call the format_data function (default: True).
normalize : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
ndims : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
Returns
----------
aligned : list
An aligned list of numpy arrays | Below is the instruction that describes the task:
### Input:
Aligns a list of arrays
This function takes a list of high dimensional arrays and 'hyperaligns' them
to a 'common' space, or coordinate system following the approach outlined by
Haxby et al, 2011. Hyperalignment uses linear transformations (rotation,
reflection, translation, scaling) to register a group of arrays to a common
space. This can be useful when two or more datasets describe an identical
or similar system, but may not be in the same coordinate system. For example,
consider fMRI recordings (voxels by time) from the visual
cortex of a group of subjects watching the same movie: The brain responses
should be highly similar, but the coordinates may not be aligned.
Haxby JV, Guntupalli JS, Connolly AC, Halchenko YO, Conroy BR, Gobbini
MI, Hanke M, and Ramadge PJ (2011) A common, high-dimensional model of
the representational space in human ventral temporal cortex. Neuron 72,
404 -- 416. (used to implement hyperalignment, see https://github.com/PyMVPA/PyMVPA)
Brain Imaging Analysis Kit, http://brainiak.org. (used to implement Shared Response Model [SRM], see https://github.com/IntelPNI/brainiak)
Parameters
----------
data : numpy array, pandas df, or list of arrays/dfs
A list of Numpy arrays or Pandas Dataframes
align : str or dict
If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
hyperalignment. If 'SRM', alignment algorithm will be shared response
model. You can also pass a dictionary for finer control, where the 'model'
key is a string that specifies the model and the params key is a dictionary
of parameter values (default : 'hyper').
format_data : bool
Whether or not to first call the format_data function (default: True).
normalize : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
ndims : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
Returns
----------
aligned : list
An aligned list of numpy arrays
### Response:
def align(data, align='hyper', normalize=None, ndims=None, method=None,
format_data=True):
"""
Aligns a list of arrays
This function takes a list of high dimensional arrays and 'hyperaligns' them
to a 'common' space, or coordinate system following the approach outlined by
Haxby et al, 2011. Hyperalignment uses linear transformations (rotation,
reflection, translation, scaling) to register a group of arrays to a common
space. This can be useful when two or more datasets describe an identical
or similar system, but may not be in the same coordinate system. For example,
consider fMRI recordings (voxels by time) from the visual
cortex of a group of subjects watching the same movie: The brain responses
should be highly similar, but the coordinates may not be aligned.
Haxby JV, Guntupalli JS, Connolly AC, Halchenko YO, Conroy BR, Gobbini
MI, Hanke M, and Ramadge PJ (2011) A common, high-dimensional model of
the representational space in human ventral temporal cortex. Neuron 72,
404 -- 416. (used to implement hyperalignment, see https://github.com/PyMVPA/PyMVPA)
Brain Imaging Analysis Kit, http://brainiak.org. (used to implement Shared Response Model [SRM], see https://github.com/IntelPNI/brainiak)
Parameters
----------
data : numpy array, pandas df, or list of arrays/dfs
A list of Numpy arrays or Pandas Dataframes
align : str or dict
If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
hyperalignment. If 'SRM', alignment algorithm will be shared response
model. You can also pass a dictionary for finer control, where the 'model'
key is a string that specifies the model and the params key is a dictionary
of parameter values (default : 'hyper').
format_data : bool
Whether or not to first call the format_data function (default: True).
normalize : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
ndims : None
Deprecated argument. Please use new analyze function to perform
combinations of transformations
Returns
----------
aligned : list
An aligned list of numpy arrays
"""
# if model is None, just return data
if align is None:
return data
elif isinstance(align, dict):
if align['model'] is None:
return data
else:
if method is not None:
warnings.warn('The method argument will be deprecated. Please use align. See the API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.tools.align.html#hypertools.tools.align')
align = method
if align is True:
warnings.warn("Setting align=True will be deprecated. Please specify the \
type of alignment, i.e. align='hyper'. See API docs for more info: http://hypertools.readthedocs.io/en/latest/hypertools.tools.align.html#hypertools.tools.align")
align = 'hyper'
# common format
if format_data:
data = formatter(data, ppca=True)
if len(data) == 1:
warnings.warn('Data in list of length 1 can not be aligned. '
'Skipping the alignment.')
if data[0].shape[1] >= data[0].shape[0]:
warnings.warn('The number of features exceeds number of samples. This can lead \
to overfitting. We recommend reducing the dimensionality to be \
less than the number of samples prior to hyperalignment.')
if (align == 'hyper') or (method == 'hyper'):
##STEP 0: STANDARDIZE SIZE AND SHAPE##
sizes_0 = [x.shape[0] for x in data]
sizes_1 = [x.shape[1] for x in data]
#find the smallest number of rows
R = min(sizes_0)
C = max(sizes_1)
m = [np.empty((R,C), dtype=np.ndarray)] * len(data)
for idx,x in enumerate(data):
y = x[0:R,:]
missing = C - y.shape[1]
add = np.zeros((y.shape[0], missing))
y = np.append(y, add, axis=1)
m[idx]=y
##STEP 1: TEMPLATE##
for x in range(0, len(m)):
if x==0:
template = np.copy(m[x])
else:
next = procrustes(m[x], template / (x + 1))
template += next
template /= len(m)
##STEP 2: NEW COMMON TEMPLATE##
#align each subj to the template from STEP 1
template2 = np.zeros(template.shape)
for x in range(0, len(m)):
next = procrustes(m[x], template)
template2 += next
template2 /= len(m)
#STEP 3 (below): ALIGN TO NEW TEMPLATE
aligned = [np.zeros(template2.shape)] * len(m)
for x in range(0, len(m)):
next = procrustes(m[x], template2)
aligned[x] = next
return aligned
elif (align == 'SRM') or (method == 'SRM'):
data = [i.T for i in data]
srm = SRM(features=np.min([i.shape[0] for i in data]))
fit = srm.fit(data)
return [i.T for i in srm.transform(data)] |
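For context, here is a minimal usage sketch of this alignment function as exposed by hypertools, assuming the hypertools package is installed; the toy arrays and the rotation are only illustrative.

```python
import numpy as np
import hypertools as hyp

rng = np.random.RandomState(0)
base = rng.rand(100, 10)                  # 100 samples x 10 features
rotation, _ = np.linalg.qr(rng.rand(10, 10))
datasets = [base, base @ rotation]        # the same data in a rotated coordinate system

aligned = hyp.tools.align(datasets, align='hyper')
print([a.shape for a in aligned])         # both arrays now live in a common space
```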
def get_all_xml_file_paths(self, raw_data_directory: str) -> List[str]:
""" Loads all XML-files that are located in the folder.
:param raw_data_directory: Path to the raw directory, where the MUSCIMA++ dataset was extracted to
"""
raw_data_directory = os.path.join(raw_data_directory, "v1.0", "data", "cropobjects_manual")
xml_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.xml'))]
return xml_files | Loads all XML-files that are located in the folder.
:param raw_data_directory: Path to the raw directory, where the MUSCIMA++ dataset was extracted to | Below is the instruction that describes the task:
### Input:
Loads all XML-files that are located in the folder.
:param raw_data_directory: Path to the raw directory, where the MUSCIMA++ dataset was extracted to
### Response:
def get_all_xml_file_paths(self, raw_data_directory: str) -> List[str]:
""" Loads all XML-files that are located in the folder.
:param raw_data_directory: Path to the raw directory, where the MUSCIMA++ dataset was extracted to
"""
raw_data_directory = os.path.join(raw_data_directory, "v1.0", "data", "cropobjects_manual")
xml_files = [y for x in os.walk(raw_data_directory) for y in glob(os.path.join(x[0], '*.xml'))]
return xml_files |
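The directory walk above combines `os.walk` with `glob` to collect every `*.xml` file below a root. A self-contained sketch of the same pattern follows; the root path and file pattern are placeholders.

```python
import os
from glob import glob
from typing import List

def collect_files(root: str, pattern: str = '*.xml') -> List[str]:
    # os.walk yields (dirpath, dirnames, filenames); globbing each dirpath
    # against the pattern gathers matches at every depth.
    return [path
            for dirpath, _, _ in os.walk(root)
            for path in glob(os.path.join(dirpath, pattern))]

if __name__ == '__main__':
    print(collect_files('.', '*.py'))  # e.g. list every Python file under the cwd
```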
def get_objects_apk(self, filename=None, digest=None):
"""
Returns APK, DalvikVMFormat and Analysis of a specified APK.
You must specify either `filename` or `digest`.
It is possible to use both, but in this case only `digest` is used.
example::
s = Session()
digest = s.add("some.apk")
a, d, dx = s.get_objects_apk(digest=digest)
example::
s = Session()
filename = "some.apk"
digest = s.add(filename)
a, d, dx = s.get_objects_apk(filename=filename)
:param filename: the filename of the APK file, only used if digest is None
:param digest: the sha256 hash, as returned by :meth:`add` for the APK
:returns: a tuple of (APK, [DalvikVMFormat], Analysis)
"""
if not filename and not digest:
raise ValueError("Must give at least filename or digest!")
if digest is None:
digests = self.analyzed_files.get(filename)
# Negate to reduce tree
if not digests:
return None, None, None
digest = digests[0]
a = self.analyzed_apk[digest][0]
dx = self.analyzed_vms[digest]
return a, dx.vms, dx | Returns APK, DalvikVMFormat and Analysis of a specified APK.
You must specify either `filename` or `digest`.
It is possible to use both, but in this case only `digest` is used.
example::
s = Session()
digest = s.add("some.apk")
a, d, dx = s.get_objects_apk(digest=digest)
example::
s = Session()
filename = "some.apk"
digest = s.add(filename)
a, d, dx = s.get_objects_apk(filename=filename)
:param filename: the filename of the APK file, only used if digest is None
:param digest: the sha256 hash, as returned by :meth:`add` for the APK
:returns: a tuple of (APK, [DalvikVMFormat], Analysis) | Below is the instruction that describes the task:
### Input:
Returns APK, DalvikVMFormat and Analysis of a specified APK.
You must specify either `filename` or `digest`.
It is possible to use both, but in this case only `digest` is used.
example::
s = Session()
digest = s.add("some.apk")
a, d, dx = s.get_objects_apk(digest=digest)
example::
s = Session()
filename = "some.apk"
digest = s.add(filename)
a, d, dx = s.get_objects_apk(filename=filename)
:param filename: the filename of the APK file, only used if digest is None
:param digest: the sha256 hash, as returned by :meth:`add` for the APK
:returns: a tuple of (APK, [DalvikVMFormat], Analysis)
### Response:
def get_objects_apk(self, filename=None, digest=None):
"""
Returns APK, DalvikVMFormat and Analysis of a specified APK.
You must specify either `filename` or `digest`.
It is possible to use both, but in this case only `digest` is used.
example::
s = Session()
digest = s.add("some.apk")
a, d, dx = s.get_objects_apk(digest=digest)
example::
s = Session()
filename = "some.apk"
digest = s.add(filename)
a, d, dx = s.get_objects_apk(filename=filename)
:param filename: the filename of the APK file, only used if digest is None
:param digest: the sha256 hash, as returned by :meth:`add` for the APK
:returns: a tuple of (APK, [DalvikVMFormat], Analysis)
"""
if not filename and not digest:
raise ValueError("Must give at least filename or digest!")
if digest is None:
digests = self.analyzed_files.get(filename)
# Negate to reduce tree
if not digests:
return None, None, None
digest = digests[0]
a = self.analyzed_apk[digest][0]
dx = self.analyzed_vms[digest]
return a, dx.vms, dx |
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
network_id = self._send_cmd_to_wpas(obj['name'], 'ADD_NETWORK', True)
network_id = network_id.strip()
params.process_akm()
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} ssid \"{}\"'.format(network_id, params.ssid))
key_mgmt = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
key_mgmt = 'WPA-PSK'
elif params.akm[-1] in [AKM_TYPE_WPA, AKM_TYPE_WPA2]:
key_mgmt = 'WPA-EAP'
else:
key_mgmt = 'NONE'
if key_mgmt:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} key_mgmt {}'.format(
network_id,
key_mgmt))
proto = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA]:
proto = 'WPA'
elif params.akm[-1] in [AKM_TYPE_WPA2PSK, AKM_TYPE_WPA2]:
proto = 'RSN'
if proto:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} proto {}'.format(
network_id,
proto))
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} psk \"{}\"'.format(network_id, params.key))
return params | Add an AP profile for connecting to afterward. | Below is the instruction that describes the task:
### Input:
Add an AP profile for connecting to afterward.
### Response:
def add_network_profile(self, obj, params):
"""Add an AP profile for connecting to afterward."""
network_id = self._send_cmd_to_wpas(obj['name'], 'ADD_NETWORK', True)
network_id = network_id.strip()
params.process_akm()
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} ssid \"{}\"'.format(network_id, params.ssid))
key_mgmt = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
key_mgmt = 'WPA-PSK'
elif params.akm[-1] in [AKM_TYPE_WPA, AKM_TYPE_WPA2]:
key_mgmt = 'WPA-EAP'
else:
key_mgmt = 'NONE'
if key_mgmt:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} key_mgmt {}'.format(
network_id,
key_mgmt))
proto = ''
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA]:
proto = 'WPA'
elif params.akm[-1] in [AKM_TYPE_WPA2PSK, AKM_TYPE_WPA2]:
proto = 'RSN'
if proto:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} proto {}'.format(
network_id,
proto))
if params.akm[-1] in [AKM_TYPE_WPAPSK, AKM_TYPE_WPA2PSK]:
self._send_cmd_to_wpas(
obj['name'],
'SET_NETWORK {} psk \"{}\"'.format(network_id, params.key))
return params |
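The profile above is created by issuing a sequence of wpa_supplicant control-interface commands. The sketch below only assembles that command sequence for a WPA2-PSK network so the ordering is visible; it does not talk to a real wpa_supplicant socket, and the SSID/passphrase values are placeholders.

```python
def build_wpa2_psk_commands(network_id: str, ssid: str, key: str) -> list:
    # Mirrors the order used above: ssid, key_mgmt, proto, then the passphrase.
    return [
        'ADD_NETWORK',
        'SET_NETWORK {} ssid "{}"'.format(network_id, ssid),
        'SET_NETWORK {} key_mgmt {}'.format(network_id, 'WPA-PSK'),
        'SET_NETWORK {} proto {}'.format(network_id, 'RSN'),
        'SET_NETWORK {} psk "{}"'.format(network_id, key),
    ]

for cmd in build_wpa2_psk_commands('0', 'example-ssid', 'example-passphrase'):
    print(cmd)
```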
def get_namespaced_custom_object(self, group, version, namespace, plural, name, **kwargs):
"""
Returns a namespace scoped custom object
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_namespaced_custom_object(group, version, namespace, plural, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, **kwargs)
else:
(data) = self.get_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, **kwargs)
return data | Returns a namespace scoped custom object
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_namespaced_custom_object(group, version, namespace, plural, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:return: object
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Returns a namespace scoped custom object
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_namespaced_custom_object(group, version, namespace, plural, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:return: object
If the method is called asynchronously,
returns the request thread.
### Response:
def get_namespaced_custom_object(self, group, version, namespace, plural, name, **kwargs):
"""
Returns a namespace scoped custom object
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_namespaced_custom_object(group, version, namespace, plural, name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str group: the custom resource's group (required)
:param str version: the custom resource's version (required)
:param str namespace: The custom resource's namespace (required)
:param str plural: the custom resource's plural name. For TPRs this would be lowercase plural kind. (required)
:param str name: the custom object's name (required)
:return: object
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, **kwargs)
else:
(data) = self.get_namespaced_custom_object_with_http_info(group, version, namespace, plural, name, **kwargs)
return data |
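A usage sketch of this call through the official Kubernetes Python client, assuming the client is installed and a kubeconfig is available; the group, version, plural, and object name are placeholders.

```python
from kubernetes import client, config

config.load_kube_config()                      # or config.load_incluster_config()
api = client.CustomObjectsApi()

# Fetch one namespaced custom object; the return value is a plain dict.
obj = api.get_namespaced_custom_object(
    group='example.com',
    version='v1',
    namespace='default',
    plural='widgets',
    name='my-widget',
)
print(obj['metadata']['name'])
```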
async def stop(wallet_name: str) -> None:
"""
Gracefully stop an external revocation registry builder, waiting for its current tasks to complete.
The indy-sdk toolkit uses a temporary directory for tails file mustration,
and shutting down the toolkit removes the directory, crashing the external
tails file write. This method allows a graceful stop to wait for completion
of such tasks already in progress.
:wallet_name: name external revocation registry builder to check
:return: whether a task is pending.
"""
LOGGER.debug('RevRegBuilder.stop >>>')
dir_sentinel = join(RevRegBuilder.dir_tails_sentinel(wallet_name))
if isdir(dir_sentinel):
open(join(dir_sentinel, '.stop'), 'w').close() # touch
while any(isfile(join(dir_sentinel, d, '.in-progress')) for d in listdir(dir_sentinel)):
await asyncio.sleep(1)
LOGGER.debug('RevRegBuilder.stop <<<') | Gracefully stop an external revocation registry builder, waiting for its current tasks to complete.
The indy-sdk toolkit uses a temporary directory for tails file mustration,
and shutting down the toolkit removes the directory, crashing the external
tails file write. This method allows a graceful stop to wait for completion
of such tasks already in progress.
:wallet_name: name external revocation registry builder to check
:return: whether a task is pending. | Below is the instruction that describes the task:
### Input:
Gracefully stop an external revocation registry builder, waiting for its current tasks to complete.
The indy-sdk toolkit uses a temporary directory for tails file mustration,
and shutting down the toolkit removes the directory, crashing the external
tails file write. This method allows a graceful stop to wait for completion
of such tasks already in progress.
:wallet_name: name external revocation registry builder to check
:return: whether a task is pending.
### Response:
async def stop(wallet_name: str) -> None:
"""
Gracefully stop an external revocation registry builder, waiting for its current tasks to complete.
The indy-sdk toolkit uses a temporary directory for tails file mustration,
and shutting down the toolkit removes the directory, crashing the external
tails file write. This method allows a graceful stop to wait for completion
of such tasks already in progress.
:wallet_name: name external revocation registry builder to check
:return: whether a task is pending.
"""
LOGGER.debug('RevRegBuilder.stop >>>')
dir_sentinel = join(RevRegBuilder.dir_tails_sentinel(wallet_name))
if isdir(dir_sentinel):
open(join(dir_sentinel, '.stop'), 'w').close() # touch
while any(isfile(join(dir_sentinel, d, '.in-progress')) for d in listdir(dir_sentinel)):
await asyncio.sleep(1)
LOGGER.debug('RevRegBuilder.stop <<<') |
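The shutdown above coordinates through sentinel files: a `.stop` marker asks the builder to finish, and the caller waits until no `.in-progress` markers remain. A generic, self-contained sketch of that pattern follows; the directory name is a placeholder.

```python
import asyncio
import os
from os.path import isdir, isfile, join

async def request_stop(dir_sentinel: str, poll_seconds: float = 1.0) -> None:
    if not isdir(dir_sentinel):
        return
    # "Touch" the stop marker so workers know to wrap up and exit.
    open(join(dir_sentinel, '.stop'), 'w').close()
    # Wait until every worker subdirectory has cleared its .in-progress marker.
    while any(isfile(join(dir_sentinel, d, '.in-progress'))
              for d in os.listdir(dir_sentinel)):
        await asyncio.sleep(poll_seconds)

# asyncio.run(request_stop('/tmp/example-sentinels'))  # example invocation
```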
def extract_object_properties(self, o, limit_size=False):
"""Extracts all properties from an object (eg. f_locals, f_globals,
user dict, instance ...) and returns them as an array of variables.
"""
try:
prop_str = repr(o)[:512]
except:
prop_str = "Error while extracting value"
_logger.e_debug("extract_object_properties(%s)", prop_str)
var_list = []
if isinstance(o, dict):
a_var_name = None
a_var_value = None
for a_var_name in o:
a_var_value = o[a_var_name]
children_count = self.object_properties_count(a_var_value)
v_name, v_value, v_type = self.extract_name_value_type(a_var_name,
a_var_value,
limit_size=limit_size)
a_var_info = {
'id': id(a_var_value),
'name': v_name,
'type': "%s%s" % (v_type, " [%s]" % children_count if children_count else '',),
'value': v_value,
'children_count': children_count,
}
var_list.append(a_var_info)
elif type(o) in (list, tuple, set,):
MAX_CHILDREN_TO_RETURN = 256
MAX_CHILDREN_MESSAGE = "Truncated by ikpdb (don't hot change me !)."
a_var_name = None
a_var_value = None
do_truncate = len(o) > MAX_CHILDREN_TO_RETURN
for idx, a_var_value in enumerate(o):
children_count = self.object_properties_count(a_var_value)
v_name, v_value, v_type = self.extract_name_value_type(idx,
a_var_value,
limit_size=limit_size)
var_list.append({
'id': id(a_var_value),
'name': v_name,
'type': "%s%s" % (v_type, " [%s]" % children_count if children_count else '',),
'value': v_value,
'children_count': children_count,
})
if do_truncate and idx==MAX_CHILDREN_TO_RETURN-1:
var_list.append({
'id': None,
'name': str(MAX_CHILDREN_TO_RETURN),
'type': '',
'value': MAX_CHILDREN_MESSAGE,
'children_count': 0,
})
break
else:
a_var_name = None
a_var_value = None
if hasattr(o, '__dict__'):
for a_var_name, a_var_value in o.__dict__.items():
if (not a_var_name.startswith('__')
and not type(a_var_value) in (types.ModuleType,
types.MethodType,
types.FunctionType,)):
children_count = self.object_properties_count(a_var_value)
v_name, v_value, v_type = self.extract_name_value_type(a_var_name,
a_var_value,
limit_size=limit_size)
var_list.append({
'id': id(a_var_value),
'name': v_name,
'type': "%s%s" % (v_type, " [%s]" % children_count if children_count else '',),
'value': v_value,
'children_count': children_count,
})
return var_list | Extracts all properties from an object (eg. f_locals, f_globals,
user dict, instance ...) and returns them as an array of variables. | Below is the instruction that describes the task:
### Input:
Extracts all properties from an object (eg. f_locals, f_globals,
user dict, instance ...) and returns them as an array of variables.
### Response:
def extract_object_properties(self, o, limit_size=False):
"""Extracts all properties from an object (eg. f_locals, f_globals,
user dict, instance ...) and returns them as an array of variables.
"""
try:
prop_str = repr(o)[:512]
except:
prop_str = "Error while extracting value"
_logger.e_debug("extract_object_properties(%s)", prop_str)
var_list = []
if isinstance(o, dict):
a_var_name = None
a_var_value = None
for a_var_name in o:
a_var_value = o[a_var_name]
children_count = self.object_properties_count(a_var_value)
v_name, v_value, v_type = self.extract_name_value_type(a_var_name,
a_var_value,
limit_size=limit_size)
a_var_info = {
'id': id(a_var_value),
'name': v_name,
'type': "%s%s" % (v_type, " [%s]" % children_count if children_count else '',),
'value': v_value,
'children_count': children_count,
}
var_list.append(a_var_info)
elif type(o) in (list, tuple, set,):
MAX_CHILDREN_TO_RETURN = 256
MAX_CHILDREN_MESSAGE = "Truncated by ikpdb (don't hot change me !)."
a_var_name = None
a_var_value = None
do_truncate = len(o) > MAX_CHILDREN_TO_RETURN
for idx, a_var_value in enumerate(o):
children_count = self.object_properties_count(a_var_value)
v_name, v_value, v_type = self.extract_name_value_type(idx,
a_var_value,
limit_size=limit_size)
var_list.append({
'id': id(a_var_value),
'name': v_name,
'type': "%s%s" % (v_type, " [%s]" % children_count if children_count else '',),
'value': v_value,
'children_count': children_count,
})
if do_truncate and idx==MAX_CHILDREN_TO_RETURN-1:
var_list.append({
'id': None,
'name': str(MAX_CHILDREN_TO_RETURN),
'type': '',
'value': MAX_CHILDREN_MESSAGE,
'children_count': 0,
})
break
else:
a_var_name = None
a_var_value = None
if hasattr(o, '__dict__'):
for a_var_name, a_var_value in o.__dict__.items():
if (not a_var_name.startswith('__')
and not type(a_var_value) in (types.ModuleType,
types.MethodType,
types.FunctionType,)):
children_count = self.object_properties_count(a_var_value)
v_name, v_value, v_type = self.extract_name_value_type(a_var_name,
a_var_value,
limit_size=limit_size)
var_list.append({
'id': id(a_var_value),
'name': v_name,
'type': "%s%s" % (v_type, " [%s]" % children_count if children_count else '',),
'value': v_value,
'children_count': children_count,
})
return var_list |
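A condensed, self-contained sketch of the same introspection idea, producing one record per child of a dict, sequence, or plain object; the record fields are simplified relative to the debugger's real payload.

```python
def summarize_children(obj, max_items=256):
    """Return a list of {name, type, value} records for obj's children."""
    def record(name, value):
        return {'name': str(name),
                'type': type(value).__name__,
                'value': repr(value)[:64]}

    if isinstance(obj, dict):
        items = list(obj.items())
    elif isinstance(obj, (list, tuple, set)):
        items = list(enumerate(obj))
    elif hasattr(obj, '__dict__'):
        items = [(k, v) for k, v in vars(obj).items() if not k.startswith('__')]
    else:
        items = []
    return [record(name, value) for name, value in items[:max_items]]

class Point:
    def __init__(self, x, y):
        self.x, self.y = x, y

print(summarize_children({'a': 1, 'b': [1, 2]}))
print(summarize_children(Point(3, 4)))
```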
def _handle_response(response):
"""
Handle the response and possible failures.
:param Response response: Response data.
:return: A dictionary or a string with response data.
:raises: NeverBounceAPIError if the API call fails.
"""
if not response.ok:
raise NeverBounceAPIError(response)
if response.headers.get('Content-Type') == 'application/octet-stream':
return response.iter_lines()
try:
resp = response.json()
except ValueError:
raise InvalidResponseError('Failed to handle the response content-type {}.'.format(
response.headers.get('Content-Type'))
)
if 'success' in resp and not resp['success']:
if 'msg' in resp and resp['msg'] == 'Authentication failed':
raise AccessTokenExpired
else:
raise NeverBounceAPIError(response)
return resp | Handle the response and possible failures.
:param Response response: Response data.
:return: A dictionary or a string with response data.
:raises: NeverBounceAPIError if the API call fails. | Below is the instruction that describes the task:
### Input:
Handle the response and possible failures.
:param Response response: Response data.
:return: A dictionary or a string with response data.
:raises: NeverBounceAPIError if the API call fails.
### Response:
def _handle_response(response):
"""
Handle the response and possible failures.
:param Response response: Response data.
:return: A dictionary or a string with response data.
:raises: NeverBounceAPIError if the API call fails.
"""
if not response.ok:
raise NeverBounceAPIError(response)
if response.headers.get('Content-Type') == 'application/octet-stream':
return response.iter_lines()
try:
resp = response.json()
except ValueError:
raise InvalidResponseError('Failed to handle the response content-type {}.'.format(
response.headers.get('Content-Type'))
)
if 'success' in resp and not resp['success']:
if 'msg' in resp and resp['msg'] == 'Authentication failed':
raise AccessTokenExpired
else:
raise NeverBounceAPIError(response)
return resp |
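A self-contained sketch of the same response-handling flow using `requests`; the example URL and the `APIError` class are placeholders, not the library's own exceptions.

```python
import requests

class APIError(Exception):
    """Placeholder for a library-specific error type."""

def handle_response(response: requests.Response):
    if not response.ok:
        raise APIError('HTTP {}: {}'.format(response.status_code, response.reason))
    # Streamed octet-stream bodies are returned line by line instead of parsed.
    if response.headers.get('Content-Type') == 'application/octet-stream':
        return response.iter_lines()
    try:
        payload = response.json()
    except ValueError:
        raise APIError('Unexpected content-type: {}'.format(
            response.headers.get('Content-Type')))
    if isinstance(payload, dict) and payload.get('success') is False:
        raise APIError(payload.get('msg', 'request failed'))
    return payload

# Example: print(handle_response(requests.get('https://httpbin.org/json')))
```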
def exclude_reference_link(self, exclude):
"""Sets `sysparm_exclude_reference_link` to a bool value
:param exclude: bool
"""
if not isinstance(exclude, bool):
raise InvalidUsage('exclude_reference_link must be of type bool')
self._sysparms['sysparm_exclude_reference_link'] = exclude | Sets `sysparm_exclude_reference_link` to a bool value
:param exclude: bool | Below is the instruction that describes the task:
### Input:
Sets `sysparm_exclude_reference_link` to a bool value
:param exclude: bool
### Response:
def exclude_reference_link(self, exclude):
"""Sets `sysparm_exclude_reference_link` to a bool value
:param exclude: bool
"""
if not isinstance(exclude, bool):
raise InvalidUsage('exclude_reference_link must be of type bool')
self._sysparms['sysparm_exclude_reference_link'] = exclude |
def _walk_issuers(self, path, paths, failed_paths):
"""
Recursively looks through the list of known certificates for the issuer
of the certificate specified, stopping once the certificate in question
is one contained within the CA certs list
:param path:
A ValidationPath object representing the current traversal of
possible paths
:param paths:
A list of completed ValidationPath objects. This is mutated as
results are found.
:param failed_paths:
A list of certvalidator.path.ValidationPath objects that failed due
to no matching issuer before reaching a certificate from the CA
certs list
"""
if path.first.signature in self._ca_lookup:
paths.append(path)
return
new_branches = 0
for issuer in self._possible_issuers(path.first):
try:
self._walk_issuers(path.copy().prepend(issuer), paths, failed_paths)
new_branches += 1
except (DuplicateCertificateError):
pass
if not new_branches:
failed_paths.append(path) | Recursively looks through the list of known certificates for the issuer
of the certificate specified, stopping once the certificate in question
is one contained within the CA certs list
:param path:
A ValidationPath object representing the current traversal of
possible paths
:param paths:
A list of completed ValidationPath objects. This is mutated as
results are found.
:param failed_paths:
A list of certvalidator.path.ValidationPath objects that failed due
to no matching issuer before reaching a certificate from the CA
certs list | Below is the instruction that describes the task:
### Input:
Recursively looks through the list of known certificates for the issuer
of the certificate specified, stopping once the certificate in question
is one contained within the CA certs list
:param path:
A ValidationPath object representing the current traversal of
possible paths
:param paths:
A list of completed ValidationPath objects. This is mutated as
results are found.
:param failed_paths:
A list of certvalidator.path.ValidationPath objects that failed due
to no matching issuer before reaching a certificate from the CA
certs list
### Response:
def _walk_issuers(self, path, paths, failed_paths):
"""
Recursively looks through the list of known certificates for the issuer
of the certificate specified, stopping once the certificate in question
is one contained within the CA certs list
:param path:
A ValidationPath object representing the current traversal of
possible paths
:param paths:
A list of completed ValidationPath objects. This is mutated as
results are found.
:param failed_paths:
A list of certvalidator.path.ValidationPath objects that failed due
to no matching issuer before reaching a certificate from the CA
certs list
"""
if path.first.signature in self._ca_lookup:
paths.append(path)
return
new_branches = 0
for issuer in self._possible_issuers(path.first):
try:
self._walk_issuers(path.copy().prepend(issuer), paths, failed_paths)
new_branches += 1
except (DuplicateCertificateError):
pass
if not new_branches:
failed_paths.append(path) |
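The recursion above is a depth-first search that prepends candidate issuers until a trusted root is reached. A self-contained sketch of that idea over a toy issuer map; the certificate names and the trust set are made up.

```python
def walk_issuers(chain, issuers_of, trusted, complete, failed):
    """Depth-first search: extend `chain` toward a trusted root."""
    head = chain[0]
    if head in trusted:
        complete.append(list(chain))
        return
    branches = 0
    for issuer in issuers_of.get(head, []):
        if issuer in chain:          # avoid loops / duplicate certificates
            continue
        walk_issuers([issuer] + chain, issuers_of, trusted, complete, failed)
        branches += 1
    if not branches:
        failed.append(list(chain))

issuers_of = {'leaf': ['intermediate'], 'intermediate': ['root']}
complete, failed = [], []
walk_issuers(['leaf'], issuers_of, {'root'}, complete, failed)
print(complete)   # [['root', 'intermediate', 'leaf']]
print(failed)     # []
```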
def GetAncestorControl(self, condition: Callable) -> 'Control':
"""
Get an ancestor control that matches the condition.
condition: Callable, function (control: Control, depth: int)->bool,
depth starts with -1 and decreases when search goes up.
Return `Control` subclass or None.
"""
ancestor = self
depth = 0
while True:
ancestor = ancestor.GetParentControl()
depth -= 1
if ancestor:
if condition(ancestor, depth):
return ancestor
else:
break | Get an ancestor control that matches the condition.
condition: Callable, function (control: Control, depth: int)->bool,
depth starts with -1 and decreases when search goes up.
Return `Control` subclass or None. | Below is the instruction that describes the task:
### Input:
Get an ancestor control that matches the condition.
condition: Callable, function (control: Control, depth: int)->bool,
depth starts with -1 and decreases when search goes up.
Return `Control` subclass or None.
### Response:
def GetAncestorControl(self, condition: Callable) -> 'Control':
"""
Get an ancestor control that matches the condition.
condition: Callable, function (control: Control, depth: int)->bool,
depth starts with -1 and decreases when search goes up.
Return `Control` subclass or None.
"""
ancestor = self
depth = 0
while True:
ancestor = ancestor.GetParentControl()
depth -= 1
if ancestor:
if condition(ancestor, depth):
return ancestor
else:
break |
def draw(self, current_time, frame_time):
"""
Calls the superclass ``draw()`` methods and checks ``HEADLESS_FRAMES``/``HEADLESS_DURATION``
"""
super().draw(current_time, frame_time)
if self.headless_duration and current_time >= self.headless_duration:
self.close() | Calls the superclass ``draw()`` methods and checks ``HEADLESS_FRAMES``/``HEADLESS_DURATION`` | Below is the instruction that describes the task:
### Input:
Calls the superclass ``draw()`` methods and checks ``HEADLESS_FRAMES``/``HEADLESS_DURATION``
### Response:
def draw(self, current_time, frame_time):
"""
Calls the superclass ``draw()`` methods and checks ``HEADLESS_FRAMES``/``HEADLESS_DURATION``
"""
super().draw(current_time, frame_time)
if self.headless_duration and current_time >= self.headless_duration:
self.close() |
def getChildren(self, name=None, ns=None):
"""
Get a list of children by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain a prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@return: The list of matching children.
@rtype: [L{Element},...]
"""
if ns is None:
if name is None:
return self.children
prefix, name = splitPrefix(name)
if prefix is not None:
ns = self.resolvePrefix(prefix)
return [c for c in self.children if c.match(name, ns)] | Get a list of children by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain a prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@return: The list of matching children.
@rtype: [L{Element},...] | Below is the instruction that describes the task:
### Input:
Get a list of children by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain a prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@return: The list of matching children.
@rtype: [L{Element},...]
### Response:
def getChildren(self, name=None, ns=None):
"""
Get a list of children by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain a prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@return: The list of matching children.
@rtype: [L{Element},...]
"""
if ns is None:
if name is None:
return self.children
prefix, name = splitPrefix(name)
if prefix is not None:
ns = self.resolvePrefix(prefix)
return [c for c in self.children if c.match(name, ns)] |
def sweep(self):
'''
This method runs in a separate thread. So long as
`self.sweep_flag` is set, it expires keys according to
the process explained in the docstring for the `TimedDict`
class. The thread is halted by calling `self.stop_sweep()`,
which sets the `self.sweep_flag` to `False`.
'''
while self.sweep_flag:
current_time = time.time()
expire_keys = set()
keys_checked = 0.
items = list(self.time_dict.items())
for key, expire_time in items:
if random.random() > self.sample_probability:
continue
keys_checked += 1
if current_time >= expire_time:
expire_keys.add(key)
logging.debug(
'marking key for deletion: {key}'.
format(key=str(key)))
for key in expire_keys:
self.expire_key(key)
expired_keys_ratio = (
len(expire_keys) / keys_checked
if keys_checked > 0 else 0.)
if expired_keys_ratio < self.expired_keys_ratio:
time.sleep(1. / self.checks_per_second) | This method runs in a separate thread. So long as
`self.sweep_flag` is set, it expires keys according to
the process explained in the docstring for the `TimedDict`
class. The thread is halted by calling `self.stop_sweep()`,
which sets the `self.sweep_flag` to `False`. | Below is the instruction that describes the task:
### Input:
This method runs in a separate thread. So long as
`self.sweep_flag` is set, it expires keys according to
the process explained in the docstring for the `TimedDict`
class. The thread is halted by calling `self.stop_sweep()`,
which sets the `self.sweep_flag` to `False`.
### Response:
def sweep(self):
'''
This method runs in a separate thread. So long as
`self.sweep_flag` is set, it expires keys according to
the process explained in the docstring for the `TimedDict`
class. The thread is halted by calling `self.stop_sweep()`,
which sets the `self.sweep_flag` to `False`.
'''
while self.sweep_flag:
current_time = time.time()
expire_keys = set()
keys_checked = 0.
items = list(self.time_dict.items())
for key, expire_time in items:
if random.random() > self.sample_probability:
continue
keys_checked += 1
if current_time >= expire_time:
expire_keys.add(key)
logging.debug(
'marking key for deletion: {key}'.
format(key=str(key)))
for key in expire_keys:
self.expire_key(key)
expired_keys_ratio = (
len(expire_keys) / keys_checked
if keys_checked > 0 else 0.)
if expired_keys_ratio < self.expired_keys_ratio:
time.sleep(1. / self.checks_per_second) |
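A compact, single-pass sketch of the sampled-expiry idea used by the sweep loop above, without the background thread; names and the sampling rate are illustrative.

```python
import random
import time

def sweep_once(expiry_by_key: dict, sample_probability: float = 0.25) -> set:
    """Expire a random sample of keys whose deadline has passed.

    Returns the set of keys removed in this pass.
    """
    now = time.time()
    expired = {key for key, deadline in list(expiry_by_key.items())
               if random.random() <= sample_probability and now >= deadline}
    for key in expired:
        del expiry_by_key[key]
    return expired

deadlines = {'a': time.time() - 5, 'b': time.time() - 5, 'c': time.time() + 60}
print(sweep_once(deadlines, sample_probability=1.0))  # expires 'a' and 'b'
print(deadlines)                                      # only 'c' remains
```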
def add_rup_params(self, rupture):
"""
Add .REQUIRES_RUPTURE_PARAMETERS to the rupture
"""
for param in self.REQUIRES_RUPTURE_PARAMETERS:
if param == 'mag':
value = rupture.mag
elif param == 'strike':
value = rupture.surface.get_strike()
elif param == 'dip':
value = rupture.surface.get_dip()
elif param == 'rake':
value = rupture.rake
elif param == 'ztor':
value = rupture.surface.get_top_edge_depth()
elif param == 'hypo_lon':
value = rupture.hypocenter.longitude
elif param == 'hypo_lat':
value = rupture.hypocenter.latitude
elif param == 'hypo_depth':
value = rupture.hypocenter.depth
elif param == 'width':
value = rupture.surface.get_width()
else:
raise ValueError('%s requires unknown rupture parameter %r' %
(type(self).__name__, param))
setattr(rupture, param, value) | Add .REQUIRES_RUPTURE_PARAMETERS to the rupture | Below is the instruction that describes the task:
### Input:
Add .REQUIRES_RUPTURE_PARAMETERS to the rupture
### Response:
def add_rup_params(self, rupture):
"""
Add .REQUIRES_RUPTURE_PARAMETERS to the rupture
"""
for param in self.REQUIRES_RUPTURE_PARAMETERS:
if param == 'mag':
value = rupture.mag
elif param == 'strike':
value = rupture.surface.get_strike()
elif param == 'dip':
value = rupture.surface.get_dip()
elif param == 'rake':
value = rupture.rake
elif param == 'ztor':
value = rupture.surface.get_top_edge_depth()
elif param == 'hypo_lon':
value = rupture.hypocenter.longitude
elif param == 'hypo_lat':
value = rupture.hypocenter.latitude
elif param == 'hypo_depth':
value = rupture.hypocenter.depth
elif param == 'width':
value = rupture.surface.get_width()
else:
raise ValueError('%s requires unknown rupture parameter %r' %
(type(self).__name__, param))
setattr(rupture, param, value) |
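The if/elif ladder above is a fixed mapping from parameter names to rupture attributes; the same lookup can be expressed as a dict of accessor functions. A self-contained sketch with a dummy rupture object; the attribute names follow the ladder above, and the numeric values are made up.

```python
from types import SimpleNamespace

PARAM_GETTERS = {
    'mag':        lambda rup: rup.mag,
    'rake':       lambda rup: rup.rake,
    'strike':     lambda rup: rup.surface.get_strike(),
    'dip':        lambda rup: rup.surface.get_dip(),
    'hypo_depth': lambda rup: rup.hypocenter.depth,
}

def add_rup_params(rupture, required_params):
    for param in required_params:
        getter = PARAM_GETTERS.get(param)
        if getter is None:
            raise ValueError('unknown rupture parameter %r' % param)
        setattr(rupture, param, getter(rupture))

surface = SimpleNamespace(get_strike=lambda: 30.0, get_dip=lambda: 60.0)
rupture = SimpleNamespace(mag=6.5, rake=90.0, surface=surface,
                          hypocenter=SimpleNamespace(depth=10.0))
add_rup_params(rupture, ['mag', 'strike', 'hypo_depth'])
print(rupture.strike, rupture.hypo_depth)   # 30.0 10.0
```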
def clean_time(sltime, in_format='%Y-%m-%dT%H:%M:%S%z', out_format='%Y-%m-%d %H:%M'):
"""Easy way to format time strings
:param string sltime: A softlayer formatted time string
:param string in_format: Datetime format for strptime
:param string out_format: Datetime format for strftime
"""
try:
clean = datetime.datetime.strptime(sltime, in_format)
return clean.strftime(out_format)
# The %z option only exists with py3.6+
except ValueError:
return sltime | Easy way to format time strings
:param string sltime: A softlayer formatted time string
:param string in_format: Datetime format for strptime
:param string out_format: Datetime format for strftime | Below is the instruction that describes the task:
### Input:
Easy way to format time strings
:param string sltime: A softlayer formatted time string
:param string in_format: Datetime format for strptime
:param string out_format: Datetime format for strftime
### Response:
def clean_time(sltime, in_format='%Y-%m-%dT%H:%M:%S%z', out_format='%Y-%m-%d %H:%M'):
"""Easy way to format time strings
:param string sltime: A softlayer formatted time string
:param string in_format: Datetime format for strptime
:param string out_format: Datetime format for strftime
"""
try:
clean = datetime.datetime.strptime(sltime, in_format)
return clean.strftime(out_format)
# The %z option only exists with py3.6+
except ValueError:
return sltime |
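A runnable usage sketch of the same time-reformatting helper; the timestamp string is a made-up SoftLayer-style value, and unparseable input falls back to the raw string exactly as in the original.

```python
import datetime

def clean_time(sltime, in_format='%Y-%m-%dT%H:%M:%S%z', out_format='%Y-%m-%d %H:%M'):
    try:
        return datetime.datetime.strptime(sltime, in_format).strftime(out_format)
    except ValueError:
        return sltime  # leave unparseable strings untouched

print(clean_time('2019-05-15T13:45:00-0600'))   # -> '2019-05-15 13:45'
print(clean_time('not-a-timestamp'))            # -> 'not-a-timestamp'
```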
def get_small_file(context, path):
"""
Basic in-memory caching module fetcher. This generates one roundtrip for
every previously unseen file, so it is only a temporary solution.
:param context:
Context we should direct FileService requests to. For now (and probably
forever) this is just the top-level Mitogen connection manager process.
:param path:
Path to fetch from FileService, must previously have been registered by
a privileged context using the `register` command.
:returns:
Bytestring file data.
"""
pool = mitogen.service.get_or_create_pool(router=context.router)
service = pool.get_service(u'mitogen.service.PushFileService')
return service.get(path) | Basic in-memory caching module fetcher. This generates one roundtrip for
every previously unseen file, so it is only a temporary solution.
:param context:
Context we should direct FileService requests to. For now (and probably
forever) this is just the top-level Mitogen connection manager process.
:param path:
Path to fetch from FileService, must previously have been registered by
a privileged context using the `register` command.
:returns:
Bytestring file data. | Below is the instruction that describes the task:
### Input:
Basic in-memory caching module fetcher. This generates one roundtrip for
every previously unseen file, so it is only a temporary solution.
:param context:
Context we should direct FileService requests to. For now (and probably
forever) this is just the top-level Mitogen connection manager process.
:param path:
Path to fetch from FileService, must previously have been registered by
a privileged context using the `register` command.
:returns:
Bytestring file data.
### Response:
def get_small_file(context, path):
"""
Basic in-memory caching module fetcher. This generates one roundtrip for
every previously unseen file, so it is only a temporary solution.
:param context:
Context we should direct FileService requests to. For now (and probably
forever) this is just the top-level Mitogen connection manager process.
:param path:
Path to fetch from FileService, must previously have been registered by
a privileged context using the `register` command.
:returns:
Bytestring file data.
"""
pool = mitogen.service.get_or_create_pool(router=context.router)
service = pool.get_service(u'mitogen.service.PushFileService')
return service.get(path) |
def _comparison_generator(old_list, new_list, compare_fn):
"""
:type old_list: sorted list
:type new_list: sorted list
:type compare_fn: function
:param compare_fn:
takes two arguments, A and B
returns 0 if equal
returns -1 if A is less than B
else returns 1
"""
old_index = 0
new_index = 0
while old_index < len(old_list) and new_index < len(new_list):
old_value = old_list[old_index]
new_value = new_list[new_index]
status = compare_fn(old_value, new_value)
if status == 0:
yield (old_value, new_value,)
old_index += 1
new_index += 1
elif status == -1:
yield (old_value, None,)
old_index += 1
else:
yield (None, new_value,)
new_index += 1
# Catch leftovers. Only one of these while statements should run.
while old_index < len(old_list):
yield (old_list[old_index], None,)
old_index += 1
while new_index < len(new_list):
yield (None, new_list[new_index],)
new_index += 1 | :type old_list: sorted list
:type new_list: sorted list
:type compare_fn: function
:param compare_fn:
takes two arguments, A and B
returns 0 if equal
returns -1 if A is less than B
else returns 1 | Below is the instruction that describes the task:
### Input:
:type old_list: sorted list
:type new_list: sorted list
:type compare_fn: function
:param compare_fn:
takes two arguments, A and B
returns 0 if equal
returns -1 if A is less than B
else returns 1
### Response:
def _comparison_generator(old_list, new_list, compare_fn):
"""
:type old_list: sorted list
:type new_list: sorted list
:type compare_fn: function
:param compare_fn:
takes two arguments, A and B
returns 0 if equal
returns -1 if A is less than B
else returns 1
"""
old_index = 0
new_index = 0
while old_index < len(old_list) and new_index < len(new_list):
old_value = old_list[old_index]
new_value = new_list[new_index]
status = compare_fn(old_value, new_value)
if status == 0:
yield (old_value, new_value,)
old_index += 1
new_index += 1
elif status == -1:
yield (old_value, None,)
old_index += 1
else:
yield (None, new_value,)
new_index += 1
# Catch leftovers. Only one of these while statements should run.
while old_index < len(old_list):
yield (old_list[old_index], None,)
old_index += 1
while new_index < len(new_list):
yield (None, new_list[new_index],)
new_index += 1 |
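A standalone restatement of the same sorted-merge walk with a quick driver, so the pairing behaviour is visible; the function and variable names here are illustrative.

```python
def compare_ints(a, b):
    return (a > b) - (a < b)   # 0 equal, -1 if a < b, 1 if a > b

def paired(old_list, new_list, compare_fn):
    """Walk two sorted lists in lockstep, yielding (old, new) pairs."""
    i = j = 0
    while i < len(old_list) and j < len(new_list):
        status = compare_fn(old_list[i], new_list[j])
        if status == 0:
            yield old_list[i], new_list[j]; i += 1; j += 1
        elif status < 0:
            yield old_list[i], None; i += 1
        else:
            yield None, new_list[j]; j += 1
    for k in range(i, len(old_list)):
        yield old_list[k], None
    for k in range(j, len(new_list)):
        yield None, new_list[k]

print(list(paired([1, 2, 4, 7], [2, 3, 4, 8], compare_ints)))
# [(1, None), (2, 2), (None, 3), (4, 4), (7, None), (None, 8)]
```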
def get_pretty_xml(self, encoding='unicode'):
"""Returns:
str : Current state of the wrapper as a pretty printed XML string.
"""
return d1_common.xml.reformat_to_pretty_xml(
xml.etree.ElementTree.tostring(self._root_el, encoding)
) | Returns:
str : Current state of the wrapper as a pretty printed XML string. | Below is the instruction that describes the task:
### Input:
Returns:
str : Current state of the wrapper as a pretty printed XML string.
### Response:
def get_pretty_xml(self, encoding='unicode'):
"""Returns:
str : Current state of the wrapper as a pretty printed XML string.
"""
return d1_common.xml.reformat_to_pretty_xml(
xml.etree.ElementTree.tostring(self._root_el, encoding)
) |
def checkPassword(self, password):
"""
Check the given plaintext password against the response in this
credentials object.
@type password: C{str}
@param password: The known correct password associated with
C{self.username}.
@return: A C{bool}, C{True} if this credentials object agrees with the
given password, C{False} otherwise.
"""
if isinstance(password, unicode):
password = password.encode('utf-8')
correctResponse = _calcResponse(self.challenge, self.nonce, password)
return correctResponse == self.response | Check the given plaintext password against the response in this
credentials object.
@type password: C{str}
@param password: The known correct password associated with
C{self.username}.
@return: A C{bool}, C{True} if this credentials object agrees with the
given password, C{False} otherwise. | Below is the instruction that describes the task:
### Input:
Check the given plaintext password against the response in this
credentials object.
@type password: C{str}
@param password: The known correct password associated with
C{self.username}.
@return: A C{bool}, C{True} if this credentials object agrees with the
given password, C{False} otherwise.
### Response:
def checkPassword(self, password):
"""
Check the given plaintext password against the response in this
credentials object.
@type password: C{str}
@param password: The known correct password associated with
C{self.username}.
@return: A C{bool}, C{True} if this credentials object agrees with the
given password, C{False} otherwise.
"""
if isinstance(password, unicode):
password = password.encode('utf-8')
correctResponse = _calcResponse(self.challenge, self.nonce, password)
return correctResponse == self.response |
def project(self, n):
"""
Convenience method for projection of a tensor into a
vector. Returns the tensor dotted into a unit vector
along the input n.
Args:
n (3x1 array-like): direction to project onto
Returns (float):
scalar value corresponding to the projection of
the tensor into the vector
"""
n = get_uvec(n)
return self.einsum_sequence([n] * self.rank) | Convenience method for projection of a tensor into a
vector. Returns the tensor dotted into a unit vector
along the input n.
Args:
n (3x1 array-like): direction to project onto
Returns (float):
scalar value corresponding to the projection of
the tensor into the vector | Below is the instruction that describes the task:
### Input:
Convenience method for projection of a tensor into a
vector. Returns the tensor dotted into a unit vector
along the input n.
Args:
n (3x1 array-like): direction to project onto
Returns (float):
scalar value corresponding to the projection of
the tensor into the vector
### Response:
def project(self, n):
"""
Convenience method for projection of a tensor into a
vector. Returns the tensor dotted into a unit vector
along the input n.
Args:
n (3x1 array-like): direction to project onto
Returns (float):
scalar value corresponding to the projection of
the tensor into the vector
"""
n = get_uvec(n)
return self.einsum_sequence([n] * self.rank) |
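A worked numeric sketch of the projection for a rank-2 tensor: contracting the tensor with a unit vector along each index reduces it to the scalar n·T·n. This uses plain NumPy, independent of the pymatgen Tensor class, and the tensor values are arbitrary.

```python
import numpy as np

T = np.array([[2.0, 0.5, 0.0],      # an arbitrary symmetric rank-2 tensor
              [0.5, 1.0, 0.0],
              [0.0, 0.0, 3.0]])
n = np.array([1.0, 1.0, 0.0])
n = n / np.linalg.norm(n)            # what get_uvec does: normalize the direction

# Contract the tensor with the unit vector once per rank: n_i T_ij n_j.
projection = np.einsum('i,ij,j->', n, T, n)
print(projection)                    # 2.0 for this tensor and direction
```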
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) | Get lock configuration
Args:
device_label (str): device label of lock | Below is the instruction that describes the task:
### Input:
Get lock configuration
Args:
device_label (str): device label of lock
### Response:
def get_lock_config(self, device_label):
""" Get lock configuration
Args:
device_label (str): device label of lock
"""
response = None
try:
response = requests.get(
urls.lockconfig(self._giid, device_label),
headers={
'Accept': 'application/json, text/javascript, */*; q=0.01',
'Cookie': 'vid={}'.format(self._vid)})
except requests.exceptions.RequestException as ex:
raise RequestError(ex)
_validate_response(response)
return json.loads(response.text) |
def transform_relations(rdf, relationmap):
"""Transform YSO-style concept relations into SKOS equivalents."""
affected_types = (SKOS.Concept, SKOS.Collection,
SKOSEXT.DeprecatedConcept)
props = set()
for t in affected_types:
for conc in rdf.subjects(RDF.type, t):
for p, o in rdf.predicate_objects(conc):
if isinstance(o, (URIRef, BNode)) \
and (p in relationmap or not in_general_ns(p)):
props.add(p)
for p in props:
if mapping_match(p, relationmap):
newval = mapping_get(p, relationmap)
logging.debug("transform relation %s -> %s", p, str(newval))
replace_predicate(
rdf, p, newval, subjecttypes=affected_types)
else:
logging.info("Don't know what to do with relation %s", p) | Transform YSO-style concept relations into SKOS equivalents. | Below is the the instruction that describes the task:
### Input:
Transform YSO-style concept relations into SKOS equivalents.
### Response:
def transform_relations(rdf, relationmap):
"""Transform YSO-style concept relations into SKOS equivalents."""
affected_types = (SKOS.Concept, SKOS.Collection,
SKOSEXT.DeprecatedConcept)
props = set()
for t in affected_types:
for conc in rdf.subjects(RDF.type, t):
for p, o in rdf.predicate_objects(conc):
if isinstance(o, (URIRef, BNode)) \
and (p in relationmap or not in_general_ns(p)):
props.add(p)
for p in props:
if mapping_match(p, relationmap):
newval = mapping_get(p, relationmap)
logging.debug("transform relation %s -> %s", p, str(newval))
replace_predicate(
rdf, p, newval, subjecttypes=affected_types)
else:
logging.info("Don't know what to do with relation %s", p) |
def initialize_options(self):
"""Set command option defaults."""
setuptools.command.build_py.build_py.initialize_options(self)
self.meteor = 'meteor'
self.meteor_debug = False
self.build_lib = None
self.package_dir = None
self.meteor_builds = []
self.no_prune_npm = None
self.inplace = True | Set command option defaults. | Below is the the instruction that describes the task:
### Input:
Set command option defaults.
### Response:
def initialize_options(self):
"""Set command option defaults."""
setuptools.command.build_py.build_py.initialize_options(self)
self.meteor = 'meteor'
self.meteor_debug = False
self.build_lib = None
self.package_dir = None
self.meteor_builds = []
self.no_prune_npm = None
self.inplace = True |
def ezplot(f,xlim,ylim=None,ax = None,vectorized=True,N=None,contour = False,args=None,kwargs=None,dry_run=False,show=None,include_endpoints=False):
'''
Plot polynomial approximation.
:param vectorized: `f` can handle an array of inputs
'''
kwargs = kwargs or {}
args = args or []
d = 1 if ylim is None else 2
if ax is None:
fig = plt.figure()
show = show if show is not None else True
ax = fig.gca() if (d==1 or contour) else fig.gca(projection='3d')
if d == 1:
if N is None:
N = 200
if include_endpoints:
X = np.linspace(xlim[0],xlim[1],N)
else:
L = xlim[1] - xlim[0]
X = np.linspace(xlim[0] + L / N, xlim[1] - L / N, N)
X = X.reshape((-1, 1))
if vectorized:
Z = f(X)
else:
Z = np.array([f(x) for x in X])
if not dry_run:
C = ax.plot(X, Z,*args,**kwargs)
elif d == 2:
if N is None:
N = 30
T = np.zeros((N, 2))
if include_endpoints:
T[:,0]=np.linspace(xlim[0],xlim[1],N)
T[:,1]=np.linspace(ylim[0],ylim[1],N)
else:
L = xlim[1] - xlim[0]
T[:, 0] = np.linspace(xlim[0] + L / N, xlim[1] - L / N, N)
L = ylim[1] - ylim[0]
T[:, 1] = np.linspace(ylim[0] + L / N, ylim[1] - L / N, N)
X, Y = meshgrid(T[:, 0], T[:, 1])
Z = grid_evaluation(X, Y, f,vectorized=vectorized)
if contour:
if not dry_run:
# C = ax.contour(X,Y,Z,levels = np.array([0.001,1000]),colors=['red','blue'])
N=200
colors=np.concatenate((np.ones((N,1)),np.tile(np.linspace(1,0,N).reshape(-1,1),(1,2))),axis=1)
colors = [ [1,1,1],*colors,[1,0,0]]
print('max',np.max(Z[:]))
C = ax.contourf(X,Y,Z,levels = [-np.inf,*np.linspace(-20,20,N),np.inf],colors=colors)
else:
if not dry_run:
C = ax.plot_surface(X, Y, Z)#cmap=cm.coolwarm,
# C = ax.plot_wireframe(X, Y, Z, rcount=30,ccount=30)
if show:
plt.show()
return ax,C,Z | Plot polynomial approximation.
:param vectorized: `f` can handle an array of inputs | Below is the the instruction that describes the task:
### Input:
Plot polynomial approximation.
:param vectorized: `f` can handle an array of inputs
### Response:
def ezplot(f,xlim,ylim=None,ax = None,vectorized=True,N=None,contour = False,args=None,kwargs=None,dry_run=False,show=None,include_endpoints=False):
'''
Plot polynomial approximation.
:param vectorized: `f` can handle an array of inputs
'''
kwargs = kwargs or {}
args = args or []
d = 1 if ylim is None else 2
if ax is None:
fig = plt.figure()
show = show if show is not None else True
ax = fig.gca() if (d==1 or contour) else fig.gca(projection='3d')
if d == 1:
if N is None:
N = 200
if include_endpoints:
X = np.linspace(xlim[0],xlim[1],N)
else:
L = xlim[1] - xlim[0]
X = np.linspace(xlim[0] + L / N, xlim[1] - L / N, N)
X = X.reshape((-1, 1))
if vectorized:
Z = f(X)
else:
Z = np.array([f(x) for x in X])
if not dry_run:
C = ax.plot(X, Z,*args,**kwargs)
elif d == 2:
if N is None:
N = 30
T = np.zeros((N, 2))
if include_endpoints:
T[:,0]=np.linspace(xlim[0],xlim[1],N)
T[:,1]=np.linspace(ylim[0],ylim[1],N)
else:
L = xlim[1] - xlim[0]
T[:, 0] = np.linspace(xlim[0] + L / N, xlim[1] - L / N, N)
L = ylim[1] - ylim[0]
T[:, 1] = np.linspace(ylim[0] + L / N, ylim[1] - L / N, N)
X, Y = meshgrid(T[:, 0], T[:, 1])
Z = grid_evaluation(X, Y, f,vectorized=vectorized)
if contour:
if not dry_run:
# C = ax.contour(X,Y,Z,levels = np.array([0.001,1000]),colors=['red','blue'])
N=200
colors=np.concatenate((np.ones((N,1)),np.tile(np.linspace(1,0,N).reshape(-1,1),(1,2))),axis=1)
colors = [ [1,1,1],*colors,[1,0,0]]
print('max',np.max(Z[:]))
C = ax.contourf(X,Y,Z,levels = [-np.inf,*np.linspace(-20,20,N),np.inf],colors=colors)
else:
if not dry_run:
C = ax.plot_surface(X, Y, Z)#cmap=cm.coolwarm,
# C = ax.plot_wireframe(X, Y, Z, rcount=30,ccount=30)
if show:
plt.show()
return ax,C,Z |
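A hypothetical call for the 1-D branch, assuming the ezplot function above is importable and matplotlib is installed; note that `f` receives an (N, 1) array, so the lambda indexes the first column:

import numpy as np

# plot sin(3x) on (0, 2*pi); returns the axis, the line artist and the sampled values
ax, C, Z = ezplot(lambda x: np.sin(3 * x[:, 0]), xlim=(0, 2 * np.pi), N=400, show=False)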
def __snake_case(self, descriptor):
"""
Utility method to convert camelcase to snake
:param descriptor: The dictionary to convert
"""
newdict = {}
for i, (k, v) in enumerate(descriptor.items()):
newkey = ""
for j, c in enumerate(k):
if c.isupper():
if len(newkey) != 0:
newkey += '_'
newkey += c.lower()
else:
newkey += c
newdict[newkey] = v
return newdict | Utility method to convert camelcase to snake
:param descriptor: The dictionary to convert | Below is the the instruction that describes the task:
### Input:
Utility method to convert camelcase to snake
:param descriptor: The dictionary to convert
### Response:
def __snake_case(self, descriptor):
"""
Utility method to convert camelcase to snake
:param descriptor: The dictionary to convert
"""
newdict = {}
for i, (k, v) in enumerate(descriptor.items()):
newkey = ""
for j, c in enumerate(k):
if c.isupper():
if len(newkey) != 0:
newkey += '_'
newkey += c.lower()
else:
newkey += c
newdict[newkey] = v
return newdict |
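The same key-conversion logic as a standalone function, with an invented descriptor, to make the expected output concrete:

def snake_case_keys(descriptor):
    newdict = {}
    for k, v in descriptor.items():
        newkey = ""
        for c in k:
            if c.isupper():
                if newkey:              # avoid a leading underscore
                    newkey += "_"
                newkey += c.lower()
            else:
                newkey += c
        newdict[newkey] = v
    return newdict

print(snake_case_keys({"deviceId": 7, "FirmwareVersion": "1.2"}))
# {'device_id': 7, 'firmware_version': '1.2'}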
def _set_vcpus_ram(self, vcpus, ram):
"""
Set the number of vCPU cores and amount of RAM for the GNS3 VM.
:param vcpus: number of vCPU cores
:param ram: amount of RAM
"""
# memory must be a multiple of 4 (VMware requirement)
if ram % 4 != 0:
raise GNS3VMError("Allocated memory {} for the GNS3 VM must be a multiple of 4".format(ram))
available_vcpus = psutil.cpu_count(logical=False)
if vcpus > available_vcpus:
raise GNS3VMError("You have allocated too many vCPUs for the GNS3 VM! (max available is {} vCPUs)".format(available_vcpus))
try:
pairs = VMware.parse_vmware_file(self._vmx_path)
pairs["numvcpus"] = str(vcpus)
pairs["memsize"] = str(ram)
VMware.write_vmx_file(self._vmx_path, pairs)
log.info("GNS3 VM vCPU count set to {} and RAM amount set to {}".format(vcpus, ram))
except OSError as e:
raise GNS3VMError('Could not read/write VMware VMX file "{}": {}'.format(self._vmx_path, e)) | Set the number of vCPU cores and amount of RAM for the GNS3 VM.
:param vcpus: number of vCPU cores
:param ram: amount of RAM | Below is the the instruction that describes the task:
### Input:
Set the number of vCPU cores and amount of RAM for the GNS3 VM.
:param vcpus: number of vCPU cores
:param ram: amount of RAM
### Response:
def _set_vcpus_ram(self, vcpus, ram):
"""
Set the number of vCPU cores and amount of RAM for the GNS3 VM.
:param vcpus: number of vCPU cores
:param ram: amount of RAM
"""
# memory must be a multiple of 4 (VMware requirement)
if ram % 4 != 0:
raise GNS3VMError("Allocated memory {} for the GNS3 VM must be a multiple of 4".format(ram))
available_vcpus = psutil.cpu_count(logical=False)
if vcpus > available_vcpus:
raise GNS3VMError("You have allocated too many vCPUs for the GNS3 VM! (max available is {} vCPUs)".format(available_vcpus))
try:
pairs = VMware.parse_vmware_file(self._vmx_path)
pairs["numvcpus"] = str(vcpus)
pairs["memsize"] = str(ram)
VMware.write_vmx_file(self._vmx_path, pairs)
log.info("GNS3 VM vCPU count set to {} and RAM amount set to {}".format(vcpus, ram))
except OSError as e:
raise GNS3VMError('Could not read/write VMware VMX file "{}": {}'.format(self._vmx_path, e)) |
def altshuler_debyetemp(v, v0, gamma0, gamma_inf, beta, theta0):
"""
calculate Debye temperature for Altshuler equation
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param gamma_inf: Gruneisen parameter at infinite pressure
:param beta: volume dependence of Gruneisen parameter
:param theta0: Debye temperature at 1 bar in K
:return: Debye temperature in K
"""
x = v / v0
if isuncertainties([v, v0, gamma0, gamma_inf, beta, theta0]):
theta = theta0 * np.power(x, -1. * gamma_inf) *\
unp.exp((gamma0 - gamma_inf) / beta * (1. - np.power(x, beta)))
else:
theta = theta0 * np.power(x, -1. * gamma_inf) *\
np.exp((gamma0 - gamma_inf) / beta * (1. - np.power(x, beta)))
return theta | calculate Debye temperature for Altshuler equation
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param gamma_inf: Gruneisen parameter at infinite pressure
:param beta: volume dependence of Gruneisen parameter
:param theta0: Debye temperature at 1 bar in K
:return: Debye temperature in K | Below is the the instruction that describes the task:
### Input:
calculate Debye temperature for Altshuler equation
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param gamma_inf: Gruneisen parameter at infinite pressure
:param beta: volume dependence of Gruneisen parameter
:param theta0: Debye temperature at 1 bar in K
:return: Debye temperature in K
### Response:
def altshuler_debyetemp(v, v0, gamma0, gamma_inf, beta, theta0):
"""
calculate Debye temperature for Altshuler equation
:param v: unit-cell volume in A^3
:param v0: unit-cell volume in A^3 at 1 bar
:param gamma0: Gruneisen parameter at 1 bar
:param gamma_inf: Gruneisen parameter at infinite pressure
:param beta: volume dependence of Gruneisen parameter
:param theta0: Debye temperature at 1 bar in K
:return: Debye temperature in K
"""
x = v / v0
if isuncertainties([v, v0, gamma0, gamma_inf, beta, theta0]):
theta = theta0 * np.power(x, -1. * gamma_inf) *\
unp.exp((gamma0 - gamma_inf) / beta * (1. - np.power(x, beta)))
else:
theta = theta0 * np.power(x, -1. * gamma_inf) *\
np.exp((gamma0 - gamma_inf) / beta * (1. - np.power(x, beta)))
return theta |
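A quick numeric check of the expression theta = theta0 * x**(-gamma_inf) * exp(((gamma0 - gamma_inf) / beta) * (1 - x**beta)); the parameter values below are made up for illustration, not fitted constants:

import numpy as np

v, v0 = 60.0, 74.7          # unit-cell volumes in A^3, hypothetical compression
gamma0, gamma_inf, beta = 1.5, 0.9, 2.0
theta0 = 470.0              # Debye temperature at 1 bar, K

x = v / v0
theta = theta0 * x**(-gamma_inf) * np.exp((gamma0 - gamma_inf) / beta * (1.0 - x**beta))
print(theta)                # Debye temperature at the compressed volume, in K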
def reload(self, result=None):
"""
Reloads the resource.
"""
if result is None:
result = self._client._get(
self.__class__.base_url(
self.sys['space'].id,
self.sys['id'],
environment_id=self._environment_id
)
)
self._update_from_resource(result)
return self | Reloads the resource. | Below is the the instruction that describes the task:
### Input:
Reloads the resource.
### Response:
def reload(self, result=None):
"""
Reloads the resource.
"""
if result is None:
result = self._client._get(
self.__class__.base_url(
self.sys['space'].id,
self.sys['id'],
environment_id=self._environment_id
)
)
self._update_from_resource(result)
return self |
def change_options(self, **kwargs):
'''
Change one of the track's options in the viewconf
'''
new_options = json.loads(json.dumps(self.viewconf['options']))
new_options = {**new_options, **kwargs}
return self.change_attributes(options=new_options) | Change one of the track's options in the viewconf | Below is the the instruction that describes the task:
### Input:
Change one of the track's options in the viewconf
### Response:
def change_options(self, **kwargs):
'''
Change one of the track's options in the viewconf
'''
new_options = json.loads(json.dumps(self.viewconf['options']))
new_options = {**new_options, **kwargs}
return self.change_attributes(options=new_options) |
def load(cls, path, fmt=None, backend=None):
r"""Load a graph from a file.
Edge weights are retrieved as an edge attribute named "weight".
Signals are retrieved from node attributes,
and stored in the :attr:`signals` dictionary under the attribute name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
path : string
Path to the file from which to load the graph.
fmt : {'graphml', 'gml', 'gexf', None}, optional
Format in which the graph is saved.
Guessed from the filename extension if None.
backend : {'networkx', 'graph-tool', None}, optional
Library used to load the graph. Automatically chosen if None.
Returns
-------
graph : :class:`Graph`
The loaded graph.
See Also
--------
save : save a graph to a file
from_networkx : load with NetworkX then import in the PyGSP
from_graphtool : load with graph-tool then import in the PyGSP
Notes
-----
A lossless round-trip is only guaranteed if the graph (and its signals)
is saved and loaded with the same backend.
Loading from other formats is possible by loading in NetworkX or
graph-tool, and importing to the PyGSP.
The proposed formats are however tested for faithful round-trips.
Examples
--------
>>> graph = graphs.Logo()
>>> graph.save('logo.graphml')
>>> graph = graphs.Graph.load('logo.graphml')
>>> import os
>>> os.remove('logo.graphml')
"""
if fmt is None:
fmt = os.path.splitext(path)[1][1:]
if fmt not in ['graphml', 'gml', 'gexf']:
raise ValueError('Unsupported format {}.'.format(fmt))
def load_networkx(path, fmt):
nx = _import_networkx()
load = getattr(nx, 'read_' + fmt)
graph = load(path)
return cls.from_networkx(graph)
def load_graphtool(path, fmt):
gt = _import_graphtool()
graph = gt.load_graph(path, fmt=fmt)
return cls.from_graphtool(graph)
if backend == 'networkx':
return load_networkx(path, fmt)
elif backend == 'graph-tool':
return load_graphtool(path, fmt)
elif backend is None:
try:
return load_networkx(path, fmt)
except ImportError:
try:
return load_graphtool(path, fmt)
except ImportError:
raise ImportError('Cannot import networkx nor graph-tool.')
else:
raise ValueError('Unknown backend {}.'.format(backend)) | r"""Load a graph from a file.
Edge weights are retrieved as an edge attribute named "weight".
Signals are retrieved from node attributes,
and stored in the :attr:`signals` dictionary under the attribute name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
path : string
Path to the file from which to load the graph.
fmt : {'graphml', 'gml', 'gexf', None}, optional
Format in which the graph is saved.
Guessed from the filename extension if None.
backend : {'networkx', 'graph-tool', None}, optional
Library used to load the graph. Automatically chosen if None.
Returns
-------
graph : :class:`Graph`
The loaded graph.
See Also
--------
save : save a graph to a file
from_networkx : load with NetworkX then import in the PyGSP
from_graphtool : load with graph-tool then import in the PyGSP
Notes
-----
A lossless round-trip is only guaranteed if the graph (and its signals)
is saved and loaded with the same backend.
Loading from other formats is possible by loading in NetworkX or
graph-tool, and importing to the PyGSP.
The proposed formats are however tested for faithful round-trips.
Examples
--------
>>> graph = graphs.Logo()
>>> graph.save('logo.graphml')
>>> graph = graphs.Graph.load('logo.graphml')
>>> import os
>>> os.remove('logo.graphml') | Below is the the instruction that describes the task:
### Input:
r"""Load a graph from a file.
Edge weights are retrieved as an edge attribute named "weight".
Signals are retrieved from node attributes,
and stored in the :attr:`signals` dictionary under the attribute name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
path : string
Path to the file from which to load the graph.
fmt : {'graphml', 'gml', 'gexf', None}, optional
Format in which the graph is saved.
Guessed from the filename extension if None.
backend : {'networkx', 'graph-tool', None}, optional
Library used to load the graph. Automatically chosen if None.
Returns
-------
graph : :class:`Graph`
The loaded graph.
See Also
--------
save : save a graph to a file
from_networkx : load with NetworkX then import in the PyGSP
from_graphtool : load with graph-tool then import in the PyGSP
Notes
-----
A lossless round-trip is only guaranteed if the graph (and its signals)
is saved and loaded with the same backend.
Loading from other formats is possible by loading in NetworkX or
graph-tool, and importing to the PyGSP.
The proposed formats are however tested for faithful round-trips.
Examples
--------
>>> graph = graphs.Logo()
>>> graph.save('logo.graphml')
>>> graph = graphs.Graph.load('logo.graphml')
>>> import os
>>> os.remove('logo.graphml')
### Response:
def load(cls, path, fmt=None, backend=None):
r"""Load a graph from a file.
Edge weights are retrieved as an edge attribute named "weight".
Signals are retrieved from node attributes,
and stored in the :attr:`signals` dictionary under the attribute name.
`N`-dimensional signals that were broken during export are joined.
Parameters
----------
path : string
Path to the file from which to load the graph.
fmt : {'graphml', 'gml', 'gexf', None}, optional
Format in which the graph is saved.
Guessed from the filename extension if None.
backend : {'networkx', 'graph-tool', None}, optional
Library used to load the graph. Automatically chosen if None.
Returns
-------
graph : :class:`Graph`
The loaded graph.
See Also
--------
save : save a graph to a file
from_networkx : load with NetworkX then import in the PyGSP
from_graphtool : load with graph-tool then import in the PyGSP
Notes
-----
A lossless round-trip is only guaranteed if the graph (and its signals)
is saved and loaded with the same backend.
Loading from other formats is possible by loading in NetworkX or
graph-tool, and importing to the PyGSP.
The proposed formats are however tested for faithful round-trips.
Examples
--------
>>> graph = graphs.Logo()
>>> graph.save('logo.graphml')
>>> graph = graphs.Graph.load('logo.graphml')
>>> import os
>>> os.remove('logo.graphml')
"""
if fmt is None:
fmt = os.path.splitext(path)[1][1:]
if fmt not in ['graphml', 'gml', 'gexf']:
raise ValueError('Unsupported format {}.'.format(fmt))
def load_networkx(path, fmt):
nx = _import_networkx()
load = getattr(nx, 'read_' + fmt)
graph = load(path)
return cls.from_networkx(graph)
def load_graphtool(path, fmt):
gt = _import_graphtool()
graph = gt.load_graph(path, fmt=fmt)
return cls.from_graphtool(graph)
if backend == 'networkx':
return load_networkx(path, fmt)
elif backend == 'graph-tool':
return load_graphtool(path, fmt)
elif backend is None:
try:
return load_networkx(path, fmt)
except ImportError:
try:
return load_graphtool(path, fmt)
except ImportError:
raise ImportError('Cannot import networkx nor graph-tool.')
else:
raise ValueError('Unknown backend {}.'.format(backend)) |
def switch_led_on(self, ids):
""" Switches on the LED of the motors with the specified ids. """
self._set_LED(dict(zip(ids, itertools.repeat(True)))) | Switches on the LED of the motors with the specified ids. | Below is the the instruction that describes the task:
### Input:
Switches on the LED of the motors with the specified ids.
### Response:
def switch_led_on(self, ids):
""" Switches on the LED of the motors with the specified ids. """
self._set_LED(dict(zip(ids, itertools.repeat(True)))) |
def is_article(self, response, url):
"""
Tests if the given response is an article by calling and checking
the heuristics set in config.cfg and sitelist.json
:param obj response: The response of the site.
:param str url: The base_url (needed to get the site-specific config
from the JSON-file)
:return bool: true if the heuristics match the site as an article
"""
site = self.__sites_object[url]
heuristics = self.__get_enabled_heuristics(url)
self.log.info("Checking site: %s", response.url)
statement = self.__get_condition(url)
self.log.debug("Condition (original): %s", statement)
for heuristic, condition in heuristics.items():
heuristic_func = getattr(self, heuristic)
result = heuristic_func(response, site)
check = self.__evaluate_result(result, condition)
statement = re.sub(r"\b%s\b" % heuristic, str(check), statement)
self.log.debug("Checking heuristic (%s)"
" result (%s) on condition (%s): %s",
heuristic, result, condition, check)
self.log.debug("Condition (evaluated): %s", statement)
is_article = eval(statement)
self.log.debug("Article accepted: %s", is_article)
return is_article | Tests if the given response is an article by calling and checking
the heuristics set in config.cfg and sitelist.json
:param obj response: The response of the site.
:param str url: The base_url (needed to get the site-specific config
from the JSON-file)
:return bool: true if the heuristics match the site as an article | Below is the the instruction that describes the task:
### Input:
Tests if the given response is an article by calling and checking
the heuristics set in config.cfg and sitelist.json
:param obj response: The response of the site.
:param str url: The base_url (needed to get the site-specific config
from the JSON-file)
:return bool: true if the heuristics match the site as an article
### Response:
def is_article(self, response, url):
"""
Tests if the given response is an article by calling and checking
the heuristics set in config.cfg and sitelist.json
:param obj response: The response of the site.
:param str url: The base_url (needed to get the site-specific config
from the JSON-file)
:return bool: true if the heuristics match the site as an article
"""
site = self.__sites_object[url]
heuristics = self.__get_enabled_heuristics(url)
self.log.info("Checking site: %s", response.url)
statement = self.__get_condition(url)
self.log.debug("Condition (original): %s", statement)
for heuristic, condition in heuristics.items():
heuristic_func = getattr(self, heuristic)
result = heuristic_func(response, site)
check = self.__evaluate_result(result, condition)
statement = re.sub(r"\b%s\b" % heuristic, str(check), statement)
self.log.debug("Checking heuristic (%s)"
" result (%s) on condition (%s): %s",
heuristic, result, condition, check)
self.log.debug("Condition (evaluated): %s", statement)
is_article = eval(statement)
self.log.debug("Article accepted: %s", is_article)
return is_article |
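A self-contained illustration of the condition-resolution step above: heuristic names in the boolean expression are replaced with their results (using word boundaries, so one name cannot clobber another that contains it) and the final string is evaluated. The heuristic names and results here are invented:

import re

condition = "og_type and (linked_headlines or self_linked_headlines)"
results = {"og_type": True, "linked_headlines": False, "self_linked_headlines": True}

statement = condition
for name, passed in results.items():
    statement = re.sub(r"\b%s\b" % name, str(passed), statement)

print(statement)        # "True and (False or True)"
print(eval(statement))  # True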
def seek(self, offset, whence=SEEK_SET):
"""
Change the stream position to the given byte offset.
Args:
offset: Offset is interpreted relative to the position indicated by
whence.
whence: The default value for whence is SEEK_SET.
Values for whence are:
SEEK_SET or 0 – start of the stream (the default);
offset should be zero or positive
SEEK_CUR or 1 – current stream position;
offset may be negative
SEEK_END or 2 – end of the stream;
offset is usually negative
Returns:
int: The new absolute position.
"""
if not self._seekable:
raise UnsupportedOperation('seek')
# Only read mode is seekable
with self._seek_lock:
# Set seek using raw method and
# sync buffered seek with raw seek
self.raw.seek(offset, whence)
self._seek = seek = self.raw._seek
# Preload starting from current seek
self._preload_range()
return seek | Change the stream position to the given byte offset.
Args:
offset: Offset is interpreted relative to the position indicated by
whence.
whence: The default value for whence is SEEK_SET.
Values for whence are:
SEEK_SET or 0 – start of the stream (the default);
offset should be zero or positive
SEEK_CUR or 1 – current stream position;
offset may be negative
SEEK_END or 2 – end of the stream;
offset is usually negative
Returns:
int: The new absolute position. | Below is the the instruction that describes the task:
### Input:
Change the stream position to the given byte offset.
Args:
offset: Offset is interpreted relative to the position indicated by
whence.
whence: The default value for whence is SEEK_SET.
Values for whence are:
SEEK_SET or 0 – start of the stream (the default);
offset should be zero or positive
SEEK_CUR or 1 – current stream position;
offset may be negative
SEEK_END or 2 – end of the stream;
offset is usually negative
Returns:
int: The new absolute position.
### Response:
def seek(self, offset, whence=SEEK_SET):
"""
Change the stream position to the given byte offset.
Args:
offset: Offset is interpreted relative to the position indicated by
whence.
whence: The default value for whence is SEEK_SET.
Values for whence are:
SEEK_SET or 0 – start of the stream (the default);
offset should be zero or positive
SEEK_CUR or 1 – current stream position;
offset may be negative
SEEK_END or 2 – end of the stream;
offset is usually negative
Returns:
int: The new absolute position.
"""
if not self._seekable:
raise UnsupportedOperation('seek')
# Only read mode is seekable
with self._seek_lock:
# Set seek using raw method and
# sync buffered seek with raw seek
self.raw.seek(offset, whence)
self._seek = seek = self.raw._seek
# Preload starting from current seek
self._preload_range()
return seek |
def cmd(self, fun, arg=None, pub_data=None, kwarg=None, print_event=True, full_return=False):
'''
Execute a function
.. code-block:: python
>>> wheel.cmd('key.finger', ['jerry'])
{'minions': {'jerry': '5d:f6:79:43:5e:d4:42:3f:57:b8:45:a8:7e:a4:6e:ca'}}
'''
return super(WheelClient, self).cmd(fun,
arg,
pub_data,
kwarg,
print_event,
full_return) | Execute a function
.. code-block:: python
>>> wheel.cmd('key.finger', ['jerry'])
{'minions': {'jerry': '5d:f6:79:43:5e:d4:42:3f:57:b8:45:a8:7e:a4:6e:ca'}} | Below is the the instruction that describes the task:
### Input:
Execute a function
.. code-block:: python
>>> wheel.cmd('key.finger', ['jerry'])
{'minions': {'jerry': '5d:f6:79:43:5e:d4:42:3f:57:b8:45:a8:7e:a4:6e:ca'}}
### Response:
def cmd(self, fun, arg=None, pub_data=None, kwarg=None, print_event=True, full_return=False):
'''
Execute a function
.. code-block:: python
>>> wheel.cmd('key.finger', ['jerry'])
{'minions': {'jerry': '5d:f6:79:43:5e:d4:42:3f:57:b8:45:a8:7e:a4:6e:ca'}}
'''
return super(WheelClient, self).cmd(fun,
arg,
pub_data,
kwarg,
print_event,
full_return) |
def read(self):
"""
This module is lazy-loaded by default. You can read all internal
structure by calling this method.
"""
stack = [self[0].child]
while stack:
current = stack.pop()
if current.right:
stack.append(current.right)
if current.left:
stack.append(current.left)
self[0].seek(0) | This module is lazy-loaded by default. You can read all internal
structure by calling this method. | Below is the the instruction that describes the task:
### Input:
This module is lazy-loaded by default. You can read all internal
structure by calling this method.
### Response:
def read(self):
"""
This module is lazy-loaded by default. You can read all internal
structure by calling this method.
"""
stack = [self[0].child]
while stack:
current = stack.pop()
if current.right:
stack.append(current.right)
if current.left:
stack.append(current.left)
self[0].seek(0) |
def make_diag_scale(loc=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None,
dtype=None):
"""Creates a LinearOperator representing a diagonal matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal
matrix. When `None` no diagonal term is added to the LinearOperator.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix. When `scale_identity_multiplier =
scale_diag = scale_tril = None` then `scale += IdentityMatrix`. Otherwise
no scaled-identity-matrix is added to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness.
assert_positive: Python `bool` indicating whether LinearOperator should be
checked for being positive definite.
name: Python `str` name given to ops managed by this object.
dtype: TF `DType` to prefer when converting args to `Tensor`s. Else, we fall
back to a compatible dtype across all of `loc`, `scale_diag`, and
`scale_identity_multiplier`.
Returns:
    `LinearOperator` representing a diagonal matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return with_dependencies([
assert_util.assert_positive(
x, message="diagonal part must be positive"),
], x)
return with_dependencies([
assert_util.assert_none_equal(
x, tf.zeros([], x.dtype), message="diagonal part must be non-zero")
], x)
with tf.name_scope(name or "make_diag_scale"):
if dtype is None:
dtype = dtype_util.common_dtype(
[loc, scale_diag, scale_identity_multiplier],
preferred_dtype=tf.float32)
loc = _convert_to_tensor(loc, name="loc", dtype=dtype)
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag", dtype=dtype)
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier",
dtype=dtype)
if scale_diag is not None:
if scale_identity_multiplier is not None:
scale_diag += scale_identity_multiplier[..., tf.newaxis]
return tf.linalg.LinearOperatorDiag(
diag=_maybe_attach_assertion(scale_diag),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=assert_positive)
if loc is None and shape_hint is None:
raise ValueError("Cannot infer `event_shape` unless `loc` or "
"`shape_hint` is specified.")
num_rows = shape_hint
del shape_hint
if num_rows is None:
num_rows = tf.compat.dimension_value(loc.shape[-1])
if num_rows is None:
num_rows = tf.shape(input=loc)[-1]
if scale_identity_multiplier is None:
return tf.linalg.LinearOperatorIdentity(
num_rows=num_rows,
dtype=dtype,
is_self_adjoint=True,
is_positive_definite=True,
assert_proper_shapes=validate_args)
return tf.linalg.LinearOperatorScaledIdentity(
num_rows=num_rows,
multiplier=_maybe_attach_assertion(scale_identity_multiplier),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=assert_positive,
assert_proper_shapes=validate_args) | Creates a LinearOperator representing a diagonal matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal
matrix. When `None` no diagonal term is added to the LinearOperator.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix. When `scale_identity_multiplier =
scale_diag = scale_tril = None` then `scale += IdentityMatrix`. Otherwise
no scaled-identity-matrix is added to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness.
assert_positive: Python `bool` indicating whether LinearOperator should be
checked for being positive definite.
name: Python `str` name given to ops managed by this object.
dtype: TF `DType` to prefer when converting args to `Tensor`s. Else, we fall
back to a compatible dtype across all of `loc`, `scale_diag`, and
`scale_identity_multiplier`.
Returns:
    `LinearOperator` representing a diagonal matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None. | Below is the the instruction that describes the task:
### Input:
Creates a LinearOperator representing a diagonal matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal
matrix. When `None` no diagonal term is added to the LinearOperator.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix. When `scale_identity_multiplier =
scale_diag = scale_tril = None` then `scale += IdentityMatrix`. Otherwise
no scaled-identity-matrix is added to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness.
assert_positive: Python `bool` indicating whether LinearOperator should be
checked for being positive definite.
name: Python `str` name given to ops managed by this object.
dtype: TF `DType` to prefer when converting args to `Tensor`s. Else, we fall
back to a compatible dtype across all of `loc`, `scale_diag`, and
`scale_identity_multiplier`.
Returns:
`LinearOperator` representing a lower triangular matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
### Response:
def make_diag_scale(loc=None,
scale_diag=None,
scale_identity_multiplier=None,
shape_hint=None,
validate_args=False,
assert_positive=False,
name=None,
dtype=None):
"""Creates a LinearOperator representing a diagonal matrix.
Args:
loc: Floating-point `Tensor`. This is used for inferring shape in the case
where only `scale_identity_multiplier` is set.
scale_diag: Floating-point `Tensor` representing the diagonal matrix.
`scale_diag` has shape [N1, N2, ... k], which represents a k x k diagonal
matrix. When `None` no diagonal term is added to the LinearOperator.
scale_identity_multiplier: floating point rank 0 `Tensor` representing a
scaling done to the identity matrix. When `scale_identity_multiplier =
scale_diag = scale_tril = None` then `scale += IdentityMatrix`. Otherwise
no scaled-identity-matrix is added to `scale`.
shape_hint: scalar integer `Tensor` representing a hint at the dimension of
the identity matrix when only `scale_identity_multiplier` is set.
validate_args: Python `bool` indicating whether arguments should be checked
for correctness.
assert_positive: Python `bool` indicating whether LinearOperator should be
checked for being positive definite.
name: Python `str` name given to ops managed by this object.
dtype: TF `DType` to prefer when converting args to `Tensor`s. Else, we fall
back to a compatible dtype across all of `loc`, `scale_diag`, and
`scale_identity_multiplier`.
Returns:
    `LinearOperator` representing a diagonal matrix.
Raises:
ValueError: If only `scale_identity_multiplier` is set and `loc` and
`shape_hint` are both None.
"""
def _maybe_attach_assertion(x):
if not validate_args:
return x
if assert_positive:
return with_dependencies([
assert_util.assert_positive(
x, message="diagonal part must be positive"),
], x)
return with_dependencies([
assert_util.assert_none_equal(
x, tf.zeros([], x.dtype), message="diagonal part must be non-zero")
], x)
with tf.name_scope(name or "make_diag_scale"):
if dtype is None:
dtype = dtype_util.common_dtype(
[loc, scale_diag, scale_identity_multiplier],
preferred_dtype=tf.float32)
loc = _convert_to_tensor(loc, name="loc", dtype=dtype)
scale_diag = _convert_to_tensor(scale_diag, name="scale_diag", dtype=dtype)
scale_identity_multiplier = _convert_to_tensor(
scale_identity_multiplier,
name="scale_identity_multiplier",
dtype=dtype)
if scale_diag is not None:
if scale_identity_multiplier is not None:
scale_diag += scale_identity_multiplier[..., tf.newaxis]
return tf.linalg.LinearOperatorDiag(
diag=_maybe_attach_assertion(scale_diag),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=assert_positive)
if loc is None and shape_hint is None:
raise ValueError("Cannot infer `event_shape` unless `loc` or "
"`shape_hint` is specified.")
num_rows = shape_hint
del shape_hint
if num_rows is None:
num_rows = tf.compat.dimension_value(loc.shape[-1])
if num_rows is None:
num_rows = tf.shape(input=loc)[-1]
if scale_identity_multiplier is None:
return tf.linalg.LinearOperatorIdentity(
num_rows=num_rows,
dtype=dtype,
is_self_adjoint=True,
is_positive_definite=True,
assert_proper_shapes=validate_args)
return tf.linalg.LinearOperatorScaledIdentity(
num_rows=num_rows,
multiplier=_maybe_attach_assertion(scale_identity_multiplier),
is_non_singular=True,
is_self_adjoint=True,
is_positive_definite=assert_positive,
assert_proper_shapes=validate_args) |
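In plain numpy terms, the diagonal-plus-identity branch above collapses to a single diagonal; a small illustration with arbitrary values (not the TensorFlow LinearOperator objects the function actually returns):

import numpy as np

scale_diag = np.array([0.5, 1.0, 2.0])
scale_identity_multiplier = 0.1

combined = scale_diag + scale_identity_multiplier   # same as the broadcast add above
scale_matrix = np.diag(combined)                    # dense view of the resulting diagonal scale
print(scale_matrix)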
def decipher(self, string):
"""Decipher string using Playfair cipher according to initialised key. Punctuation and whitespace
are removed from the input. The ciphertext should be an even number of characters. If the input ciphertext is not an even number of characters, an 'X' will be appended.
Example::
plaintext = Playfair(key='zgptfoihmuwdrcnykeqaxvsbl').decipher(ciphertext)
:param string: The string to decipher.
:returns: The deciphered string.
"""
string = self.remove_punctuation(string)
if len(string) % 2 == 1:
string += 'X'
ret = ''
for c in range(0, len(string), 2):
ret += self.decipher_pair(string[c], string[c + 1])
return ret | Decipher string using Playfair cipher according to initialised key. Punctuation and whitespace
are removed from the input. The ciphertext should be an even number of characters. If the input ciphertext is not an even number of characters, an 'X' will be appended.
Example::
plaintext = Playfair(key='zgptfoihmuwdrcnykeqaxvsbl').decipher(ciphertext)
:param string: The string to decipher.
:returns: The deciphered string. | Below is the the instruction that describes the task:
### Input:
Decipher string using Playfair cipher according to initialised key. Punctuation and whitespace
are removed from the input. The ciphertext should be an even number of characters. If the input ciphertext is not an even number of characters, an 'X' will be appended.
Example::
plaintext = Playfair(key='zgptfoihmuwdrcnykeqaxvsbl').decipher(ciphertext)
:param string: The string to decipher.
:returns: The deciphered string.
### Response:
def decipher(self, string):
"""Decipher string using Playfair cipher according to initialised key. Punctuation and whitespace
are removed from the input. The ciphertext should be an even number of characters. If the input ciphertext is not an even number of characters, an 'X' will be appended.
Example::
plaintext = Playfair(key='zgptfoihmuwdrcnykeqaxvsbl').decipher(ciphertext)
:param string: The string to decipher.
:returns: The deciphered string.
"""
string = self.remove_punctuation(string)
if len(string) % 2 == 1:
string += 'X'
ret = ''
for c in range(0, len(string), 2):
ret += self.decipher_pair(string[c], string[c + 1])
return ret |
def get_xeditable_form_kwargs(self):
""" Returns a dict of keyword arguments to be sent to the xeditable form class. """
kwargs = {
'model': self.get_queryset().model,
}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
})
return kwargs | Returns a dict of keyword arguments to be sent to the xeditable form class. | Below is the the instruction that describes the task:
### Input:
Returns a dict of keyword arguments to be sent to the xeditable form class.
### Response:
def get_xeditable_form_kwargs(self):
""" Returns a dict of keyword arguments to be sent to the xeditable form class. """
kwargs = {
'model': self.get_queryset().model,
}
if self.request.method in ('POST', 'PUT'):
kwargs.update({
'data': self.request.POST,
})
return kwargs |
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value | Temporarily set a parameter value using the with statement.
Aliasing allowed. | Below is the the instruction that describes the task:
### Input:
Temporarily set a parameter value using the with statement.
Aliasing allowed.
### Response:
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value |
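Hypothetical usage on a dict-like parameter store; `Params` here is a stand-in wired up with contextlib.contextmanager, not the class the method above belongs to:

from contextlib import contextmanager

class Params(dict):
    @contextmanager
    def use(self, key, value):
        old_value = self[key]
        try:
            self[key] = value
            yield self
        finally:
            self[key] = old_value

params = Params(threshold=0.5)
with params.use("threshold", 0.9):
    print(params["threshold"])   # 0.9 inside the block
print(params["threshold"])       # restored to 0.5 afterwards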
def build(self, connection, grammar):
"""
Execute the blueprint against the database.
:param connection: The connection to use
:type connection: orator.connections.Connection
        :param grammar: The grammar to use
:type grammar: orator.query.grammars.QueryGrammar
"""
for statement in self.to_sql(connection, grammar):
connection.statement(statement) | Execute the blueprint against the database.
:param connection: The connection to use
:type connection: orator.connections.Connection
        :param grammar: The grammar to use
:type grammar: orator.query.grammars.QueryGrammar | Below is the the instruction that describes the task:
### Input:
Execute the blueprint against the database.
:param connection: The connection to use
:type connection: orator.connections.Connection
        :param grammar: The grammar to use
:type grammar: orator.query.grammars.QueryGrammar
### Response:
def build(self, connection, grammar):
"""
Execute the blueprint against the database.
:param connection: The connection to use
:type connection: orator.connections.Connection
        :param grammar: The grammar to use
:type grammar: orator.query.grammars.QueryGrammar
"""
for statement in self.to_sql(connection, grammar):
connection.statement(statement) |
def allow_create(function):
"""
Decorate the `form_valid` method in a Create/Update class to create new
values if necessary.
.. warning::
Make sure that this decorator **only** decorates the ``form_valid()``
method and **only** this one.
"""
@wraps(function)
def _wrapped_func(*args, **kwargs):
form = args[0]
# If this argument is not a form, there are a lot of chances that
# you didn't decorate the right method.
# This decorator is only to be used decorating "form_valid()"
if isinstance(form, (Form, ModelForm)):
# If the form is not valid, don't try to create new values
if not form.is_valid():
return function(*args, **kwargs)
for k, field in form.fields.items():
if getattr(field, 'create', False) \
and getattr(field, '_new_values', None):
new_values = field.create_new_values()
# update the field value
form.cleaned_data[k] = form.cleaned_data[k] | new_values
return function(*args, **kwargs)
return _wrapped_func | Decorate the `form_valid` method in a Create/Update class to create new
values if necessary.
.. warning::
Make sure that this decorator **only** decorates the ``form_valid()``
method and **only** this one. | Below is the the instruction that describes the task:
### Input:
Decorate the `form_valid` method in a Create/Update class to create new
values if necessary.
.. warning::
Make sure that this decorator **only** decorates the ``form_valid()``
method and **only** this one.
### Response:
def allow_create(function):
"""
Decorate the `form_valid` method in a Create/Update class to create new
values if necessary.
.. warning::
Make sure that this decorator **only** decorates the ``form_valid()``
method and **only** this one.
"""
@wraps(function)
def _wrapped_func(*args, **kwargs):
form = args[0]
# If this argument is not a form, there are a lot of chances that
# you didn't decorate the right method.
# This decorator is only to be used decorating "form_valid()"
if isinstance(form, (Form, ModelForm)):
# If the form is not valid, don't try to create new values
if not form.is_valid():
return function(*args, **kwargs)
for k, field in form.fields.items():
if getattr(field, 'create', False) \
and getattr(field, '_new_values', None):
new_values = field.create_new_values()
# update the field value
form.cleaned_data[k] = form.cleaned_data[k] | new_values
return function(*args, **kwargs)
return _wrapped_func |
def hmmsearch(self, output_path, input_path, unpack, seq_type, threads, cutoff, orfm):
'''
hmmsearch - Search raw reads for hits using search_hmm list
Parameters
----------
output_path : str
path to output domtblout table
input_path : str
path to input sequences to search
unpack : UnpackRawReads
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
seq_type : str
variable containing a string, either 'nucleotide' or 'aminoacid'.
Tells the pipeline whether or not to call ORFs on the sequence.
If sequence is 'nucleotide', ORFs are called. If not, no ORFs.
threads : Integer
Number of threads to use. Passed to HMMsearch command.
cutoff : str
cutoff for HMMsearch to use, either an evalue or --cut_tc, meaning
use the TC score cutoff specified within the HMM. Passed to
HMMsearch command.
orfm : OrfM
Object that builds the command chunk for calling ORFs on sequences
coming through as stdin. Outputs to stdout. Calls command_line
to construct final command line string.
Returns
-------
output_table_list : array of HMMSearchResult
Includes the name of the output domtblout table given by hmmer
Raises
------
hmmsearcher.NoInputSequencesException
Raised if there are no sequences fed into the HMM.
'''
# Define the base hmmsearch command.
logging.debug("Using %i HMMs to search" % (len(self.search_hmm)))
output_table_list = []
if len(self.search_hmm) > 1:
for hmm in self.search_hmm:
out = os.path.join(os.path.split(output_path)[0], os.path.basename(hmm).split('.')[0] + '_' + os.path.split(output_path)[1])
output_table_list.append(out)
elif len(self.search_hmm) == 1:
output_table_list.append(output_path)
else:
raise Exception("Programming error: expected 1 or more HMMs")
# Choose an input to this base command based off the file format found.
if seq_type == 'nucleotide': # If the input is nucleotide sequence
input_cmd = orfm.command_line(input_path)
elif seq_type == 'aminoacid': # If the input is amino acid sequence
input_cmd = unpack.command_line()
else:
raise Exception('Programming Error: error guessing input sequence type')
# Run the HMMsearches
if cutoff == "--cut_tc":
searcher = HmmSearcher(threads, cutoff)
else:
searcher = HmmSearcher(threads, '--domE %s' % cutoff)
searcher.hmmsearch(input_cmd, self.search_hmm, output_table_list)
hmmtables = [HMMSearchResult.import_from_hmmsearch_table(x) for x in output_table_list]
return hmmtables | hmmsearch - Search raw reads for hits using search_hmm list
Parameters
----------
output_path : str
path to output domtblout table
input_path : str
path to input sequences to search
unpack : UnpackRawReads
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
seq_type : str
variable containing a string, either 'nucleotide' or 'aminoacid'.
Tells the pipeline whether or not to call ORFs on the sequence.
If sequence is 'nucleotide', ORFs are called. If not, no ORFs.
threads : Integer
Number of threads to use. Passed to HMMsearch command.
cutoff : str
cutoff for HMMsearch to use, either an evalue or --cut_tc, meaning
use the TC score cutoff specified within the HMM. Passed to
HMMsearch command.
orfm : OrfM
Object that builds the command chunk for calling ORFs on sequences
coming through as stdin. Outputs to stdout. Calls command_line
to construct final command line string.
Returns
-------
output_table_list : array of HMMSearchResult
Includes the name of the output domtblout table given by hmmer
Raises
------
hmmsearcher.NoInputSequencesException
Raised if there are no sequences fed into the HMM. | Below is the the instruction that describes the task:
### Input:
hmmsearch - Search raw reads for hits using search_hmm list
Parameters
----------
output_path : str
path to output domtblout table
input_path : str
path to input sequences to search
unpack : UnpackRawReads
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
seq_type : str
variable containing a string, either 'nucleotide' or 'aminoacid'.
Tells the pipeline whether or not to call ORFs on the sequence.
If sequence is 'nucleotide', ORFs are called. If not, no ORFs.
threads : Integer
Number of threads to use. Passed to HMMsearch command.
cutoff : str
cutoff for HMMsearch to use, either an evalue or --cut_tc, meaning
use the TC score cutoff specified within the HMM. Passed to
HMMsearch command.
orfm : OrfM
Object that builds the command chunk for calling ORFs on sequences
coming through as stdin. Outputs to stdout. Calls command_line
to construct final command line string.
Returns
-------
output_table_list : array of HMMSearchResult
Includes the name of the output domtblout table given by hmmer
Raises
------
hmmsearcher.NoInputSequencesException
Raised if there are no sequences fed into the HMM.
### Response:
def hmmsearch(self, output_path, input_path, unpack, seq_type, threads, cutoff, orfm):
'''
hmmsearch - Search raw reads for hits using search_hmm list
Parameters
----------
output_path : str
path to output domtblout table
input_path : str
path to input sequences to search
unpack : UnpackRawReads
UnpackRawReads object, returns string command that will output
sequences to stdout when called on command line
(use: unpack.command_line())
seq_type : str
variable containing a string, either 'nucleotide' or 'aminoacid'.
Tells the pipeline whether or not to call ORFs on the sequence.
If sequence is 'nucleotide', ORFs are called. If not, no ORFs.
threads : Integer
Number of threads to use. Passed to HMMsearch command.
cutoff : str
cutoff for HMMsearch to use, either an evalue or --cut_tc, meaning
use the TC score cutoff specified within the HMM. Passed to
HMMsearch command.
orfm : OrfM
Object that builds the command chunk for calling ORFs on sequences
coming through as stdin. Outputs to stdout. Calls command_line
to construct final command line string.
Returns
-------
output_table_list : array of HMMSearchResult
Includes the name of the output domtblout table given by hmmer
Raises
------
hmmsearcher.NoInputSequencesException
Raised if there are no sequences fed into the HMM.
'''
# Define the base hmmsearch command.
logging.debug("Using %i HMMs to search" % (len(self.search_hmm)))
output_table_list = []
if len(self.search_hmm) > 1:
for hmm in self.search_hmm:
out = os.path.join(os.path.split(output_path)[0], os.path.basename(hmm).split('.')[0] + '_' + os.path.split(output_path)[1])
output_table_list.append(out)
elif len(self.search_hmm) == 1:
output_table_list.append(output_path)
else:
raise Exception("Programming error: expected 1 or more HMMs")
# Choose an input to this base command based off the file format found.
if seq_type == 'nucleotide': # If the input is nucleotide sequence
input_cmd = orfm.command_line(input_path)
elif seq_type == 'aminoacid': # If the input is amino acid sequence
input_cmd = unpack.command_line()
else:
raise Exception('Programming Error: error guessing input sequence type')
# Run the HMMsearches
if cutoff == "--cut_tc":
searcher = HmmSearcher(threads, cutoff)
else:
searcher = HmmSearcher(threads, '--domE %s' % cutoff)
searcher.hmmsearch(input_cmd, self.search_hmm, output_table_list)
hmmtables = [HMMSearchResult.import_from_hmmsearch_table(x) for x in output_table_list]
return hmmtables |
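A standalone illustration of how the per-HMM output table paths are derived above when more than one HMM is searched; the file names are invented:

import os

output_path = "/tmp/run1/search.domtblout"
search_hmms = ["/db/PF00118.hmm", "/db/PF01000.hmm"]

output_table_list = []
for hmm in search_hmms:
    out = os.path.join(os.path.split(output_path)[0],
                       os.path.basename(hmm).split('.')[0] + '_' + os.path.split(output_path)[1])
    output_table_list.append(out)

print(output_table_list)
# ['/tmp/run1/PF00118_search.domtblout', '/tmp/run1/PF01000_search.domtblout']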
def extract_features_using_pefile(self, pef):
''' Process the PE File using the Python pefile module. '''
# Store all extracted features into feature lists
extracted_dense = {}
extracted_sparse = {}
# Now slog through the info and extract the features
feature_not_found_flag = -99
feature_default_value = 0
self._warnings = []
# Set all the dense features and sparse features to 'feature not found'
# value and then check later to see if it was found
for feature in self._dense_feature_list:
extracted_dense[feature] = feature_not_found_flag
for feature in self._sparse_feature_list:
extracted_sparse[feature] = feature_not_found_flag
# Check to make sure all the section names are standard
std_sections = ['.text', '.bss', '.rdata', '.data', '.rsrc', '.edata', '.idata',
'.pdata', '.debug', '.reloc', '.stab', '.stabstr', '.tls',
'.crt', '.gnu_deb', '.eh_fram', '.exptbl', '.rodata']
for i in range(200):
std_sections.append('/'+str(i))
std_section_names = 1
extracted_sparse['section_names'] = []
for section in pef.sections:
name = convert_to_ascii_null_term(section.Name).lower()
extracted_sparse['section_names'].append(name)
if name not in std_sections:
std_section_names = 0
extracted_dense['std_section_names'] = std_section_names
extracted_dense['debug_size'] = pef.OPTIONAL_HEADER.DATA_DIRECTORY[6].Size
extracted_dense['major_version'] = pef.OPTIONAL_HEADER.MajorImageVersion
extracted_dense['minor_version'] = pef.OPTIONAL_HEADER.MinorImageVersion
extracted_dense['iat_rva'] = pef.OPTIONAL_HEADER.DATA_DIRECTORY[1].VirtualAddress
extracted_dense['export_size'] = pef.OPTIONAL_HEADER.DATA_DIRECTORY[0].Size
extracted_dense['check_sum'] = pef.OPTIONAL_HEADER.CheckSum
try:
extracted_dense['generated_check_sum'] = pef.generate_checksum()
except ValueError:
extracted_dense['generated_check_sum'] = 0
if len(pef.sections) > 0:
extracted_dense['virtual_address'] = pef.sections[0].VirtualAddress
extracted_dense['virtual_size'] = pef.sections[0].Misc_VirtualSize
extracted_dense['number_of_sections'] = pef.FILE_HEADER.NumberOfSections
extracted_dense['compile_date'] = pef.FILE_HEADER.TimeDateStamp
extracted_dense['number_of_rva_and_sizes'] = pef.OPTIONAL_HEADER.NumberOfRvaAndSizes
extracted_dense['total_size_pe'] = len(pef.__data__)
# Number of import and exports
if hasattr(pef, 'DIRECTORY_ENTRY_IMPORT'):
extracted_dense['number_of_imports'] = len(pef.DIRECTORY_ENTRY_IMPORT)
num_imported_symbols = 0
for module in pef.DIRECTORY_ENTRY_IMPORT:
num_imported_symbols += len(module.imports)
extracted_dense['number_of_import_symbols'] = num_imported_symbols
if hasattr(pef, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
extracted_dense['number_of_bound_imports'] = len(pef.DIRECTORY_ENTRY_BOUND_IMPORT)
num_imported_symbols = 0
for module in pef.DIRECTORY_ENTRY_BOUND_IMPORT:
num_imported_symbols += len(module.entries)
extracted_dense['number_of_bound_import_symbols'] = num_imported_symbols
if hasattr(pef, 'DIRECTORY_ENTRY_EXPORT'):
try:
extracted_dense['number_of_export_symbols'] = len(pef.DIRECTORY_ENTRY_EXPORT.symbols)
symbol_set = set()
for symbol in pef.DIRECTORY_ENTRY_EXPORT.symbols:
symbol_info = 'unknown'
if not symbol.name:
symbol_info = 'ordinal=' + str(symbol.ordinal)
else:
symbol_info = 'name=' + symbol.name
symbol_set.add(convert_to_utf8('%s' % (symbol_info)).lower())
# Now convert set to list and add to features
extracted_sparse['ExportedSymbols'] = list(symbol_set)
except AttributeError:
extracted_sparse['ExportedSymbols'] = ['AttributeError']
# Specific Import info (Note this will be a sparse field woo hoo!)
if hasattr(pef, 'DIRECTORY_ENTRY_IMPORT'):
symbol_set = set()
for module in pef.DIRECTORY_ENTRY_IMPORT:
for symbol in module.imports:
symbol_info = 'unknown'
if symbol.import_by_ordinal is True:
symbol_info = 'ordinal=' + str(symbol.ordinal)
else:
symbol_info = 'name=' + symbol.name
# symbol_info['hint'] = symbol.hint
if symbol.bound:
symbol_info += ' bound=' + str(symbol.bound)
symbol_set.add(convert_to_utf8('%s:%s' % (module.dll, symbol_info)).lower())
# Now convert set to list and add to features
extracted_sparse['imported_symbols'] = list(symbol_set)
# Do we have a second section
if len(pef.sections) >= 2:
extracted_dense['virtual_size_2'] = pef.sections[1].Misc_VirtualSize
extracted_dense['size_image'] = pef.OPTIONAL_HEADER.SizeOfImage
extracted_dense['size_code'] = pef.OPTIONAL_HEADER.SizeOfCode
extracted_dense['size_initdata'] = pef.OPTIONAL_HEADER.SizeOfInitializedData
extracted_dense['size_uninit'] = pef.OPTIONAL_HEADER.SizeOfUninitializedData
extracted_dense['pe_majorlink'] = pef.OPTIONAL_HEADER.MajorLinkerVersion
extracted_dense['pe_minorlink'] = pef.OPTIONAL_HEADER.MinorLinkerVersion
extracted_dense['pe_driver'] = 1 if pef.is_driver() else 0
extracted_dense['pe_exe'] = 1 if pef.is_exe() else 0
extracted_dense['pe_dll'] = 1 if pef.is_dll() else 0
extracted_dense['pe_i386'] = 1
if pef.FILE_HEADER.Machine != 0x014c:
extracted_dense['pe_i386'] = 0
extracted_dense['pe_char'] = pef.FILE_HEADER.Characteristics
# Data directory features!!
datadirs = {
0: 'IMAGE_DIRECTORY_ENTRY_EXPORT', 1: 'IMAGE_DIRECTORY_ENTRY_IMPORT',
2: 'IMAGE_DIRECTORY_ENTRY_RESOURCE', 5: 'IMAGE_DIRECTORY_ENTRY_BASERELOC',
12: 'IMAGE_DIRECTORY_ENTRY_IAT'}
for idx, datadir in datadirs.items():
datadir = pefile.DIRECTORY_ENTRY[idx]
if len(pef.OPTIONAL_HEADER.DATA_DIRECTORY) <= idx:
continue
directory = pef.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
extracted_dense['datadir_%s_size' % datadir] = directory.Size
# Section features
section_flags = ['IMAGE_SCN_MEM_EXECUTE', 'IMAGE_SCN_CNT_CODE', 'IMAGE_SCN_MEM_WRITE', 'IMAGE_SCN_MEM_READ']
rawexecsize = 0
vaexecsize = 0
for sec in pef.sections:
if not sec:
continue
for char in section_flags:
# does the section have one of our attribs?
if hasattr(sec, char):
rawexecsize += sec.SizeOfRawData
vaexecsize += sec.Misc_VirtualSize
break
# Take out any weird characters in section names
secname = convert_to_ascii_null_term(sec.Name).lower()
secname = secname.replace('.', '')
if secname in std_sections:
extracted_dense['sec_entropy_%s' % secname] = sec.get_entropy()
extracted_dense['sec_rawptr_%s' % secname] = sec.PointerToRawData
extracted_dense['sec_rawsize_%s' % secname] = sec.SizeOfRawData
extracted_dense['sec_vasize_%s' % secname] = sec.Misc_VirtualSize
extracted_dense['sec_va_execsize'] = vaexecsize
extracted_dense['sec_raw_execsize'] = rawexecsize
# Imphash (implemented in pefile 1.2.10-139 or later)
try:
extracted_sparse['imp_hash'] = pef.get_imphash()
except AttributeError:
extracted_sparse['imp_hash'] = 'Not found: Install pefile 1.2.10-139 or later'
# Register if there were any pe warnings
warnings = pef.get_warnings()
if warnings:
extracted_dense['pe_warnings'] = 1
extracted_sparse['pe_warning_strings'] = warnings
else:
extracted_dense['pe_warnings'] = 0
# Issue a warning if the feature isn't found
for feature in self._dense_feature_list:
if extracted_dense[feature] == feature_not_found_flag:
extracted_dense[feature] = feature_default_value
if (self._verbose):
print 'info: Feature: %s not found! Setting to %d' % (feature, feature_default_value)
# Issue a warning if the feature isn't found
for feature in self._sparse_feature_list:
if extracted_sparse[feature] == feature_not_found_flag:
extracted_sparse[feature] = [] # For sparse data probably best default
if (self._verbose):
print 'info: Feature: %s not found! Setting to %d' % (feature, feature_default_value)
# Set the features for the class var
self._dense_features = extracted_dense
self._sparse_features = extracted_sparse
return self.get_dense_features(), self.get_sparse_features() | Process the PE File using the Python pefile module. | Below is the instruction that describes the task:
### Input:
Process the PE File using the Python pefile module.
### Response:
def extract_features_using_pefile(self, pef):
''' Process the PE File using the Python pefile module. '''
# Store all extracted features into feature lists
extracted_dense = {}
extracted_sparse = {}
# Now slog through the info and extract the features
feature_not_found_flag = -99
feature_default_value = 0
self._warnings = []
# Set all the dense features and sparse features to 'feature not found'
# value and then check later to see if it was found
for feature in self._dense_feature_list:
extracted_dense[feature] = feature_not_found_flag
for feature in self._sparse_feature_list:
extracted_sparse[feature] = feature_not_found_flag
# Check to make sure all the section names are standard
std_sections = ['.text', '.bss', '.rdata', '.data', '.rsrc', '.edata', '.idata',
'.pdata', '.debug', '.reloc', '.stab', '.stabstr', '.tls',
'.crt', '.gnu_deb', '.eh_fram', '.exptbl', '.rodata']
for i in range(200):
std_sections.append('/'+str(i))
std_section_names = 1
extracted_sparse['section_names'] = []
for section in pef.sections:
name = convert_to_ascii_null_term(section.Name).lower()
extracted_sparse['section_names'].append(name)
if name not in std_sections:
std_section_names = 0
extracted_dense['std_section_names'] = std_section_names
extracted_dense['debug_size'] = pef.OPTIONAL_HEADER.DATA_DIRECTORY[6].Size
extracted_dense['major_version'] = pef.OPTIONAL_HEADER.MajorImageVersion
extracted_dense['minor_version'] = pef.OPTIONAL_HEADER.MinorImageVersion
extracted_dense['iat_rva'] = pef.OPTIONAL_HEADER.DATA_DIRECTORY[1].VirtualAddress
extracted_dense['export_size'] = pef.OPTIONAL_HEADER.DATA_DIRECTORY[0].Size
extracted_dense['check_sum'] = pef.OPTIONAL_HEADER.CheckSum
try:
extracted_dense['generated_check_sum'] = pef.generate_checksum()
except ValueError:
extracted_dense['generated_check_sum'] = 0
if len(pef.sections) > 0:
extracted_dense['virtual_address'] = pef.sections[0].VirtualAddress
extracted_dense['virtual_size'] = pef.sections[0].Misc_VirtualSize
extracted_dense['number_of_sections'] = pef.FILE_HEADER.NumberOfSections
extracted_dense['compile_date'] = pef.FILE_HEADER.TimeDateStamp
extracted_dense['number_of_rva_and_sizes'] = pef.OPTIONAL_HEADER.NumberOfRvaAndSizes
extracted_dense['total_size_pe'] = len(pef.__data__)
# Number of import and exports
if hasattr(pef, 'DIRECTORY_ENTRY_IMPORT'):
extracted_dense['number_of_imports'] = len(pef.DIRECTORY_ENTRY_IMPORT)
num_imported_symbols = 0
for module in pef.DIRECTORY_ENTRY_IMPORT:
num_imported_symbols += len(module.imports)
extracted_dense['number_of_import_symbols'] = num_imported_symbols
if hasattr(pef, 'DIRECTORY_ENTRY_BOUND_IMPORT'):
extracted_dense['number_of_bound_imports'] = len(pef.DIRECTORY_ENTRY_BOUND_IMPORT)
num_imported_symbols = 0
for module in pef.DIRECTORY_ENTRY_BOUND_IMPORT:
num_imported_symbols += len(module.entries)
extracted_dense['number_of_bound_import_symbols'] = num_imported_symbols
if hasattr(pef, 'DIRECTORY_ENTRY_EXPORT'):
try:
extracted_dense['number_of_export_symbols'] = len(pef.DIRECTORY_ENTRY_EXPORT.symbols)
symbol_set = set()
for symbol in pef.DIRECTORY_ENTRY_EXPORT.symbols:
symbol_info = 'unknown'
if not symbol.name:
symbol_info = 'ordinal=' + str(symbol.ordinal)
else:
symbol_info = 'name=' + symbol.name
symbol_set.add(convert_to_utf8('%s' % (symbol_info)).lower())
# Now convert set to list and add to features
extracted_sparse['ExportedSymbols'] = list(symbol_set)
except AttributeError:
extracted_sparse['ExportedSymbols'] = ['AttributeError']
# Specific Import info (Note this will be a sparse field woo hoo!)
if hasattr(pef, 'DIRECTORY_ENTRY_IMPORT'):
symbol_set = set()
for module in pef.DIRECTORY_ENTRY_IMPORT:
for symbol in module.imports:
symbol_info = 'unknown'
if symbol.import_by_ordinal is True:
symbol_info = 'ordinal=' + str(symbol.ordinal)
else:
symbol_info = 'name=' + symbol.name
# symbol_info['hint'] = symbol.hint
if symbol.bound:
symbol_info += ' bound=' + str(symbol.bound)
symbol_set.add(convert_to_utf8('%s:%s' % (module.dll, symbol_info)).lower())
# Now convert set to list and add to features
extracted_sparse['imported_symbols'] = list(symbol_set)
# Do we have a second section
if len(pef.sections) >= 2:
extracted_dense['virtual_size_2'] = pef.sections[1].Misc_VirtualSize
extracted_dense['size_image'] = pef.OPTIONAL_HEADER.SizeOfImage
extracted_dense['size_code'] = pef.OPTIONAL_HEADER.SizeOfCode
extracted_dense['size_initdata'] = pef.OPTIONAL_HEADER.SizeOfInitializedData
extracted_dense['size_uninit'] = pef.OPTIONAL_HEADER.SizeOfUninitializedData
extracted_dense['pe_majorlink'] = pef.OPTIONAL_HEADER.MajorLinkerVersion
extracted_dense['pe_minorlink'] = pef.OPTIONAL_HEADER.MinorLinkerVersion
extracted_dense['pe_driver'] = 1 if pef.is_driver() else 0
extracted_dense['pe_exe'] = 1 if pef.is_exe() else 0
extracted_dense['pe_dll'] = 1 if pef.is_dll() else 0
extracted_dense['pe_i386'] = 1
if pef.FILE_HEADER.Machine != 0x014c:
extracted_dense['pe_i386'] = 0
extracted_dense['pe_char'] = pef.FILE_HEADER.Characteristics
# Data directory features!!
datadirs = {
0: 'IMAGE_DIRECTORY_ENTRY_EXPORT', 1: 'IMAGE_DIRECTORY_ENTRY_IMPORT',
2: 'IMAGE_DIRECTORY_ENTRY_RESOURCE', 5: 'IMAGE_DIRECTORY_ENTRY_BASERELOC',
12: 'IMAGE_DIRECTORY_ENTRY_IAT'}
for idx, datadir in datadirs.items():
datadir = pefile.DIRECTORY_ENTRY[idx]
if len(pef.OPTIONAL_HEADER.DATA_DIRECTORY) <= idx:
continue
directory = pef.OPTIONAL_HEADER.DATA_DIRECTORY[idx]
extracted_dense['datadir_%s_size' % datadir] = directory.Size
# Section features
section_flags = ['IMAGE_SCN_MEM_EXECUTE', 'IMAGE_SCN_CNT_CODE', 'IMAGE_SCN_MEM_WRITE', 'IMAGE_SCN_MEM_READ']
rawexecsize = 0
vaexecsize = 0
for sec in pef.sections:
if not sec:
continue
for char in section_flags:
# does the section have one of our attribs?
if hasattr(sec, char):
rawexecsize += sec.SizeOfRawData
vaexecsize += sec.Misc_VirtualSize
break
# Take out any weird characters in section names
secname = convert_to_ascii_null_term(sec.Name).lower()
secname = secname.replace('.', '')
if secname in std_sections:
extracted_dense['sec_entropy_%s' % secname] = sec.get_entropy()
extracted_dense['sec_rawptr_%s' % secname] = sec.PointerToRawData
extracted_dense['sec_rawsize_%s' % secname] = sec.SizeOfRawData
extracted_dense['sec_vasize_%s' % secname] = sec.Misc_VirtualSize
extracted_dense['sec_va_execsize'] = vaexecsize
extracted_dense['sec_raw_execsize'] = rawexecsize
# Imphash (implemented in pefile 1.2.10-139 or later)
try:
extracted_sparse['imp_hash'] = pef.get_imphash()
except AttributeError:
extracted_sparse['imp_hash'] = 'Not found: Install pefile 1.2.10-139 or later'
# Register if there were any pe warnings
warnings = pef.get_warnings()
if warnings:
extracted_dense['pe_warnings'] = 1
extracted_sparse['pe_warning_strings'] = warnings
else:
extracted_dense['pe_warnings'] = 0
# Issue a warning if the feature isn't found
for feature in self._dense_feature_list:
if extracted_dense[feature] == feature_not_found_flag:
extracted_dense[feature] = feature_default_value
if (self._verbose):
print 'info: Feature: %s not found! Setting to %d' % (feature, feature_default_value)
# Issue a warning if the feature isn't found
for feature in self._sparse_feature_list:
if extracted_sparse[feature] == feature_not_found_flag:
extracted_sparse[feature] = [] # For sparse data probably best default
if (self._verbose):
print 'info: Feature: %s not found! Setting to %d' % (feature, feature_default_value)
# Set the features for the class var
self._dense_features = extracted_dense
self._sparse_features = extracted_sparse
return self.get_dense_features(), self.get_sparse_features() |
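A minimal usage sketch for the extractor above, driven by the pefile module. The wrapper class name PEFileFeatures and the sample path are assumptions made for illustration; only extract_features_using_pefile and the two getters come from the code itself.
import pefile

extractor = PEFileFeatures()   # hypothetical wrapper class exposing the method above
pef = pefile.PE('sample.exe')  # parse the binary with pefile
dense, sparse = extractor.extract_features_using_pefile(pef)
print(dense['number_of_sections'], dense['compile_date'], sparse['imp_hash'])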
def configure_args(self, arguments):
"""
Create configuration hash from command line arguments
:type arguments: :py:class:`argparse.Namespace`
:param arguments: arguments produced by :py:meth:`Cli.parse_args()`
"""
module, key, config_path = self.check_file_paths(arguments.module,
arguments.key,
arguments.config)
log_dir = self.check_directory_paths(arguments.log_dir)
if arguments.repository_url is None:
url = default_config['repository']['url']
else:
url = arguments.repository_url
args_config = dict(aws=dict(bucket=arguments.bucket),
logging=dict(dir=arguments.log_dir,
prefix=arguments.log_prefix),
workers=arguments.workers,
repository=dict(enabled=arguments.repository,
url=url,
manifest=arguments.repository_manifest,
gpg_verify=arguments.gpg_verify))
if arguments.server is not None:
jump_host = None
if arguments.jump_server is not None:
if arguments.jump_port is not None:
jump_port = int(arguments.jump_port)
else:
jump_port = None
jump_host = dict(zip(jump_host_allowed_keys,
[arguments.jump_server,
jump_port,
arguments.jump_username,
arguments.jump_password,
arguments.jump_key]))
if arguments.port is not None:
port = int(arguments.port)
else:
port = None
host = dict(zip(host_allowed_keys,
[arguments.server, port, arguments.username,
arguments.password, module, key,
arguments.filename, jump_host]))
args_config['hosts'] = []
args_config['hosts'].append(host)
if config_path is not None:
try:
config = self.load_config(config_path)
self.validate_config(config)
args_config.update(config)
except YAMLError as ex:
logger.warn('Invalid yaml Format: {0}'.format(ex))
raise
except InvalidConfigurationError as ex:
logger.warn(ex)
raise
return args_config | Create configuration hash from command line arguments
:type arguments: :py:class:`argparse.Namespace`
:param arguments: arguments produced by :py:meth:`Cli.parse_args()` | Below is the instruction that describes the task:
### Input:
Create configuration hash from command line arguments
:type arguments: :py:class:`argparse.Namespace`
:param arguments: arguments produced by :py:meth:`Cli.parse_args()`
### Response:
def configure_args(self, arguments):
"""
Create configuration hash from command line arguments
:type arguments: :py:class:`argparse.Namespace`
:param arguments: arguments produced by :py:meth:`Cli.parse_args()`
"""
module, key, config_path = self.check_file_paths(arguments.module,
arguments.key,
arguments.config)
log_dir = self.check_directory_paths(arguments.log_dir)
if arguments.repository_url is None:
url = default_config['repository']['url']
else:
url = arguments.repository_url
args_config = dict(aws=dict(bucket=arguments.bucket),
logging=dict(dir=arguments.log_dir,
prefix=arguments.log_prefix),
workers=arguments.workers,
repository=dict(enabled=arguments.repository,
url=url,
manifest=arguments.repository_manifest,
gpg_verify=arguments.gpg_verify))
if arguments.server is not None:
jump_host = None
if arguments.jump_server is not None:
if arguments.jump_port is not None:
jump_port = int(arguments.jump_port)
else:
jump_port = None
jump_host = dict(zip(jump_host_allowed_keys,
[arguments.jump_server,
jump_port,
arguments.jump_username,
arguments.jump_password,
arguments.jump_key]))
if arguments.port is not None:
port = int(arguments.port)
else:
port = None
host = dict(zip(host_allowed_keys,
[arguments.server, port, arguments.username,
arguments.password, module, key,
arguments.filename, jump_host]))
args_config['hosts'] = []
args_config['hosts'].append(host)
if config_path is not None:
try:
config = self.load_config(config_path)
self.validate_config(config)
args_config.update(config)
except YAMLError as ex:
logger.warn('Invalid yaml Format: {0}'.format(ex))
raise
except InvalidConfigurationError as ex:
logger.warn(ex)
raise
return args_config |
def get_credit_notes_per_page(self, per_page=1000, page=1, params=None):
"""
Get credit notes per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
"""
return self._get_resource_per_page(resource=CREDIT_NOTES, per_page=per_page, page=page, params=params) | Get credit notes per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list | Below is the instruction that describes the task:
### Input:
Get credit notes per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
### Response:
def get_credit_notes_per_page(self, per_page=1000, page=1, params=None):
"""
Get credit notes per page
:param per_page: How many objects per page. Default: 1000
:param page: Which page. Default: 1
:param params: Search parameters. Default: {}
:return: list
"""
return self._get_resource_per_page(resource=CREDIT_NOTES, per_page=per_page, page=page, params=params) |
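A short usage sketch; client stands for an instance of the API wrapper class that defines this method, and the filter key is illustrative rather than taken from the source.
second_page = client.get_credit_notes_per_page(per_page=250, page=2,
                                               params={'client_id': 123})
print(len(second_page), 'credit notes on page 2')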
def get_variant_genotypes(self, variant):
"""Get the genotypes from a well formed variant instance.
Args:
variant (Variant): A Variant instance.
Returns:
A list of Genotypes instances containing a pointer to the variant as
well as a vector of encoded genotypes.
"""
# The chromosome to search for (if a general one is set, that's the one
# we need to search for)
chrom = variant.chrom.name
if self.chrom is not None and chrom == self.chrom:
chrom = "NA"
# Getting the results
results = []
iterator = self._bgen.iter_variants_in_region(
CHROM_STR_DECODE.get(chrom, chrom), variant.pos, variant.pos,
)
for info, dosage in iterator:
if (variant.alleles is None or
variant.iterable_alleles_eq([info.a1, info.a2])):
results.append(Genotypes(
Variant(
info.name,
CHROM_STR_ENCODE.get(info.chrom, info.chrom),
info.pos, [info.a1, info.a2],
),
dosage,
reference=info.a1,
coded=info.a2,
multiallelic=True,
))
# If there are no results
if not results:
logging.variant_name_not_found(variant)
return results | Get the genotypes from a well formed variant instance.
Args:
variant (Variant): A Variant instance.
Returns:
A list of Genotypes instances containing a pointer to the variant as
well as a vector of encoded genotypes. | Below is the instruction that describes the task:
### Input:
Get the genotypes from a well formed variant instance.
Args:
variant (Variant): A Variant instance.
Returns:
A list of Genotypes instances containing a pointer to the variant as
well as a vector of encoded genotypes.
### Response:
def get_variant_genotypes(self, variant):
"""Get the genotypes from a well formed variant instance.
Args:
variant (Variant): A Variant instance.
Returns:
A list of Genotypes instances containing a pointer to the variant as
well as a vector of encoded genotypes.
"""
# The chromosome to search for (if a general one is set, that's the one
# we need to search for)
chrom = variant.chrom.name
if self.chrom is not None and chrom == self.chrom:
chrom = "NA"
# Getting the results
results = []
iterator = self._bgen.iter_variants_in_region(
CHROM_STR_DECODE.get(chrom, chrom), variant.pos, variant.pos,
)
for info, dosage in iterator:
if (variant.alleles is None or
variant.iterable_alleles_eq([info.a1, info.a2])):
results.append(Genotypes(
Variant(
info.name,
CHROM_STR_ENCODE.get(info.chrom, info.chrom),
info.pos, [info.a1, info.a2],
),
dosage,
reference=info.a1,
coded=info.a2,
multiallelic=True,
))
# If there are no results
if not results:
logging.variant_name_not_found(variant)
return results |
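A hedged usage sketch: reader stands for an instance of the containing BGEN reader class, and the Variant constructor arguments (name, chromosome, position, alleles) mirror the call made inside the method.
variant = Variant('rs12345', '1', 123456, ['A', 'G'])
for genotypes in reader.get_variant_genotypes(variant):
    print(genotypes)  # one Genotypes object per matching record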
def checkOneValue(self,v,strict=0):
"""Checks a single value to see if it is in range or choice list
Allows indirection strings starting with ")". Assumes
v has already been converted to right value by
_coerceOneValue. Returns value if OK, or raises
ValueError if not OK.
"""
if v in [None, INDEF] or (isinstance(v,str) and v[:1] == ")"):
return v
elif v == "":
# most parameters treat null string as omitted value
return None
elif self.choice is not None and v not in self.choiceDict:
schoice = list(map(self.toString, self.choice))
schoice = "|".join(schoice)
raise ValueError("Parameter %s: "
"value %s is not in choice list (%s)" %
(self.name, str(v), schoice))
elif (self.min not in [None, INDEF] and v<self.min):
raise ValueError("Parameter %s: "
"value `%s' is less than minimum `%s'" %
(self.name, str(v), str(self.min)))
elif (self.max not in [None, INDEF] and v>self.max):
raise ValueError("Parameter %s: "
"value `%s' is greater than maximum `%s'" %
(self.name, str(v), str(self.max)))
return v | Checks a single value to see if it is in range or choice list
Allows indirection strings starting with ")". Assumes
v has already been converted to right value by
_coerceOneValue. Returns value if OK, or raises
ValueError if not OK. | Below is the instruction that describes the task:
### Input:
Checks a single value to see if it is in range or choice list
Allows indirection strings starting with ")". Assumes
v has already been converted to right value by
_coerceOneValue. Returns value if OK, or raises
ValueError if not OK.
### Response:
def checkOneValue(self,v,strict=0):
"""Checks a single value to see if it is in range or choice list
Allows indirection strings starting with ")". Assumes
v has already been converted to right value by
_coerceOneValue. Returns value if OK, or raises
ValueError if not OK.
"""
if v in [None, INDEF] or (isinstance(v,str) and v[:1] == ")"):
return v
elif v == "":
# most parameters treat null string as omitted value
return None
elif self.choice is not None and v not in self.choiceDict:
schoice = list(map(self.toString, self.choice))
schoice = "|".join(schoice)
raise ValueError("Parameter %s: "
"value %s is not in choice list (%s)" %
(self.name, str(v), schoice))
elif (self.min not in [None, INDEF] and v<self.min):
raise ValueError("Parameter %s: "
"value `%s' is less than minimum `%s'" %
(self.name, str(v), str(self.min)))
elif (self.max not in [None, INDEF] and v>self.max):
raise ValueError("Parameter %s: "
"value `%s' is greater than maximum `%s'" %
(self.name, str(v), str(self.max)))
return v |
def inverse(self):
""" Inverts image (all nonzeros become zeros and vice verse)
Returns
-------
:obj:`BinaryImage`
inverse of this binary image
"""
data = np.zeros(self.shape).astype(np.uint8)
ind = np.where(self.data == 0)
data[ind[0], ind[1], ...] = BINARY_IM_MAX_VAL
return BinaryImage(data, self._frame) | Inverts image (all nonzeros become zeros and vice versa)
Returns
-------
:obj:`BinaryImage`
inverse of this binary image | Below is the instruction that describes the task:
### Input:
Inverts image (all nonzeros become zeros and vice versa)
Returns
-------
:obj:`BinaryImage`
inverse of this binary image
### Response:
def inverse(self):
""" Inverts image (all nonzeros become zeros and vice verse)
Returns
-------
:obj:`BinaryImage`
inverse of this binary image
"""
data = np.zeros(self.shape).astype(np.uint8)
ind = np.where(self.data == 0)
data[ind[0], ind[1], ...] = BINARY_IM_MAX_VAL
return BinaryImage(data, self._frame) |
def log_to_history(logger, name):
"""Decorate function, adding a logger handler stored in FITS."""
def log_to_history_decorator(method):
def l2h_method(self, ri):
history_header = fits.Header()
fh = FITSHistoryHandler(history_header)
fh.setLevel(logging.INFO)
logger.addHandler(fh)
try:
result = method(self, ri)
field = getattr(result, name, None)
if field:
with field.open() as hdulist:
hdr = hdulist[0].header
hdr.extend(history_header.cards)
return result
finally:
logger.removeHandler(fh)
return l2h_method
return log_to_history_decorator | Decorate function, adding a logger handler stored in FITS. | Below is the instruction that describes the task:
### Input:
Decorate function, adding a logger handler stored in FITS.
### Response:
def log_to_history(logger, name):
"""Decorate function, adding a logger handler stored in FITS."""
def log_to_history_decorator(method):
def l2h_method(self, ri):
history_header = fits.Header()
fh = FITSHistoryHandler(history_header)
fh.setLevel(logging.INFO)
logger.addHandler(fh)
try:
result = method(self, ri)
field = getattr(result, name, None)
if field:
with field.open() as hdulist:
hdr = hdulist[0].header
hdr.extend(history_header.cards)
return result
finally:
logger.removeHandler(fh)
return l2h_method
return log_to_history_decorator |
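The decorator factory above is meant to wrap a recipe-style run method so that anything logged during the call is copied into the HISTORY cards of the FITS file held by the named result attribute. A sketch under stated assumptions ('frame' as the result attribute, SomeRecipe and build_result as hypothetical names):
import logging

_logger = logging.getLogger(__name__)

class SomeRecipe(object):
    @log_to_history(_logger, 'frame')
    def run(self, ri):
        _logger.info('this message also ends up in the FITS HISTORY of result.frame')
        return build_result(ri)  # hypothetical helper returning an object with a .frame FITS field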
def get_rgb_image_as_bytes(self, format='png', quality=90):
"""Get the current image shown in the viewer, with any overlaid
graphics, as a buffer of bytes.
Parameters
----------
format : str
See :meth:`get_rgb_image_as_buffer`.
quality: int
See :meth:`get_rgb_image_as_buffer`.
Returns
-------
buffer : bytes
The window contents as a buffer in the form of bytes.
"""
obuf = self.get_rgb_image_as_buffer(format=format, quality=quality)
return bytes(obuf.getvalue()) | Get the current image shown in the viewer, with any overlaid
graphics, as a buffer of bytes.
Parameters
----------
format : str
See :meth:`get_rgb_image_as_buffer`.
quality: int
See :meth:`get_rgb_image_as_buffer`.
Returns
-------
buffer : bytes
The window contents as a buffer in the form of bytes. | Below is the instruction that describes the task:
### Input:
Get the current image shown in the viewer, with any overlaid
graphics, as a buffer of bytes.
Parameters
----------
format : str
See :meth:`get_rgb_image_as_buffer`.
quality: int
See :meth:`get_rgb_image_as_buffer`.
Returns
-------
buffer : bytes
The window contents as a buffer in the form of bytes.
### Response:
def get_rgb_image_as_bytes(self, format='png', quality=90):
"""Get the current image shown in the viewer, with any overlaid
graphics, as a buffer of bytes.
Parameters
----------
format : str
See :meth:`get_rgb_image_as_buffer`.
quality: int
See :meth:`get_rgb_image_as_buffer`.
Returns
-------
buffer : bytes
The window contents as a buffer in the form of bytes.
"""
obuf = self.get_rgb_image_as_buffer(format=format, quality=quality)
return bytes(obuf.getvalue()) |
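A minimal sketch of dumping the viewer contents to disk; viewer is assumed to be an instance of the class that defines the method above, and the file name is a placeholder.
png_bytes = viewer.get_rgb_image_as_bytes(format='png')
with open('viewer_snapshot.png', 'wb') as fh:
    fh.write(png_bytes)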
def map_words(self, start, end):
"""Return a memory-map of the elements `start` through `end`.
The memory map will offer the 8-byte double-precision floats
("elements") in the file from index `start` through to the index
`end`, inclusive, both counting the first float as element 1.
Memory maps must begin on a page boundary, so `skip` returns the
number of extra bytes at the beginning of the return value.
"""
i, j = 8 * start - 8, 8 * end
try:
fileno = self.file.fileno()
except (AttributeError, io.UnsupportedOperation):
fileno = None
if fileno is None:
skip = 0
self.file.seek(i)
m = self.file.read(j - i)
else:
skip = i % mmap.ALLOCATIONGRANULARITY
r = mmap.ACCESS_READ
m = mmap.mmap(fileno, length=j-i+skip, access=r, offset=i-skip)
if sys.version_info > (3,):
m = memoryview(m) # so further slicing can return views
return m, skip | Return a memory-map of the elements `start` through `end`.
The memory map will offer the 8-byte double-precision floats
("elements") in the file from index `start` through to the index
`end`, inclusive, both counting the first float as element 1.
Memory maps must begin on a page boundary, so `skip` returns the
number of extra bytes at the beginning of the return value. | Below is the instruction that describes the task:
### Input:
Return a memory-map of the elements `start` through `end`.
The memory map will offer the 8-byte double-precision floats
("elements") in the file from index `start` through to the index
`end`, inclusive, both counting the first float as element 1.
Memory maps must begin on a page boundary, so `skip` returns the
number of extra bytes at the beginning of the return value.
### Response:
def map_words(self, start, end):
"""Return a memory-map of the elements `start` through `end`.
The memory map will offer the 8-byte double-precision floats
("elements") in the file from index `start` through to the index
`end`, inclusive, both counting the first float as element 1.
Memory maps must begin on a page boundary, so `skip` returns the
number of extra bytes at the beginning of the return value.
"""
i, j = 8 * start - 8, 8 * end
try:
fileno = self.file.fileno()
except (AttributeError, io.UnsupportedOperation):
fileno = None
if fileno is None:
skip = 0
self.file.seek(i)
m = self.file.read(j - i)
else:
skip = i % mmap.ALLOCATIONGRANULARITY
r = mmap.ACCESS_READ
m = mmap.mmap(fileno, length=j-i+skip, access=r, offset=i-skip)
if sys.version_info > (3,):
m = memoryview(m) # so further slicing can return views
return m, skip |
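The index arithmetic above is: element k (1-based) occupies bytes 8*(k-1) through 8*k, so start=3, end=5 gives i=16 and j=40, i.e. three doubles. A hedged decoding sketch, where daf stands for an instance of the containing file class and the little-endian byte order is an assumption:
import struct

m, skip = daf.map_words(3, 5)
three_doubles = struct.unpack('<3d', bytes(m[skip:skip + 24]))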
def mode(name, num, minimum=0, maximum=0, ref=None):
'''
Calculates the mode of the ``num`` most recent values. Requires a list.
USAGE:
.. code-block:: yaml
foo:
calc.mode:
- name: myregentry
- num: 5
'''
return calc(
name=name,
num=num,
oper='mode',
minimum=minimum,
maximum=maximum,
ref=ref
) | Calculates the mode of the ``num`` most recent values. Requires a list.
USAGE:
.. code-block:: yaml
foo:
calc.mode:
- name: myregentry
- num: 5 | Below is the instruction that describes the task:
### Input:
Calculates the mode of the ``num`` most recent values. Requires a list.
USAGE:
.. code-block:: yaml
foo:
calc.mode:
- name: myregentry
- num: 5
### Response:
def mode(name, num, minimum=0, maximum=0, ref=None):
'''
Calculates the mode of the ``num`` most recent values. Requires a list.
USAGE:
.. code-block:: yaml
foo:
calc.mode:
- name: myregentry
- num: 5
'''
return calc(
name=name,
num=num,
oper='mode',
minimum=minimum,
maximum=maximum,
ref=ref
) |
async def async_enqueue_download(self, resource):
'''
Enqueue the download of the given foreign resource.
'''
worker = self.pick_sticky(resource.url_string)
await worker.enqueue(enums.Task.DOWNLOAD, (resource,)) | Enqueue the download of the given foreign resource. | Below is the the instruction that describes the task:
### Input:
Enqueue the download of the given foreign resource.
### Response:
async def async_enqueue_download(self, resource):
'''
Enqueue the download of the given foreign resource.
'''
worker = self.pick_sticky(resource.url_string)
await worker.enqueue(enums.Task.DOWNLOAD, (resource,)) |
def set_cursor_position(self, position):
"""Set cursor position"""
position = self.get_position(position)
cursor = self.textCursor()
cursor.setPosition(position)
self.setTextCursor(cursor)
self.ensureCursorVisible() | Set cursor position | Below is the instruction that describes the task:
### Input:
Set cursor position
### Response:
def set_cursor_position(self, position):
"""Set cursor position"""
position = self.get_position(position)
cursor = self.textCursor()
cursor.setPosition(position)
self.setTextCursor(cursor)
self.ensureCursorVisible() |
def strip_inserts(fasta):
"""
remove insertion columns from aligned fasta file
"""
for seq in parse_fasta(fasta):
seq[1] = ''.join([b for b in seq[1] if b == '-' or b.isupper()])
yield seq | remove insertion columns from aligned fasta file | Below is the instruction that describes the task:
### Input:
remove insertion columns from aligned fasta file
### Response:
def strip_inserts(fasta):
"""
remove insertion columns from aligned fasta file
"""
for seq in parse_fasta(fasta):
seq[1] = ''.join([b for b in seq[1] if b == '-' or b.isupper()])
yield seq |
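Usage sketch: write a stripped copy of an alignment to a new file. parse_fasta is assumed to come from the same module and, as the loop above implies, to yield [header, sequence] pairs; file names are placeholders.
with open('aligned.stripped.fa', 'w') as out:
    for header, sequence in strip_inserts('aligned.fa'):
        out.write('%s\n%s\n' % (header, sequence))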
def _pfp__set_value(self, new_val):
"""Set the value of the String, taking into account
escaping and such as well
"""
if not isinstance(new_val, Field):
new_val = utils.binary(utils.string_escape(new_val))
super(String, self)._pfp__set_value(new_val) | Set the value of the String, taking into account
escaping and such as well | Below is the instruction that describes the task:
### Input:
Set the value of the String, taking into account
escaping and such as well
### Response:
def _pfp__set_value(self, new_val):
"""Set the value of the String, taking into account
escaping and such as well
"""
if not isinstance(new_val, Field):
new_val = utils.binary(utils.string_escape(new_val))
super(String, self)._pfp__set_value(new_val) |
def delete_entity(sender, instance, **kwargs):
"""Delete Entity when last Data object is deleted."""
# 1 means that the last Data object is going to be deleted.
Entity.objects.annotate(num_data=Count('data')).filter(data=instance, num_data=1).delete() | Delete Entity when last Data object is deleted. | Below is the instruction that describes the task:
### Input:
Delete Entity when last Data object is deleted.
### Response:
def delete_entity(sender, instance, **kwargs):
"""Delete Entity when last Data object is deleted."""
# 1 means that the last Data object is going to be deleted.
Entity.objects.annotate(num_data=Count('data')).filter(data=instance, num_data=1).delete() |
def deregister_calendar(self, name):
"""
If a calendar is registered with the given name, it is de-registered.
Parameters
----------
cal_name : str
The name of the calendar to be deregistered.
"""
self._calendars.pop(name, None)
self._calendar_factories.pop(name, None)
self._aliases.pop(name, None) | If a calendar is registered with the given name, it is de-registered.
Parameters
----------
cal_name : str
The name of the calendar to be deregistered. | Below is the instruction that describes the task:
### Input:
If a calendar is registered with the given name, it is de-registered.
Parameters
----------
cal_name : str
The name of the calendar to be deregistered.
### Response:
def deregister_calendar(self, name):
"""
If a calendar is registered with the given name, it is de-registered.
Parameters
----------
cal_name : str
The name of the calendar to be deregistered.
"""
self._calendars.pop(name, None)
self._calendar_factories.pop(name, None)
self._aliases.pop(name, None) |
def load(self, pathname):
"""Loads entry from directory."""
match = self._entry_re.match(pathname)
if not match:
return None
self.ignore = (match.group(1) == "ignore")
if not os.path.isdir(pathname):
raise ValueError("%s: not a directory" % pathname)
if not os.access(pathname, os.R_OK | os.W_OK):
raise ValueError("%s: insufficient access privileges" % pathname)
# parse index.html file
parser = Purr.Parsers.LogEntryIndexParser(pathname)
self.index_file = os.path.join(pathname, 'index.html')
for i, line in enumerate(open(self.index_file)):
try:
parser.feed(line)
except:
dprintf(0, "parse error at line %d of %s\n", i, self.index_file)
raise
# set things up from parser
try:
self.timestamp = int(float(parser.timestamp))
except:
self.timestamp = int(time.time())
self.title = getattr(parser, 'title', None)
if self.title is None:
self.title = "Malformed entry, probably needs to be deleted"
self.comment = getattr(parser, 'comments', None) or ""
self.dps = getattr(parser, 'dps', [])
self.pathname = pathname
# see if any data products have been removed on us
self.dps = [dp for dp in self.dps if os.path.exists(dp.fullpath)]
# see if the cached include file is up-to-date
self.cached_include = cache = os.path.join(pathname, 'index.include.html')
mtime = (os.path.exists(cache) or 0) and os.path.getmtime(cache)
if mtime >= max(Purr.Render.youngest_renderer, os.path.getmtime(self.index_file)):
dprintf(2, "entry %s has a valid include cache\n", pathname)
self.cached_include_valid = True
else:
dprintf(2, "entry %s does not have a valid include cache\n", pathname)
self.cached_include_valid = False
# mark entry as unchanged, if renderers are older than index
self.updated = (Purr.Render.youngest_renderer > os.path.getmtime(self.index_file)) | Loads entry from directory. | Below is the instruction that describes the task:
### Input:
Loads entry from directory.
### Response:
def load(self, pathname):
"""Loads entry from directory."""
match = self._entry_re.match(pathname)
if not match:
return None
self.ignore = (match.group(1) == "ignore")
if not os.path.isdir(pathname):
raise ValueError("%s: not a directory" % pathname)
if not os.access(pathname, os.R_OK | os.W_OK):
raise ValueError("%s: insufficient access privileges" % pathname)
# parse index.html file
parser = Purr.Parsers.LogEntryIndexParser(pathname)
self.index_file = os.path.join(pathname, 'index.html')
for i, line in enumerate(open(self.index_file)):
try:
parser.feed(line)
except:
dprintf(0, "parse error at line %d of %s\n", i, self.index_file)
raise
# set things up from parser
try:
self.timestamp = int(float(parser.timestamp))
except:
self.timestamp = int(time.time())
self.title = getattr(parser, 'title', None)
if self.title is None:
self.title = "Malformed entry, probably needs to be deleted"
self.comment = getattr(parser, 'comments', None) or ""
self.dps = getattr(parser, 'dps', [])
self.pathname = pathname
# see if any data products have been removed on us
self.dps = [dp for dp in self.dps if os.path.exists(dp.fullpath)]
# see if the cached include file is up-to-date
self.cached_include = cache = os.path.join(pathname, 'index.include.html')
mtime = (os.path.exists(cache) or 0) and os.path.getmtime(cache)
if mtime >= max(Purr.Render.youngest_renderer, os.path.getmtime(self.index_file)):
dprintf(2, "entry %s has a valid include cache\n", pathname)
self.cached_include_valid = True
else:
dprintf(2, "entry %s does not have a valid include cache\n", pathname)
self.cached_include_valid = False
# mark entry as unchanged, if renderers are older than index
self.updated = (Purr.Render.youngest_renderer > os.path.getmtime(self.index_file)) |
def stat(path, format):
"""Call stat on file
:param path: HDFS Path
:param format: Stat format
:returns: Stat output
:raises: IOError: If unsuccessful
"""
cmd = "hadoop fs -stat %s %s" % (format, path)
rcode, stdout, stderr = _checked_hadoop_fs_command(cmd)
return stdout.rstrip() | Call stat on file
:param path: HDFS Path
:param format: Stat format
:returns: Stat output
:raises: IOError: If unsuccessful | Below is the instruction that describes the task:
### Input:
Call stat on file
:param path: HDFS Path
:param format: Stat format
:returns: Stat output
:raises: IOError: If unsuccessful
### Response:
def stat(path, format):
"""Call stat on file
:param path: HDFS Path
:param format: Stat format
:returns: Stat output
:raises: IOError: If unsuccessful
"""
cmd = "hadoop fs -stat %s %s" % (format, path)
rcode, stdout, stderr = _checked_hadoop_fs_command(cmd)
return stdout.rstrip() |
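Usage sketch: the format string follows hadoop fs -stat conventions, e.g. %Y for modification time in milliseconds and %b for file size in bytes; the path is a placeholder.
mtime_ms = stat('/user/alice/data.csv', '%Y')
size_bytes = stat('/user/alice/data.csv', '%b')
print(mtime_ms, size_bytes)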
def sle(actual, predicted):
"""
Computes the squared log error.
This function computes the squared log error between two numbers,
or element-wise between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted
"""
return (np.power(np.log(np.array(actual)+1) -
np.log(np.array(predicted)+1), 2)) | Computes the squared log error.
This function computes the squared log error between two numbers,
or element-wise between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted | Below is the instruction that describes the task:
### Input:
Computes the squared log error.
This function computes the squared log error between two numbers,
or element-wise between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted
### Response:
def sle(actual, predicted):
"""
Computes the squared log error.
This function computes the squared log error between two numbers,
or for element between a pair of lists or numpy arrays.
Parameters
----------
actual : int, float, list of numbers, numpy array
The ground truth value
predicted : same type as actual
The predicted value
Returns
-------
score : double or list of doubles
The squared log error between actual and predicted
"""
return (np.power(np.log(np.array(actual)+1) -
np.log(np.array(predicted)+1), 2)) |
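A small worked example of the element-wise formula (log(actual + 1) - log(predicted + 1))**2, with numbers that can be checked by hand:
# actual=[3, 9], predicted=[4, 7]:
# (ln 4 - ln 5)**2 ~ 0.0498 and (ln 10 - ln 8)**2 ~ 0.0498
print(sle([3, 9], [4, 7]))  # -> array([0.0498..., 0.0498...])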
def _set_rpc(self, rpc_type: str) -> None:
"""
Sets rpc based on the type
:param rpc_type: The type of connection: like infura, ganache, localhost
:return:
"""
if rpc_type == "infura":
self.set_api_rpc_infura()
elif rpc_type == "localhost":
self.set_api_rpc_localhost()
else:
self.set_api_rpc(rpc_type) | Sets rpc based on the type
:param rpc_type: The type of connection: like infura, ganache, localhost
:return: | Below is the instruction that describes the task:
### Input:
Sets rpc based on the type
:param rpc_type: The type of connection: like infura, ganache, localhost
:return:
### Response:
def _set_rpc(self, rpc_type: str) -> None:
"""
Sets rpc based on the type
:param rpc_type: The type of connection: like infura, ganache, localhost
:return:
"""
if rpc_type == "infura":
self.set_api_rpc_infura()
elif rpc_type == "localhost":
self.set_api_rpc_localhost()
else:
self.set_api_rpc(rpc_type) |
def str2chars(strings) -> numpy.ndarray:
"""Return |numpy.ndarray| containing the byte characters (second axis)
of all given strings (first axis).
>>> from hydpy.core.netcdftools import str2chars
>>> str2chars(['zeros', 'ones'])
array([[b'z', b'e', b'r', b'o', b's'],
[b'o', b'n', b'e', b's', b'']],
dtype='|S1')
>>> str2chars([])
array([], shape=(0, 0),
dtype='|S1')
"""
maxlen = 0
for name in strings:
maxlen = max(maxlen, len(name))
# noinspection PyTypeChecker
chars = numpy.full(
(len(strings), maxlen), b'', dtype='|S1')
for idx, name in enumerate(strings):
for jdx, char in enumerate(name):
chars[idx, jdx] = char.encode('utf-8')
return chars | Return |numpy.ndarray| containing the byte characters (second axis)
of all given strings (first axis).
>>> from hydpy.core.netcdftools import str2chars
>>> str2chars(['zeros', 'ones'])
array([[b'z', b'e', b'r', b'o', b's'],
[b'o', b'n', b'e', b's', b'']],
dtype='|S1')
>>> str2chars([])
array([], shape=(0, 0),
dtype='|S1') | Below is the instruction that describes the task:
### Input:
Return |numpy.ndarray| containing the byte characters (second axis)
of all given strings (first axis).
>>> from hydpy.core.netcdftools import str2chars
>>> str2chars(['zeros', 'ones'])
array([[b'z', b'e', b'r', b'o', b's'],
[b'o', b'n', b'e', b's', b'']],
dtype='|S1')
>>> str2chars([])
array([], shape=(0, 0),
dtype='|S1')
### Response:
def str2chars(strings) -> numpy.ndarray:
"""Return |numpy.ndarray| containing the byte characters (second axis)
of all given strings (first axis).
>>> from hydpy.core.netcdftools import str2chars
>>> str2chars(['zeros', 'ones'])
array([[b'z', b'e', b'r', b'o', b's'],
[b'o', b'n', b'e', b's', b'']],
dtype='|S1')
>>> str2chars([])
array([], shape=(0, 0),
dtype='|S1')
"""
maxlen = 0
for name in strings:
maxlen = max(maxlen, len(name))
# noinspection PyTypeChecker
chars = numpy.full(
(len(strings), maxlen), b'', dtype='|S1')
for idx, name in enumerate(strings):
for jdx, char in enumerate(name):
chars[idx, jdx] = char.encode('utf-8')
return chars |
def receive_hardbounce_post(self, post_params):
"""
Hard bounce postbacks
"""
if isinstance(post_params, dict):
required_params = ['action', 'email', 'sig']
if not self.check_for_valid_postback_actions(required_params, post_params):
return False
else:
return False
if post_params['action'] != 'hardbounce':
return False
signature = post_params['sig']
post_params = post_params.copy()
del post_params['sig']
if signature != get_signature_hash(post_params, self.secret):
return False
# for sends
if 'send_id' in post_params:
send_id = post_params['send_id']
send_response = self.get_send(send_id)
if not send_response.is_ok():
return False
send_obj = send_response.get_body()
if not send_obj or 'email' not in send_obj:
return False
# for blasts
if 'blast_id' in post_params:
blast_id = post_params['blast_id']
blast_response = self.get_blast(blast_id)
if not blast_response.is_ok():
return False
blast_obj = blast_response.get_body()
if not blast_obj:
return False
return True | Hard bounce postbacks | Below is the instruction that describes the task:
### Input:
Hard bounce postbacks
### Response:
def receive_hardbounce_post(self, post_params):
"""
Hard bounce postbacks
"""
if isinstance(post_params, dict):
required_params = ['action', 'email', 'sig']
if not self.check_for_valid_postback_actions(required_params, post_params):
return False
else:
return False
if post_params['action'] != 'hardbounce':
return False
signature = post_params['sig']
post_params = post_params.copy()
del post_params['sig']
if signature != get_signature_hash(post_params, self.secret):
return False
# for sends
if 'send_id' in post_params:
send_id = post_params['send_id']
send_response = self.get_send(send_id)
if not send_response.is_ok():
return False
send_obj = send_response.get_body()
if not send_obj or 'email' not in send_obj:
return False
# for blasts
if 'blast_id' in post_params:
blast_id = post_params['blast_id']
blast_response = self.get_blast(blast_id)
if not blast_response.is_ok():
return False
blast_obj = blast_response.get_body()
if not blast_obj:
return False
return True |
def load(cls, webfinger, pypump):
""" Load JSON from disk into store object """
filename = cls.get_filename()
if os.path.isfile(filename):
data = open(filename).read()
data = json.loads(data)
store = cls(data, filename=filename)
else:
store = cls(filename=filename)
store.prefix = webfinger
return store | Load JSON from disk into store object | Below is the instruction that describes the task:
### Input:
Load JSON from disk into store object
### Response:
def load(cls, webfinger, pypump):
""" Load JSON from disk into store object """
filename = cls.get_filename()
if os.path.isfile(filename):
data = open(filename).read()
data = json.loads(data)
store = cls(data, filename=filename)
else:
store = cls(filename=filename)
store.prefix = webfinger
return store |
def sk_log_loss(y_true: Union[List[List[float]], List[List[int]], np.ndarray],
y_predicted: Union[List[List[float]], List[List[int]], np.ndarray]) -> float:
"""
Calculates log loss.
Args:
y_true: list or array of true values
y_predicted: list or array of predicted values
Returns:
Log loss
"""
return log_loss(y_true, y_predicted) | Calculates log loss.
Args:
y_true: list or array of true values
y_predicted: list or array of predicted values
Returns:
Log loss | Below is the instruction that describes the task:
### Input:
Calculates log loss.
Args:
y_true: list or array of true values
y_predicted: list or array of predicted values
Returns:
Log loss
### Response:
def sk_log_loss(y_true: Union[List[List[float]], List[List[int]], np.ndarray],
y_predicted: Union[List[List[float]], List[List[int]], np.ndarray]) -> float:
"""
Calculates log loss.
Args:
y_true: list or array of true values
y_predicted: list or array of predicted values
Returns:
Log loss
"""
return log_loss(y_true, y_predicted) |
def from_raw(self, raw: RawScalar) -> Optional[bool]:
"""Override superclass method."""
if isinstance(raw, bool):
return raw | Override superclass method. | Below is the instruction that describes the task:
### Input:
Override superclass method.
### Response:
def from_raw(self, raw: RawScalar) -> Optional[bool]:
"""Override superclass method."""
if isinstance(raw, bool):
return raw |
def simple_cnn(actns:Collection[int], kernel_szs:Collection[int]=None,
strides:Collection[int]=None, bn=False) -> nn.Sequential:
"CNN with `conv_layer` defined by `actns`, `kernel_szs` and `strides`, plus batchnorm if `bn`."
nl = len(actns)-1
kernel_szs = ifnone(kernel_szs, [3]*nl)
strides = ifnone(strides , [2]*nl)
layers = [conv_layer(actns[i], actns[i+1], kernel_szs[i], stride=strides[i],
norm_type=(NormType.Batch if bn and i<(len(strides)-1) else None)) for i in range_of(strides)]
layers.append(PoolFlatten())
return nn.Sequential(*layers) | CNN with `conv_layer` defined by `actns`, `kernel_szs` and `strides`, plus batchnorm if `bn`. | Below is the instruction that describes the task:
### Input:
CNN with `conv_layer` defined by `actns`, `kernel_szs` and `strides`, plus batchnorm if `bn`.
### Response:
def simple_cnn(actns:Collection[int], kernel_szs:Collection[int]=None,
strides:Collection[int]=None, bn=False) -> nn.Sequential:
"CNN with `conv_layer` defined by `actns`, `kernel_szs` and `strides`, plus batchnorm if `bn`."
nl = len(actns)-1
kernel_szs = ifnone(kernel_szs, [3]*nl)
strides = ifnone(strides , [2]*nl)
layers = [conv_layer(actns[i], actns[i+1], kernel_szs[i], stride=strides[i],
norm_type=(NormType.Batch if bn and i<(len(strides)-1) else None)) for i in range_of(strides)]
layers.append(PoolFlatten())
return nn.Sequential(*layers) |
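Usage sketch in the library's own conventions: three stride-2 conv layers take 3 input channels down to an 8-way output, and the pooling flatten collapses the spatial dimensions, so a (4, 3, 32, 32) batch comes out with shape (4, 8). The input size is illustrative.
import torch

model = simple_cnn(actns=(3, 16, 32, 8), kernel_szs=(3, 3, 3), strides=(2, 2, 2), bn=True)
out = model(torch.randn(4, 3, 32, 32))  # out.shape == (4, 8)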
def eval_input(self, expr):
"""eval_input: testlist NEWLINE* ENDMARKER"""
return ast.Expression(body=[expr], loc=expr.loc) | eval_input: testlist NEWLINE* ENDMARKER | Below is the instruction that describes the task:
### Input:
eval_input: testlist NEWLINE* ENDMARKER
### Response:
def eval_input(self, expr):
"""eval_input: testlist NEWLINE* ENDMARKER"""
return ast.Expression(body=[expr], loc=expr.loc) |
def from_file(cls,
source,
distance_weights=None,
merge_same_words=False,
group_marker_opening='<<',
group_marker_closing='>>'):
"""
Read a string from a file and derive a ``Graph`` from it.
This is a convenience function for opening a file and passing its
contents to ``Graph.from_string()`` (see that for more detail)
Args:
source (str): the file to read and derive the graph from
distance_weights (dict): dict of relative indices corresponding
with word weights. See ``Graph.from_string`` for more detail.
merge_same_words (bool): whether nodes which have the same value
should be merged or not.
group_marker_opening (str): The string used to mark the beginning
of word groups.
group_marker_closing (str): The string used to mark the end
of word groups.
Returns: Graph
Example:
>>> graph = Graph.from_file('cage.txt') # doctest: +SKIP
>>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP
'poetry i have nothing to say and i'
"""
source_string = open(source, 'r').read()
return cls.from_string(source_string,
distance_weights,
merge_same_words,
group_marker_opening=group_marker_opening,
group_marker_closing=group_marker_closing) | Read a string from a file and derive a ``Graph`` from it.
This is a convenience function for opening a file and passing its
contents to ``Graph.from_string()`` (see that for more detail)
Args:
source (str): the file to read and derive the graph from
distance_weights (dict): dict of relative indices corresponding
with word weights. See ``Graph.from_string`` for more detail.
merge_same_words (bool): whether nodes which have the same value
should be merged or not.
group_marker_opening (str): The string used to mark the beginning
of word groups.
group_marker_closing (str): The string used to mark the end
of word groups.
Returns: Graph
Example:
>>> graph = Graph.from_file('cage.txt') # doctest: +SKIP
>>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP
'poetry i have nothing to say and i' | Below is the instruction that describes the task:
### Input:
Read a string from a file and derive a ``Graph`` from it.
This is a convenience function for opening a file and passing its
contents to ``Graph.from_string()`` (see that for more detail)
Args:
source (str): the file to read and derive the graph from
distance_weights (dict): dict of relative indices corresponding
with word weights. See ``Graph.from_string`` for more detail.
merge_same_words (bool): whether nodes which have the same value
should be merged or not.
group_marker_opening (str): The string used to mark the beginning
of word groups.
group_marker_closing (str): The string used to mark the end
of word groups.
Returns: Graph
Example:
>>> graph = Graph.from_file('cage.txt') # doctest: +SKIP
>>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP
'poetry i have nothing to say and i'
### Response:
def from_file(cls,
source,
distance_weights=None,
merge_same_words=False,
group_marker_opening='<<',
group_marker_closing='>>'):
"""
Read a string from a file and derive a ``Graph`` from it.
This is a convenience function for opening a file and passing its
contents to ``Graph.from_string()`` (see that for more detail)
Args:
source (str): the file to read and derive the graph from
distance_weights (dict): dict of relative indices corresponding
with word weights. See ``Graph.from_string`` for more detail.
merge_same_words (bool): whether nodes which have the same value
should be merged or not.
group_marker_opening (str): The string used to mark the beginning
of word groups.
group_marker_closing (str): The string used to mark the end
of word groups.
Returns: Graph
Example:
>>> graph = Graph.from_file('cage.txt') # doctest: +SKIP
>>> ' '.join(graph.pick().value for i in range(8)) # doctest: +SKIP
'poetry i have nothing to say and i'
"""
source_string = open(source, 'r').read()
return cls.from_string(source_string,
distance_weights,
merge_same_words,
group_marker_opening=group_marker_opening,
group_marker_closing=group_marker_closing) |
def getActiveCompactions(self, login, tserver):
"""
Parameters:
- login
- tserver
"""
self.send_getActiveCompactions(login, tserver)
return self.recv_getActiveCompactions() | Parameters:
- login
- tserver | Below is the instruction that describes the task:
### Input:
Parameters:
- login
- tserver
### Response:
def getActiveCompactions(self, login, tserver):
"""
Parameters:
- login
- tserver
"""
self.send_getActiveCompactions(login, tserver)
return self.recv_getActiveCompactions() |
def _update(self):
"""Initialize the 1D interpolation."""
if self.strains.size and self.strains.size == self.values.size:
x = np.log(self.strains)
y = self.values
if x.size < 4:
self._interpolater = interp1d(
x,
y,
'linear',
bounds_error=False,
fill_value=(y[0], y[-1]))
else:
self._interpolater = interp1d(
x,
y,
'cubic',
bounds_error=False,
fill_value=(y[0], y[-1])) | Initialize the 1D interpolation. | Below is the instruction that describes the task:
### Input:
Initialize the 1D interpolation.
### Response:
def _update(self):
"""Initialize the 1D interpolation."""
if self.strains.size and self.strains.size == self.values.size:
x = np.log(self.strains)
y = self.values
if x.size < 4:
self._interpolater = interp1d(
x,
y,
'linear',
bounds_error=False,
fill_value=(y[0], y[-1]))
else:
self._interpolater = interp1d(
x,
y,
'cubic',
bounds_error=False,
fill_value=(y[0], y[-1])) |
def RenderJson(self, pretty=False):
"""
Render a Tropo object into a Json string.
"""
steps = self._steps
topdict = {}
topdict['tropo'] = steps
if pretty:
try:
json = jsonlib.dumps(topdict, indent=4, sort_keys=False)
except TypeError:
json = jsonlib.dumps(topdict)
else:
json = jsonlib.dumps(topdict)
return json | Render a Tropo object into a Json string. | Below is the instruction that describes the task:
### Input:
Render a Tropo object into a Json string.
### Response:
def RenderJson(self, pretty=False):
"""
Render a Tropo object into a Json string.
"""
steps = self._steps
topdict = {}
topdict['tropo'] = steps
if pretty:
try:
json = jsonlib.dumps(topdict, indent=4, sort_keys=False)
except TypeError:
json = jsonlib.dumps(topdict)
else:
json = jsonlib.dumps(topdict)
return json |
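A usage sketch assuming the surrounding Tropo helper class and one of its step-building methods (say is such a method in the Tropo web API helpers; treat it as an assumption here):
t = Tropo()
t.say('Hello from Tropo')         # queues a 'say' step
print(t.RenderJson(pretty=True))  # emits {"tropo": [...]} as indented JSON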
def get(cls, id_):
"""Return a workflow object from id."""
with db.session.no_autoflush:
query = cls.dbmodel.query.filter_by(id=id_)
try:
model = query.one()
except NoResultFound:
raise WorkflowsMissingObject("No object for for id {0}".format(
id_
))
return cls(model) | Return a workflow object from id. | Below is the instruction that describes the task:
### Input:
Return a workflow object from id.
### Response:
def get(cls, id_):
"""Return a workflow object from id."""
with db.session.no_autoflush:
query = cls.dbmodel.query.filter_by(id=id_)
try:
model = query.one()
except NoResultFound:
raise WorkflowsMissingObject("No object for for id {0}".format(
id_
))
return cls(model) |
def configure(self, rhsm=None, repositories=None):
"""This method will configure the host0 and run the hypervisor."""
if rhsm is not None:
self.rhsm_register(rhsm)
if repositories is not None:
self.enable_repositories(repositories)
self.create_stack_user()
self.deploy_hypervisor() | This method will configure the host0 and run the hypervisor. | Below is the instruction that describes the task:
### Input:
This method will configure the host0 and run the hypervisor.
### Response:
def configure(self, rhsm=None, repositories=None):
"""This method will configure the host0 and run the hypervisor."""
if rhsm is not None:
self.rhsm_register(rhsm)
if repositories is not None:
self.enable_repositories(repositories)
self.create_stack_user()
self.deploy_hypervisor() |
def word_similarity_explorer_gensim(corpus,
category,
target_term,
category_name=None,
not_category_name=None,
word2vec=None,
alpha=0.01,
max_p_val=0.1,
term_significance=None,
**kwargs):
'''
Parameters
----------
corpus : Corpus
Corpus to use.
category : str
Name of category column as it appears in original data frame.
category_name : str
Name of category to use. E.g., "5-star reviews."
not_category_name : str
Name of everything that isn't in category. E.g., "Below 5-star reviews".
target_term : str
Word or phrase for semantic similarity comparison
word2vec : word2vec.Word2Vec
Gensim-compatible Word2Vec model of lower-cased corpus. If None, one
will be trained using Word2VecFromParsedCorpus(corpus).train()
alpha : float, default = 0.01
Uniform Dirichlet prior for p-value calculation
max_p_val : float, default = 0.1
Max p-value to use to find the set of terms for similarity calculation
term_significance : TermSignificance
Significance finder
Remaining arguments are from `produce_scattertext_explorer`.
Returns
-------
str, html of visualization
'''
if word2vec is None:
word2vec = Word2VecFromParsedCorpus(corpus).train()
if term_significance is None:
term_significance = LogOddsRatioUninformativeDirichletPrior(alpha)
assert issubclass(type(term_significance), TermSignificance)
scores = []
for tok in corpus._term_idx_store._i2val:
try:
scores.append(word2vec.similarity(target_term, tok.replace(' ', '_')))
except:
try:
scores.append(np.mean([word2vec.similarity(target_term, tok_part)
for tok_part in tok.split()]))
except:
scores.append(0)
scores = np.array(scores)
return produce_scattertext_explorer(corpus,
category,
category_name,
not_category_name,
scores=scores,
sort_by_dist=False,
reverse_sort_scores_for_not_category=False,
word_vec_use_p_vals=True,
term_significance=term_significance,
max_p_val=max_p_val,
p_value_colors=True,
**kwargs) | Parameters
----------
corpus : Corpus
Corpus to use.
category : str
Name of category column as it appears in original data frame.
category_name : str
Name of category to use. E.g., "5-star reviews."
not_category_name : str
Name of everything that isn't in category. E.g., "Below 5-star reviews".
target_term : str
Word or phrase for semantic similarity comparison
word2vec : word2vec.Word2Vec
Gensim-compatible Word2Vec model of lower-cased corpus. If None, one
will be trained using Word2VecFromParsedCorpus(corpus).train()
alpha : float, default = 0.01
Uniform Dirichlet prior for p-value calculation
max_p_val : float, default = 0.1
Max p-value to use to find the set of terms for similarity calculation
term_significance : TermSignificance
Significance finder
Remaining arguments are from `produce_scattertext_explorer`.
Returns
-------
str, html of visualization | Below is the the instruction that describes the task:
### Input:
Parameters
----------
corpus : Corpus
Corpus to use.
category : str
Name of category column as it appears in original data frame.
category_name : str
Name of category to use. E.g., "5-star reviews."
not_category_name : str
Name of everything that isn't in category. E.g., "Below 5-star reviews".
target_term : str
Word or phrase for semantic similarity comparison
word2vec : word2vec.Word2Vec
        Gensim-compatible Word2Vec model of lower-cased corpus. If none, one
        will be trained using Word2VecFromParsedCorpus(corpus).train()
alpha : float, default = 0.01
Uniform dirichlet prior for p-value calculation
max_p_val : float, default = 0.1
Max p-val to use find set of terms for similarity calculation
term_significance : TermSignificance
Significance finder
Remaining arguments are from `produce_scattertext_explorer`.
Returns
-------
str, html of visualization
### Response:
def word_similarity_explorer_gensim(corpus,
category,
target_term,
category_name=None,
not_category_name=None,
word2vec=None,
alpha=0.01,
max_p_val=0.1,
term_significance=None,
**kwargs):
'''
Parameters
----------
corpus : Corpus
Corpus to use.
category : str
Name of category column as it appears in original data frame.
category_name : str
Name of category to use. E.g., "5-star reviews."
not_category_name : str
Name of everything that isn't in category. E.g., "Below 5-star reviews".
target_term : str
Word or phrase for semantic similarity comparison
word2vec : word2vec.Word2Vec
        Gensim-compatible Word2Vec model of lower-cased corpus. If none, one
        will be trained using Word2VecFromParsedCorpus(corpus).train()
alpha : float, default = 0.01
Uniform dirichlet prior for p-value calculation
max_p_val : float, default = 0.1
Max p-val to use find set of terms for similarity calculation
term_significance : TermSignificance
Significance finder
Remaining arguments are from `produce_scattertext_explorer`.
Returns
-------
str, html of visualization
'''
if word2vec is None:
word2vec = Word2VecFromParsedCorpus(corpus).train()
if term_significance is None:
term_significance = LogOddsRatioUninformativeDirichletPrior(alpha)
assert issubclass(type(term_significance), TermSignificance)
scores = []
for tok in corpus._term_idx_store._i2val:
try:
scores.append(word2vec.similarity(target_term, tok.replace(' ', '_')))
except:
try:
scores.append(np.mean([word2vec.similarity(target_term, tok_part)
for tok_part in tok.split()]))
except:
scores.append(0)
scores = np.array(scores)
return produce_scattertext_explorer(corpus,
category,
category_name,
not_category_name,
scores=scores,
sort_by_dist=False,
reverse_sort_scores_for_not_category=False,
word_vec_use_p_vals=True,
term_significance=term_significance,
max_p_val=max_p_val,
p_value_colors=True,
**kwargs) |
def _end_sessions(self, session_ids):
"""Send endSessions command(s) with the given session ids."""
try:
# Use SocketInfo.command directly to avoid implicitly creating
# another session.
with self._socket_for_reads(
ReadPreference.PRIMARY_PREFERRED,
None) as (sock_info, slave_ok):
if not sock_info.supports_sessions:
return
for i in range(0, len(session_ids), common._MAX_END_SESSIONS):
spec = SON([('endSessions',
session_ids[i:i + common._MAX_END_SESSIONS])])
sock_info.command(
'admin', spec, slave_ok=slave_ok, client=self)
except PyMongoError:
# Drivers MUST ignore any errors returned by the endSessions
# command.
pass | Send endSessions command(s) with the given session ids. | Below is the the instruction that describes the task:
### Input:
Send endSessions command(s) with the given session ids.
### Response:
def _end_sessions(self, session_ids):
"""Send endSessions command(s) with the given session ids."""
try:
# Use SocketInfo.command directly to avoid implicitly creating
# another session.
with self._socket_for_reads(
ReadPreference.PRIMARY_PREFERRED,
None) as (sock_info, slave_ok):
if not sock_info.supports_sessions:
return
for i in range(0, len(session_ids), common._MAX_END_SESSIONS):
spec = SON([('endSessions',
session_ids[i:i + common._MAX_END_SESSIONS])])
sock_info.command(
'admin', spec, slave_ok=slave_ok, client=self)
except PyMongoError:
# Drivers MUST ignore any errors returned by the endSessions
# command.
pass |
def get_resource(self):
'''use the user provided endpoint and keys (from environment) to
connect to the resource. We can share the aws environment
variables:
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html
'''
# If base is not defined, assume using aws client
if self.base != None:
# s3.ServiceResource()
self.s3 = boto3.resource('s3',
endpoint_url=self.base,
aws_access_key_id=self._id,
aws_secret_access_key=self._key,
config=boto3.session.Config(signature_version=self._signature))
else:
# We will need to test options for reading credentials here
self.s3 = boto3.client('s3') | use the user provided endpoint and keys (from environment) to
connect to the resource. We can share the aws environment
variables:
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html | Below is the the instruction that describes the task:
### Input:
use the user provided endpoint and keys (from environment) to
connect to the resource. We can share the aws environment
variables:
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html
### Response:
def get_resource(self):
'''use the user provided endpoint and keys (from environment) to
connect to the resource. We can share the aws environment
variables:
AWS_ACCESS_KEY_ID
AWS_SECRET_ACCESS_KEY
https://docs.aws.amazon.com/cli/latest/userguide/cli-configure-envvars.html
'''
# If base is not defined, assume using aws client
if self.base != None:
# s3.ServiceResource()
self.s3 = boto3.resource('s3',
endpoint_url=self.base,
aws_access_key_id=self._id,
aws_secret_access_key=self._key,
config=boto3.session.Config(signature_version=self._signature))
else:
# We will need to test options for reading credentials here
self.s3 = boto3.client('s3') |
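For context, a minimal standalone sketch of the same connection logic as the record above. The endpoint URL and credentials here are illustrative assumptions; in real use the keys come from the AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY environment variables, and the signature version matches whatever the target service expects.

import boto3

endpoint = 'https://s3.example.org'  # assumed custom endpoint; drop it to talk to AWS itself
s3 = boto3.resource(
    's3',
    endpoint_url=endpoint,
    aws_access_key_id='AKIAEXAMPLE',         # normally read from the environment
    aws_secret_access_key='example-secret',  # normally read from the environment
    config=boto3.session.Config(signature_version='s3v4'),
)
# Without a custom endpoint, the plain client used in the else-branch is enough:
# s3 = boto3.client('s3')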
def _call_init_upload(file_name, file_size, metadata, tags, project, samples_resource):
"""Call init_upload at the One Codex API and return data used to upload the file.
Parameters
----------
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
file_size : `integer`
Accurate size of file to be uploaded, in bytes.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
samples_resource : `onecodex.models.Samples`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Returns
-------
`dict`
Contains, at a minimum, 'upload_url' and 'sample_id'. Should also contain various additional
data used to upload the file to fastx-proxy, a user's S3 bucket, or an intermediate bucket.
"""
upload_args = {
"filename": file_name,
"size": file_size,
"upload_type": "standard", # this is multipart form data
}
if metadata:
# format metadata keys as snake case
new_metadata = {}
for md_key, md_val in metadata.items():
new_metadata[snake_case(md_key)] = md_val
upload_args["metadata"] = new_metadata
if tags:
upload_args["tags"] = tags
if project:
upload_args["project"] = getattr(project, "id", project)
try:
upload_info = samples_resource.init_upload(upload_args)
except requests.exceptions.HTTPError as e:
raise_api_error(e.response, state="init")
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
return upload_info | Call init_upload at the One Codex API and return data used to upload the file.
Parameters
----------
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
file_size : `integer`
Accurate size of file to be uploaded, in bytes.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
samples_resource : `onecodex.models.Samples`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Returns
-------
`dict`
Contains, at a minimum, 'upload_url' and 'sample_id'. Should also contain various additional
data used to upload the file to fastx-proxy, a user's S3 bucket, or an intermediate bucket. | Below is the the instruction that describes the task:
### Input:
Call init_upload at the One Codex API and return data used to upload the file.
Parameters
----------
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
file_size : `integer`
Accurate size of file to be uploaded, in bytes.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
samples_resource : `onecodex.models.Samples`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Returns
-------
`dict`
Contains, at a minimum, 'upload_url' and 'sample_id'. Should also contain various additional
data used to upload the file to fastx-proxy, a user's S3 bucket, or an intermediate bucket.
### Response:
def _call_init_upload(file_name, file_size, metadata, tags, project, samples_resource):
"""Call init_upload at the One Codex API and return data used to upload the file.
Parameters
----------
file_name : `string`
The file_name you wish to associate this fastx file with at One Codex.
file_size : `integer`
Accurate size of file to be uploaded, in bytes.
metadata : `dict`, optional
tags : `list`, optional
project : `string`, optional
UUID of project to associate this sample with.
samples_resource : `onecodex.models.Samples`
Wrapped potion-client object exposing `init_upload` and `confirm_upload` routes to mainline.
Returns
-------
`dict`
Contains, at a minimum, 'upload_url' and 'sample_id'. Should also contain various additional
data used to upload the file to fastx-proxy, a user's S3 bucket, or an intermediate bucket.
"""
upload_args = {
"filename": file_name,
"size": file_size,
"upload_type": "standard", # this is multipart form data
}
if metadata:
# format metadata keys as snake case
new_metadata = {}
for md_key, md_val in metadata.items():
new_metadata[snake_case(md_key)] = md_val
upload_args["metadata"] = new_metadata
if tags:
upload_args["tags"] = tags
if project:
upload_args["project"] = getattr(project, "id", project)
try:
upload_info = samples_resource.init_upload(upload_args)
except requests.exceptions.HTTPError as e:
raise_api_error(e.response, state="init")
except requests.exceptions.ConnectionError:
raise_connectivity_error(file_name)
return upload_info |
def start(self):
"""
Extension of Pusher.connect() method, which registers all callbacks with
the relevant channels, before initializing a connection.
:return:
"""
super(BitstampWSS, self).start()
self.pusher = pusherclient.Pusher(self.addr, **self.__pusher_options)
self.pusher.connection.bind('pusher:connection_established',
self._register_bindings)
self.pusher.connect() | Extension of Pusher.connect() method, which registers all callbacks with
the relevant channels, before initializing a connection.
:return: | Below is the the instruction that describes the task:
### Input:
Extension of Pusher.connect() method, which registers all callbacks with
the relevant channels, before initializing a connection.
:return:
### Response:
def start(self):
"""
Extension of Pusher.connect() method, which registers all callbacks with
the relevant channels, before initializing a connection.
:return:
"""
super(BitstampWSS, self).start()
self.pusher = pusherclient.Pusher(self.addr, **self.__pusher_options)
self.pusher.connection.bind('pusher:connection_established',
self._register_bindings)
self.pusher.connect() |
def run_attack(sess, model, x, y, attack, attack_params, batch_size=None,
devices=None, feed=None, pass_y=False):
"""
Run attack on every example in a dataset.
:param sess: tf.Session
:param model: cleverhans.model.Model
:param x: numpy array containing input examples (e.g. MNIST().x_test )
:param y: numpy array containing example labels (e.g. MNIST().y_test )
:param attack: cleverhans.attack.Attack
:param attack_params: dictionary
passed to attack.generate as keyword arguments.
:param batch_size: Number of examples to use in a single evaluation batch.
If not specified, this function will use a reasonable guess and
may run out of memory.
When choosing the batch size, keep in mind that the batch will
be divided up evenly among available devices. If you can fit 128
examples in memory on one GPU and you have 8 GPUs, you probably
want to use a batch size of 1024 (unless a different batch size
runs faster with the ops you are using, etc.)
:param devices: An optional list of string device names to use.
If not specified, this function will use all visible GPUs.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param pass_y: bool. If true pass 'y' to `attack.generate`
:return:
an ndarray of bools indicating whether each example is correct
an ndarray of probabilities assigned to the prediction for each example
"""
_check_x(x)
_check_y(y)
factory = _AttackFactory(model, attack, attack_params, pass_y)
out, = batch_eval_multi_worker(sess, factory, [x, y], batch_size=batch_size,
devices=devices, feed=feed)
return out | Run attack on every example in a dataset.
:param sess: tf.Session
:param model: cleverhans.model.Model
:param x: numpy array containing input examples (e.g. MNIST().x_test )
:param y: numpy array containing example labels (e.g. MNIST().y_test )
:param attack: cleverhans.attack.Attack
:param attack_params: dictionary
passed to attack.generate as keyword arguments.
:param batch_size: Number of examples to use in a single evaluation batch.
If not specified, this function will use a reasonable guess and
may run out of memory.
When choosing the batch size, keep in mind that the batch will
be divided up evenly among available devices. If you can fit 128
examples in memory on one GPU and you have 8 GPUs, you probably
want to use a batch size of 1024 (unless a different batch size
runs faster with the ops you are using, etc.)
:param devices: An optional list of string device names to use.
If not specified, this function will use all visible GPUs.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param pass_y: bool. If true pass 'y' to `attack.generate`
:return:
an ndarray of bools indicating whether each example is correct
an ndarray of probabilities assigned to the prediction for each example | Below is the the instruction that describes the task:
### Input:
Run attack on every example in a dataset.
:param sess: tf.Session
:param model: cleverhans.model.Model
:param x: numpy array containing input examples (e.g. MNIST().x_test )
:param y: numpy array containing example labels (e.g. MNIST().y_test )
:param attack: cleverhans.attack.Attack
:param attack_params: dictionary
passed to attack.generate as keyword arguments.
:param batch_size: Number of examples to use in a single evaluation batch.
If not specified, this function will use a reasonable guess and
may run out of memory.
When choosing the batch size, keep in mind that the batch will
be divided up evenly among available devices. If you can fit 128
examples in memory on one GPU and you have 8 GPUs, you probably
want to use a batch size of 1024 (unless a different batch size
runs faster with the ops you are using, etc.)
:param devices: An optional list of string device names to use.
If not specified, this function will use all visible GPUs.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param pass_y: bool. If true pass 'y' to `attack.generate`
:return:
an ndarray of bools indicating whether each example is correct
an ndarray of probabilities assigned to the prediction for each example
### Response:
def run_attack(sess, model, x, y, attack, attack_params, batch_size=None,
devices=None, feed=None, pass_y=False):
"""
Run attack on every example in a dataset.
:param sess: tf.Session
:param model: cleverhans.model.Model
:param x: numpy array containing input examples (e.g. MNIST().x_test )
:param y: numpy array containing example labels (e.g. MNIST().y_test )
:param attack: cleverhans.attack.Attack
:param attack_params: dictionary
passed to attack.generate as keyword arguments.
:param batch_size: Number of examples to use in a single evaluation batch.
If not specified, this function will use a reasonable guess and
may run out of memory.
When choosing the batch size, keep in mind that the batch will
be divided up evenly among available devices. If you can fit 128
examples in memory on one GPU and you have 8 GPUs, you probably
want to use a batch size of 1024 (unless a different batch size
runs faster with the ops you are using, etc.)
:param devices: An optional list of string device names to use.
If not specified, this function will use all visible GPUs.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param pass_y: bool. If true pass 'y' to `attack.generate`
:return:
an ndarray of bools indicating whether each example is correct
an ndarray of probabilities assigned to the prediction for each example
"""
_check_x(x)
_check_y(y)
factory = _AttackFactory(model, attack, attack_params, pass_y)
out, = batch_eval_multi_worker(sess, factory, [x, y], batch_size=batch_size,
devices=devices, feed=feed)
return out |
def _get_provitem_from_args(xs):
"""Retrieve processed item from list of input arguments.
"""
for i, x in enumerate(xs):
if _has_provenance(x):
return i, x
return -1, None | Retrieve processed item from list of input arguments. | Below is the the instruction that describes the task:
### Input:
Retrieve processed item from list of input arguments.
### Response:
def _get_provitem_from_args(xs):
"""Retrieve processed item from list of input arguments.
"""
for i, x in enumerate(xs):
if _has_provenance(x):
return i, x
return -1, None |
def merge(self, subordinate_graph):
"""
merge rules:
00 + 00 == 00 00 + 0B == 0B
0A + 00 == 0A 0A + 0B == 0A
A0 + 00 == A0 A0 + 0B == AB
AA + 00 == AA AA + 0B == AB
00 + B0 == B0 00 + BB == BB
0A + B0 == BA 0A + BB == BA
A0 + B0 == A0 A0 + BB == AB
AA + B0 == AA AA + BB == AA
"""
if not isinstance(subordinate_graph, Graph):
raise Exception("Graph is expected to only merge with a Graph.")
subordinate_nodes = subordinate_graph.get_nodes()
merge_results = []
for node_id in subordinate_nodes:
node = subordinate_nodes[node_id]
merge_results.append((
node.get_id(),
self.insert_node(node)
))
# TODO perhaps throw exception if merge was unsuccessful
return merge_results | merge rules:
00 + 00 == 00 00 + 0B == 0B
0A + 00 == 0A 0A + 0B == 0A
A0 + 00 == A0 A0 + 0B == AB
AA + 00 == AA AA + 0B == AB
00 + B0 == B0 00 + BB == BB
0A + B0 == BA 0A + BB == BA
A0 + B0 == A0 A0 + BB == AB
AA + B0 == AA AA + BB == AA | Below is the the instruction that describes the task:
### Input:
merge rules:
00 + 00 == 00 00 + 0B == 0B
0A + 00 == 0A 0A + 0B == 0A
A0 + 00 == A0 A0 + 0B == AB
AA + 00 == AA AA + 0B == AB
00 + B0 == B0 00 + BB == BB
0A + B0 == BA 0A + BB == BA
A0 + B0 == A0 A0 + BB == AB
AA + B0 == AA AA + BB == AA
### Response:
def merge(self, subordinate_graph):
"""
merge rules:
00 + 00 == 00 00 + 0B == 0B
0A + 00 == 0A 0A + 0B == 0A
A0 + 00 == A0 A0 + 0B == AB
AA + 00 == AA AA + 0B == AB
00 + B0 == B0 00 + BB == BB
0A + B0 == BA 0A + BB == BA
A0 + B0 == A0 A0 + BB == AB
AA + B0 == AA AA + BB == AA
"""
if not isinstance(subordinate_graph, Graph):
raise Exception("Graph is expected to only merge with a Graph.")
subordinate_nodes = subordinate_graph.get_nodes()
merge_results = []
for node_id in subordinate_nodes:
node = subordinate_nodes[node_id]
merge_results.append((
node.get_id(),
self.insert_node(node)
))
# TODO perhaps throw exception if merge was unsuccessful
return merge_results |
def handle_msec_timestamp(self, m, master):
'''special handling for MAVLink packets with a time_boot_ms field'''
if m.get_type() == 'GLOBAL_POSITION_INT':
# this is fix time, not boot time
return
msec = m.time_boot_ms
if msec + 30000 < master.highest_msec:
self.say('Time has wrapped')
print('Time has wrapped', msec, master.highest_msec)
self.status.highest_msec = msec
for mm in self.mpstate.mav_master:
mm.link_delayed = False
mm.highest_msec = msec
return
# we want to detect when a link is delayed
master.highest_msec = msec
if msec > self.status.highest_msec:
self.status.highest_msec = msec
if msec < self.status.highest_msec and len(self.mpstate.mav_master) > 1 and self.mpstate.settings.checkdelay:
master.link_delayed = True
else:
master.link_delayed = False | special handling for MAVLink packets with a time_boot_ms field | Below is the the instruction that describes the task:
### Input:
special handling for MAVLink packets with a time_boot_ms field
### Response:
def handle_msec_timestamp(self, m, master):
'''special handling for MAVLink packets with a time_boot_ms field'''
if m.get_type() == 'GLOBAL_POSITION_INT':
# this is fix time, not boot time
return
msec = m.time_boot_ms
if msec + 30000 < master.highest_msec:
self.say('Time has wrapped')
print('Time has wrapped', msec, master.highest_msec)
self.status.highest_msec = msec
for mm in self.mpstate.mav_master:
mm.link_delayed = False
mm.highest_msec = msec
return
# we want to detect when a link is delayed
master.highest_msec = msec
if msec > self.status.highest_msec:
self.status.highest_msec = msec
if msec < self.status.highest_msec and len(self.mpstate.mav_master) > 1 and self.mpstate.settings.checkdelay:
master.link_delayed = True
else:
master.link_delayed = False |
def change_cell(self, x, y, ch, fg, bg):
"""Change cell in position (x;y).
"""
self.console.draw_char(x, y, ch, fg, bg) | Change cell in position (x;y). | Below is the the instruction that describes the task:
### Input:
Change cell in position (x;y).
### Response:
def change_cell(self, x, y, ch, fg, bg):
"""Change cell in position (x;y).
"""
self.console.draw_char(x, y, ch, fg, bg) |
def get_condition_value(self, operator, value):
"""
Gets the condition value based on the operator and value
:param operator: the condition operator name
:type operator: str
:param value: the value to be formatted based on the condition operator
:type value: object
:return: the comparison operator from the Where class's comparison_map
:rtype: str
"""
if operator in ('contains', 'icontains'):
value = '%{0}%'.format(value)
elif operator == 'startswith':
value = '{0}%'.format(value)
return value | Gets the condition value based on the operator and value
:param operator: the condition operator name
:type operator: str
:param value: the value to be formatted based on the condition operator
:type value: object
:return: the comparison operator from the Where class's comparison_map
:rtype: str | Below is the the instruction that describes the task:
### Input:
Gets the condition value based on the operator and value
:param operator: the condition operator name
:type operator: str
:param value: the value to be formatted based on the condition operator
:type value: object
:return: the comparison operator from the Where class's comparison_map
:rtype: str
### Response:
def get_condition_value(self, operator, value):
"""
Gets the condition value based on the operator and value
:param operator: the condition operator name
:type operator: str
:param value: the value to be formatted based on the condition operator
:type value: object
:return: the comparison operator from the Where class's comparison_map
:rtype: str
"""
if operator in ('contains', 'icontains'):
value = '%{0}%'.format(value)
elif operator == 'startswith':
value = '{0}%'.format(value)
return value |
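As a quick illustration of the operator-to-pattern mapping in the record above, a self-contained sketch (the function name is an arbitrary stand-in for the method, not part of the library):

def to_like_value(operator, value):
    # contains/icontains wrap the value in %, startswith appends a trailing %
    if operator in ('contains', 'icontains'):
        return '%{0}%'.format(value)
    if operator == 'startswith':
        return '{0}%'.format(value)
    return value

print(to_like_value('icontains', 'foo'))   # %foo%
print(to_like_value('startswith', 'foo'))  # foo%
print(to_like_value('exact', 'foo'))       # foo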
def _print_stats(cls, stats: Statistics, human_format_speed: bool=True):
'''Log the final statistics to the user.'''
time_length = datetime.timedelta(
seconds=int(stats.stop_time - stats.start_time)
)
file_size = wpull.string.format_size(stats.size)
if stats.bandwidth_meter.num_samples:
speed = stats.bandwidth_meter.speed()
if human_format_speed:
speed_size_str = wpull.string.format_size(speed)
else:
speed_size_str = '{:.1f} b'.format(speed * 8)
else:
speed_size_str = _('-- B')
_logger.info(_('FINISHED.'))
_logger.info(__(
_(
'Duration: {preformatted_timedelta}. '
'Speed: {preformatted_speed_size}/s.'
),
preformatted_timedelta=time_length,
preformatted_speed_size=speed_size_str,
))
_logger.info(__(
gettext.ngettext(
'Downloaded: {num_files} file, {preformatted_file_size}.',
'Downloaded: {num_files} files, {preformatted_file_size}.',
stats.files
),
num_files=stats.files,
preformatted_file_size=file_size
))
if stats.is_quota_exceeded:
_logger.info(_('Download quota exceeded.')) | Log the final statistics to the user. | Below is the the instruction that describes the task:
### Input:
Log the final statistics to the user.
### Response:
def _print_stats(cls, stats: Statistics, human_format_speed: bool=True):
'''Log the final statistics to the user.'''
time_length = datetime.timedelta(
seconds=int(stats.stop_time - stats.start_time)
)
file_size = wpull.string.format_size(stats.size)
if stats.bandwidth_meter.num_samples:
speed = stats.bandwidth_meter.speed()
if human_format_speed:
speed_size_str = wpull.string.format_size(speed)
else:
speed_size_str = '{:.1f} b'.format(speed * 8)
else:
speed_size_str = _('-- B')
_logger.info(_('FINISHED.'))
_logger.info(__(
_(
'Duration: {preformatted_timedelta}. '
'Speed: {preformatted_speed_size}/s.'
),
preformatted_timedelta=time_length,
preformatted_speed_size=speed_size_str,
))
_logger.info(__(
gettext.ngettext(
'Downloaded: {num_files} file, {preformatted_file_size}.',
'Downloaded: {num_files} files, {preformatted_file_size}.',
stats.files
),
num_files=stats.files,
preformatted_file_size=file_size
))
if stats.is_quota_exceeded:
_logger.info(_('Download quota exceeded.')) |
def format(self, formatter, link_resolver, output):
"""
Banana banana
"""
if not self.title and self.name:
title = os.path.splitext(self.name)[0]
self.title = os.path.basename(title).replace('-', ' ')
self.formatted_contents = u''
self.build_path = os.path.join(formatter.get_output_folder(self),
self.link.ref)
if self.ast:
out, diags = cmark.ast_to_html(self.ast, link_resolver)
for diag in diags:
warn(
diag.code,
message=diag.message,
filename=self.source_file or self.name)
self.formatted_contents += out
if not self.formatted_contents:
self.__format_page_comment(formatter, link_resolver)
self.output_attrs = defaultdict(lambda: defaultdict(dict))
formatter.prepare_page_attributes(self)
self.__format_symbols(formatter, link_resolver)
self.detailed_description =\
formatter.format_page(self)[0]
if output:
formatter.cache_page(self) | Banana banana | Below is the the instruction that describes the task:
### Input:
Banana banana
### Response:
def format(self, formatter, link_resolver, output):
"""
Banana banana
"""
if not self.title and self.name:
title = os.path.splitext(self.name)[0]
self.title = os.path.basename(title).replace('-', ' ')
self.formatted_contents = u''
self.build_path = os.path.join(formatter.get_output_folder(self),
self.link.ref)
if self.ast:
out, diags = cmark.ast_to_html(self.ast, link_resolver)
for diag in diags:
warn(
diag.code,
message=diag.message,
filename=self.source_file or self.name)
self.formatted_contents += out
if not self.formatted_contents:
self.__format_page_comment(formatter, link_resolver)
self.output_attrs = defaultdict(lambda: defaultdict(dict))
formatter.prepare_page_attributes(self)
self.__format_symbols(formatter, link_resolver)
self.detailed_description =\
formatter.format_page(self)[0]
if output:
formatter.cache_page(self) |
def send_cmd(self, command, connId='default'):
"""
Sends any command to FTP server. Returns server output.
Parameters:
- command - any valid command to be sent (invalid will result in exception).
- connId(optional) - connection identifier. By default equals 'default'
Example:
| send cmd | HELP |
"""
thisConn = self.__getConnection(connId)
outputMsg = ""
try:
outputMsg += str(thisConn.sendcmd(command))
except ftplib.all_errors as e:
raise FtpLibraryError(str(e))
if self.printOutput:
logger.info(outputMsg)
return outputMsg | Sends any command to FTP server. Returns server output.
Parameters:
- command - any valid command to be sent (invalid will result in exception).
- connId(optional) - connection identifier. By default equals 'default'
Example:
| send cmd | HELP | | Below is the the instruction that describes the task:
### Input:
Sends any command to FTP server. Returns server output.
Parameters:
- command - any valid command to be sent (invalid will result in exception).
- connId(optional) - connection identifier. By default equals 'default'
Example:
| send cmd | HELP |
### Response:
def send_cmd(self, command, connId='default'):
"""
Sends any command to FTP server. Returns server output.
Parameters:
- command - any valid command to be sent (invalid will result in exception).
- connId(optional) - connection identifier. By default equals 'default'
Example:
| send cmd | HELP |
"""
thisConn = self.__getConnection(connId)
outputMsg = ""
try:
outputMsg += str(thisConn.sendcmd(command))
except ftplib.all_errors as e:
raise FtpLibraryError(str(e))
if self.printOutput:
logger.info(outputMsg)
return outputMsg |
def from_label(cls, label):
r"""Take pauli string to construct pauli.
The qubit index of pauli label is q_{n-1} ... q_0.
E.g., a pauli is $P_{n-1} \otimes ... \otimes P_0$
Args:
label (str): pauli label
Returns:
Pauli: the constructed pauli
Raises:
QiskitError: invalid character in the label
"""
z = np.zeros(len(label), dtype=np.bool)
x = np.zeros(len(label), dtype=np.bool)
for i, char in enumerate(label):
if char == 'X':
x[-i - 1] = True
elif char == 'Z':
z[-i - 1] = True
elif char == 'Y':
z[-i - 1] = True
x[-i - 1] = True
elif char != 'I':
raise QiskitError("Pauli string must be only consisted of 'I', 'X', "
"'Y' or 'Z' but you have {}.".format(char))
return cls(z=z, x=x) | r"""Take pauli string to construct pauli.
The qubit index of pauli label is q_{n-1} ... q_0.
E.g., a pauli is $P_{n-1} \otimes ... \otimes P_0$
Args:
label (str): pauli label
Returns:
Pauli: the constructed pauli
Raises:
QiskitError: invalid character in the label | Below is the the instruction that describes the task:
### Input:
r"""Take pauli string to construct pauli.
The qubit index of pauli label is q_{n-1} ... q_0.
E.g., a pauli is $P_{n-1} \otimes ... \otimes P_0$
Args:
label (str): pauli label
Returns:
Pauli: the constructed pauli
Raises:
QiskitError: invalid character in the label
### Response:
def from_label(cls, label):
r"""Take pauli string to construct pauli.
The qubit index of pauli label is q_{n-1} ... q_0.
E.g., a pauli is $P_{n-1} \otimes ... \otimes P_0$
Args:
label (str): pauli label
Returns:
Pauli: the constructed pauli
Raises:
QiskitError: invalid character in the label
"""
z = np.zeros(len(label), dtype=np.bool)
x = np.zeros(len(label), dtype=np.bool)
for i, char in enumerate(label):
if char == 'X':
x[-i - 1] = True
elif char == 'Z':
z[-i - 1] = True
elif char == 'Y':
z[-i - 1] = True
x[-i - 1] = True
elif char != 'I':
raise QiskitError("Pauli string must be only consisted of 'I', 'X', "
"'Y' or 'Z' but you have {}.".format(char))
return cls(z=z, x=x) |
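To make the indexing convention concrete, here is a standalone sketch of the same label-to-(z, x) conversion; label[0] corresponds to the highest qubit q_{n-1}, so the boolean arrays are filled from the end. The helper name is an assumption for illustration, not Qiskit API, and a plain ValueError stands in for QiskitError.

import numpy as np

def label_to_zx(label):
    z = np.zeros(len(label), dtype=bool)
    x = np.zeros(len(label), dtype=bool)
    for i, char in enumerate(label):
        if char == 'X':
            x[-i - 1] = True
        elif char == 'Z':
            z[-i - 1] = True
        elif char == 'Y':
            z[-i - 1] = True
            x[-i - 1] = True
        elif char != 'I':
            raise ValueError('invalid Pauli character: {}'.format(char))
    return z, x

z, x = label_to_zx('ZXI')
print(z)  # [False False  True] -> Z acts on qubit 2
print(x)  # [False  True False] -> X acts on qubit 1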
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
# filled_length = int(length * iteration // total)
# bar = fill * filled_length + '-' * (length - filled_length)
# print('\r %s |%s| %s %s' % (prefix, bar, percent, suffix), end='\r')
print(percent)
# Print New Line on Complete
if iteration == total:
print() | Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str) | Below is the the instruction that describes the task:
### Input:
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
### Response:
def print_progress_bar(iteration, total, prefix='', suffix='', decimals=1, length=100, fill='█'):
"""
Call in a loop to create terminal progress bar
@params:
iteration - Required : current iteration (Int)
total - Required : total iterations (Int)
prefix - Optional : prefix string (Str)
suffix - Optional : suffix string (Str)
decimals - Optional : positive number of decimals in percent complete (Int)
length - Optional : character length of bar (Int)
fill - Optional : bar fill character (Str)
"""
percent = ("{0:." + str(decimals) + "f}").format(100 * (iteration / float(total)))
# filled_length = int(length * iteration // total)
# bar = fill * filled_length + '-' * (length - filled_length)
# print('\r %s |%s| %s %s' % (prefix, bar, percent, suffix), end='\r')
print(percent)
# Print New Line on Complete
if iteration == total:
print() |
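A typical call pattern for the helper above, assuming the function from this record is in scope; the argument values are made up. Note that, as recorded, the bar-drawing lines are commented out, so each call only prints the percentage and a final blank line when iteration equals total.

total = 4
for i in range(1, total + 1):
    print_progress_bar(i, total, prefix='Progress:', suffix='Complete', length=40)
# prints 25.0, 50.0, 75.0, 100.0 and then an empty line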
def set_atten(self, idx, value):
"""Sets the attenuation value for a particular signal path.
Args:
idx: Zero-based index int which is the identifier for a particular
signal path in an instrument. For instruments that only has one
channel, this is ignored by the device.
value: A float that is the attenuation value to set.
Raises:
Error: The underlying telnet connection to the instrument is not
open.
IndexError: The index of the attenuator is greater than the maximum
index of the underlying instrument.
ValueError: The requested set value is greater than the maximum
attenuation value.
"""
if not self.is_open:
raise attenuator.Error(
"Connection to attenuator at %s is not open!" %
self._telnet_client.host)
if idx + 1 > self.path_count:
raise IndexError("Attenuator index out of range!", self.path_count,
idx)
if value > self.max_atten:
raise ValueError("Attenuator value out of range!", self.max_atten,
value)
# The actual device uses one-based index for channel numbers.
self._telnet_client.cmd("CHAN:%s:SETATT:%s" % (idx + 1, value)) | Sets the attenuation value for a particular signal path.
Args:
idx: Zero-based index int which is the identifier for a particular
signal path in an instrument. For instruments that only has one
channel, this is ignored by the device.
value: A float that is the attenuation value to set.
Raises:
Error: The underlying telnet connection to the instrument is not
open.
IndexError: The index of the attenuator is greater than the maximum
index of the underlying instrument.
ValueError: The requested set value is greater than the maximum
attenuation value. | Below is the the instruction that describes the task:
### Input:
Sets the attenuation value for a particular signal path.
Args:
idx: Zero-based index int which is the identifier for a particular
signal path in an instrument. For instruments that only has one
channel, this is ignored by the device.
value: A float that is the attenuation value to set.
Raises:
Error: The underlying telnet connection to the instrument is not
open.
IndexError: The index of the attenuator is greater than the maximum
index of the underlying instrument.
ValueError: The requested set value is greater than the maximum
attenuation value.
### Response:
def set_atten(self, idx, value):
"""Sets the attenuation value for a particular signal path.
Args:
idx: Zero-based index int which is the identifier for a particular
signal path in an instrument. For instruments that only has one
channel, this is ignored by the device.
value: A float that is the attenuation value to set.
Raises:
Error: The underlying telnet connection to the instrument is not
open.
IndexError: The index of the attenuator is greater than the maximum
index of the underlying instrument.
ValueError: The requested set value is greater than the maximum
attenuation value.
"""
if not self.is_open:
raise attenuator.Error(
"Connection to attenuator at %s is not open!" %
self._telnet_client.host)
if idx + 1 > self.path_count:
raise IndexError("Attenuator index out of range!", self.path_count,
idx)
if value > self.max_atten:
raise ValueError("Attenuator value out of range!", self.max_atten,
value)
# The actual device uses one-based index for channel numbers.
self._telnet_client.cmd("CHAN:%s:SETATT:%s" % (idx + 1, value)) |
def __calculate_current_value(self, asset_class: AssetClass):
""" Calculate totals for asset class by adding all the children values """
# Is this the final asset class, the one with stocks?
if asset_class.stocks:
# add all the stocks
stocks_sum = Decimal(0)
for stock in asset_class.stocks:
# recalculate into base currency!
stocks_sum += stock.value_in_base_currency
asset_class.curr_value = stocks_sum
if asset_class.classes:
# load totals for child classes
for child in asset_class.classes:
self.__calculate_current_value(child)
asset_class.curr_value += child.curr_value | Calculate totals for asset class by adding all the children values | Below is the the instruction that describes the task:
### Input:
Calculate totals for asset class by adding all the children values
### Response:
def __calculate_current_value(self, asset_class: AssetClass):
""" Calculate totals for asset class by adding all the children values """
# Is this the final asset class, the one with stocks?
if asset_class.stocks:
# add all the stocks
stocks_sum = Decimal(0)
for stock in asset_class.stocks:
# recalculate into base currency!
stocks_sum += stock.value_in_base_currency
asset_class.curr_value = stocks_sum
if asset_class.classes:
# load totals for child classes
for child in asset_class.classes:
self.__calculate_current_value(child)
asset_class.curr_value += child.curr_value |
def get_image_platform_digest(self, image, platform):
"""Get digest of specified image and platform
:param ImageName image: image
:param str platform: name of the platform/arch (x86_64, ppc64le, ...)
:raises KeyError: when digest is not found
:rtype: str
:return: digest of the specified image (fedora@sha256:...)
"""
image_digests = self.get_image_digests(image)
digest = image_digests.get(platform)
if digest is None:
raise KeyError(
'Image {} has no digest record for platform {}'.format(image, platform)
)
return digest | Get digest of specified image and platform
:param ImageName image: image
:param str platform: name of the platform/arch (x86_64, ppc64le, ...)
:raises KeyError: when digest is not found
:rtype: str
:return: digest of the specified image (fedora@sha256:...) | Below is the the instruction that describes the task:
### Input:
Get digest of specified image and platform
:param ImageName image: image
:param str platform: name of the platform/arch (x86_64, ppc64le, ...)
:raises KeyError: when digest is not found
:rtype: str
:return: digest of the specified image (fedora@sha256:...)
### Response:
def get_image_platform_digest(self, image, platform):
"""Get digest of specified image and platform
:param ImageName image: image
:param str platform: name of the platform/arch (x86_64, ppc64le, ...)
:raises KeyError: when digest is not found
:rtype: str
:return: digest of the specified image (fedora@sha256:...)
"""
image_digests = self.get_image_digests(image)
digest = image_digests.get(platform)
if digest is None:
raise KeyError(
'Image {} has no digest record for platform {}'.format(image, platform)
)
return digest |
def _query_k(k, i, P, oracle, query, trn, state_cache, dist_cache, smooth=False, D=None, weight=0.5):
"""A helper function for query-matching function`s iteration over observations.
Args:
k - index of the candidate path
i - index of the frames of the observations
P - the path matrix of size K x N, K the number for paths initiated,
N the frame number of observations
oracle - an encoded oracle
query - observations matrix (numpy array) of dimension N x D.
D the dimension of the observation.
trn - function handle of forward links vector gathering
state_cache - a list storing the states visited during the for loop for k
dist_cache - a list of the same lenth as oracle storing the
distance calculated between the current observation and states
in the oracle
smooth - whether to enforce a preference on continuation or not
D - Self-similarity matrix, required if smooth is set to True
weight - the weight between continuation or jumps (1.0 for certain continuation)
"""
_trn = trn(oracle, P[i - 1][k])
t = list(itertools.chain.from_iterable([oracle.latent[oracle.data[j]] for j in _trn]))
_trn_unseen = [_t for _t in _trn if _t not in state_cache]
state_cache.extend(_trn_unseen)
if _trn_unseen:
t_unseen = list(itertools.chain.from_iterable([oracle.latent[oracle.data[j]] for j in _trn_unseen]))
dist_cache[t_unseen] = _dist_obs_oracle(oracle, query[i], t_unseen)
dvec = dist_cache[t]
if smooth and P[i - 1][k] < oracle.n_states - 1:
dvec = dvec * (1.0 - weight) + weight * np.array([D[P[i - 1][k]][_t - 1] for _t in t])
_m = np.argmin(dvec)
return t[_m], dvec[_m] | A helper function for query-matching function`s iteration over observations.
Args:
k - index of the candidate path
i - index of the frames of the observations
P - the path matrix of size K x N, K the number for paths initiated,
N the frame number of observations
oracle - an encoded oracle
query - observations matrix (numpy array) of dimension N x D.
D the dimension of the observation.
trn - function handle of forward links vector gathering
state_cache - a list storing the states visited during the for loop for k
dist_cache - a list of the same lenth as oracle storing the
distance calculated between the current observation and states
in the oracle
smooth - whether to enforce a preference on continuation or not
D - Self-similarity matrix, required if smooth is set to True
weight - the weight between continuation or jumps (1.0 for certain continuation) | Below is the the instruction that describes the task:
### Input:
A helper function for query-matching function`s iteration over observations.
Args:
k - index of the candidate path
i - index of the frames of the observations
P - the path matrix of size K x N, K the number for paths initiated,
N the frame number of observations
oracle - an encoded oracle
query - observations matrix (numpy array) of dimension N x D.
D the dimension of the observation.
trn - function handle of forward links vector gathering
state_cache - a list storing the states visited during the for loop for k
dist_cache - a list of the same lenth as oracle storing the
distance calculated between the current observation and states
in the oracle
smooth - whether to enforce a preference on continuation or not
D - Self-similarity matrix, required if smooth is set to True
weight - the weight between continuation or jumps (1.0 for certain continuation)
### Response:
def _query_k(k, i, P, oracle, query, trn, state_cache, dist_cache, smooth=False, D=None, weight=0.5):
"""A helper function for query-matching function`s iteration over observations.
Args:
k - index of the candidate path
i - index of the frames of the observations
P - the path matrix of size K x N, K the number for paths initiated,
N the frame number of observations
oracle - an encoded oracle
query - observations matrix (numpy array) of dimension N x D.
D the dimension of the observation.
trn - function handle of forward links vector gathering
state_cache - a list storing the states visited during the for loop for k
dist_cache - a list of the same lenth as oracle storing the
distance calculated between the current observation and states
in the oracle
smooth - whether to enforce a preference on continuation or not
D - Self-similarity matrix, required if smooth is set to True
weight - the weight between continuation or jumps (1.0 for certain continuation)
"""
_trn = trn(oracle, P[i - 1][k])
t = list(itertools.chain.from_iterable([oracle.latent[oracle.data[j]] for j in _trn]))
_trn_unseen = [_t for _t in _trn if _t not in state_cache]
state_cache.extend(_trn_unseen)
if _trn_unseen:
t_unseen = list(itertools.chain.from_iterable([oracle.latent[oracle.data[j]] for j in _trn_unseen]))
dist_cache[t_unseen] = _dist_obs_oracle(oracle, query[i], t_unseen)
dvec = dist_cache[t]
if smooth and P[i - 1][k] < oracle.n_states - 1:
dvec = dvec * (1.0 - weight) + weight * np.array([D[P[i - 1][k]][_t - 1] for _t in t])
_m = np.argmin(dvec)
return t[_m], dvec[_m] |
def parse_notification_xml(xml: str) -> Union[AliasRegistration, Payment]:
""""
Both alias registration and payments are received here.
We can differentiate them by looking at the use-alias user-parameter (and verifying the amount is o).
"""
body = fromstring(xml).find('body')
transaction = body.find('transaction')
_user_parameters = transaction.find('userParameters')
def get_named_parameter(name):
return _user_parameters.find("parameter[@name='" + name + "']")
def success():
return transaction.get('status') == 'success'
def parse_success():
# From the spec: sign2 is only returned in the success case
computed_signature = sign_web(body.get('merchantId'), transaction.find('amount').text,
transaction.find('currency').text,
transaction.find('uppTransactionId').text)
sign2 = get_named_parameter('sign2').text
if computed_signature != sign2:
raise ValueError('sign2 did not match computed signature')
success = transaction.find('success')
d = dict(
response_code=success.find('responseCode').text,
response_message=success.find('responseMessage').text,
authorization_code=success.find('authorizationCode').text,
acquirer_authorization_code=success.find('acqAuthorizationCode').text,
)
return {k: v for k, v in d.items() if v is not None}
def parse_error():
error = transaction.find('error')
d = dict(
error_code=error.find('errorCode').text,
error_message=error.find('errorMessage').text,
error_detail=error.find('errorDetail').text)
acquirer_error_code = get_named_parameter('acqErrorCode')
if acquirer_error_code is not None:
d['acquirer_error_code'] = acquirer_error_code.text
return {k: v for k, v in d.items() if v is not None}
def parse_common_attributes():
d = dict(
transaction_id=transaction.find('uppTransactionId').text,
merchant_id=body.get('merchantId'),
client_ref=transaction.get('refno'),
amount=parse_money(transaction))
payment_method = transaction.find('pmethod')
if payment_method is not None:
d['payment_method'] = payment_method.text
request_type = transaction.find('reqtype')
if request_type is not None:
d['request_type'] = request_type.text
credit_card_country = get_named_parameter('returnCustomerCountry')
if credit_card_country is not None:
d['credit_card_country'] = credit_card_country.text
expiry_month = get_named_parameter('expm')
if expiry_month is not None:
d['expiry_month'] = int(expiry_month.text)
expiry_year = get_named_parameter('expy')
if expiry_year is not None:
d['expiry_year'] = int(expiry_year.text)
return d
# End of inner helper functions, we're back inside parse_notification_xml
use_alias_parameter = get_named_parameter('useAlias')
if use_alias_parameter is not None and use_alias_parameter.text == 'true':
# It's an alias registration
d = dict(parse_common_attributes())
masked_card_number = get_named_parameter('maskedCC')
if masked_card_number is not None:
d['masked_card_number'] = masked_card_number.text
card_alias = get_named_parameter('aliasCC')
if card_alias is not None:
d['card_alias'] = card_alias.text
if success():
d['success'] = True
d.update(parse_success())
else:
d['success'] = False
d.update(parse_error())
return AliasRegistration(**d)
else:
# It's a payment or a charge
if success():
d = dict(success=True)
cardno = get_named_parameter('cardno')
if cardno is not None:
d['masked_card_number'] = cardno.text
d.update(parse_common_attributes())
d.update(parse_success())
return Payment(**d)
else:
d = dict(success=False)
d.update(parse_common_attributes())
d.update(parse_error())
return Payment(**d) | Both alias registration and payments are received here.
We can differentiate them by looking at the use-alias user-parameter (and verifying the amount is o). | Below is the the instruction that describes the task:
### Input:
Both alias registration and payments are received here.
We can differentiate them by looking at the use-alias user-parameter (and verifying the amount is o).
### Response:
def parse_notification_xml(xml: str) -> Union[AliasRegistration, Payment]:
""""
Both alias registration and payments are received here.
We can differentiate them by looking at the use-alias user-parameter (and verifying the amount is o).
"""
body = fromstring(xml).find('body')
transaction = body.find('transaction')
_user_parameters = transaction.find('userParameters')
def get_named_parameter(name):
return _user_parameters.find("parameter[@name='" + name + "']")
def success():
return transaction.get('status') == 'success'
def parse_success():
# From the spec: sign2 is only returned in the success case
computed_signature = sign_web(body.get('merchantId'), transaction.find('amount').text,
transaction.find('currency').text,
transaction.find('uppTransactionId').text)
sign2 = get_named_parameter('sign2').text
if computed_signature != sign2:
raise ValueError('sign2 did not match computed signature')
success = transaction.find('success')
d = dict(
response_code=success.find('responseCode').text,
response_message=success.find('responseMessage').text,
authorization_code=success.find('authorizationCode').text,
acquirer_authorization_code=success.find('acqAuthorizationCode').text,
)
return {k: v for k, v in d.items() if v is not None}
def parse_error():
error = transaction.find('error')
d = dict(
error_code=error.find('errorCode').text,
error_message=error.find('errorMessage').text,
error_detail=error.find('errorDetail').text)
acquirer_error_code = get_named_parameter('acqErrorCode')
if acquirer_error_code is not None:
d['acquirer_error_code'] = acquirer_error_code.text
return {k: v for k, v in d.items() if v is not None}
def parse_common_attributes():
d = dict(
transaction_id=transaction.find('uppTransactionId').text,
merchant_id=body.get('merchantId'),
client_ref=transaction.get('refno'),
amount=parse_money(transaction))
payment_method = transaction.find('pmethod')
if payment_method is not None:
d['payment_method'] = payment_method.text
request_type = transaction.find('reqtype')
if request_type is not None:
d['request_type'] = request_type.text
credit_card_country = get_named_parameter('returnCustomerCountry')
if credit_card_country is not None:
d['credit_card_country'] = credit_card_country.text
expiry_month = get_named_parameter('expm')
if expiry_month is not None:
d['expiry_month'] = int(expiry_month.text)
expiry_year = get_named_parameter('expy')
if expiry_year is not None:
d['expiry_year'] = int(expiry_year.text)
return d
# End of inner helper functions, we're back inside parse_notification_xml
use_alias_parameter = get_named_parameter('useAlias')
if use_alias_parameter is not None and use_alias_parameter.text == 'true':
# It's an alias registration
d = dict(parse_common_attributes())
masked_card_number = get_named_parameter('maskedCC')
if masked_card_number is not None:
d['masked_card_number'] = masked_card_number.text
card_alias = get_named_parameter('aliasCC')
if card_alias is not None:
d['card_alias'] = card_alias.text
if success():
d['success'] = True
d.update(parse_success())
else:
d['success'] = False
d.update(parse_error())
return AliasRegistration(**d)
else:
# It's a payment or a charge
if success():
d = dict(success=True)
cardno = get_named_parameter('cardno')
if cardno is not None:
d['masked_card_number'] = cardno.text
d.update(parse_common_attributes())
d.update(parse_success())
return Payment(**d)
else:
d = dict(success=False)
d.update(parse_common_attributes())
d.update(parse_error())
return Payment(**d) |
def diff(**kwargs):
'''
Returns the difference between the candidate and the current configuration
id : 0
The rollback ID value (0-49)
CLI Example:
.. code-block:: bash
salt 'device_name' junos.diff 3
'''
kwargs = salt.utils.args.clean_kwargs(**kwargs)
id_ = kwargs.pop('id', 0)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
try:
ret['message'] = conn.cu.diff(rb_id=id_)
except Exception as exception:
ret['message'] = 'Could not get diff with error "{0}"'.format(
exception)
ret['out'] = False
return ret | Returns the difference between the candidate and the current configuration
id : 0
The rollback ID value (0-49)
CLI Example:
.. code-block:: bash
salt 'device_name' junos.diff 3 | Below is the the instruction that describes the task:
### Input:
Returns the difference between the candidate and the current configuration
id : 0
The rollback ID value (0-49)
CLI Example:
.. code-block:: bash
salt 'device_name' junos.diff 3
### Response:
def diff(**kwargs):
'''
Returns the difference between the candidate and the current configuration
id : 0
The rollback ID value (0-49)
CLI Example:
.. code-block:: bash
salt 'device_name' junos.diff 3
'''
kwargs = salt.utils.args.clean_kwargs(**kwargs)
id_ = kwargs.pop('id', 0)
if kwargs:
salt.utils.args.invalid_kwargs(kwargs)
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
try:
ret['message'] = conn.cu.diff(rb_id=id_)
except Exception as exception:
ret['message'] = 'Could not get diff with error "{0}"'.format(
exception)
ret['out'] = False
return ret |
def toc(self):
""" stops the timer """
elapsed = self._time() - self.tstart
if self.verbose:
self.write('...toc(%r)=%.4fs\n' % (self.label, elapsed))
self.flush()
return elapsed | stops the timer | Below is the the instruction that describes the task:
### Input:
stops the timer
### Response:
def toc(self):
""" stops the timer """
elapsed = self._time() - self.tstart
if self.verbose:
self.write('...toc(%r)=%.4fs\n' % (self.label, elapsed))
self.flush()
return elapsed |