code | docstring | text |
---|---|---|
def _ensure_channel_connected(self, destination_id):
""" Ensure we opened a channel to destination_id. """
if destination_id not in self._open_channels:
self._open_channels.append(destination_id)
self.send_message(
destination_id, NS_CONNECTION,
{MESSAGE_TYPE: TYPE_CONNECT,
'origin': {},
'userAgent': 'PyChromecast',
'senderInfo': {
'sdkType': 2,
'version': '15.605.1.3',
'browserVersion': "44.0.2403.30",
'platform': 4,
'systemVersion': 'Macintosh; Intel Mac OS X10_10_3',
'connectionType': 1}},
no_add_request_id=True) | Ensure we opened a channel to destination_id. | Below is the instruction that describes the task:
### Input:
Ensure we opened a channel to destination_id.
### Response:
def _ensure_channel_connected(self, destination_id):
""" Ensure we opened a channel to destination_id. """
if destination_id not in self._open_channels:
self._open_channels.append(destination_id)
self.send_message(
destination_id, NS_CONNECTION,
{MESSAGE_TYPE: TYPE_CONNECT,
'origin': {},
'userAgent': 'PyChromecast',
'senderInfo': {
'sdkType': 2,
'version': '15.605.1.3',
'browserVersion': "44.0.2403.30",
'platform': 4,
'systemVersion': 'Macintosh; Intel Mac OS X10_10_3',
'connectionType': 1}},
no_add_request_id=True) |
def _normalize_correlation_data(self, corr_data, norm_unit):
"""Normalize the correlation data if necessary.
Fisher-transform and then z-score the data for every norm_unit samples
if norm_unit > 1.
Parameters
----------
corr_data: the correlation data
in shape [num_samples, num_processed_voxels, num_voxels]
norm_unit: int
the number of samples on which the normalization
is performed
Returns
-------
normalized_corr_data: the normalized correlation data
in shape [num_samples, num_voxels, num_voxels]
"""
# normalize if necessary
if norm_unit > 1:
num_samples = len(corr_data)
[_, d2, d3] = corr_data.shape
second_dimension = d2 * d3
# this is a shallow copy
normalized_corr_data = corr_data.reshape(1,
num_samples,
second_dimension)
fcma_extension.normalization(normalized_corr_data, norm_unit)
normalized_corr_data = normalized_corr_data.reshape(num_samples,
d2, d3)
logger.debug(
'normalization done'
)
else:
normalized_corr_data = corr_data
return normalized_corr_data | Normalize the correlation data if necessary.
Fisher-transform and then z-score the data for every norm_unit samples
if norm_unit > 1.
Parameters
----------
corr_data: the correlation data
in shape [num_samples, num_processed_voxels, num_voxels]
norm_unit: int
the number of samples on which the normalization
is performed
Returns
-------
normalized_corr_data: the normalized correlation data
in shape [num_samples, num_voxels, num_voxels] | Below is the instruction that describes the task:
### Input:
Normalize the correlation data if necessary.
Fisher-transform and then z-score the data for every norm_unit samples
if norm_unit > 1.
Parameters
----------
corr_data: the correlation data
in shape [num_samples, num_processed_voxels, num_voxels]
norm_unit: int
the number of samples on which the normalization
is performed
Returns
-------
normalized_corr_data: the normalized correlation data
in shape [num_samples, num_voxels, num_voxels]
### Response:
def _normalize_correlation_data(self, corr_data, norm_unit):
"""Normalize the correlation data if necessary.
Fisher-transform and then z-score the data for every norm_unit samples
if norm_unit > 1.
Parameters
----------
corr_data: the correlation data
in shape [num_samples, num_processed_voxels, num_voxels]
norm_unit: int
the number of samples on which the normalization
is performed
Returns
-------
normalized_corr_data: the normalized correlation data
in shape [num_samples, num_voxels, num_voxels]
"""
# normalize if necessary
if norm_unit > 1:
num_samples = len(corr_data)
[_, d2, d3] = corr_data.shape
second_dimension = d2 * d3
# this is a shallow copy
normalized_corr_data = corr_data.reshape(1,
num_samples,
second_dimension)
fcma_extension.normalization(normalized_corr_data, norm_unit)
normalized_corr_data = normalized_corr_data.reshape(num_samples,
d2, d3)
logger.debug(
'normalization done'
)
else:
normalized_corr_data = corr_data
return normalized_corr_data |
def youtube_id(self):
"""Extract and return Youtube video id"""
m = re.search(r'/embed/([A-Za-z0-9\-=_]*)', self.embed)
if m:
return m.group(1)
return '' | Extract and return Youtube video id | Below is the instruction that describes the task:
### Input:
Extract and return Youtube video id
### Response:
def youtube_id(self):
"""Extract and return Youtube video id"""
m = re.search(r'/embed/([A-Za-z0-9\-=_]*)', self.embed)
if m:
return m.group(1)
return '' |
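For context, here is a standalone sketch of the same `/embed/` extraction applied to a plain string instead of `self.embed`; the iframe markup is a made-up example, not data from the row above.

```python
import re

def extract_youtube_id(embed_html):
    # Same pattern as youtube_id() above, but applied to an explicit string.
    m = re.search(r'/embed/([A-Za-z0-9\-=_]*)', embed_html)
    return m.group(1) if m else ''

# Hypothetical embed markup, for illustration only.
print(extract_youtube_id('<iframe src="https://www.youtube.com/embed/dQw4w9WgXcQ"></iframe>'))
# -> dQw4w9WgXcQ
```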
def obj2str(obj, pk_protocol=pk_protocol):
"""Convert arbitrary object to utf-8 string, using
base64encode algorithm.
Usage::
>>> from weatherlab.lib.dataIO.pk import obj2str
>>> data = {"a": 1, "b": 2}
>>> obj2str(data, pk_protocol=2)
'gAJ9cQAoWAEAAABhcQFLAVgBAAAAYnECSwJ1Lg=='
**中文文档** (Chinese docs)
Convert a picklable Python object into a utf-8 encoded "string".
"""
return base64.b64encode(pickle.dumps(
obj, protocol=pk_protocol)).decode("utf-8") | Convert arbitrary object to utf-8 string, using
base64encode algorithm.
Usage::
>>> from weatherlab.lib.dataIO.pk import obj2str
>>> data = {"a": 1, "b": 2}
>>> obj2str(data, pk_protocol=2)
'gAJ9cQAoWAEAAABhcQFLAVgBAAAAYnECSwJ1Lg=='
**中文文档** (Chinese docs)
Convert a picklable Python object into a utf-8 encoded "string". | Below is the instruction that describes the task:
### Input:
Convert arbitrary object to utf-8 string, using
base64encode algorithm.
Usage::
>>> from weatherlab.lib.dataIO.pk import obj2str
>>> data = {"a": 1, "b": 2}
>>> obj2str(data, pk_protocol=2)
'gAJ9cQAoWAEAAABhcQFLAVgBAAAAYnECSwJ1Lg=='
**中文文档** (Chinese docs)
Convert a picklable Python object into a utf-8 encoded "string".
### Response:
def obj2str(obj, pk_protocol=pk_protocol):
"""Convert arbitrary object to utf-8 string, using
base64encode algorithm.
Usage::
>>> from weatherlab.lib.dataIO.pk import obj2str
>>> data = {"a": 1, "b": 2}
>>> obj2str(data, pk_protocol=2)
'gAJ9cQAoWAEAAABhcQFLAVgBAAAAYnECSwJ1Lg=='
**中文文档** (Chinese docs)
Convert a picklable Python object into a utf-8 encoded "string".
"""
return base64.b64encode(pickle.dumps(
obj, protocol=pk_protocol)).decode("utf-8") |
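A minimal, self-contained sketch of the same pickle-then-base64 round trip; `str2obj_demo` is a hypothetical inverse written for illustration, not a function taken from the weatherlab library.

```python
import base64
import pickle

def obj2str_demo(obj, pk_protocol=2):
    # pickle -> base64 -> utf-8 text, as in obj2str() above.
    return base64.b64encode(pickle.dumps(obj, protocol=pk_protocol)).decode("utf-8")

def str2obj_demo(text):
    # Hypothetical inverse: utf-8 text -> base64 -> pickle.
    return pickle.loads(base64.b64decode(text.encode("utf-8")))

data = {"a": 1, "b": 2}
assert str2obj_demo(obj2str_demo(data)) == data
```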
def _parse_next_token(self):
"""Will parse patterns until it gets to the next token or EOF."""
while self._position < self.limit:
token = self._next_pattern()
if token:
return token
return None | Will parse patterns until it gets to the next token or EOF. | Below is the instruction that describes the task:
### Input:
Will parse patterns until it gets to the next token or EOF.
### Response:
def _parse_next_token(self):
"""Will parse patterns until it gets to the next token or EOF."""
while self._position < self.limit:
token = self._next_pattern()
if token:
return token
return None |
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: QueueContext for this QueueInstance
:rtype: twilio.rest.api.v2010.account.queue.QueueContext
"""
if self._context is None:
self._context = QueueContext(
self._version,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
return self._context | Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: QueueContext for this QueueInstance
:rtype: twilio.rest.api.v2010.account.queue.QueueContext | Below is the instruction that describes the task:
### Input:
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: QueueContext for this QueueInstance
:rtype: twilio.rest.api.v2010.account.queue.QueueContext
### Response:
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: QueueContext for this QueueInstance
:rtype: twilio.rest.api.v2010.account.queue.QueueContext
"""
if self._context is None:
self._context = QueueContext(
self._version,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
return self._context |
def newidfobject(self, key, aname='', defaultvalues=True, **kwargs):
"""
Add a new idfobject to the model. If you don't specify a value for a
field, the default value will be set.
For example ::
newidfobject("CONSTRUCTION")
newidfobject("CONSTRUCTION",
Name='Interior Ceiling_class',
Outside_Layer='LW Concrete',
Layer_2='soundmat')
Parameters
----------
key : str
The type of IDF object. This must be in ALL_CAPS.
aname : str, deprecated
This parameter is not used. It is left there for backward
compatibility.
defaultvalues: boolean
default is True. If True default values WILL be set.
If False, default values WILL NOT be set
**kwargs
Keyword arguments in the format `field=value` used to set the value
of fields in the IDF object when it is created.
Returns
-------
EpBunch object
"""
obj = newrawobject(self.model, self.idd_info,
key, block=self.block, defaultvalues=defaultvalues)
abunch = obj2bunch(self.model, self.idd_info, obj)
if aname:
warnings.warn("The aname parameter should no longer be used.", UserWarning)
namebunch(abunch, aname)
self.idfobjects[key].append(abunch)
for k, v in list(kwargs.items()):
abunch[k] = v
return abunch | Add a new idfobject to the model. If you don't specify a value for a
field, the default value will be set.
For example ::
newidfobject("CONSTRUCTION")
newidfobject("CONSTRUCTION",
Name='Interior Ceiling_class',
Outside_Layer='LW Concrete',
Layer_2='soundmat')
Parameters
----------
key : str
The type of IDF object. This must be in ALL_CAPS.
aname : str, deprecated
This parameter is not used. It is left there for backward
compatibility.
defaultvalues: boolean
default is True. If True default values WILL be set.
If False, default values WILL NOT be set
**kwargs
Keyword arguments in the format `field=value` used to set the value
of fields in the IDF object when it is created.
Returns
-------
EpBunch object | Below is the instruction that describes the task:
### Input:
Add a new idfobject to the model. If you don't specify a value for a
field, the default value will be set.
For example ::
newidfobject("CONSTRUCTION")
newidfobject("CONSTRUCTION",
Name='Interior Ceiling_class',
Outside_Layer='LW Concrete',
Layer_2='soundmat')
Parameters
----------
key : str
The type of IDF object. This must be in ALL_CAPS.
aname : str, deprecated
This parameter is not used. It is left there for backward
compatibility.
defaultvalues: boolean
default is True. If True default values WILL be set.
If False, default values WILL NOT be set
**kwargs
Keyword arguments in the format `field=value` used to set the value
of fields in the IDF object when it is created.
Returns
-------
EpBunch object
### Response:
def newidfobject(self, key, aname='', defaultvalues=True, **kwargs):
"""
Add a new idfobject to the model. If you don't specify a value for a
field, the default value will be set.
For example ::
newidfobject("CONSTRUCTION")
newidfobject("CONSTRUCTION",
Name='Interior Ceiling_class',
Outside_Layer='LW Concrete',
Layer_2='soundmat')
Parameters
----------
key : str
The type of IDF object. This must be in ALL_CAPS.
aname : str, deprecated
This parameter is not used. It is left there for backward
compatibility.
defaultvalues: boolean
default is True. If True default values WILL be set.
If False, default values WILL NOT be set
**kwargs
Keyword arguments in the format `field=value` used to set the value
of fields in the IDF object when it is created.
Returns
-------
EpBunch object
"""
obj = newrawobject(self.model, self.idd_info,
key, block=self.block, defaultvalues=defaultvalues)
abunch = obj2bunch(self.model, self.idd_info, obj)
if aname:
warnings.warn("The aname parameter should no longer be used.", UserWarning)
namebunch(abunch, aname)
self.idfobjects[key].append(abunch)
for k, v in list(kwargs.items()):
abunch[k] = v
return abunch |
def get_field_values_as_list(self,field):
'''
:param str field: The name of the field for which to pull in values.
Will parse the query results (must be ungrouped) and return all values of 'field' as a list. Note that these are not unique values. Example::
>>> r.get_field_values_as_list('product_name_exact')
['Mauris risus risus lacus. sit', 'dolor auctor Vivamus fringilla. vulputate', 'semper nisi lacus nulla sed', 'vel amet diam sed posuere', 'vitae neque ultricies, Phasellus ac', 'consectetur nisi orci, eu diam', 'sapien, nisi accumsan accumsan In', 'ligula. odio ipsum sit vel', 'tempus orci. elit, Ut nisl.', 'neque nisi Integer nisi Lorem']
'''
return [doc[field] for doc in self.docs if field in doc] | :param str field: The name of the field for which to pull in values.
Will parse the query results (must be ungrouped) and return all values of 'field' as a list. Note that these are not unique values. Example::
>>> r.get_field_values_as_list('product_name_exact')
['Mauris risus risus lacus. sit', 'dolor auctor Vivamus fringilla. vulputate', 'semper nisi lacus nulla sed', 'vel amet diam sed posuere', 'vitae neque ultricies, Phasellus ac', 'consectetur nisi orci, eu diam', 'sapien, nisi accumsan accumsan In', 'ligula. odio ipsum sit vel', 'tempus orci. elit, Ut nisl.', 'neque nisi Integer nisi Lorem'] | Below is the instruction that describes the task:
### Input:
:param str field: The name of the field for which to pull in values.
Will parse the query results (must be ungrouped) and return all values of 'field' as a list. Note that these are not unique values. Example::
>>> r.get_field_values_as_list('product_name_exact')
['Mauris risus risus lacus. sit', 'dolor auctor Vivamus fringilla. vulputate', 'semper nisi lacus nulla sed', 'vel amet diam sed posuere', 'vitae neque ultricies, Phasellus ac', 'consectetur nisi orci, eu diam', 'sapien, nisi accumsan accumsan In', 'ligula. odio ipsum sit vel', 'tempus orci. elit, Ut nisl.', 'neque nisi Integer nisi Lorem']
### Response:
def get_field_values_as_list(self,field):
'''
:param str field: The name of the field for which to pull in values.
Will parse the query results (must be ungrouped) and return all values of 'field' as a list. Note that these are not unique values. Example::
>>> r.get_field_values_as_list('product_name_exact')
['Mauris risus risus lacus. sit', 'dolor auctor Vivamus fringilla. vulputate', 'semper nisi lacus nulla sed', 'vel amet diam sed posuere', 'vitae neque ultricies, Phasellus ac', 'consectetur nisi orci, eu diam', 'sapien, nisi accumsan accumsan In', 'ligula. odio ipsum sit vel', 'tempus orci. elit, Ut nisl.', 'neque nisi Integer nisi Lorem']
'''
return [doc[field] for doc in self.docs if field in doc] |
def del_module(self, module):
"""Remove a module from the context"""
rev = util.get_latest_revision(module)
del self.modules[(module.arg, rev)] | Remove a module from the context | Below is the instruction that describes the task:
### Input:
Remove a module from the context
### Response:
def del_module(self, module):
"""Remove a module from the context"""
rev = util.get_latest_revision(module)
del self.modules[(module.arg, rev)] |
def verify_firebase_token(id_token, request, audience=None):
"""Verifies an ID Token issued by Firebase Authentication.
Args:
id_token (Union[str, bytes]): The encoded token.
request (google.auth.transport.Request): The object used to make
HTTP requests.
audience (str): The audience that this token is intended for. This is
typically your Firebase application ID. If None then the audience
is not verified.
Returns:
Mapping[str, Any]: The decoded token.
"""
return verify_token(
id_token, request, audience=audience, certs_url=_GOOGLE_APIS_CERTS_URL) | Verifies an ID Token issued by Firebase Authentication.
Args:
id_token (Union[str, bytes]): The encoded token.
request (google.auth.transport.Request): The object used to make
HTTP requests.
audience (str): The audience that this token is intended for. This is
typically your Firebase application ID. If None then the audience
is not verified.
Returns:
Mapping[str, Any]: The decoded token. | Below is the instruction that describes the task:
### Input:
Verifies an ID Token issued by Firebase Authentication.
Args:
id_token (Union[str, bytes]): The encoded token.
request (google.auth.transport.Request): The object used to make
HTTP requests.
audience (str): The audience that this token is intended for. This is
typically your Firebase application ID. If None then the audience
is not verified.
Returns:
Mapping[str, Any]: The decoded token.
### Response:
def verify_firebase_token(id_token, request, audience=None):
"""Verifies an ID Token issued by Firebase Authentication.
Args:
id_token (Union[str, bytes]): The encoded token.
request (google.auth.transport.Request): The object used to make
HTTP requests.
audience (str): The audience that this token is intended for. This is
typically your Firebase application ID. If None then the audience
is not verified.
Returns:
Mapping[str, Any]: The decoded token.
"""
return verify_token(
id_token, request, audience=audience, certs_url=_GOOGLE_APIS_CERTS_URL) |
def get_lb_pkgs(self):
"""Retrieves the local load balancer packages.
:returns: A dictionary containing the load balancer packages
"""
_filter = {'items': {'description':
utils.query_filter('*Load Balancer*')}}
packages = self.prod_pkg.getItems(id=0, filter=_filter)
pkgs = []
for package in packages:
if not package['description'].startswith('Global'):
pkgs.append(package)
return pkgs | Retrieves the local load balancer packages.
:returns: A dictionary containing the load balancer packages | Below is the instruction that describes the task:
### Input:
Retrieves the local load balancer packages.
:returns: A dictionary containing the load balancer packages
### Response:
def get_lb_pkgs(self):
"""Retrieves the local load balancer packages.
:returns: A dictionary containing the load balancer packages
"""
_filter = {'items': {'description':
utils.query_filter('*Load Balancer*')}}
packages = self.prod_pkg.getItems(id=0, filter=_filter)
pkgs = []
for package in packages:
if not package['description'].startswith('Global'):
pkgs.append(package)
return pkgs |
def cancel(self):
""" Cancel a pending publish task """
target_url = self._client.get_url('PUBLISH', 'DELETE', 'single', {'id': self.id})
r = self._client.request('DELETE', target_url)
logger.info("cancel(): %s", r.status_code) | Cancel a pending publish task | Below is the instruction that describes the task:
### Input:
Cancel a pending publish task
### Response:
def cancel(self):
""" Cancel a pending publish task """
target_url = self._client.get_url('PUBLISH', 'DELETE', 'single', {'id': self.id})
r = self._client.request('DELETE', target_url)
logger.info("cancel(): %s", r.status_code) |
def diff_roessler(value_array, a, c):
"""The Roessler attractor differential equation
:param value_array: 3d array containing the x,y, and z component values.
:param a: Constant attractor parameter
:param c: Constant attractor parameter
:return: 3d array of the Roessler system evaluated at `value_array`
"""
b=a
diff_array = np.zeros(3)
diff_array[0] = -value_array[1] - value_array[2]
diff_array[1] = value_array[0] + a * value_array[1]
diff_array[2] = b + value_array[2] * (value_array[0] - c)
return diff_array | The Roessler attractor differential equation
:param value_array: 3d array containing the x,y, and z component values.
:param a: Constant attractor parameter
:param c: Constant attractor parameter
:return: 3d array of the Roessler system evaluated at `value_array` | Below is the instruction that describes the task:
### Input:
The Roessler attractor differential equation
:param value_array: 3d array containing the x,y, and z component values.
:param a: Constant attractor parameter
:param c: Constant attractor parameter
:return: 3d array of the Roessler system evaluated at `value_array`
### Response:
def diff_roessler(value_array, a, c):
"""The Roessler attractor differential equation
:param value_array: 3d array containing the x,y, and z component values.
:param a: Constant attractor parameter
:param c: Constant attractor parameter
:return: 3d array of the Roessler system evaluated at `value_array`
"""
b=a
diff_array = np.zeros(3)
diff_array[0] = -value_array[1] - value_array[2]
diff_array[1] = value_array[0] + a * value_array[1]
diff_array[2] = b + value_array[2] * (value_array[0] - c)
return diff_array |
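To show how such a derivative function is typically consumed, the sketch below drives it with a simple forward-Euler loop; the step size and the parameter values a=0.2, c=5.7 are illustrative assumptions, not values taken from the original project.

```python
import numpy as np

def diff_roessler(value_array, a, c):
    # Copied from the row above so the sketch is self-contained.
    b = a
    diff_array = np.zeros(3)
    diff_array[0] = -value_array[1] - value_array[2]
    diff_array[1] = value_array[0] + a * value_array[1]
    diff_array[2] = b + value_array[2] * (value_array[0] - c)
    return diff_array

state = np.array([1.0, 1.0, 1.0])
dt = 0.01
for _ in range(1000):  # crude fixed-step integration
    state = state + dt * diff_roessler(state, a=0.2, c=5.7)
print(state)
```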
def mock_server_receive(sock, length):
"""Receive `length` bytes from a socket object."""
msg = b''
while length:
chunk = sock.recv(length)
if chunk == b'':
raise socket.error(errno.ECONNRESET, 'closed')
length -= len(chunk)
msg += chunk
return msg | Receive `length` bytes from a socket object. | Below is the instruction that describes the task:
### Input:
Receive `length` bytes from a socket object.
### Response:
def mock_server_receive(sock, length):
"""Receive `length` bytes from a socket object."""
msg = b''
while length:
chunk = sock.recv(length)
if chunk == b'':
raise socket.error(errno.ECONNRESET, 'closed')
length -= len(chunk)
msg += chunk
return msg |
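A quick way to exercise this helper without a real server is a connected socket pair; this minimal sketch assumes `mock_server_receive` from the row above is already in scope.

```python
import socket

left, right = socket.socketpair()
left.sendall(b'hello world')
# Read back exactly 11 bytes through the helper above.
print(mock_server_receive(right, 11))  # b'hello world'
left.close()
right.close()
```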
def connect(self):
"""
Starts the mongodb connection. Must be called before anything else
will work.
"""
self.client = MongoClient(self.mongo_uri)
self.db = self.client[self.db_name] | Starts the mongodb connection. Must be called before anything else
will work. | Below is the instruction that describes the task:
### Input:
Starts the mongodb connection. Must be called before anything else
will work.
### Response:
def connect(self):
"""
Starts the mongodb connection. Must be called before anything else
will work.
"""
self.client = MongoClient(self.mongo_uri)
self.db = self.client[self.db_name] |
def to_array(self):
"""
Serializes this PassportFile to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(PassportFile, self).to_array()
array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str
array['file_size'] = int(self.file_size) # type int
array['file_date'] = int(self.file_date) # type int
return array | Serializes this PassportFile to a dictionary.
:return: dictionary representation of this object.
:rtype: dict | Below is the instruction that describes the task:
### Input:
Serializes this PassportFile to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
### Response:
def to_array(self):
"""
Serializes this PassportFile to a dictionary.
:return: dictionary representation of this object.
:rtype: dict
"""
array = super(PassportFile, self).to_array()
array['file_id'] = u(self.file_id) # py2: type unicode, py3: type str
array['file_size'] = int(self.file_size) # type int
array['file_date'] = int(self.file_date) # type int
return array |
def _get_next_or_previous_by_order(self, is_next, **kwargs):
"""
Retrieves next or previous object by order. We implement our
own version instead of Django's so we can hook into the
published manager, concrete subclasses and our custom
``with_respect_to`` method.
"""
lookup = self.with_respect_to()
lookup["_order"] = self._order + (1 if is_next else -1)
concrete_model = base_concrete_model(Orderable, self)
try:
queryset = concrete_model.objects.published
except AttributeError:
queryset = concrete_model.objects.filter
try:
return queryset(**kwargs).get(**lookup)
except concrete_model.DoesNotExist:
pass | Retrieves next or previous object by order. We implement our
own version instead of Django's so we can hook into the
published manager, concrete subclasses and our custom
``with_respect_to`` method. | Below is the instruction that describes the task:
### Input:
Retrieves next or previous object by order. We implement our
own version instead of Django's so we can hook into the
published manager, concrete subclasses and our custom
``with_respect_to`` method.
### Response:
def _get_next_or_previous_by_order(self, is_next, **kwargs):
"""
Retrieves next or previous object by order. We implement our
own version instead of Django's so we can hook into the
published manager, concrete subclasses and our custom
``with_respect_to`` method.
"""
lookup = self.with_respect_to()
lookup["_order"] = self._order + (1 if is_next else -1)
concrete_model = base_concrete_model(Orderable, self)
try:
queryset = concrete_model.objects.published
except AttributeError:
queryset = concrete_model.objects.filter
try:
return queryset(**kwargs).get(**lookup)
except concrete_model.DoesNotExist:
pass |
def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613
'''
List all available package upgrades on this system
CLI Example:
.. code-block:: bash
salt '*' pkgutil.list_upgrades
'''
if salt.utils.data.is_true(refresh):
refresh_db()
upgrades = {}
lines = __salt__['cmd.run_stdout'](
'/opt/csw/bin/pkgutil -A --parse').splitlines()
for line in lines:
comps = line.split('\t')
if comps[2] == "SAME":
continue
if comps[2] == "not installed":
continue
upgrades[comps[0]] = comps[1]
return upgrades | List all available package upgrades on this system
CLI Example:
.. code-block:: bash
salt '*' pkgutil.list_upgrades | Below is the instruction that describes the task:
### Input:
List all available package upgrades on this system
CLI Example:
.. code-block:: bash
salt '*' pkgutil.list_upgrades
### Response:
def list_upgrades(refresh=True, **kwargs): # pylint: disable=W0613
'''
List all available package upgrades on this system
CLI Example:
.. code-block:: bash
salt '*' pkgutil.list_upgrades
'''
if salt.utils.data.is_true(refresh):
refresh_db()
upgrades = {}
lines = __salt__['cmd.run_stdout'](
'/opt/csw/bin/pkgutil -A --parse').splitlines()
for line in lines:
comps = line.split('\t')
if comps[2] == "SAME":
continue
if comps[2] == "not installed":
continue
upgrades[comps[0]] = comps[1]
return upgrades |
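The parsing step can be illustrated in isolation; the tab-separated lines below are fabricated for this sketch and only mimic the shape the loop expects from `/opt/csw/bin/pkgutil -A --parse`, not real command output.

```python
# Fabricated example lines: package name, candidate version, and a third
# column that the loop above checks for "SAME" / "not installed".
lines = [
    "CSWfoo\t1.2,REV=2020.01.01\tSAME",
    "CSWbar\t2.0,REV=2021.05.05\tnot installed",
    "CSWbaz\t3.1,REV=2022.03.03\t3.0,REV=2021.12.01",
]
upgrades = {}
for line in lines:
    comps = line.split('\t')
    if comps[2] in ("SAME", "not installed"):
        continue
    upgrades[comps[0]] = comps[1]
print(upgrades)  # {'CSWbaz': '3.1,REV=2022.03.03'}
```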
def ndimage_to_list(image):
"""
Split an n-dimensional ANTsImage into a list
of n-1 dimensional ANTsImages
Arguments
---------
image : ANTsImage
n-dimensional image to split
Returns
-------
list of ANTsImage types
Example
-------
>>> import ants
>>> image = ants.image_read(ants.get_ants_data('r16'))
>>> image2 = ants.image_read(ants.get_ants_data('r16'))
>>> imageTar = ants.make_image( ( *image2.shape, 2 ) )
>>> image3 = ants.list_to_ndimage( imageTar, [image,image2])
>>> image3.dimension == 3
>>> images_unmerged = ants.ndimage_to_list( image3 )
>>> len(images_unmerged) == 2
>>> images_unmerged[0].dimension == 2
"""
inpixeltype = image.pixeltype
dimension = image.dimension
components = 1
imageShape = image.shape
nSections = imageShape[ dimension - 1 ]
subdimension = dimension - 1
suborigin = iio.get_origin( image )[0:subdimension]
subspacing = iio.get_spacing( image )[0:subdimension]
subdirection = np.eye( subdimension )
for i in range( subdimension ):
subdirection[i,:] = iio.get_direction( image )[i,0:subdimension]
subdim = image.shape[ 0:subdimension ]
imagelist = []
for i in range( nSections ):
img = utils.slice_image( image, axis = subdimension, idx = i )
iio.set_spacing( img, subspacing )
iio.set_origin( img, suborigin )
iio.set_direction( img, subdirection )
imagelist.append( img )
return imagelist | Split an n-dimensional ANTsImage into a list
of n-1 dimensional ANTsImages
Arguments
---------
image : ANTsImage
n-dimensional image to split
Returns
-------
list of ANTsImage types
Example
-------
>>> import ants
>>> image = ants.image_read(ants.get_ants_data('r16'))
>>> image2 = ants.image_read(ants.get_ants_data('r16'))
>>> imageTar = ants.make_image( ( *image2.shape, 2 ) )
>>> image3 = ants.list_to_ndimage( imageTar, [image,image2])
>>> image3.dimension == 3
>>> images_unmerged = ants.ndimage_to_list( image3 )
>>> len(images_unmerged) == 2
>>> images_unmerged[0].dimension == 2 | Below is the instruction that describes the task:
### Input:
Split an n-dimensional ANTsImage into a list
of n-1 dimensional ANTsImages
Arguments
---------
image : ANTsImage
n-dimensional image to split
Returns
-------
list of ANTsImage types
Example
-------
>>> import ants
>>> image = ants.image_read(ants.get_ants_data('r16'))
>>> image2 = ants.image_read(ants.get_ants_data('r16'))
>>> imageTar = ants.make_image( ( *image2.shape, 2 ) )
>>> image3 = ants.list_to_ndimage( imageTar, [image,image2])
>>> image3.dimension == 3
>>> images_unmerged = ants.ndimage_to_list( image3 )
>>> len(images_unmerged) == 2
>>> images_unmerged[0].dimension == 2
### Response:
def ndimage_to_list(image):
"""
Split an n-dimensional ANTsImage into a list
of n-1 dimensional ANTsImages
Arguments
---------
image : ANTsImage
n-dimensional image to split
Returns
-------
list of ANTsImage types
Example
-------
>>> import ants
>>> image = ants.image_read(ants.get_ants_data('r16'))
>>> image2 = ants.image_read(ants.get_ants_data('r16'))
>>> imageTar = ants.make_image( ( *image2.shape, 2 ) )
>>> image3 = ants.list_to_ndimage( imageTar, [image,image2])
>>> image3.dimension == 3
>>> images_unmerged = ants.ndimage_to_list( image3 )
>>> len(images_unmerged) == 2
>>> images_unmerged[0].dimension == 2
"""
inpixeltype = image.pixeltype
dimension = image.dimension
components = 1
imageShape = image.shape
nSections = imageShape[ dimension - 1 ]
subdimension = dimension - 1
suborigin = iio.get_origin( image )[0:subdimension]
subspacing = iio.get_spacing( image )[0:subdimension]
subdirection = np.eye( subdimension )
for i in range( subdimension ):
subdirection[i,:] = iio.get_direction( image )[i,0:subdimension]
subdim = image.shape[ 0:subdimension ]
imagelist = []
for i in range( nSections ):
img = utils.slice_image( image, axis = subdimension, idx = i )
iio.set_spacing( img, subspacing )
iio.set_origin( img, suborigin )
iio.set_direction( img, subdirection )
imagelist.append( img )
return imagelist |
def iiOfAny(instance, classes):
"""
Returns true, if `instance` is instance of any (iiOfAny) of the `classes`.
This function doesn't use :py:func:`isinstance` check, it just compares the
`class` names.
This can be generally dangerous, but it is really useful when you are
comparing a class serialized in one module and deserialized in another.
This causes the module paths in the class internals to differ, and
:py:func:`isinstance` and :py:func:`type` comparisons thus fail.
Use this function instead if you want to check the type of your
deserialized message.
Args:
instance (object): class instance you want to know the type
classes (list): classes, or just one class you want to compare - func
automatically converts nonlist/nontuple parameters to
list
Returns:
bool: True if `instance` **can be** instance of any of the `classes`.
"""
if type(classes) not in [list, tuple]:
classes = [classes]
return any(
type(instance).__name__ == cls.__name__
for cls in classes
) | Returns true, if `instance` is instance of any (iiOfAny) of the `classes`.
This function doesn't use :py:func:`isinstance` check, it just compares the
`class` names.
This can be generally dangerous, but it is really useful when you are
comparing a class serialized in one module and deserialized in another.
This causes the module paths in the class internals to differ, and
:py:func:`isinstance` and :py:func:`type` comparisons thus fail.
Use this function instead if you want to check the type of your
deserialized message.
Args:
instance (object): class instance you want to know the type
classes (list): classes, or just one class you want to compare - func
automatically converts nonlist/nontuple parameters to
list
Returns:
bool: True if `instance` **can be** instance of any of the `classes`. | Below is the instruction that describes the task:
### Input:
Returns true, if `instance` is instance of any (iiOfAny) of the `classes`.
This function doesn't use :py:func:`isinstance` check, it just compares the
`class` names.
This can be generally dangerous, but it is really useful when you are
comparing a class serialized in one module and deserialized in another.
This causes the module paths in the class internals to differ, and
:py:func:`isinstance` and :py:func:`type` comparisons thus fail.
Use this function instead if you want to check the type of your
deserialized message.
Args:
instance (object): class instance you want to know the type
classes (list): classes, or just one class you want to compare - func
automatically converts nonlist/nontuple parameters to
list
Returns:
bool: True if `instance` **can be** instance of any of the `classes`.
### Response:
def iiOfAny(instance, classes):
"""
Returns true, if `instance` is instance of any (iiOfAny) of the `classes`.
This function doesn't use :py:func:`isinstance` check, it just compares the
`class` names.
This can be generally dangerous, but it is really useful when you are
comparing a class serialized in one module and deserialized in another.
This causes the module paths in the class internals to differ, and
:py:func:`isinstance` and :py:func:`type` comparisons thus fail.
Use this function instead if you want to check the type of your
deserialized message.
Args:
instance (object): class instance you want to know the type
classes (list): classes, or just one class you want to compare - func
automatically converts nonlist/nontuple parameters to
list
Returns:
bool: True if `instance` **can be** instance of any of the `classes`.
"""
if type(classes) not in [list, tuple]:
classes = [classes]
return any(
type(instance).__name__ == cls.__name__
for cls in classes
) |
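A small usage sketch; it assumes `iiOfAny` from the row above is in scope, and `ProtocolMessage` is a made-up class name standing in for a deserialized message type.

```python
class ProtocolMessage(object):
    pass

msg = ProtocolMessage()
print(iiOfAny(msg, ProtocolMessage))         # True - same class name
print(iiOfAny(msg, [int, ProtocolMessage]))  # True - matches one entry of the list
print(iiOfAny(msg, [int, str]))              # False
```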
def init_environment():
"""Set environment variables that are important for the pipeline.
:returns: None
:rtype: None
:raises: None
"""
os.environ['DJANGO_SETTINGS_MODULE'] = 'jukeboxcore.djsettings'
pluginpath = os.pathsep.join((os.environ.get('JUKEBOX_PLUGIN_PATH', ''), constants.BUILTIN_PLUGIN_PATH))
os.environ['JUKEBOX_PLUGIN_PATH'] = pluginpath | Set environment variables that are important for the pipeline.
:returns: None
:rtype: None
:raises: None | Below is the instruction that describes the task:
### Input:
Set environment variables that are important for the pipeline.
:returns: None
:rtype: None
:raises: None
### Response:
def init_environment():
"""Set environment variables that are important for the pipeline.
:returns: None
:rtype: None
:raises: None
"""
os.environ['DJANGO_SETTINGS_MODULE'] = 'jukeboxcore.djsettings'
pluginpath = os.pathsep.join((os.environ.get('JUKEBOX_PLUGIN_PATH', ''), constants.BUILTIN_PLUGIN_PATH))
os.environ['JUKEBOX_PLUGIN_PATH'] = pluginpath |
def create_session(self, user_agent, remote_address, client_version):
"""
Create a new session.
:param str user_agent: Client user agent
:param str remote_address: Remote address of client
:param str client_version: Remote client version
:return: The new session id
:rtype: int
"""
self.session_counter += 1
self.sessions[self.session_counter] = session = self.session_class()
# Set session properties
session.user_agent = user_agent
session.remote_address = remote_address
session.client_version = client_version
# Invoke hooks
invoke_hooks(self.hooks, "session_created", self.session_counter)
return self.session_counter | Create a new session.
:param str user_agent: Client user agent
:param str remote_address: Remote address of client
:param str client_version: Remote client version
:return: The new session id
:rtype: int | Below is the instruction that describes the task:
### Input:
Create a new session.
:param str user_agent: Client user agent
:param str remote_address: Remote address of client
:param str client_version: Remote client version
:return: The new session id
:rtype: int
### Response:
def create_session(self, user_agent, remote_address, client_version):
"""
Create a new session.
:param str user_agent: Client user agent
:param str remote_address: Remote address of client
:param str client_version: Remote client version
:return: The new session id
:rtype: int
"""
self.session_counter += 1
self.sessions[self.session_counter] = session = self.session_class()
# Set session properties
session.user_agent = user_agent
session.remote_address = remote_address
session.client_version = client_version
# Invoke hooks
invoke_hooks(self.hooks, "session_created", self.session_counter)
return self.session_counter |
def checksum(self, path, hashtype='sha1'):
"""Returns the checksum of the given path."""
return self._handler.checksum(hashtype, posix_path(path)) | Returns the checksum of the given path. | Below is the instruction that describes the task:
### Input:
Returns the checksum of the given path.
### Response:
def checksum(self, path, hashtype='sha1'):
"""Returns the checksum of the given path."""
return self._handler.checksum(hashtype, posix_path(path)) |
def make_tensor_proto(values, dtype=None, shape=None, verify_shape=False):
"""Create a TensorProto.
Args:
values: Values to put in the TensorProto.
dtype: Optional tensor_pb2 DataType value.
shape: List of integers representing the dimensions of tensor.
verify_shape: Boolean that enables verification of a shape of values.
Returns:
A `TensorProto`. Depending on the type, it may contain data in the
"tensor_content" attribute, which is not directly useful to Python programs.
To access the values you should convert the proto back to a numpy ndarray
with `tensor_util.MakeNdarray(proto)`.
If `values` is a `TensorProto`, it is immediately returned; `dtype` and
`shape` are ignored.
Raises:
TypeError: if unsupported types are provided.
ValueError: if arguments have inappropriate values or if verify_shape is
True and shape of values is not equal to a shape from the argument.
make_tensor_proto accepts "values" of a python scalar, a python list, a
numpy ndarray, or a numpy scalar.
If "values" is a python scalar or a python list, make_tensor_proto
first convert it to numpy ndarray. If dtype is None, the
conversion tries its best to infer the right numpy data
type. Otherwise, the resulting numpy array has a convertible data
type with the given dtype.
In either case above, the numpy ndarray (either the caller provided
or the auto converted) must have the convertible type with dtype.
make_tensor_proto then converts the numpy array to a tensor proto.
If "shape" is None, the resulting tensor proto represents the numpy
array precisely.
Otherwise, "shape" specifies the tensor's shape and the numpy array
can not have more elements than what "shape" specifies.
"""
if isinstance(values, tensor_pb2.TensorProto):
return values
if dtype:
dtype = dtypes.as_dtype(dtype)
is_quantized = dtype in [
dtypes.qint8,
dtypes.quint8,
dtypes.qint16,
dtypes.quint16,
dtypes.qint32,
]
# We first convert value to a numpy array or scalar.
if isinstance(values, (np.ndarray, np.generic)):
if dtype:
nparray = values.astype(dtype.as_numpy_dtype)
else:
nparray = values
elif callable(getattr(values, "__array__", None)) or isinstance(
getattr(values, "__array_interface__", None), dict
):
# If a class has the __array__ method, or __array_interface__ dict, then it
# is possible to convert to numpy array.
nparray = np.asarray(values, dtype=dtype)
# This is the preferred way to create an array from the object, so replace
# the `values` with the array so that _FlattenToStrings is not run.
values = nparray
else:
if values is None:
raise ValueError("None values not supported.")
# if dtype is provided, forces numpy array to be the type
# provided if possible.
if dtype and dtype.is_numpy_compatible:
np_dt = dtype.as_numpy_dtype
else:
np_dt = None
# If shape is None, numpy.prod returns None when dtype is not set, but raises
# exception when dtype is set to np.int64
if shape is not None and np.prod(shape, dtype=np.int64) == 0:
nparray = np.empty(shape, dtype=np_dt)
else:
_Assertconvertible(values, dtype)
nparray = np.array(values, dtype=np_dt)
# We need to pass in quantized values as tuples, so don't apply the shape
# check to them.
if list(nparray.shape) != _GetDenseDimensions(values) and not is_quantized:
raise ValueError(
"""Argument must be a dense tensor: %s"""
""" - got shape %s, but wanted %s."""
% (values, list(nparray.shape), _GetDenseDimensions(values))
)
# python/numpy default float type is float64. We prefer float32 instead.
if (nparray.dtype == np.float64) and dtype is None:
nparray = nparray.astype(np.float32)
# python/numpy default int type is int64. We prefer int32 instead.
elif (nparray.dtype == np.int64) and dtype is None:
downcasted_array = nparray.astype(np.int32)
# Do not down cast if it leads to precision loss.
if np.array_equal(downcasted_array, nparray):
nparray = downcasted_array
# if dtype is provided, it must be convertible with what numpy
# conversion says.
numpy_dtype = dtypes.as_dtype(nparray.dtype)
if numpy_dtype is None:
raise TypeError("Unrecognized data type: %s" % nparray.dtype)
# If dtype was specified and is a quantized type, we convert
# numpy_dtype back into the quantized version.
if is_quantized:
numpy_dtype = dtype
if dtype is not None and (
not hasattr(dtype, "base_dtype") or dtype.base_dtype != numpy_dtype.base_dtype
):
raise TypeError(
"Inconvertible types: %s vs. %s. Value is %s"
% (dtype, nparray.dtype, values)
)
# If shape is not given, get the shape from the numpy array.
if shape is None:
shape = nparray.shape
is_same_size = True
shape_size = nparray.size
else:
shape = [int(dim) for dim in shape]
shape_size = np.prod(shape, dtype=np.int64)
is_same_size = shape_size == nparray.size
if verify_shape:
if not nparray.shape == tuple(shape):
raise TypeError(
"Expected Tensor's shape: %s, got %s."
% (tuple(shape), nparray.shape)
)
if nparray.size > shape_size:
raise ValueError(
"Too many elements provided. Needed at most %d, but received %d"
% (shape_size, nparray.size)
)
tensor_proto = tensor_pb2.TensorProto(
dtype=numpy_dtype.as_datatype_enum,
tensor_shape=tensor_shape.as_shape(shape).as_proto(),
)
if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
if nparray.size * nparray.itemsize >= (1 << 31):
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB."
)
tensor_proto.tensor_content = nparray.tostring()
return tensor_proto
# If we were not given values as a numpy array, compute the proto_values
# from the given values directly, to avoid numpy trimming nulls from the
# strings. Since values could be a list of strings, or a multi-dimensional
# list of lists that might or might not correspond to the given shape,
# we flatten it conservatively.
if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
proto_values = _FlattenToStrings(values)
# At this point, values may be a list of objects that we could not
# identify a common type for (hence it was inferred as
# np.object/dtypes.string). If we are unable to convert it to a
# string, we raise a more helpful error message.
#
# Ideally, we'd be able to convert the elements of the list to a
# common type, but this type inference requires some thinking and
# so we defer it for now.
try:
str_values = [compat.as_bytes(x) for x in proto_values]
except TypeError:
raise TypeError(
"Failed to convert object of type %s to Tensor. "
"Contents: %s. Consider casting elements to a "
"supported type." % (type(values), values)
)
tensor_proto.string_val.extend(str_values)
return tensor_proto
# TensorFlow expects C order (a.k.a., eigen row major).
proto_values = nparray.ravel()
append_fn = GetNumpyAppendFn(proto_values.dtype)
if append_fn is None:
raise TypeError(
"Element type not supported in TensorProto: %s" % numpy_dtype.name
)
append_fn(tensor_proto, proto_values)
return tensor_proto | Create a TensorProto.
Args:
values: Values to put in the TensorProto.
dtype: Optional tensor_pb2 DataType value.
shape: List of integers representing the dimensions of tensor.
verify_shape: Boolean that enables verification of a shape of values.
Returns:
A `TensorProto`. Depending on the type, it may contain data in the
"tensor_content" attribute, which is not directly useful to Python programs.
To access the values you should convert the proto back to a numpy ndarray
with `tensor_util.MakeNdarray(proto)`.
If `values` is a `TensorProto`, it is immediately returned; `dtype` and
`shape` are ignored.
Raises:
TypeError: if unsupported types are provided.
ValueError: if arguments have inappropriate values or if verify_shape is
True and shape of values is not equal to a shape from the argument.
make_tensor_proto accepts "values" of a python scalar, a python list, a
numpy ndarray, or a numpy scalar.
If "values" is a python scalar or a python list, make_tensor_proto
first convert it to numpy ndarray. If dtype is None, the
conversion tries its best to infer the right numpy data
type. Otherwise, the resulting numpy array has a convertible data
type with the given dtype.
In either case above, the numpy ndarray (either the caller provided
or the auto converted) must have the convertible type with dtype.
make_tensor_proto then converts the numpy array to a tensor proto.
If "shape" is None, the resulting tensor proto represents the numpy
array precisely.
Otherwise, "shape" specifies the tensor's shape and the numpy array
can not have more elements than what "shape" specifies. | Below is the instruction that describes the task:
### Input:
Create a TensorProto.
Args:
values: Values to put in the TensorProto.
dtype: Optional tensor_pb2 DataType value.
shape: List of integers representing the dimensions of tensor.
verify_shape: Boolean that enables verification of a shape of values.
Returns:
A `TensorProto`. Depending on the type, it may contain data in the
"tensor_content" attribute, which is not directly useful to Python programs.
To access the values you should convert the proto back to a numpy ndarray
with `tensor_util.MakeNdarray(proto)`.
If `values` is a `TensorProto`, it is immediately returned; `dtype` and
`shape` are ignored.
Raises:
TypeError: if unsupported types are provided.
ValueError: if arguments have inappropriate values or if verify_shape is
True and shape of values is not equal to a shape from the argument.
make_tensor_proto accepts "values" of a python scalar, a python list, a
numpy ndarray, or a numpy scalar.
If "values" is a python scalar or a python list, make_tensor_proto
first convert it to numpy ndarray. If dtype is None, the
conversion tries its best to infer the right numpy data
type. Otherwise, the resulting numpy array has a convertible data
type with the given dtype.
In either case above, the numpy ndarray (either the caller provided
or the auto converted) must have the convertible type with dtype.
make_tensor_proto then converts the numpy array to a tensor proto.
If "shape" is None, the resulting tensor proto represents the numpy
array precisely.
Otherwise, "shape" specifies the tensor's shape and the numpy array
can not have more elements than what "shape" specifies.
### Response:
def make_tensor_proto(values, dtype=None, shape=None, verify_shape=False):
"""Create a TensorProto.
Args:
values: Values to put in the TensorProto.
dtype: Optional tensor_pb2 DataType value.
shape: List of integers representing the dimensions of tensor.
verify_shape: Boolean that enables verification of a shape of values.
Returns:
A `TensorProto`. Depending on the type, it may contain data in the
"tensor_content" attribute, which is not directly useful to Python programs.
To access the values you should convert the proto back to a numpy ndarray
with `tensor_util.MakeNdarray(proto)`.
If `values` is a `TensorProto`, it is immediately returned; `dtype` and
`shape` are ignored.
Raises:
TypeError: if unsupported types are provided.
ValueError: if arguments have inappropriate values or if verify_shape is
True and shape of values is not equal to a shape from the argument.
make_tensor_proto accepts "values" of a python scalar, a python list, a
numpy ndarray, or a numpy scalar.
If "values" is a python scalar or a python list, make_tensor_proto
first convert it to numpy ndarray. If dtype is None, the
conversion tries its best to infer the right numpy data
type. Otherwise, the resulting numpy array has a convertible data
type with the given dtype.
In either case above, the numpy ndarray (either the caller provided
or the auto converted) must have the convertible type with dtype.
make_tensor_proto then converts the numpy array to a tensor proto.
If "shape" is None, the resulting tensor proto represents the numpy
array precisely.
Otherwise, "shape" specifies the tensor's shape and the numpy array
can not have more elements than what "shape" specifies.
"""
if isinstance(values, tensor_pb2.TensorProto):
return values
if dtype:
dtype = dtypes.as_dtype(dtype)
is_quantized = dtype in [
dtypes.qint8,
dtypes.quint8,
dtypes.qint16,
dtypes.quint16,
dtypes.qint32,
]
# We first convert value to a numpy array or scalar.
if isinstance(values, (np.ndarray, np.generic)):
if dtype:
nparray = values.astype(dtype.as_numpy_dtype)
else:
nparray = values
elif callable(getattr(values, "__array__", None)) or isinstance(
getattr(values, "__array_interface__", None), dict
):
# If a class has the __array__ method, or __array_interface__ dict, then it
# is possible to convert to numpy array.
nparray = np.asarray(values, dtype=dtype)
# This is the preferred way to create an array from the object, so replace
# the `values` with the array so that _FlattenToStrings is not run.
values = nparray
else:
if values is None:
raise ValueError("None values not supported.")
# if dtype is provided, forces numpy array to be the type
# provided if possible.
if dtype and dtype.is_numpy_compatible:
np_dt = dtype.as_numpy_dtype
else:
np_dt = None
# If shape is None, numpy.prod returns None when dtype is not set, but raises
# exception when dtype is set to np.int64
if shape is not None and np.prod(shape, dtype=np.int64) == 0:
nparray = np.empty(shape, dtype=np_dt)
else:
_Assertconvertible(values, dtype)
nparray = np.array(values, dtype=np_dt)
# We need to pass in quantized values as tuples, so don't apply the shape
# check to them.
if list(nparray.shape) != _GetDenseDimensions(values) and not is_quantized:
raise ValueError(
"""Argument must be a dense tensor: %s"""
""" - got shape %s, but wanted %s."""
% (values, list(nparray.shape), _GetDenseDimensions(values))
)
# python/numpy default float type is float64. We prefer float32 instead.
if (nparray.dtype == np.float64) and dtype is None:
nparray = nparray.astype(np.float32)
# python/numpy default int type is int64. We prefer int32 instead.
elif (nparray.dtype == np.int64) and dtype is None:
downcasted_array = nparray.astype(np.int32)
# Do not down cast if it leads to precision loss.
if np.array_equal(downcasted_array, nparray):
nparray = downcasted_array
# if dtype is provided, it must be convertible with what numpy
# conversion says.
numpy_dtype = dtypes.as_dtype(nparray.dtype)
if numpy_dtype is None:
raise TypeError("Unrecognized data type: %s" % nparray.dtype)
# If dtype was specified and is a quantized type, we convert
# numpy_dtype back into the quantized version.
if is_quantized:
numpy_dtype = dtype
if dtype is not None and (
not hasattr(dtype, "base_dtype") or dtype.base_dtype != numpy_dtype.base_dtype
):
raise TypeError(
"Inconvertible types: %s vs. %s. Value is %s"
% (dtype, nparray.dtype, values)
)
# If shape is not given, get the shape from the numpy array.
if shape is None:
shape = nparray.shape
is_same_size = True
shape_size = nparray.size
else:
shape = [int(dim) for dim in shape]
shape_size = np.prod(shape, dtype=np.int64)
is_same_size = shape_size == nparray.size
if verify_shape:
if not nparray.shape == tuple(shape):
raise TypeError(
"Expected Tensor's shape: %s, got %s."
% (tuple(shape), nparray.shape)
)
if nparray.size > shape_size:
raise ValueError(
"Too many elements provided. Needed at most %d, but received %d"
% (shape_size, nparray.size)
)
tensor_proto = tensor_pb2.TensorProto(
dtype=numpy_dtype.as_datatype_enum,
tensor_shape=tensor_shape.as_shape(shape).as_proto(),
)
if is_same_size and numpy_dtype in _TENSOR_CONTENT_TYPES and shape_size > 1:
if nparray.size * nparray.itemsize >= (1 << 31):
raise ValueError(
"Cannot create a tensor proto whose content is larger than 2GB."
)
tensor_proto.tensor_content = nparray.tostring()
return tensor_proto
# If we were not given values as a numpy array, compute the proto_values
# from the given values directly, to avoid numpy trimming nulls from the
# strings. Since values could be a list of strings, or a multi-dimensional
# list of lists that might or might not correspond to the given shape,
# we flatten it conservatively.
if numpy_dtype == dtypes.string and not isinstance(values, np.ndarray):
proto_values = _FlattenToStrings(values)
# At this point, values may be a list of objects that we could not
# identify a common type for (hence it was inferred as
# np.object/dtypes.string). If we are unable to convert it to a
# string, we raise a more helpful error message.
#
# Ideally, we'd be able to convert the elements of the list to a
# common type, but this type inference requires some thinking and
# so we defer it for now.
try:
str_values = [compat.as_bytes(x) for x in proto_values]
except TypeError:
raise TypeError(
"Failed to convert object of type %s to Tensor. "
"Contents: %s. Consider casting elements to a "
"supported type." % (type(values), values)
)
tensor_proto.string_val.extend(str_values)
return tensor_proto
# TensorFlow expects C order (a.k.a., eigen row major).
proto_values = nparray.ravel()
append_fn = GetNumpyAppendFn(proto_values.dtype)
if append_fn is None:
raise TypeError(
"Element type not supported in TensorProto: %s" % numpy_dtype.name
)
append_fn(tensor_proto, proto_values)
return tensor_proto |
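The int64-to-int32 downcast rule used above can be illustrated with plain NumPy, independent of TensorFlow; this is a standalone sketch of that one branch, not a substitute for the full function.

```python
import numpy as np

def downcast_int64_if_lossless(nparray):
    # Mirror of the branch above: prefer int32, but only when no
    # values change in the conversion.
    downcasted = nparray.astype(np.int32)
    return downcasted if np.array_equal(downcasted, nparray) else nparray

print(downcast_int64_if_lossless(np.array([1, 2, 3], dtype=np.int64)).dtype)  # int32
print(downcast_int64_if_lossless(np.array([2**40], dtype=np.int64)).dtype)    # int64
```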
def execute(self,
image = None,
command = None,
app = None,
writable = False,
contain = False,
bind = None,
stream = False,
nv = False,
return_result=False):
''' execute: send a command to a container
Parameters
==========
image: full path to singularity image
command: command to send to container
app: if not None, execute a command in context of an app
writable: This option makes the file system accessible as read/write
contain: This option disables the automatic sharing of writable
filesystems on your host
bind: list or single string of bind paths.
This option allows you to map directories on your host system to
directories within your container using bind mounts
nv: if True, load Nvidia Drivers in runtime (default False)
return_result: if True, return entire json object with return code
and message result (default is False)
'''
from spython.utils import check_install
check_install()
cmd = self._init_command('exec')
# nv option leverages any GPU cards
if nv is True:
cmd += ['--nv']
# If the image is given as a list, it's probably the command
if isinstance(image, list):
command = image
image = None
if command is not None:
# No image provided, default to use the client's loaded image
if image is None:
image = self._get_uri()
self.quiet = True
# If an instance is provided, grab its name
if isinstance(image, self.instance):
image = image.get_uri()
# Does the user want to use bind paths option?
if bind is not None:
cmd += self._generate_bind_list(bind)
# Does the user want to run an app?
if app is not None:
cmd = cmd + ['--app', app]
sudo = False
if writable is True:
sudo = True
if not isinstance(command, list):
command = command.split(' ')
cmd = cmd + [image] + command
if stream is False:
return self._run_command(cmd,
sudo=sudo,
return_result=return_result)
return stream_command(cmd, sudo=sudo)
bot.error('Please include a command (list) to execute.') | execute: send a command to a container
Parameters
==========
image: full path to singularity image
command: command to send to container
app: if not None, execute a command in context of an app
writable: This option makes the file system accessible as read/write
contain: This option disables the automatic sharing of writable
filesystems on your host
bind: list or single string of bind paths.
This option allows you to map directories on your host system to
directories within your container using bind mounts
nv: if True, load Nvidia Drivers in runtime (default False)
return_result: if True, return entire json object with return code
and message result (default is False) | Below is the instruction that describes the task:
### Input:
execute: send a command to a container
Parameters
==========
image: full path to singularity image
command: command to send to container
app: if not None, execute a command in context of an app
writable: This option makes the file system accessible as read/write
contain: This option disables the automatic sharing of writable
filesystems on your host
bind: list or single string of bind paths.
This option allows you to map directories on your host system to
directories within your container using bind mounts
nv: if True, load Nvidia Drivers in runtime (default False)
return_result: if True, return entire json object with return code
and message result (default is False)
### Response:
def execute(self,
image = None,
command = None,
app = None,
writable = False,
contain = False,
bind = None,
stream = False,
nv = False,
return_result=False):
''' execute: send a command to a container
Parameters
==========
image: full path to singularity image
command: command to send to container
app: if not None, execute a command in context of an app
writable: This option makes the file system accessible as read/write
contain: This option disables the automatic sharing of writable
filesystems on your host
bind: list or single string of bind paths.
This option allows you to map directories on your host system to
directories within your container using bind mounts
nv: if True, load Nvidia Drivers in runtime (default False)
return_result: if True, return entire json object with return code
and message result (default is False)
'''
from spython.utils import check_install
check_install()
cmd = self._init_command('exec')
# nv option leverages any GPU cards
if nv is True:
cmd += ['--nv']
# If the image is given as a list, it's probably the command
if isinstance(image, list):
command = image
image = None
if command is not None:
# No image provided, default to use the client's loaded image
if image is None:
image = self._get_uri()
self.quiet = True
        # If an instance is provided, grab its name
if isinstance(image, self.instance):
image = image.get_uri()
# Does the user want to use bind paths option?
if bind is not None:
cmd += self._generate_bind_list(bind)
# Does the user want to run an app?
if app is not None:
cmd = cmd + ['--app', app]
sudo = False
if writable is True:
sudo = True
if not isinstance(command, list):
command = command.split(' ')
cmd = cmd + [image] + command
if stream is False:
return self._run_command(cmd,
sudo=sudo,
return_result=return_result)
return stream_command(cmd, sudo=sudo)
bot.error('Please include a command (list) to execute.') |
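For orientation, a minimal usage sketch of the execute method documented above, assuming the spython Client and a locally available image (the image path and command are illustrative):

from spython.main import Client

# Run a command inside a container image and collect its output.
# 'my_container.sif' is a placeholder; any local Singularity/Apptainer image works.
output = Client.execute('my_container.sif', ['echo', 'hello from the container'])
print(output)

# Stream output line by line instead of collecting it all at once.
for line in Client.execute('my_container.sif', ['ls', '/'], stream=True):
    print(line, end='')
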
def pretend_option(fn):
# type: (FunctionType) -> FunctionType
""" Decorator to add a --pretend option to any click command.
The value won't be passed down to the command, but rather handled in the
callback. The value will be accessible through `peltak.core.context` under
'pretend' if the command needs it. To get the current value you can do:
>>> from peltak.commands import click, root_cli
>>> from peltak.core import context
>>>
>>> @root_cli.command('my-command')
>>> @pretend_option
>>> def my_command():
... pretend = context.get('pretend', False)
This value will be accessible from anywhere in the code.
"""
def set_pretend(ctx, param, value): # pylint: disable=missing-docstring
# type: (click.Context, str, Any) -> None
from peltak.core import context
from peltak.core import shell
context.set('pretend', value or False)
if value:
shell.cprint('<90>{}', _pretend_msg())
return click.option(
'--pretend',
is_flag=True,
help=("Do not actually do anything, just print shell commands that"
"would be executed."),
expose_value=False,
callback=set_pretend
)(fn) | Decorator to add a --pretend option to any click command.
The value won't be passed down to the command, but rather handled in the
callback. The value will be accessible through `peltak.core.context` under
'pretend' if the command needs it. To get the current value you can do:
>>> from peltak.commands import click, root_cli
>>> from peltak.core import context
>>>
>>> @root_cli.command('my-command')
>>> @pretend_option
>>> def my_command():
... pretend = context.get('pretend', False)
This value will be accessible from anywhere in the code. | Below is the the instruction that describes the task:
### Input:
Decorator to add a --pretend option to any click command.
The value won't be passed down to the command, but rather handled in the
callback. The value will be accessible through `peltak.core.context` under
'pretend' if the command needs it. To get the current value you can do:
>>> from peltak.commands import click, root_cli
>>> from peltak.core import context
>>>
>>> @root_cli.command('my-command')
>>> @pretend_option
>>> def my_command():
... pretend = context.get('pretend', False)
This value will be accessible from anywhere in the code.
### Response:
def pretend_option(fn):
# type: (FunctionType) -> FunctionType
""" Decorator to add a --pretend option to any click command.
The value won't be passed down to the command, but rather handled in the
callback. The value will be accessible through `peltak.core.context` under
'pretend' if the command needs it. To get the current value you can do:
>>> from peltak.commands import click, root_cli
>>> from peltak.core import context
>>>
>>> @root_cli.command('my-command')
>>> @pretend_option
>>> def my_command():
... pretend = context.get('pretend', False)
This value will be accessible from anywhere in the code.
"""
def set_pretend(ctx, param, value): # pylint: disable=missing-docstring
# type: (click.Context, str, Any) -> None
from peltak.core import context
from peltak.core import shell
context.set('pretend', value or False)
if value:
shell.cprint('<90>{}', _pretend_msg())
return click.option(
'--pretend',
is_flag=True,
help=("Do not actually do anything, just print shell commands that"
"would be executed."),
expose_value=False,
callback=set_pretend
)(fn) |
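As a complement to the docstring example above, the decorator can be exercised end to end with click's test runner; the command name is illustrative, and pretend_option is assumed to be importable from peltak.commands alongside the click and root_cli names shown in the docstring:

import click
from click.testing import CliRunner

from peltak.commands import pretend_option
from peltak.core import context

@click.command('deploy')
@pretend_option
def deploy():
    # When --pretend is passed, the callback has already stored the flag
    # in peltak's context before this body runs.
    if context.get('pretend', False):
        click.echo('pretend mode: nothing will actually run')

result = CliRunner().invoke(deploy, ['--pretend'])
print(result.output)
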
def check_version(version, range_=None):
"""Check that the found software version is within supplied range.
Args:
version: Version of the package as a Version object.
range_: Allowable version range as a VersionRange object.
"""
if range_ and version not in range_:
raise RezBindError("found version %s is not within range %s"
% (str(version), str(range_))) | Check that the found software version is within supplied range.
Args:
version: Version of the package as a Version object.
range_: Allowable version range as a VersionRange object. | Below is the the instruction that describes the task:
### Input:
Check that the found software version is within supplied range.
Args:
version: Version of the package as a Version object.
range_: Allowable version range as a VersionRange object.
### Response:
def check_version(version, range_=None):
"""Check that the found software version is within supplied range.
Args:
version: Version of the package as a Version object.
range_: Allowable version range as a VersionRange object.
"""
if range_ and version not in range_:
raise RezBindError("found version %s is not within range %s"
% (str(version), str(range_))) |
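A brief illustration of the contract above; the import path for Version and VersionRange differs between rez releases, so treat it as an assumption:

from rez.vendor.version.version import Version, VersionRange

allowed = VersionRange('2.6+<3')

# Passes silently: 2.7.18 falls inside 2.6+<3.
check_version(Version('2.7.18'), allowed)

# Rejected: 3.9.1 is outside the allowed range.
try:
    check_version(Version('3.9.1'), allowed)
except Exception as exc:   # RezBindError in this module
    print(exc)
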
def put(self, key, value, lease=None):
"""Put puts the given key into the key-value store.
A put request increments the revision of the key-value store
and generates one event in the event history.
:param key:
:param value:
:param lease:
:return: boolean
"""
payload = {
"key": _encode(key),
"value": _encode(value)
}
if lease:
payload['lease'] = lease.id
self.post(self.get_url("/kv/put"), json=payload)
return True | Put puts the given key into the key-value store.
A put request increments the revision of the key-value store
and generates one event in the event history.
:param key:
:param value:
:param lease:
:return: boolean | Below is the the instruction that describes the task:
### Input:
Put puts the given key into the key-value store.
A put request increments the revision of the key-value store
and generates one event in the event history.
:param key:
:param value:
:param lease:
:return: boolean
### Response:
def put(self, key, value, lease=None):
"""Put puts the given key into the key-value store.
A put request increments the revision of the key-value store
and generates one event in the event history.
:param key:
:param value:
:param lease:
:return: boolean
"""
payload = {
"key": _encode(key),
"value": _encode(value)
}
if lease:
payload['lease'] = lease.id
self.post(self.get_url("/kv/put"), json=payload)
return True |
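A hedged usage sketch of the put method above; client stands for an already-constructed instance of the class that defines it, and the stand-in lease below only needs the .id attribute that put() actually reads:

# client: an existing instance of the class defining put() above.
assert client.put('config/feature-flag', 'enabled') is True

class _Lease:
    # Stand-in for a real lease object; put() only accesses lease.id.
    id = 1234567890

client.put('session/abc123', 'alive', lease=_Lease())
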
def filter(self, *filt, **kwargs):
"""Filter this `TimeSeries` with an IIR or FIR filter
Parameters
----------
*filt : filter arguments
1, 2, 3, or 4 arguments defining the filter to be applied,
- an ``Nx1`` `~numpy.ndarray` of FIR coefficients
- an ``Nx6`` `~numpy.ndarray` of SOS coefficients
- ``(numerator, denominator)`` polynomials
- ``(zeros, poles, gain)``
- ``(A, B, C, D)`` 'state-space' representation
filtfilt : `bool`, optional
filter forward and backwards to preserve phase,
default: `False`
analog : `bool`, optional
if `True`, filter coefficients will be converted from Hz
to Z-domain digital representation, default: `False`
inplace : `bool`, optional
if `True`, this array will be overwritten with the filtered
version, default: `False`
**kwargs
other keyword arguments are passed to the filter method
Returns
-------
result : `TimeSeries`
the filtered version of the input `TimeSeries`
Notes
-----
IIR filters are converted either into cascading
second-order sections (if `scipy >= 0.16` is installed), or into the
``(numerator, denominator)`` representation before being applied
to this `TimeSeries`.
.. note::
When using `scipy < 0.16` some higher-order filters may be
unstable. With `scipy >= 0.16` higher-order filters are
decomposed into second-order-sections, and so are much more stable.
FIR filters are passed directly to :func:`scipy.signal.lfilter` or
:func:`scipy.signal.filtfilt` without any conversions.
See also
--------
scipy.signal.sosfilt
for details on filtering with second-order sections
(`scipy >= 0.16` only)
scipy.signal.sosfiltfilt
for details on forward-backward filtering with second-order
sections (`scipy >= 0.18` only)
scipy.signal.lfilter
for details on filtering (without SOS)
scipy.signal.filtfilt
for details on forward-backward filtering (without SOS)
Raises
------
ValueError
if ``filt`` arguments cannot be interpreted properly
Examples
--------
We can design an arbitrarily complicated filter using
:mod:`gwpy.signal.filter_design`
>>> from gwpy.signal import filter_design
>>> bp = filter_design.bandpass(50, 250, 4096.)
>>> notches = [filter_design.notch(f, 4096.) for f in (60, 120, 180)]
>>> zpk = filter_design.concatenate_zpks(bp, *notches)
And then can download some data from LOSC to apply it using
`TimeSeries.filter`:
>>> from gwpy.timeseries import TimeSeries
>>> data = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> filtered = data.filter(zpk, filtfilt=True)
We can plot the original signal, and the filtered version, cutting
off either end of the filtered data to remove filter-edge artefacts
>>> from gwpy.plot import Plot
>>> plot = Plot(data, filtered[128:-128], separate=True)
>>> plot.show()
"""
# parse keyword arguments
filtfilt = kwargs.pop('filtfilt', False)
# parse filter
form, filt = filter_design.parse_filter(
filt, analog=kwargs.pop('analog', False),
sample_rate=self.sample_rate.to('Hz').value,
)
if form == 'zpk':
try:
sos = signal.zpk2sos(*filt)
except AttributeError: # scipy < 0.16, no SOS filtering
sos = None
b, a = signal.zpk2tf(*filt)
else:
sos = None
b, a = filt
# perform filter
kwargs.setdefault('axis', 0)
if sos is not None and filtfilt:
out = signal.sosfiltfilt(sos, self, **kwargs)
elif sos is not None:
out = signal.sosfilt(sos, self, **kwargs)
elif filtfilt:
out = signal.filtfilt(b, a, self, **kwargs)
else:
out = signal.lfilter(b, a, self, **kwargs)
# format as type(self)
new = out.view(type(self))
new.__metadata_finalize__(self)
new._unit = self.unit
return new | Filter this `TimeSeries` with an IIR or FIR filter
Parameters
----------
*filt : filter arguments
1, 2, 3, or 4 arguments defining the filter to be applied,
- an ``Nx1`` `~numpy.ndarray` of FIR coefficients
- an ``Nx6`` `~numpy.ndarray` of SOS coefficients
- ``(numerator, denominator)`` polynomials
- ``(zeros, poles, gain)``
- ``(A, B, C, D)`` 'state-space' representation
filtfilt : `bool`, optional
filter forward and backwards to preserve phase,
default: `False`
analog : `bool`, optional
if `True`, filter coefficients will be converted from Hz
to Z-domain digital representation, default: `False`
inplace : `bool`, optional
if `True`, this array will be overwritten with the filtered
version, default: `False`
**kwargs
other keyword arguments are passed to the filter method
Returns
-------
result : `TimeSeries`
the filtered version of the input `TimeSeries`
Notes
-----
IIR filters are converted either into cascading
second-order sections (if `scipy >= 0.16` is installed), or into the
``(numerator, denominator)`` representation before being applied
to this `TimeSeries`.
.. note::
When using `scipy < 0.16` some higher-order filters may be
unstable. With `scipy >= 0.16` higher-order filters are
decomposed into second-order-sections, and so are much more stable.
FIR filters are passed directly to :func:`scipy.signal.lfilter` or
:func:`scipy.signal.filtfilt` without any conversions.
See also
--------
scipy.signal.sosfilt
for details on filtering with second-order sections
(`scipy >= 0.16` only)
scipy.signal.sosfiltfilt
for details on forward-backward filtering with second-order
sections (`scipy >= 0.18` only)
scipy.signal.lfilter
for details on filtering (without SOS)
scipy.signal.filtfilt
for details on forward-backward filtering (without SOS)
Raises
------
ValueError
if ``filt`` arguments cannot be interpreted properly
Examples
--------
We can design an arbitrarily complicated filter using
:mod:`gwpy.signal.filter_design`
>>> from gwpy.signal import filter_design
>>> bp = filter_design.bandpass(50, 250, 4096.)
>>> notches = [filter_design.notch(f, 4096.) for f in (60, 120, 180)]
>>> zpk = filter_design.concatenate_zpks(bp, *notches)
And then can download some data from LOSC to apply it using
`TimeSeries.filter`:
>>> from gwpy.timeseries import TimeSeries
>>> data = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> filtered = data.filter(zpk, filtfilt=True)
We can plot the original signal, and the filtered version, cutting
off either end of the filtered data to remove filter-edge artefacts
>>> from gwpy.plot import Plot
>>> plot = Plot(data, filtered[128:-128], separate=True)
>>> plot.show() | Below is the the instruction that describes the task:
### Input:
Filter this `TimeSeries` with an IIR or FIR filter
Parameters
----------
*filt : filter arguments
1, 2, 3, or 4 arguments defining the filter to be applied,
- an ``Nx1`` `~numpy.ndarray` of FIR coefficients
- an ``Nx6`` `~numpy.ndarray` of SOS coefficients
- ``(numerator, denominator)`` polynomials
- ``(zeros, poles, gain)``
- ``(A, B, C, D)`` 'state-space' representation
filtfilt : `bool`, optional
filter forward and backwards to preserve phase,
default: `False`
analog : `bool`, optional
if `True`, filter coefficients will be converted from Hz
to Z-domain digital representation, default: `False`
inplace : `bool`, optional
if `True`, this array will be overwritten with the filtered
version, default: `False`
**kwargs
other keyword arguments are passed to the filter method
Returns
-------
result : `TimeSeries`
the filtered version of the input `TimeSeries`
Notes
-----
IIR filters are converted either into cascading
second-order sections (if `scipy >= 0.16` is installed), or into the
``(numerator, denominator)`` representation before being applied
to this `TimeSeries`.
.. note::
When using `scipy < 0.16` some higher-order filters may be
unstable. With `scipy >= 0.16` higher-order filters are
decomposed into second-order-sections, and so are much more stable.
FIR filters are passed directly to :func:`scipy.signal.lfilter` or
:func:`scipy.signal.filtfilt` without any conversions.
See also
--------
scipy.signal.sosfilt
for details on filtering with second-order sections
(`scipy >= 0.16` only)
scipy.signal.sosfiltfilt
for details on forward-backward filtering with second-order
sections (`scipy >= 0.18` only)
scipy.signal.lfilter
for details on filtering (without SOS)
scipy.signal.filtfilt
for details on forward-backward filtering (without SOS)
Raises
------
ValueError
if ``filt`` arguments cannot be interpreted properly
Examples
--------
We can design an arbitrarily complicated filter using
:mod:`gwpy.signal.filter_design`
>>> from gwpy.signal import filter_design
>>> bp = filter_design.bandpass(50, 250, 4096.)
>>> notches = [filter_design.notch(f, 4096.) for f in (60, 120, 180)]
>>> zpk = filter_design.concatenate_zpks(bp, *notches)
And then can download some data from LOSC to apply it using
`TimeSeries.filter`:
>>> from gwpy.timeseries import TimeSeries
>>> data = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> filtered = data.filter(zpk, filtfilt=True)
We can plot the original signal, and the filtered version, cutting
off either end of the filtered data to remove filter-edge artefacts
>>> from gwpy.plot import Plot
>>> plot = Plot(data, filtered[128:-128], separate=True)
>>> plot.show()
### Response:
def filter(self, *filt, **kwargs):
"""Filter this `TimeSeries` with an IIR or FIR filter
Parameters
----------
*filt : filter arguments
1, 2, 3, or 4 arguments defining the filter to be applied,
- an ``Nx1`` `~numpy.ndarray` of FIR coefficients
- an ``Nx6`` `~numpy.ndarray` of SOS coefficients
- ``(numerator, denominator)`` polynomials
- ``(zeros, poles, gain)``
- ``(A, B, C, D)`` 'state-space' representation
filtfilt : `bool`, optional
filter forward and backwards to preserve phase,
default: `False`
analog : `bool`, optional
if `True`, filter coefficients will be converted from Hz
to Z-domain digital representation, default: `False`
inplace : `bool`, optional
if `True`, this array will be overwritten with the filtered
version, default: `False`
**kwargs
other keyword arguments are passed to the filter method
Returns
-------
result : `TimeSeries`
the filtered version of the input `TimeSeries`
Notes
-----
IIR filters are converted either into cascading
second-order sections (if `scipy >= 0.16` is installed), or into the
``(numerator, denominator)`` representation before being applied
to this `TimeSeries`.
.. note::
When using `scipy < 0.16` some higher-order filters may be
unstable. With `scipy >= 0.16` higher-order filters are
decomposed into second-order-sections, and so are much more stable.
FIR filters are passed directly to :func:`scipy.signal.lfilter` or
:func:`scipy.signal.filtfilt` without any conversions.
See also
--------
scipy.signal.sosfilt
for details on filtering with second-order sections
(`scipy >= 0.16` only)
scipy.signal.sosfiltfilt
for details on forward-backward filtering with second-order
sections (`scipy >= 0.18` only)
scipy.signal.lfilter
for details on filtering (without SOS)
scipy.signal.filtfilt
for details on forward-backward filtering (without SOS)
Raises
------
ValueError
if ``filt`` arguments cannot be interpreted properly
Examples
--------
We can design an arbitrarily complicated filter using
:mod:`gwpy.signal.filter_design`
>>> from gwpy.signal import filter_design
>>> bp = filter_design.bandpass(50, 250, 4096.)
>>> notches = [filter_design.notch(f, 4096.) for f in (60, 120, 180)]
>>> zpk = filter_design.concatenate_zpks(bp, *notches)
And then can download some data from LOSC to apply it using
`TimeSeries.filter`:
>>> from gwpy.timeseries import TimeSeries
>>> data = TimeSeries.fetch_open_data('H1', 1126259446, 1126259478)
>>> filtered = data.filter(zpk, filtfilt=True)
We can plot the original signal, and the filtered version, cutting
off either end of the filtered data to remove filter-edge artefacts
>>> from gwpy.plot import Plot
>>> plot = Plot(data, filtered[128:-128], separate=True)
>>> plot.show()
"""
# parse keyword arguments
filtfilt = kwargs.pop('filtfilt', False)
# parse filter
form, filt = filter_design.parse_filter(
filt, analog=kwargs.pop('analog', False),
sample_rate=self.sample_rate.to('Hz').value,
)
if form == 'zpk':
try:
sos = signal.zpk2sos(*filt)
except AttributeError: # scipy < 0.16, no SOS filtering
sos = None
b, a = signal.zpk2tf(*filt)
else:
sos = None
b, a = filt
# perform filter
kwargs.setdefault('axis', 0)
if sos is not None and filtfilt:
out = signal.sosfiltfilt(sos, self, **kwargs)
elif sos is not None:
out = signal.sosfilt(sos, self, **kwargs)
elif filtfilt:
out = signal.filtfilt(b, a, self, **kwargs)
else:
out = signal.lfilter(b, a, self, **kwargs)
# format as type(self)
new = out.view(type(self))
new.__metadata_finalize__(self)
new._unit = self.unit
return new |
def find_node(self, name, create=False):
"""Find a node in the zone, possibly creating it.
@param name: the name of the node to find
@type name: dns.name.Name object or string
@param create: should the node be created if it doesn't exist?
@type create: bool
@raises KeyError: the name is not known and create was not specified.
@rtype: dns.node.Node object
"""
name = self._validate_name(name)
node = self.nodes.get(name)
if node is None:
if not create:
raise KeyError
node = self.node_factory()
self.nodes[name] = node
return node | Find a node in the zone, possibly creating it.
@param name: the name of the node to find
@type name: dns.name.Name object or string
@param create: should the node be created if it doesn't exist?
@type create: bool
@raises KeyError: the name is not known and create was not specified.
@rtype: dns.node.Node object | Below is the the instruction that describes the task:
### Input:
Find a node in the zone, possibly creating it.
@param name: the name of the node to find
@type name: dns.name.Name object or string
@param create: should the node be created if it doesn't exist?
@type create: bool
@raises KeyError: the name is not known and create was not specified.
@rtype: dns.node.Node object
### Response:
def find_node(self, name, create=False):
"""Find a node in the zone, possibly creating it.
@param name: the name of the node to find
@type name: dns.name.Name object or string
@param create: should the node be created if it doesn't exist?
@type create: bool
@raises KeyError: the name is not known and create was not specified.
@rtype: dns.node.Node object
"""
name = self._validate_name(name)
node = self.nodes.get(name)
if node is None:
if not create:
raise KeyError
node = self.node_factory()
self.nodes[name] = node
return node |
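A short sketch against dnspython's zone API; the zone text, origin and node names are illustrative:

import dns.zone

zone = dns.zone.from_text(
    "@ 3600 IN SOA ns admin 1 7200 900 1209600 86400\n"
    "@ 3600 IN NS ns\n"
    "www 3600 IN A 192.0.2.10\n",
    origin="example.com",
)

node = zone.find_node("www")               # existing node: returned directly
api = zone.find_node("api", create=True)   # missing node: created on demand
print(sorted(name.to_text() for name in zone.nodes))
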
def apply(expr, func, axis=0, names=None, types=None, reduce=False,
resources=None, keep_nulls=False, args=(), **kwargs):
"""
Apply a function to a row when axis=1 or column when axis=0.
:param expr:
:param func: function to apply
:param axis: row when axis=1 else column
:param names: output names
:param types: output types
:param reduce: if True will return a sequence else return a collection
:param resources: resources to read
    :param keep_nulls: if True, keep rows producing empty results (only works in lateral views)
:param args: args for function
:param kwargs: kwargs for function
:return:
:Example:
Apply a function to a row:
>>> from odps.df import output
>>>
>>> @output(['iris_add', 'iris_sub'], ['float', 'float'])
>>> def handle(row):
>>> yield row.sepallength - row.sepalwidth, row.sepallength + row.sepalwidth
>>> yield row.petallength - row.petalwidth, row.petallength + row.petalwidth
>>>
>>> iris.apply(handle, axis=1).count()
Apply a function to a column:
>>> class Agg(object):
>>>
>>> def buffer(self):
>>> return [0.0, 0]
>>>
>>> def __call__(self, buffer, val):
>>> buffer[0] += val
>>> buffer[1] += 1
>>>
>>> def merge(self, buffer, pbuffer):
>>> buffer[0] += pbuffer[0]
>>> buffer[1] += pbuffer[1]
>>>
>>> def getvalue(self, buffer):
>>> if buffer[1] == 0:
>>> return 0.0
>>> return buffer[0] / buffer[1]
>>>
>>> iris.exclude('name').apply(Agg)
"""
if not isinstance(expr, CollectionExpr):
return
if isinstance(func, FunctionWrapper):
names = names or func.output_names
types = types or func.output_types
func = func._func
if axis == 0:
types = types or expr.schema.types
types = [validate_data_type(t) for t in types]
fields = [expr[n].agg(func, rtype=t, resources=resources)
for n, t in zip(expr.schema.names, types)]
if names:
fields = [f.rename(n) for f, n in zip(fields, names)]
else:
names = [f.name for f in fields]
return Summary(_input=expr, _fields=fields, _schema=Schema.from_lists(names, types))
else:
collection_resources = utils.get_collection_resources(resources)
if types is not None:
if isinstance(types, list):
types = tuple(types)
elif isinstance(types, six.string_types):
types = (types,)
types = tuple(validate_data_type(t) for t in types)
if reduce:
from .element import MappedExpr
from ..backends.context import context
if names is not None and len(names) > 1:
raise ValueError('When reduce, at most one name can be specified')
name = names[0] if names is not None else None
if not types and kwargs.get('rtype', None) is not None:
types = [kwargs.pop('rtype')]
tp = types[0] if types is not None else (utils.get_annotation_rtype(func) or string)
if not context.is_cached(expr) and (hasattr(expr, '_fields') and expr._fields is not None):
inputs = [e.copy_tree(stop_cond=lambda x: any(i is expr.input for i in x.children()))
for e in expr._fields]
else:
inputs = [expr[n] for n in expr.schema.names]
return MappedExpr(_func=func, _func_args=args, _func_kwargs=kwargs,
_name=name, _data_type=tp,
_inputs=inputs, _multiple=True,
_resources=resources, _collection_resources=collection_resources)
else:
return _apply_horizontal(expr, func, names=names, types=types, resources=resources,
collection_resources=collection_resources, keep_nulls=keep_nulls,
args=args, **kwargs) | Apply a function to a row when axis=1 or column when axis=0.
:param expr:
:param func: function to apply
:param axis: row when axis=1 else column
:param names: output names
:param types: output types
:param reduce: if True will return a sequence else return a collection
:param resources: resources to read
    :param keep_nulls: if True, keep rows producing empty results (only works in lateral views)
:param args: args for function
:param kwargs: kwargs for function
:return:
:Example:
Apply a function to a row:
>>> from odps.df import output
>>>
>>> @output(['iris_add', 'iris_sub'], ['float', 'float'])
>>> def handle(row):
>>> yield row.sepallength - row.sepalwidth, row.sepallength + row.sepalwidth
>>> yield row.petallength - row.petalwidth, row.petallength + row.petalwidth
>>>
>>> iris.apply(handle, axis=1).count()
Apply a function to a column:
>>> class Agg(object):
>>>
>>> def buffer(self):
>>> return [0.0, 0]
>>>
>>> def __call__(self, buffer, val):
>>> buffer[0] += val
>>> buffer[1] += 1
>>>
>>> def merge(self, buffer, pbuffer):
>>> buffer[0] += pbuffer[0]
>>> buffer[1] += pbuffer[1]
>>>
>>> def getvalue(self, buffer):
>>> if buffer[1] == 0:
>>> return 0.0
>>> return buffer[0] / buffer[1]
>>>
>>> iris.exclude('name').apply(Agg) | Below is the the instruction that describes the task:
### Input:
Apply a function to a row when axis=1 or column when axis=0.
:param expr:
:param func: function to apply
:param axis: row when axis=1 else column
:param names: output names
:param types: output types
:param reduce: if True will return a sequence else return a collection
:param resources: resources to read
    :param keep_nulls: if True, keep rows producing empty results (only works in lateral views)
:param args: args for function
:param kwargs: kwargs for function
:return:
:Example:
Apply a function to a row:
>>> from odps.df import output
>>>
>>> @output(['iris_add', 'iris_sub'], ['float', 'float'])
>>> def handle(row):
>>> yield row.sepallength - row.sepalwidth, row.sepallength + row.sepalwidth
>>> yield row.petallength - row.petalwidth, row.petallength + row.petalwidth
>>>
>>> iris.apply(handle, axis=1).count()
Apply a function to a column:
>>> class Agg(object):
>>>
>>> def buffer(self):
>>> return [0.0, 0]
>>>
>>> def __call__(self, buffer, val):
>>> buffer[0] += val
>>> buffer[1] += 1
>>>
>>> def merge(self, buffer, pbuffer):
>>> buffer[0] += pbuffer[0]
>>> buffer[1] += pbuffer[1]
>>>
>>> def getvalue(self, buffer):
>>> if buffer[1] == 0:
>>> return 0.0
>>> return buffer[0] / buffer[1]
>>>
>>> iris.exclude('name').apply(Agg)
### Response:
def apply(expr, func, axis=0, names=None, types=None, reduce=False,
resources=None, keep_nulls=False, args=(), **kwargs):
"""
Apply a function to a row when axis=1 or column when axis=0.
:param expr:
:param func: function to apply
:param axis: row when axis=1 else column
:param names: output names
:param types: output types
:param reduce: if True will return a sequence else return a collection
:param resources: resources to read
    :param keep_nulls: if True, keep rows producing empty results (only works in lateral views)
:param args: args for function
:param kwargs: kwargs for function
:return:
:Example:
Apply a function to a row:
>>> from odps.df import output
>>>
>>> @output(['iris_add', 'iris_sub'], ['float', 'float'])
>>> def handle(row):
>>> yield row.sepallength - row.sepalwidth, row.sepallength + row.sepalwidth
>>> yield row.petallength - row.petalwidth, row.petallength + row.petalwidth
>>>
>>> iris.apply(handle, axis=1).count()
Apply a function to a column:
>>> class Agg(object):
>>>
>>> def buffer(self):
>>> return [0.0, 0]
>>>
>>> def __call__(self, buffer, val):
>>> buffer[0] += val
>>> buffer[1] += 1
>>>
>>> def merge(self, buffer, pbuffer):
>>> buffer[0] += pbuffer[0]
>>> buffer[1] += pbuffer[1]
>>>
>>> def getvalue(self, buffer):
>>> if buffer[1] == 0:
>>> return 0.0
>>> return buffer[0] / buffer[1]
>>>
>>> iris.exclude('name').apply(Agg)
"""
if not isinstance(expr, CollectionExpr):
return
if isinstance(func, FunctionWrapper):
names = names or func.output_names
types = types or func.output_types
func = func._func
if axis == 0:
types = types or expr.schema.types
types = [validate_data_type(t) for t in types]
fields = [expr[n].agg(func, rtype=t, resources=resources)
for n, t in zip(expr.schema.names, types)]
if names:
fields = [f.rename(n) for f, n in zip(fields, names)]
else:
names = [f.name for f in fields]
return Summary(_input=expr, _fields=fields, _schema=Schema.from_lists(names, types))
else:
collection_resources = utils.get_collection_resources(resources)
if types is not None:
if isinstance(types, list):
types = tuple(types)
elif isinstance(types, six.string_types):
types = (types,)
types = tuple(validate_data_type(t) for t in types)
if reduce:
from .element import MappedExpr
from ..backends.context import context
if names is not None and len(names) > 1:
raise ValueError('When reduce, at most one name can be specified')
name = names[0] if names is not None else None
if not types and kwargs.get('rtype', None) is not None:
types = [kwargs.pop('rtype')]
tp = types[0] if types is not None else (utils.get_annotation_rtype(func) or string)
if not context.is_cached(expr) and (hasattr(expr, '_fields') and expr._fields is not None):
inputs = [e.copy_tree(stop_cond=lambda x: any(i is expr.input for i in x.children()))
for e in expr._fields]
else:
inputs = [expr[n] for n in expr.schema.names]
return MappedExpr(_func=func, _func_args=args, _func_kwargs=kwargs,
_name=name, _data_type=tp,
_inputs=inputs, _multiple=True,
_resources=resources, _collection_resources=collection_resources)
else:
return _apply_horizontal(expr, func, names=names, types=types, resources=resources,
collection_resources=collection_resources, keep_nulls=keep_nulls,
args=args, **kwargs) |
def isdir(path):
"""
Return True if path is an existing directory.
Equivalent to "os.path.isdir".
Args:
path (path-like object): Path or URL.
Returns:
bool: True if directory exists.
"""
system = get_instance(path)
# User may use directory path without trailing '/'
# like on standard file systems
return system.isdir(system.ensure_dir_path(path)) | Return True if path is an existing directory.
Equivalent to "os.path.isdir".
Args:
path (path-like object): Path or URL.
Returns:
bool: True if directory exists. | Below is the the instruction that describes the task:
### Input:
Return True if path is an existing directory.
Equivalent to "os.path.isdir".
Args:
path (path-like object): Path or URL.
Returns:
bool: True if directory exists.
### Response:
def isdir(path):
"""
Return True if path is an existing directory.
Equivalent to "os.path.isdir".
Args:
path (path-like object): Path or URL.
Returns:
bool: True if directory exists.
"""
system = get_instance(path)
# User may use directory path without trailing '/'
# like on standard file systems
return system.isdir(system.ensure_dir_path(path)) |
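A quick check of the helper above on a local path, assuming local paths are handled by the default system returned by get_instance(); remote URLs (for example 's3://bucket/prefix/') follow the same call once the matching storage backend has been configured:

import os
import tempfile

tmp = tempfile.mkdtemp()
print(isdir(tmp))                            # True: an existing local directory
print(isdir(os.path.join(tmp, 'missing')))   # False: nothing at that path
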
def from_args(self, **kwargs):
"""Read config values from `kwargs`."""
for k, v in iitems(kwargs):
if v is not None:
if k in self.MUST_BE_LIST and isinstance(v, string_class):
v = [v]
setattr(self, k, v) | Read config values from `kwargs`. | Below is the the instruction that describes the task:
### Input:
Read config values from `kwargs`.
### Response:
def from_args(self, **kwargs):
"""Read config values from `kwargs`."""
for k, v in iitems(kwargs):
if v is not None:
if k in self.MUST_BE_LIST and isinstance(v, string_class):
v = [v]
setattr(self, k, v) |
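To make the coercion above concrete, a hedged sketch of how a call plays out; cfg stands for an instance of the surrounding config class, and it is assumed (as in coverage.py's CoverageConfig) that MUST_BE_LIST includes 'omit':

# cfg: an instance of the config class that defines from_args().
cfg.from_args(branch=True, omit='*/tests/*', timid=None)

# Afterwards:
#   cfg.branch == True
#   cfg.omit   == ['*/tests/*']   # a bare string is wrapped into a list
#   timid is untouched because None values are skipped
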
def get_session(user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs):
# type: (Optional[str], Optional[str], Optional[str], Any) -> requests.Session
"""Set up and return Session object that is set up with retrying. Requires either global user agent to be set or
appropriate user agent parameter(s) to be completed.
Args:
user_agent (Optional[str]): User agent string. HDXPythonUtilities/X.X.X- is prefixed.
user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.
user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.
**kwargs: See below
auth (Tuple[str, str]): Authorisation information in tuple form (user, pass) OR
basic_auth (str): Authorisation information in basic auth string form (Basic xxxxxxxxxxxxxxxx) OR
basic_auth_file (str): Path to file containing authorisation information in basic auth string form (Basic xxxxxxxxxxxxxxxx)
extra_params_dict (Dict): Extra parameters to put on end of url as a dictionary OR
extra_params_json (str): Path to JSON file containing extra parameters to put on end of url OR
extra_params_yaml (str): Path to YAML file containing extra parameters to put on end of url
extra_params_lookup (str): Lookup key for parameters. If not given assumes parameters are at root of the dict.
status_forcelist (iterable): HTTP statuses for which to force retry. Defaults to [429, 500, 502, 503, 504].
        method_whitelist (iterable): HTTP methods for which to force retry. Defaults to frozenset(['GET']).
"""
s = requests.Session()
ua = kwargs.get('full_agent')
if not ua:
ua = UserAgent.get(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs)
s.headers['User-Agent'] = ua
extra_params = os.getenv('EXTRA_PARAMS')
if extra_params is not None:
extra_params_dict = dict()
if '=' in extra_params:
logger.info('Loading extra parameters from environment variable')
for extra_param in extra_params.split(','):
key, value = extra_param.split('=')
extra_params_dict[key] = value
else:
extra_params_found = False
extra_params_dict = kwargs.get('extra_params_dict')
if extra_params_dict:
extra_params_found = True
logger.info('Loading extra parameters from dictionary')
extra_params_json = kwargs.get('extra_params_json', '')
if extra_params_json:
if extra_params_found:
raise SessionError('More than one set of extra parameters given!')
extra_params_found = True
logger.info('Loading extra parameters from: %s' % extra_params_json)
extra_params_dict = load_json(extra_params_json)
extra_params_yaml = kwargs.get('extra_params_yaml', '')
if extra_params_found:
if extra_params_yaml:
raise SessionError('More than one set of extra parameters given!')
else:
if extra_params_yaml:
logger.info('Loading extra parameters from: %s' % extra_params_yaml)
extra_params_dict = load_yaml(extra_params_yaml)
else:
extra_params_dict = dict()
extra_params_lookup = kwargs.get('extra_params_lookup')
if extra_params_lookup:
extra_params_dict = extra_params_dict.get(extra_params_lookup)
if extra_params_dict is None:
raise SessionError('%s does not exist in extra_params!' % extra_params_lookup)
auth_found = False
basic_auth = os.getenv('BASIC_AUTH')
if basic_auth:
logger.info('Loading authorisation from basic_auth environment variable')
auth_found = True
else:
basic_auth = kwargs.get('basic_auth')
if basic_auth:
logger.info('Loading authorisation from basic_auth argument')
auth_found = True
bauth = extra_params_dict.get('basic_auth')
if bauth:
if not auth_found:
basic_auth = bauth
logger.info('Loading authorisation from basic_auth parameter')
auth_found = True
del extra_params_dict['basic_auth']
s.params = extra_params_dict
auth = kwargs.get('auth')
if auth:
if auth_found:
raise SessionError('More than one authorisation given!')
logger.info('Loading authorisation from auth argument')
auth_found = True
basic_auth_file = kwargs.get('basic_auth_file')
if basic_auth_file:
if auth_found:
raise SessionError('More than one authorisation given!')
logger.info('Loading authorisation from: %s' % basic_auth_file)
basic_auth = load_file_to_str(basic_auth_file)
if basic_auth:
auth = decode(basic_auth)
s.auth = auth
status_forcelist = kwargs.get('status_forcelist', [429, 500, 502, 503, 504])
method_whitelist = kwargs.get('method_whitelist', frozenset(['HEAD', 'TRACE', 'GET', 'PUT', 'OPTIONS', 'DELETE']))
retries = Retry(total=5, backoff_factor=0.4, status_forcelist=status_forcelist, method_whitelist=method_whitelist,
raise_on_redirect=True,
raise_on_status=True)
s.mount('http://', HTTPAdapter(max_retries=retries, pool_connections=100, pool_maxsize=100))
s.mount('https://', HTTPAdapter(max_retries=retries, pool_connections=100, pool_maxsize=100))
return s | Set up and return Session object that is set up with retrying. Requires either global user agent to be set or
appropriate user agent parameter(s) to be completed.
Args:
user_agent (Optional[str]): User agent string. HDXPythonUtilities/X.X.X- is prefixed.
user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.
user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.
**kwargs: See below
auth (Tuple[str, str]): Authorisation information in tuple form (user, pass) OR
basic_auth (str): Authorisation information in basic auth string form (Basic xxxxxxxxxxxxxxxx) OR
basic_auth_file (str): Path to file containing authorisation information in basic auth string form (Basic xxxxxxxxxxxxxxxx)
extra_params_dict (Dict): Extra parameters to put on end of url as a dictionary OR
extra_params_json (str): Path to JSON file containing extra parameters to put on end of url OR
extra_params_yaml (str): Path to YAML file containing extra parameters to put on end of url
extra_params_lookup (str): Lookup key for parameters. If not given assumes parameters are at root of the dict.
status_forcelist (iterable): HTTP statuses for which to force retry. Defaults to [429, 500, 502, 503, 504].
    method_whitelist (iterable): HTTP methods for which to force retry. Defaults to frozenset(['GET']). | Below is the the instruction that describes the task:
### Input:
Set up and return Session object that is set up with retrying. Requires either global user agent to be set or
appropriate user agent parameter(s) to be completed.
Args:
user_agent (Optional[str]): User agent string. HDXPythonUtilities/X.X.X- is prefixed.
user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.
user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.
**kwargs: See below
auth (Tuple[str, str]): Authorisation information in tuple form (user, pass) OR
basic_auth (str): Authorisation information in basic auth string form (Basic xxxxxxxxxxxxxxxx) OR
basic_auth_file (str): Path to file containing authorisation information in basic auth string form (Basic xxxxxxxxxxxxxxxx)
extra_params_dict (Dict): Extra parameters to put on end of url as a dictionary OR
extra_params_json (str): Path to JSON file containing extra parameters to put on end of url OR
extra_params_yaml (str): Path to YAML file containing extra parameters to put on end of url
extra_params_lookup (str): Lookup key for parameters. If not given assumes parameters are at root of the dict.
status_forcelist (iterable): HTTP statuses for which to force retry. Defaults to [429, 500, 502, 503, 504].
    method_whitelist (iterable): HTTP methods for which to force retry. Defaults to frozenset(['GET']).
### Response:
def get_session(user_agent=None, user_agent_config_yaml=None, user_agent_lookup=None, **kwargs):
# type: (Optional[str], Optional[str], Optional[str], Any) -> requests.Session
"""Set up and return Session object that is set up with retrying. Requires either global user agent to be set or
appropriate user agent parameter(s) to be completed.
Args:
user_agent (Optional[str]): User agent string. HDXPythonUtilities/X.X.X- is prefixed.
user_agent_config_yaml (Optional[str]): Path to YAML user agent configuration. Ignored if user_agent supplied. Defaults to ~/.useragent.yml.
user_agent_lookup (Optional[str]): Lookup key for YAML. Ignored if user_agent supplied.
**kwargs: See below
auth (Tuple[str, str]): Authorisation information in tuple form (user, pass) OR
basic_auth (str): Authorisation information in basic auth string form (Basic xxxxxxxxxxxxxxxx) OR
basic_auth_file (str): Path to file containing authorisation information in basic auth string form (Basic xxxxxxxxxxxxxxxx)
extra_params_dict (Dict): Extra parameters to put on end of url as a dictionary OR
extra_params_json (str): Path to JSON file containing extra parameters to put on end of url OR
extra_params_yaml (str): Path to YAML file containing extra parameters to put on end of url
extra_params_lookup (str): Lookup key for parameters. If not given assumes parameters are at root of the dict.
status_forcelist (iterable): HTTP statuses for which to force retry. Defaults to [429, 500, 502, 503, 504].
        method_whitelist (iterable): HTTP methods for which to force retry. Defaults to frozenset(['GET']).
"""
s = requests.Session()
ua = kwargs.get('full_agent')
if not ua:
ua = UserAgent.get(user_agent, user_agent_config_yaml, user_agent_lookup, **kwargs)
s.headers['User-Agent'] = ua
extra_params = os.getenv('EXTRA_PARAMS')
if extra_params is not None:
extra_params_dict = dict()
if '=' in extra_params:
logger.info('Loading extra parameters from environment variable')
for extra_param in extra_params.split(','):
key, value = extra_param.split('=')
extra_params_dict[key] = value
else:
extra_params_found = False
extra_params_dict = kwargs.get('extra_params_dict')
if extra_params_dict:
extra_params_found = True
logger.info('Loading extra parameters from dictionary')
extra_params_json = kwargs.get('extra_params_json', '')
if extra_params_json:
if extra_params_found:
raise SessionError('More than one set of extra parameters given!')
extra_params_found = True
logger.info('Loading extra parameters from: %s' % extra_params_json)
extra_params_dict = load_json(extra_params_json)
extra_params_yaml = kwargs.get('extra_params_yaml', '')
if extra_params_found:
if extra_params_yaml:
raise SessionError('More than one set of extra parameters given!')
else:
if extra_params_yaml:
logger.info('Loading extra parameters from: %s' % extra_params_yaml)
extra_params_dict = load_yaml(extra_params_yaml)
else:
extra_params_dict = dict()
extra_params_lookup = kwargs.get('extra_params_lookup')
if extra_params_lookup:
extra_params_dict = extra_params_dict.get(extra_params_lookup)
if extra_params_dict is None:
raise SessionError('%s does not exist in extra_params!' % extra_params_lookup)
auth_found = False
basic_auth = os.getenv('BASIC_AUTH')
if basic_auth:
logger.info('Loading authorisation from basic_auth environment variable')
auth_found = True
else:
basic_auth = kwargs.get('basic_auth')
if basic_auth:
logger.info('Loading authorisation from basic_auth argument')
auth_found = True
bauth = extra_params_dict.get('basic_auth')
if bauth:
if not auth_found:
basic_auth = bauth
logger.info('Loading authorisation from basic_auth parameter')
auth_found = True
del extra_params_dict['basic_auth']
s.params = extra_params_dict
auth = kwargs.get('auth')
if auth:
if auth_found:
raise SessionError('More than one authorisation given!')
logger.info('Loading authorisation from auth argument')
auth_found = True
basic_auth_file = kwargs.get('basic_auth_file')
if basic_auth_file:
if auth_found:
raise SessionError('More than one authorisation given!')
logger.info('Loading authorisation from: %s' % basic_auth_file)
basic_auth = load_file_to_str(basic_auth_file)
if basic_auth:
auth = decode(basic_auth)
s.auth = auth
status_forcelist = kwargs.get('status_forcelist', [429, 500, 502, 503, 504])
method_whitelist = kwargs.get('method_whitelist', frozenset(['HEAD', 'TRACE', 'GET', 'PUT', 'OPTIONS', 'DELETE']))
retries = Retry(total=5, backoff_factor=0.4, status_forcelist=status_forcelist, method_whitelist=method_whitelist,
raise_on_redirect=True,
raise_on_status=True)
s.mount('http://', HTTPAdapter(max_retries=retries, pool_connections=100, pool_maxsize=100))
s.mount('https://', HTTPAdapter(max_retries=retries, pool_connections=100, pool_maxsize=100))
return s |
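A usage sketch of get_session above; the user agent, extra parameter and URL are illustrative:

session = get_session(
    user_agent='MyDataPipeline',
    extra_params_dict={'api_key': 'not-a-real-key'},   # appended to every request URL
    status_forcelist=[429, 500, 502, 503, 504],
)

response = session.get('https://example.org/api/resource')
response.raise_for_status()
print(response.status_code)
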
def save_load(jid, clear_load, minion=None):
'''
Save the load to the specified jid
'''
cb_ = _get_connection()
try:
jid_doc = cb_.get(six.text_type(jid))
except couchbase.exceptions.NotFoundError:
cb_.add(six.text_type(jid), {}, ttl=_get_ttl())
jid_doc = cb_.get(six.text_type(jid))
jid_doc.value['load'] = clear_load
cb_.replace(six.text_type(jid), jid_doc.value, cas=jid_doc.cas, ttl=_get_ttl())
# if you have a tgt, save that for the UI etc
if 'tgt' in clear_load and clear_load['tgt'] != '':
ckminions = salt.utils.minions.CkMinions(__opts__)
# Retrieve the minions list
_res = ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob')
)
minions = _res['minions']
save_minions(jid, minions) | Save the load to the specified jid | Below is the the instruction that describes the task:
### Input:
Save the load to the specified jid
### Response:
def save_load(jid, clear_load, minion=None):
'''
Save the load to the specified jid
'''
cb_ = _get_connection()
try:
jid_doc = cb_.get(six.text_type(jid))
except couchbase.exceptions.NotFoundError:
cb_.add(six.text_type(jid), {}, ttl=_get_ttl())
jid_doc = cb_.get(six.text_type(jid))
jid_doc.value['load'] = clear_load
cb_.replace(six.text_type(jid), jid_doc.value, cas=jid_doc.cas, ttl=_get_ttl())
# if you have a tgt, save that for the UI etc
if 'tgt' in clear_load and clear_load['tgt'] != '':
ckminions = salt.utils.minions.CkMinions(__opts__)
# Retrieve the minions list
_res = ckminions.check_minions(
clear_load['tgt'],
clear_load.get('tgt_type', 'glob')
)
minions = _res['minions']
save_minions(jid, minions) |
def _get_cross_pt_dual_simp(self, dual_simp):
"""
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
plane function:
normal[0]x + normal[1]y + normal[2]z = e_surf
from self:
normal_e_m to get the plane functions
dual_simp: (i, j, k) simplices from the dual convex hull
i, j, k: plane index(same order in normal_e_m)
"""
matrix_surfs = [self.facets[dual_simp[i]].normal for i in range(3)]
matrix_e = [self.facets[dual_simp[i]].e_surf for i in range(3)]
cross_pt = sp.dot(sp.linalg.inv(matrix_surfs), matrix_e)
return cross_pt | |normal| = 1, e_surf is plane's distance to (0, 0, 0),
plane function:
normal[0]x + normal[1]y + normal[2]z = e_surf
from self:
normal_e_m to get the plane functions
dual_simp: (i, j, k) simplices from the dual convex hull
i, j, k: plane index(same order in normal_e_m) | Below is the the instruction that describes the task:
### Input:
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
plane function:
normal[0]x + normal[1]y + normal[2]z = e_surf
from self:
normal_e_m to get the plane functions
dual_simp: (i, j, k) simplices from the dual convex hull
i, j, k: plane index(same order in normal_e_m)
### Response:
def _get_cross_pt_dual_simp(self, dual_simp):
"""
|normal| = 1, e_surf is plane's distance to (0, 0, 0),
plane function:
normal[0]x + normal[1]y + normal[2]z = e_surf
from self:
normal_e_m to get the plane functions
dual_simp: (i, j, k) simplices from the dual convex hull
i, j, k: plane index(same order in normal_e_m)
"""
matrix_surfs = [self.facets[dual_simp[i]].normal for i in range(3)]
matrix_e = [self.facets[dual_simp[i]].e_surf for i in range(3)]
cross_pt = sp.dot(sp.linalg.inv(matrix_surfs), matrix_e)
return cross_pt |
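The linear algebra behind the method above, worked on three illustrative planes: stack the unit normals row-wise and solve N·x = e to get the common intersection point.

import numpy as np

# Three planes written as normal . x = e_surf, with |normal| = 1.
normals = np.array([[1.0, 0.0, 0.0],
                    [0.0, 1.0, 0.0],
                    [0.0, 0.0, 1.0]])
e_surf = np.array([2.0, 3.0, 4.0])

# Equivalent to sp.dot(sp.linalg.inv(matrix_surfs), matrix_e) in the method,
# but solve() avoids forming the explicit inverse.
cross_pt = np.linalg.solve(normals, e_surf)
print(cross_pt)            # [2. 3. 4.]

# Check: the point satisfies every plane equation.
assert np.allclose(normals @ cross_pt, e_surf)
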
def do_videoplaceholder(parser, token):
"""
    Method that parses the videoplaceholder template tag.
"""
name, params = parse_placeholder(parser, token)
    return VideoPlaceholderNode(name, **params) | Method that parses the videoplaceholder template tag. | Below is the the instruction that describes the task:
### Input:
Method that parses the videoplaceholder template tag.
### Response:
def do_videoplaceholder(parser, token):
"""
    Method that parses the videoplaceholder template tag.
"""
name, params = parse_placeholder(parser, token)
return VideoPlaceholderNode(name, **params) |
def check(frame) -> None:
"""
Check that this frame contains acceptable values.
Raise :exc:`~websockets.exceptions.WebSocketProtocolError` if this
frame contains incorrect values.
"""
# The first parameter is called `frame` rather than `self`,
    # but it's the instance of the class to which this method is bound.
if frame.rsv1 or frame.rsv2 or frame.rsv3:
raise WebSocketProtocolError("Reserved bits must be 0")
if frame.opcode in DATA_OPCODES:
return
elif frame.opcode in CTRL_OPCODES:
if len(frame.data) > 125:
raise WebSocketProtocolError("Control frame too long")
if not frame.fin:
raise WebSocketProtocolError("Fragmented control frame")
else:
raise WebSocketProtocolError(f"Invalid opcode: {frame.opcode}") | Check that this frame contains acceptable values.
Raise :exc:`~websockets.exceptions.WebSocketProtocolError` if this
frame contains incorrect values. | Below is the the instruction that describes the task:
### Input:
Check that this frame contains acceptable values.
Raise :exc:`~websockets.exceptions.WebSocketProtocolError` if this
frame contains incorrect values.
### Response:
def check(frame) -> None:
"""
Check that this frame contains acceptable values.
Raise :exc:`~websockets.exceptions.WebSocketProtocolError` if this
frame contains incorrect values.
"""
# The first parameter is called `frame` rather than `self`,
    # but it's the instance of the class to which this method is bound.
if frame.rsv1 or frame.rsv2 or frame.rsv3:
raise WebSocketProtocolError("Reserved bits must be 0")
if frame.opcode in DATA_OPCODES:
return
elif frame.opcode in CTRL_OPCODES:
if len(frame.data) > 125:
raise WebSocketProtocolError("Control frame too long")
if not frame.fin:
raise WebSocketProtocolError("Fragmented control frame")
else:
raise WebSocketProtocolError(f"Invalid opcode: {frame.opcode}") |
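To see the checks above in action without depending on the surrounding Frame type, a stand-in object with the attributes check() reads is enough; the opcode values follow RFC 6455 (0x1 = text, a data opcode; 0x9 = ping, a control opcode):

from types import SimpleNamespace

frame = SimpleNamespace(fin=True, opcode=0x1, data=b"hello",
                        rsv1=False, rsv2=False, rsv3=False)
check(frame)   # passes: a final text frame with no reserved bits set

bad = SimpleNamespace(fin=False, opcode=0x9, data=b"",
                      rsv1=False, rsv2=False, rsv3=False)
try:
    check(bad)
except Exception as exc:           # WebSocketProtocolError in the real module
    print(exc)                     # "Fragmented control frame"
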
def result(self):
"""Return the context result object pulled from the persistence_engine
if it has been set.
"""
if not self._result:
if not self._persistence_engine:
return None
self._result = self._persistence_engine.get_context_result(self)
return self._result | Return the context result object pulled from the persistence_engine
if it has been set. | Below is the the instruction that describes the task:
### Input:
Return the context result object pulled from the persistence_engine
if it has been set.
### Response:
def result(self):
"""Return the context result object pulled from the persistence_engine
if it has been set.
"""
if not self._result:
if not self._persistence_engine:
return None
self._result = self._persistence_engine.get_context_result(self)
return self._result |
def download_historical(tickers_list, output_folder):
"""Download historical data from Yahoo Finance.
Downloads full historical data from Yahoo Finance as CSV. The following
fields are available: Adj Close, Close, High, Low, Open and Volume. Files
will be saved to output_folder as <ticker>.csv.
:param tickers_list: List of tickers that will be returned.
:type tickers_list: list of strings
:param output_folder: Output folder path
:type output_folder: string
"""
__validate_list(tickers_list)
for ticker in tickers_list:
file_name = os.path.join(output_folder, ticker + '.csv')
with open(file_name, 'wb') as f:
base_url = 'http://real-chart.finance.yahoo.com/table.csv?s='
try:
urlopen(base_url + ticker)
urlretrieve(base_url + ticker, f.name)
except:
os.remove(file_name)
raise RequestError('Unable to process the request. Check if ' +
ticker + ' is a valid stock ticker') | Download historical data from Yahoo Finance.
Downloads full historical data from Yahoo Finance as CSV. The following
fields are available: Adj Close, Close, High, Low, Open and Volume. Files
will be saved to output_folder as <ticker>.csv.
:param tickers_list: List of tickers that will be returned.
:type tickers_list: list of strings
:param output_folder: Output folder path
:type output_folder: string | Below is the the instruction that describes the task:
### Input:
Download historical data from Yahoo Finance.
Downloads full historical data from Yahoo Finance as CSV. The following
fields are available: Adj Close, Close, High, Low, Open and Volume. Files
will be saved to output_folder as <ticker>.csv.
:param tickers_list: List of tickers that will be returned.
:type tickers_list: list of strings
:param output_folder: Output folder path
:type output_folder: string
### Response:
def download_historical(tickers_list, output_folder):
"""Download historical data from Yahoo Finance.
Downloads full historical data from Yahoo Finance as CSV. The following
fields are available: Adj Close, Close, High, Low, Open and Volume. Files
will be saved to output_folder as <ticker>.csv.
:param tickers_list: List of tickers that will be returned.
:type tickers_list: list of strings
:param output_folder: Output folder path
:type output_folder: string
"""
__validate_list(tickers_list)
for ticker in tickers_list:
file_name = os.path.join(output_folder, ticker + '.csv')
with open(file_name, 'wb') as f:
base_url = 'http://real-chart.finance.yahoo.com/table.csv?s='
try:
urlopen(base_url + ticker)
urlretrieve(base_url + ticker, f.name)
except:
os.remove(file_name)
raise RequestError('Unable to process the request. Check if ' +
ticker + ' is a valid stock ticker') |
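Calling the helper above is a one-liner per batch of tickers; the symbols and output folder are illustrative, and the endpoint hard-coded in the implementation must be reachable for the download to succeed:

import os
import tempfile

out_dir = tempfile.mkdtemp()
download_historical(['AAPL', 'MSFT'], out_dir)

# One CSV per ticker, e.g. <out_dir>/AAPL.csv with the Date, Open, High, Low,
# Close, Volume and Adj Close columns described above.
print(os.listdir(out_dir))
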
def attach_template(self, _template, _key, **unbound_var_values):
"""Attaches the template to this with the _key is supplied with this layer.
Note: names were chosen to avoid conflicts.
Args:
_template: The template to construct.
_key: The key that this layer should replace.
**unbound_var_values: The values for the unbound_vars.
Returns:
A new layer with operation applied.
Raises:
ValueError: If _key is specified twice or there is a problem computing the
template.
"""
if _key in unbound_var_values:
raise ValueError('%s specified twice.' % _key)
unbound_var_values[_key] = self
return _DeferredLayer(self.bookkeeper,
_template.as_layer().construct,
[],
unbound_var_values,
scope=self._scope,
defaults=self._defaults,
                          partial_context=self._partial_context) | Attaches the template to this layer, supplying this layer as the value for _key.
Note: names were chosen to avoid conflicts.
Args:
_template: The template to construct.
_key: The key that this layer should replace.
**unbound_var_values: The values for the unbound_vars.
Returns:
A new layer with operation applied.
Raises:
ValueError: If _key is specified twice or there is a problem computing the
template. | Below is the the instruction that describes the task:
### Input:
Attaches the template to this layer, supplying this layer as the value for _key.
Note: names were chosen to avoid conflicts.
Args:
_template: The template to construct.
_key: The key that this layer should replace.
**unbound_var_values: The values for the unbound_vars.
Returns:
A new layer with operation applied.
Raises:
ValueError: If _key is specified twice or there is a problem computing the
template.
### Response:
def attach_template(self, _template, _key, **unbound_var_values):
"""Attaches the template to this with the _key is supplied with this layer.
Note: names were chosen to avoid conflicts.
Args:
_template: The template to construct.
_key: The key that this layer should replace.
**unbound_var_values: The values for the unbound_vars.
Returns:
A new layer with operation applied.
Raises:
ValueError: If _key is specified twice or there is a problem computing the
template.
"""
if _key in unbound_var_values:
raise ValueError('%s specified twice.' % _key)
unbound_var_values[_key] = self
return _DeferredLayer(self.bookkeeper,
_template.as_layer().construct,
[],
unbound_var_values,
scope=self._scope,
defaults=self._defaults,
partial_context=self._partial_context) |
def onecmd(self, statement: Union[Statement, str]) -> bool:
""" This executes the actual do_* method for a command.
If the command provided doesn't exist, then it executes default() instead.
:param statement: intended to be a Statement instance parsed command from the input stream, alternative
acceptance of a str is present only for backward compatibility with cmd
:return: a flag indicating whether the interpretation of commands should stop
"""
# For backwards compatibility with cmd, allow a str to be passed in
if not isinstance(statement, Statement):
statement = self._complete_statement(statement)
# Check if this is a macro
if statement.command in self.macros:
stop = self._run_macro(statement)
else:
func = self.cmd_func(statement.command)
if func:
# Check to see if this command should be stored in history
if statement.command not in self.exclude_from_history \
and statement.command not in self.disabled_commands:
self.history.append(statement)
stop = func(statement)
else:
stop = self.default(statement)
if stop is None:
stop = False
return stop | This executes the actual do_* method for a command.
If the command provided doesn't exist, then it executes default() instead.
:param statement: intended to be a Statement instance parsed command from the input stream, alternative
acceptance of a str is present only for backward compatibility with cmd
:return: a flag indicating whether the interpretation of commands should stop | Below is the the instruction that describes the task:
### Input:
This executes the actual do_* method for a command.
If the command provided doesn't exist, then it executes default() instead.
:param statement: intended to be a Statement instance parsed command from the input stream, alternative
acceptance of a str is present only for backward compatibility with cmd
:return: a flag indicating whether the interpretation of commands should stop
### Response:
def onecmd(self, statement: Union[Statement, str]) -> bool:
""" This executes the actual do_* method for a command.
If the command provided doesn't exist, then it executes default() instead.
:param statement: intended to be a Statement instance parsed command from the input stream, alternative
acceptance of a str is present only for backward compatibility with cmd
:return: a flag indicating whether the interpretation of commands should stop
"""
# For backwards compatibility with cmd, allow a str to be passed in
if not isinstance(statement, Statement):
statement = self._complete_statement(statement)
# Check if this is a macro
if statement.command in self.macros:
stop = self._run_macro(statement)
else:
func = self.cmd_func(statement.command)
if func:
# Check to see if this command should be stored in history
if statement.command not in self.exclude_from_history \
and statement.command not in self.disabled_commands:
self.history.append(statement)
stop = func(statement)
else:
stop = self.default(statement)
if stop is None:
stop = False
return stop |
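A minimal usage sketch, assuming a cmd2-style application whose commands are defined as do_* methods; the class and command names below are hypothetical, not taken from the source.
import cmd2

class GreeterApp(cmd2.Cmd):
    """Hypothetical app with one command."""
    def do_greet(self, args):
        """Print a greeting."""
        self.poutput('hello')

app = GreeterApp()
# A plain string is accepted for backward compatibility with cmd;
# it is parsed into a Statement before do_greet is dispatched.
stop = app.onecmd('greet')   # prints 'hello', returns False (keep looping)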
def major_tick_mark(self):
"""
Read/write :ref:`XlTickMark` value specifying the type of major tick
mark to display on this axis.
"""
majorTickMark = self._element.majorTickMark
if majorTickMark is None:
return XL_TICK_MARK.CROSS
return majorTickMark.val | Read/write :ref:`XlTickMark` value specifying the type of major tick
mark to display on this axis. | Below is the the instruction that describes the task:
### Input:
Read/write :ref:`XlTickMark` value specifying the type of major tick
mark to display on this axis.
### Response:
def major_tick_mark(self):
"""
Read/write :ref:`XlTickMark` value specifying the type of major tick
mark to display on this axis.
"""
majorTickMark = self._element.majorTickMark
if majorTickMark is None:
return XL_TICK_MARK.CROSS
return majorTickMark.val |
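A hedged usage sketch with python-pptx; it assumes the first shape on the first slide holds a chart, and the file name is a placeholder.
from pptx import Presentation
from pptx.enum.chart import XL_TICK_MARK

prs = Presentation('chart.pptx')         # placeholder file
chart = prs.slides[0].shapes[0].chart    # assumes this shape is a graphic frame with a chart
print(chart.value_axis.major_tick_mark)  # XL_TICK_MARK.CROSS when the element is absent
# The library also exposes a setter on the same property, e.g.:
# chart.value_axis.major_tick_mark = XL_TICK_MARK.OUTSIDE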
def make_application():
"""
Create a application configured to send metrics.
Metrics will be sent to localhost:8125 namespaced with
``webapps``. Run netcat or a similar listener then run this
example. HTTP GETs will result in a metric like::
webapps.SimpleHandler.GET.204:255.24497032165527|ms
"""
settings = {}
application = web.Application([web.url('/', SimpleHandler)], **settings)
statsd.install(application, **{'namespace': 'testing'})
return application | Create a application configured to send metrics.
Metrics will be sent to localhost:8125 namespaced with
``webapps``. Run netcat or a similar listener then run this
example. HTTP GETs will result in a metric like::
webapps.SimpleHandler.GET.204:255.24497032165527|ms | Below is the the instruction that describes the task:
### Input:
Create a application configured to send metrics.
Metrics will be sent to localhost:8125 namespaced with
``webapps``. Run netcat or a similar listener then run this
example. HTTP GETs will result in a metric like::
webapps.SimpleHandler.GET.204:255.24497032165527|ms
### Response:
def make_application():
"""
Create a application configured to send metrics.
Metrics will be sent to localhost:8125 namespaced with
``webapps``. Run netcat or a similar listener then run this
example. HTTP GETs will result in a metric like::
webapps.SimpleHandler.GET.204:255.24497032165527|ms
"""
settings = {}
application = web.Application([web.url('/', SimpleHandler)], **settings)
statsd.install(application, **{'namespace': 'testing'})
return application |
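A possible runner for the application above; everything beyond make_application is standard Tornado, and the port is arbitrary.
from tornado import ioloop

if __name__ == '__main__':
    app = make_application()
    app.listen(8888)
    ioloop.IOLoop.current().start()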
def _parse_binding_config(self, binding_config):
"""Parse configured interface -> ACL bindings
Bindings are returned as a set of (intf, name, direction) tuples:
set([(intf1, acl_name, direction),
(intf2, acl_name, direction),
...,
])
"""
parsed_bindings = set()
for acl in binding_config['aclList']:
for intf in acl['configuredIngressIntfs']:
parsed_bindings.add((intf['name'], acl['name'],
a_const.INGRESS_DIRECTION))
for intf in acl['configuredEgressIntfs']:
parsed_bindings.add((intf['name'], acl['name'],
a_const.EGRESS_DIRECTION))
return parsed_bindings | Parse configured interface -> ACL bindings
Bindings are returned as a set of (intf, name, direction) tuples:
set([(intf1, acl_name, direction),
(intf2, acl_name, direction),
...,
]) | Below is the the instruction that describes the task:
### Input:
Parse configured interface -> ACL bindings
Bindings are returned as a set of (intf, name, direction) tuples:
set([(intf1, acl_name, direction),
(intf2, acl_name, direction),
...,
])
### Response:
def _parse_binding_config(self, binding_config):
"""Parse configured interface -> ACL bindings
Bindings are returned as a set of (intf, name, direction) tuples:
set([(intf1, acl_name, direction),
(intf2, acl_name, direction),
...,
])
"""
parsed_bindings = set()
for acl in binding_config['aclList']:
for intf in acl['configuredIngressIntfs']:
parsed_bindings.add((intf['name'], acl['name'],
a_const.INGRESS_DIRECTION))
for intf in acl['configuredEgressIntfs']:
parsed_bindings.add((intf['name'], acl['name'],
a_const.EGRESS_DIRECTION))
return parsed_bindings |
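An illustrative input and the tuples it would yield, assuming the direction constants resolve to 'ingress' and 'egress' (their real values live in a_const and are not shown here).
binding_config = {
    'aclList': [
        {'name': 'acl1',
         'configuredIngressIntfs': [{'name': 'Ethernet1'}],
         'configuredEgressIntfs': [{'name': 'Ethernet2'}]},
    ]
}
# _parse_binding_config(binding_config) would return something like:
# {('Ethernet1', 'acl1', 'ingress'), ('Ethernet2', 'acl1', 'egress')}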
def WriteAllCrashDetails(client_id,
crash_details,
flow_session_id=None,
hunt_session_id=None,
token=None):
"""Updates the last crash attribute of the client."""
# AFF4.
if data_store.AFF4Enabled():
with aff4.FACTORY.Create(
client_id, aff4_grr.VFSGRRClient, token=token) as client_obj:
client_obj.Set(client_obj.Schema.LAST_CRASH(crash_details))
# Duplicate the crash information in a number of places so we can find it
# easily.
client_urn = rdf_client.ClientURN(client_id)
client_crashes = aff4_grr.VFSGRRClient.CrashCollectionURNForCID(client_urn)
with data_store.DB.GetMutationPool() as pool:
grr_collections.CrashCollection.StaticAdd(
client_crashes, crash_details, mutation_pool=pool)
# Relational db.
if data_store.RelationalDBEnabled():
try:
data_store.REL_DB.WriteClientCrashInfo(client_id, crash_details)
except db.UnknownClientError:
pass
if not flow_session_id:
return
if data_store.RelationalDBEnabled():
flow_id = flow_session_id.Basename()
data_store.REL_DB.UpdateFlow(
client_id, flow_id, client_crash_info=crash_details)
flow_obj = data_store.REL_DB.ReadFlowObject(client_id, flow_id)
if flow_obj.parent_hunt_id:
db_compat.ProcessHuntClientCrash(
flow_obj, client_crash_info=crash_details)
# TODO(amoser): Registering crashes in hunts is currently not implemented for
# the relational db.
if not data_store.RelationalDBEnabled():
with aff4.FACTORY.Open(
flow_session_id,
flow.GRRFlow,
mode="rw",
age=aff4.NEWEST_TIME,
token=token) as aff4_flow:
aff4_flow.Set(aff4_flow.Schema.CLIENT_CRASH(crash_details))
hunt_session_id = ExtractHuntId(flow_session_id)
if hunt_session_id and hunt_session_id != flow_session_id:
hunt_obj = aff4.FACTORY.Open(
hunt_session_id,
aff4_type=implementation.GRRHunt,
mode="rw",
token=token)
hunt_obj.RegisterCrash(crash_details) | Updates the last crash attribute of the client. | Below is the the instruction that describes the task:
### Input:
Updates the last crash attribute of the client.
### Response:
def WriteAllCrashDetails(client_id,
crash_details,
flow_session_id=None,
hunt_session_id=None,
token=None):
"""Updates the last crash attribute of the client."""
# AFF4.
if data_store.AFF4Enabled():
with aff4.FACTORY.Create(
client_id, aff4_grr.VFSGRRClient, token=token) as client_obj:
client_obj.Set(client_obj.Schema.LAST_CRASH(crash_details))
# Duplicate the crash information in a number of places so we can find it
# easily.
client_urn = rdf_client.ClientURN(client_id)
client_crashes = aff4_grr.VFSGRRClient.CrashCollectionURNForCID(client_urn)
with data_store.DB.GetMutationPool() as pool:
grr_collections.CrashCollection.StaticAdd(
client_crashes, crash_details, mutation_pool=pool)
# Relational db.
if data_store.RelationalDBEnabled():
try:
data_store.REL_DB.WriteClientCrashInfo(client_id, crash_details)
except db.UnknownClientError:
pass
if not flow_session_id:
return
if data_store.RelationalDBEnabled():
flow_id = flow_session_id.Basename()
data_store.REL_DB.UpdateFlow(
client_id, flow_id, client_crash_info=crash_details)
flow_obj = data_store.REL_DB.ReadFlowObject(client_id, flow_id)
if flow_obj.parent_hunt_id:
db_compat.ProcessHuntClientCrash(
flow_obj, client_crash_info=crash_details)
# TODO(amoser): Registering crashes in hunts is currently not implemented for
# the relational db.
if not data_store.RelationalDBEnabled():
with aff4.FACTORY.Open(
flow_session_id,
flow.GRRFlow,
mode="rw",
age=aff4.NEWEST_TIME,
token=token) as aff4_flow:
aff4_flow.Set(aff4_flow.Schema.CLIENT_CRASH(crash_details))
hunt_session_id = ExtractHuntId(flow_session_id)
if hunt_session_id and hunt_session_id != flow_session_id:
hunt_obj = aff4.FACTORY.Open(
hunt_session_id,
aff4_type=implementation.GRRHunt,
mode="rw",
token=token)
hunt_obj.RegisterCrash(crash_details) |
def search_shows_by_keyword(self, keyword, unite=0, source_site=None,
category=None, release_year=None,
area=None, orderby='view-count',
paid=None, hasvideotype=None,
page=1, count=20):
"""doc: http://open.youku.com/docs/doc?id=82
"""
url = 'https://openapi.youku.com/v2/searches/show/by_keyword.json'
params = {
'client_id': self.client_id,
'keyword': keyword,
'unite': unite,
'source_site': source_site,
'category': category,
'release_year': release_year,
'area': area,
'orderby': orderby,
'paid': paid,
'hasvideotype': hasvideotype,
'page': page,
'count': count
}
params = remove_none_value(params)
r = requests.get(url, params=params)
check_error(r)
return r.json() | doc: http://open.youku.com/docs/doc?id=82 | Below is the the instruction that describes the task:
### Input:
doc: http://open.youku.com/docs/doc?id=82
### Response:
def search_shows_by_keyword(self, keyword, unite=0, source_site=None,
category=None, release_year=None,
area=None, orderby='view-count',
paid=None, hasvideotype=None,
page=1, count=20):
"""doc: http://open.youku.com/docs/doc?id=82
"""
url = 'https://openapi.youku.com/v2/searches/show/by_keyword.json'
params = {
'client_id': self.client_id,
'keyword': keyword,
'unite': unite,
'source_site': source_site,
'category': category,
'release_year': release_year,
'area': area,
'orderby': orderby,
'paid': paid,
'hasvideotype': hasvideotype,
'page': page,
'count': count
}
params = remove_none_value(params)
r = requests.get(url, params=params)
check_error(r)
return r.json() |
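A hedged call sketch; the client class name and the response keys below are assumptions, since only the method body is shown.
client = YoukuShows(client_id='your-client-id')   # hypothetical constructor
result = client.search_shows_by_keyword('kung fu', page=1, count=5)
for show in result.get('shows', []):              # assumed response layout
    print(show.get('name'), show.get('view_count'))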
def accuracy(conf_matrix):
"""
Given a confusion matrix, returns the accuracy.
Accuracy Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml
"""
total, correct = 0.0, 0.0
for true_response, guess_dict in conf_matrix.items():
for guess, count in guess_dict.items():
if true_response == guess:
correct += count
total += count
return correct/total | Given a confusion matrix, returns the accuracy.
Accuracy Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml | Below is the the instruction that describes the task:
### Input:
Given a confusion matrix, returns the accuracy.
Accuracy Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml
### Response:
def accuracy(conf_matrix):
"""
Given a confusion matrix, returns the accuracy.
Accuracy Definition: http://research.ics.aalto.fi/events/eyechallenge2005/evaluation.shtml
"""
total, correct = 0.0, 0.0
for true_response, guess_dict in conf_matrix.items():
for guess, count in guess_dict.items():
if true_response == guess:
correct += count
total += count
return correct/total |
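A worked example: 8 + 9 of the 20 responses fall on the diagonal, so the accuracy is 17/20 = 0.85.
conf_matrix = {
    'yes': {'yes': 8, 'no': 2},   # 10 true 'yes' responses
    'no':  {'yes': 1, 'no': 9},   # 10 true 'no' responses
}
print(accuracy(conf_matrix))      # 0.85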
def find_device(self, service_uuids=[], name=None, timeout_sec=TIMEOUT_SEC):
"""Return the first device that advertises the specified service UUIDs or
has the specified name. Will wait up to timeout_sec seconds for the device
to be found, and if the timeout is zero then it will not wait at all and
immediately return a result. When no device is found a value of None is
returned.
"""
start = time.time()
while True:
# Call find_devices and grab the first result if any are found.
found = self.find_devices(service_uuids, name)
if len(found) > 0:
return found[0]
# No device was found. Check if the timeout is exceeded and wait to
# try again.
if time.time()-start >= timeout_sec:
# Failed to find a device within the timeout.
return None
time.sleep(1) | Return the first device that advertises the specified service UUIDs or
has the specified name. Will wait up to timeout_sec seconds for the device
to be found, and if the timeout is zero then it will not wait at all and
immediately return a result. When no device is found a value of None is
returned. | Below is the the instruction that describes the task:
### Input:
Return the first device that advertises the specified service UUIDs or
has the specified name. Will wait up to timeout_sec seconds for the device
to be found, and if the timeout is zero then it will not wait at all and
immediately return a result. When no device is found a value of None is
returned.
### Response:
def find_device(self, service_uuids=[], name=None, timeout_sec=TIMEOUT_SEC):
"""Return the first device that advertises the specified service UUIDs or
has the specified name. Will wait up to timeout_sec seconds for the device
to be found, and if the timeout is zero then it will not wait at all and
immediately return a result. When no device is found a value of None is
returned.
"""
start = time.time()
while True:
# Call find_devices and grab the first result if any are found.
found = self.find_devices(service_uuids, name)
if len(found) > 0:
return found[0]
# No device was found. Check if the timeout is exceeded and wait to
# try again.
if time.time()-start >= timeout_sec:
# Failed to find a device within the timeout.
return None
time.sleep(1) |
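A hedged sketch of a caller; the provider object and the UART service UUID are assumptions, not taken from the source.
import uuid

UART_SERVICE_UUID = uuid.UUID('6e400001-b5a3-f393-e0a9-e50e24dcca9e')  # assumed UUID
# 'ble' stands in for an already-initialized provider exposing find_device()
device = ble.find_device(service_uuids=[UART_SERVICE_UUID], timeout_sec=30)
if device is None:
    raise RuntimeError('No matching device found within 30 seconds')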
def _reset(self, constraints=None):
"""Auxiliary method to reset the smtlib external solver to initial defaults"""
if self._proc is None:
self._start_proc()
else:
if self.support_reset:
self._send("(reset)")
for cfg in self._init:
self._send(cfg)
else:
self._stop_proc()
self._start_proc()
if constraints is not None:
self._send(constraints) | Auxiliary method to reset the smtlib external solver to initial defaults | Below is the the instruction that describes the task:
### Input:
Auxiliary method to reset the smtlib external solver to initial defaults
### Response:
def _reset(self, constraints=None):
"""Auxiliary method to reset the smtlib external solver to initial defaults"""
if self._proc is None:
self._start_proc()
else:
if self.support_reset:
self._send("(reset)")
for cfg in self._init:
self._send(cfg)
else:
self._stop_proc()
self._start_proc()
if constraints is not None:
self._send(constraints) |
def update_one(self, filter, update, **kwargs):
"""
See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one
"""
self._arctic_lib.check_quota()
return self._collection.update_one(filter, update, **kwargs) | See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one | Below is the the instruction that describes the task:
### Input:
See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one
### Response:
def update_one(self, filter, update, **kwargs):
"""
See http://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.update_one
"""
self._arctic_lib.check_quota()
return self._collection.update_one(filter, update, **kwargs) |
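Because the wrapper only checks the quota and forwards to PyMongo, usage mirrors a plain PyMongo call; the filter and update below are placeholders.
result = collection.update_one(
    {'symbol': 'AAPL'},                   # filter (placeholder)
    {'$set': {'metadata.active': True}},  # update document (placeholder)
    upsert=False,
)
print(result.matched_count, result.modified_count)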
def load(self):
"""Load data from Graphite."""
LOGGER.debug('%s: start checking: %s', self.name, self.query)
if self.waiting:
self.notify('warning', 'Process takes too much time', target='waiting', ntype='common')
else:
self.waiting = True
try:
response = yield self.client.fetch(self.url, auth_username=self.auth_username,
auth_password=self.auth_password,
request_timeout=self.request_timeout,
connect_timeout=self.connect_timeout,
validate_cert=self.validate_cert)
records = (
GraphiteRecord(line, self.default_nan_value, self.ignore_nan)
for line in response.buffer)
data = [
(None if record.empty else getattr(record, self.method), record.target)
for record in records]
if len(data) == 0:
raise ValueError('No data')
self.check(data)
self.notify('normal', 'Metrics are loaded', target='loading', ntype='common')
except Exception as e:
self.notify(
self.loading_error, 'Loading error: %s' % e, target='loading', ntype='common')
self.waiting = False | Load data from Graphite. | Below is the the instruction that describes the task:
### Input:
Load data from Graphite.
### Response:
def load(self):
"""Load data from Graphite."""
LOGGER.debug('%s: start checking: %s', self.name, self.query)
if self.waiting:
self.notify('warning', 'Process takes too much time', target='waiting', ntype='common')
else:
self.waiting = True
try:
response = yield self.client.fetch(self.url, auth_username=self.auth_username,
auth_password=self.auth_password,
request_timeout=self.request_timeout,
connect_timeout=self.connect_timeout,
validate_cert=self.validate_cert)
records = (
GraphiteRecord(line, self.default_nan_value, self.ignore_nan)
for line in response.buffer)
data = [
(None if record.empty else getattr(record, self.method), record.target)
for record in records]
if len(data) == 0:
raise ValueError('No data')
self.check(data)
self.notify('normal', 'Metrics are loaded', target='loading', ntype='common')
except Exception as e:
self.notify(
self.loading_error, 'Loading error: %s' % e, target='loading', ntype='common')
self.waiting = False |
def write_matrix(outputfile, matrix):
""" Write down the provided matrix in the specified outputfile.
:arg outputfile, name of the outputfile in which the QTLs found are
written.
:arg matrix, the list of lists of data to write.
"""
try:
stream = open(outputfile, 'w')
for row in matrix:
if isinstance(row, list) or isinstance(row, tuple):
row = [str(el).strip() for el in row]
stream.write(','.join(row) + '\n')
else:
stream.write(row + '\n')
except IOError as err: # pragma: no cover
        LOG.info('An error occurred while writing the file %s'
% outputfile)
LOG.debug("Error: %s" % err)
finally:
stream.close()
LOG.info('Wrote QTLs in file %s' % outputfile) | Write down the provided matrix in the specified outputfile.
:arg outputfile, name of the outputfile in which the QTLs found are
written.
:arg matrix, the list of lists of data to write. | Below is the the instruction that describes the task:
### Input:
Write down the provided matrix in the specified outputfile.
:arg outputfile, name of the outputfile in which the QTLs found are
written.
:arg matrix, the list of lists of data to write.
### Response:
def write_matrix(outputfile, matrix):
""" Write down the provided matrix in the specified outputfile.
:arg outputfile, name of the outputfile in which the QTLs found are
written.
:arg matrix, the list of lists of data to write.
"""
try:
stream = open(outputfile, 'w')
for row in matrix:
if isinstance(row, list) or isinstance(row, tuple):
row = [str(el).strip() for el in row]
stream.write(','.join(row) + '\n')
else:
stream.write(row + '\n')
except IOError as err: # pragma: no cover
        LOG.info('An error occurred while writing the file %s'
% outputfile)
LOG.debug("Error: %s" % err)
finally:
stream.close()
LOG.info('Wrote QTLs in file %s' % outputfile) |
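A small example of the expected input shape: each row may be a list/tuple (joined with commas) or an already-formatted string written as-is; the file name is a placeholder.
matrix = [
    ['marker', 'linkage_group', 'position', 'LOD'],
    ['m1', '1', '12.3', '3.42'],
    ['m2', '2', '48.0', '2.91'],
    'free-form line written unchanged',
]
write_matrix('qtls.csv', matrix)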
def host_type(host):
"""
Correctly classify correct RFC 3986 compliant hostnames, but do not try
hard to validate compliance anyway...
NOTE: indeed we allow a small deviation from the RFC 3986: IPv4
addresses are allowed to contain bytes represented in hexadecimal or
    octal notation when beginning respectively with '0x'/'0X' and '0'
numbers prepended with one or more zero won't be rejected. Anyway
representation of multiple bytes by a single decimal/octal/hexadecimal
integer is not allowed.
Return 1 (HOST_IP_LITERAL), 2 (HOST_IPV4_ADDRESS) or 3 (HOST_REG_NAME)
>>> host_type('[blablabla]')
1
>>> host_type('')
3
>>> host_type('127.0.0.1')
2
>>> host_type('0x7F.0.0.00000000000001')
2
>>> host_type('666.42.131.2')
3
>>> host_type('foobar.42')
3
"""
if not host:
return HOST_REG_NAME
elif host[0] == '[':
return HOST_IP_LITERAL
elif __valid_IPv4address(host):
return HOST_IPV4_ADDRESS
else:
return HOST_REG_NAME | Correctly classify correct RFC 3986 compliant hostnames, but do not try
hard to validate compliance anyway...
NOTE: indeed we allow a small deviation from the RFC 3986: IPv4
addresses are allowed to contain bytes represented in hexadecimal or
    octal notation when beginning respectively with '0x'/'0X' and '0'
numbers prepended with one or more zero won't be rejected. Anyway
representation of multiple bytes by a single decimal/octal/hexadecimal
integer is not allowed.
Return 1 (HOST_IP_LITERAL), 2 (HOST_IPV4_ADDRESS) or 3 (HOST_REG_NAME)
>>> host_type('[blablabla]')
1
>>> host_type('')
3
>>> host_type('127.0.0.1')
2
>>> host_type('0x7F.0.0.00000000000001')
2
>>> host_type('666.42.131.2')
3
>>> host_type('foobar.42')
3 | Below is the the instruction that describes the task:
### Input:
Correctly classify correct RFC 3986 compliant hostnames, but do not try
hard to validate compliance anyway...
NOTE: indeed we allow a small deviation from the RFC 3986: IPv4
addresses are allowed to contain bytes represented in hexadecimal or
octal notation when beginning respectively with '0x'/'0X' and '0'
numbers prepended with one or more zero won't be rejected. Anyway
representation of multiple bytes by a single decimal/octal/hexadecimal
integer is not allowed.
Return 1 (HOST_IP_LITERAL), 2 (HOST_IPV4_ADDRESS) or 3 (HOST_REG_NAME)
>>> host_type('[blablabla]')
1
>>> host_type('')
3
>>> host_type('127.0.0.1')
2
>>> host_type('0x7F.0.0.00000000000001')
2
>>> host_type('666.42.131.2')
3
>>> host_type('foobar.42')
3
### Response:
def host_type(host):
"""
Correctly classify correct RFC 3986 compliant hostnames, but do not try
hard to validate compliance anyway...
NOTE: indeed we allow a small deviation from the RFC 3986: IPv4
addresses are allowed to contain bytes represented in hexadecimal or
    octal notation when beginning respectively with '0x'/'0X' and '0'
numbers prepended with one or more zero won't be rejected. Anyway
representation of multiple bytes by a single decimal/octal/hexadecimal
integer is not allowed.
Return 1 (HOST_IP_LITERAL), 2 (HOST_IPV4_ADDRESS) or 3 (HOST_REG_NAME)
>>> host_type('[blablabla]')
1
>>> host_type('')
3
>>> host_type('127.0.0.1')
2
>>> host_type('0x7F.0.0.00000000000001')
2
>>> host_type('666.42.131.2')
3
>>> host_type('foobar.42')
3
"""
if not host:
return HOST_REG_NAME
elif host[0] == '[':
return HOST_IP_LITERAL
elif __valid_IPv4address(host):
return HOST_IPV4_ADDRESS
else:
return HOST_REG_NAME |
def try_next(self):
"""Advance the cursor without blocking indefinitely.
This method returns the next change document without waiting
indefinitely for the next change. For example::
with db.collection.watch() as stream:
while stream.alive:
change = stream.try_next()
if change is not None:
print(change)
elif stream.alive:
# We end up here when there are no recent changes.
# Sleep for a while to avoid flooding the server with
# getMore requests when no changes are available.
time.sleep(10)
If no change document is cached locally then this method runs a single
getMore command. If the getMore yields any documents, the next
document is returned, otherwise, if the getMore returns no documents
(because there have been no changes) then ``None`` is returned.
:Returns:
The next change document or ``None`` when no document is available
after running a single getMore or when the cursor is closed.
.. versionadded:: 3.8
"""
# Attempt to get the next change with at most one getMore and at most
# one resume attempt.
try:
change = self._cursor._try_next(True)
except ConnectionFailure:
self._resume()
change = self._cursor._try_next(False)
except OperationFailure as exc:
if exc.code in _NON_RESUMABLE_GETMORE_ERRORS:
raise
self._resume()
change = self._cursor._try_next(False)
# No changes are available.
if change is None:
return None
try:
resume_token = change['_id']
except KeyError:
self.close()
raise InvalidOperation(
"Cannot provide resume functionality when the resume "
"token is missing.")
self._resume_token = copy.copy(resume_token)
self._start_at_operation_time = None
if self._decode_custom:
return _bson_to_dict(change.raw, self._orig_codec_options)
return change | Advance the cursor without blocking indefinitely.
This method returns the next change document without waiting
indefinitely for the next change. For example::
with db.collection.watch() as stream:
while stream.alive:
change = stream.try_next()
if change is not None:
print(change)
elif stream.alive:
# We end up here when there are no recent changes.
# Sleep for a while to avoid flooding the server with
# getMore requests when no changes are available.
time.sleep(10)
If no change document is cached locally then this method runs a single
getMore command. If the getMore yields any documents, the next
document is returned, otherwise, if the getMore returns no documents
(because there have been no changes) then ``None`` is returned.
:Returns:
The next change document or ``None`` when no document is available
after running a single getMore or when the cursor is closed.
.. versionadded:: 3.8 | Below is the the instruction that describes the task:
### Input:
Advance the cursor without blocking indefinitely.
This method returns the next change document without waiting
indefinitely for the next change. For example::
with db.collection.watch() as stream:
while stream.alive:
change = stream.try_next()
if change is not None:
print(change)
elif stream.alive:
# We end up here when there are no recent changes.
# Sleep for a while to avoid flooding the server with
# getMore requests when no changes are available.
time.sleep(10)
If no change document is cached locally then this method runs a single
getMore command. If the getMore yields any documents, the next
document is returned, otherwise, if the getMore returns no documents
(because there have been no changes) then ``None`` is returned.
:Returns:
The next change document or ``None`` when no document is available
after running a single getMore or when the cursor is closed.
.. versionadded:: 3.8
### Response:
def try_next(self):
"""Advance the cursor without blocking indefinitely.
This method returns the next change document without waiting
indefinitely for the next change. For example::
with db.collection.watch() as stream:
while stream.alive:
change = stream.try_next()
if change is not None:
print(change)
elif stream.alive:
# We end up here when there are no recent changes.
# Sleep for a while to avoid flooding the server with
# getMore requests when no changes are available.
time.sleep(10)
If no change document is cached locally then this method runs a single
getMore command. If the getMore yields any documents, the next
document is returned, otherwise, if the getMore returns no documents
(because there have been no changes) then ``None`` is returned.
:Returns:
The next change document or ``None`` when no document is available
after running a single getMore or when the cursor is closed.
.. versionadded:: 3.8
"""
# Attempt to get the next change with at most one getMore and at most
# one resume attempt.
try:
change = self._cursor._try_next(True)
except ConnectionFailure:
self._resume()
change = self._cursor._try_next(False)
except OperationFailure as exc:
if exc.code in _NON_RESUMABLE_GETMORE_ERRORS:
raise
self._resume()
change = self._cursor._try_next(False)
# No changes are available.
if change is None:
return None
try:
resume_token = change['_id']
except KeyError:
self.close()
raise InvalidOperation(
"Cannot provide resume functionality when the resume "
"token is missing.")
self._resume_token = copy.copy(resume_token)
self._start_at_operation_time = None
if self._decode_custom:
return _bson_to_dict(change.raw, self._orig_codec_options)
return change |
def get(self, key, default=None):
"""Return the value at key ``key``, or default value ``default``
which is None by default.
>>> dc = Dictator()
>>> dc['l0'] = [1, 2, 3, 4]
>>> dc.get('l0')
['1', '2', '3', '4']
>>> dc['l0']
['1', '2', '3', '4']
>>> dc.clear()
:param key: key of value to return
:type key: str
:param default: value of any type to return of key doesn't exist.
:type default: Any
:return: value of given key
:rtype: Any
"""
try:
value = self.__getitem__(key)
except KeyError:
value = None
        # Py3 Redis compatibility
if isinstance(value, bytes):
value = value.decode()
return value or default | Return the value at key ``key``, or default value ``default``
which is None by default.
>>> dc = Dictator()
>>> dc['l0'] = [1, 2, 3, 4]
>>> dc.get('l0')
['1', '2', '3', '4']
>>> dc['l0']
['1', '2', '3', '4']
>>> dc.clear()
:param key: key of value to return
:type key: str
:param default: value of any type to return of key doesn't exist.
:type default: Any
:return: value of given key
:rtype: Any | Below is the the instruction that describes the task:
### Input:
Return the value at key ``key``, or default value ``default``
which is None by default.
>>> dc = Dictator()
>>> dc['l0'] = [1, 2, 3, 4]
>>> dc.get('l0')
['1', '2', '3', '4']
>>> dc['l0']
['1', '2', '3', '4']
>>> dc.clear()
:param key: key of value to return
:type key: str
:param default: value of any type to return of key doesn't exist.
:type default: Any
:return: value of given key
:rtype: Any
### Response:
def get(self, key, default=None):
"""Return the value at key ``key``, or default value ``default``
which is None by default.
>>> dc = Dictator()
>>> dc['l0'] = [1, 2, 3, 4]
>>> dc.get('l0')
['1', '2', '3', '4']
>>> dc['l0']
['1', '2', '3', '4']
>>> dc.clear()
:param key: key of value to return
:type key: str
:param default: value of any type to return of key doesn't exist.
:type default: Any
:return: value of given key
:rtype: Any
"""
try:
value = self.__getitem__(key)
except KeyError:
value = None
        # Py3 Redis compatibility
if isinstance(value, bytes):
value = value.decode()
return value or default |
def to_color(self, value, maxvalue, scale, minvalue=0.0):
"""
convert continuous values into colors using matplotlib colorscales
:param value: value to be converted
:param maxvalue: max value in the colorscale
:param scale: lin, log, sqrt
:param minvalue: minimum of the input values in linear scale (default is 0)
:return: the color corresponding to the value
"""
if scale == 'lin':
if minvalue >= maxvalue:
raise Exception('minvalue must be less than maxvalue')
else:
value = 1.*(value-minvalue) / (maxvalue-minvalue)
elif scale == 'log':
if value < 1 or maxvalue <= 1:
raise Exception('value and maxvalue must be >= 1')
else:
value = math.log(value) / math.log(maxvalue)
elif scale == 'sqrt':
if value < 0 or maxvalue <= 0:
raise Exception('value and maxvalue must be greater than 0')
else:
value = math.sqrt(value) / math.sqrt(maxvalue)
else:
raise Exception('scale must be "lin", "log", or "sqrt"')
if value < 0:
value = 0
elif value > 1:
value = 1
value = int(1.*self.levels*value)*1./(self.levels-1)
if value not in self.mapping:
self.mapping[value] = _convert_color_format(self.cmap(value), self.alpha)
return self.mapping[value] | convert continuous values into colors using matplotlib colorscales
:param value: value to be converted
:param maxvalue: max value in the colorscale
:param scale: lin, log, sqrt
:param minvalue: minimum of the input values in linear scale (default is 0)
:return: the color corresponding to the value | Below is the the instruction that describes the task:
### Input:
convert continuous values into colors using matplotlib colorscales
:param value: value to be converted
:param maxvalue: max value in the colorscale
:param scale: lin, log, sqrt
:param minvalue: minimum of the input values in linear scale (default is 0)
:return: the color corresponding to the value
### Response:
def to_color(self, value, maxvalue, scale, minvalue=0.0):
"""
convert continuous values into colors using matplotlib colorscales
:param value: value to be converted
:param maxvalue: max value in the colorscale
:param scale: lin, log, sqrt
:param minvalue: minimum of the input values in linear scale (default is 0)
:return: the color corresponding to the value
"""
if scale == 'lin':
if minvalue >= maxvalue:
raise Exception('minvalue must be less than maxvalue')
else:
value = 1.*(value-minvalue) / (maxvalue-minvalue)
elif scale == 'log':
if value < 1 or maxvalue <= 1:
raise Exception('value and maxvalue must be >= 1')
else:
value = math.log(value) / math.log(maxvalue)
elif scale == 'sqrt':
if value < 0 or maxvalue <= 0:
raise Exception('value and maxvalue must be greater than 0')
else:
value = math.sqrt(value) / math.sqrt(maxvalue)
else:
raise Exception('scale must be "lin", "log", or "sqrt"')
if value < 0:
value = 0
elif value > 1:
value = 1
value = int(1.*self.levels*value)*1./(self.levels-1)
if value not in self.mapping:
self.mapping[value] = _convert_color_format(self.cmap(value), self.alpha)
return self.mapping[value] |
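A hedged call sketch; color_scale stands in for an instance of the (unshown) class that wraps a matplotlib colormap with cmap, levels, alpha and mapping attributes.
rgba_lin = color_scale.to_color(50, 100, 'lin')    # halfway up a linear scale
rgba_log = color_scale.to_color(10, 1000, 'log')   # value and maxvalue must be >= 1
rgba_sqrt = color_scale.to_color(25, 100, 'sqrt')  # value >= 0, maxvalue > 0
print(rgba_lin, rgba_log, rgba_sqrt)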
def author_structure(user):
"""
An author structure.
"""
return {'user_id': user.pk,
'user_login': user.get_username(),
'display_name': user.__str__(),
'user_email': user.email} | An author structure. | Below is the the instruction that describes the task:
### Input:
An author structure.
### Response:
def author_structure(user):
"""
An author structure.
"""
return {'user_id': user.pk,
'user_login': user.get_username(),
'display_name': user.__str__(),
'user_email': user.email} |
def check_command(self, command):
"""
Check if command can be called.
"""
# Use `command` to see if command is callable, store exit code
code = os.system("command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
# If exit code is not 0, report which command failed and return False, else return True
if code != 0:
print("Command is not callable: {0}".format(command))
return False
else:
return True | Check if command can be called. | Below is the the instruction that describes the task:
### Input:
Check if command can be called.
### Response:
def check_command(self, command):
"""
Check if command can be called.
"""
# Use `command` to see if command is callable, store exit code
code = os.system("command -v {0} >/dev/null 2>&1 || {{ exit 1; }}".format(command))
# If exit code is not 0, report which command failed and return False, else return True
if code != 0:
print("Command is not callable: {0}".format(command))
return False
else:
return True |
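A hedged usage sketch; builder stands in for whatever object defines this method, and note that the os.system trick relies on a POSIX shell providing command -v.
if not builder.check_command('git'):
    raise SystemExit('git is required but was not found on PATH')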
def print_summary(self) -> None:
"""
Prints the tasks' timing summary.
"""
print("Tasks execution time summary:")
for mon_task in self._monitor_tasks:
print("%s:\t%.4f (sec)" % (mon_task.task_name, mon_task.total_time)) | Prints the tasks' timing summary. | Below is the the instruction that describes the task:
### Input:
Prints the tasks' timing summary.
### Response:
def print_summary(self) -> None:
"""
Prints the tasks' timing summary.
"""
print("Tasks execution time summary:")
for mon_task in self._monitor_tasks:
print("%s:\t%.4f (sec)" % (mon_task.task_name, mon_task.total_time)) |
def compile_protos():
"""Builds necessary assets from sources."""
# If there's no makefile, we're likely installing from an sdist,
# so there's no need to compile the protos (they should be already
# compiled).
if not os.path.exists(os.path.join(THIS_DIRECTORY, "makefile.py")):
return
# Only compile protobufs if we're inside GRR source tree.
subprocess.check_call(
["python", "makefile.py", "--clean"], cwd=THIS_DIRECTORY) | Builds necessary assets from sources. | Below is the the instruction that describes the task:
### Input:
Builds necessary assets from sources.
### Response:
def compile_protos():
"""Builds necessary assets from sources."""
# If there's no makefile, we're likely installing from an sdist,
# so there's no need to compile the protos (they should be already
# compiled).
if not os.path.exists(os.path.join(THIS_DIRECTORY, "makefile.py")):
return
# Only compile protobufs if we're inside GRR source tree.
subprocess.check_call(
["python", "makefile.py", "--clean"], cwd=THIS_DIRECTORY) |
def delete_token(self,
token_name,
project_name,
dataset_name):
"""
Delete a token with the given parameters.
Arguments:
project_name (str): Project name
dataset_name (str): Dataset name project is based on
token_name (str): Token name
channel_name (str): Channel name project is based on
Returns:
bool: True if project deleted, false if not deleted.
"""
url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\
+ "/project/{}".format(project_name)\
+ "/token/{}/".format(token_name)
req = self.remote_utils.delete_url(url)
        if req.status_code != 204:
raise RemoteDataUploadError("Could not delete {}".format(req.text))
if req.content == "" or req.content == b'':
return True
else:
return False | Delete a token with the given parameters.
Arguments:
project_name (str): Project name
dataset_name (str): Dataset name project is based on
token_name (str): Token name
channel_name (str): Channel name project is based on
Returns:
bool: True if project deleted, false if not deleted. | Below is the the instruction that describes the task:
### Input:
Delete a token with the given parameters.
Arguments:
project_name (str): Project name
dataset_name (str): Dataset name project is based on
token_name (str): Token name
channel_name (str): Channel name project is based on
Returns:
bool: True if project deleted, false if not deleted.
### Response:
def delete_token(self,
token_name,
project_name,
dataset_name):
"""
Delete a token with the given parameters.
Arguments:
project_name (str): Project name
dataset_name (str): Dataset name project is based on
token_name (str): Token name
channel_name (str): Channel name project is based on
Returns:
bool: True if project deleted, false if not deleted.
"""
url = self.url() + "/nd/resource/dataset/{}".format(dataset_name)\
+ "/project/{}".format(project_name)\
+ "/token/{}/".format(token_name)
req = self.remote_utils.delete_url(url)
        if req.status_code != 204:
raise RemoteDataUploadError("Could not delete {}".format(req.text))
if req.content == "" or req.content == b'':
return True
else:
return False |
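A hedged call sketch; remote stands in for an instance of the (unshown) class providing url() and remote_utils, and the token/project/dataset names are placeholders.
if remote.delete_token('my_token', 'my_project', 'my_dataset'):
    print('Token deleted.')
else:
    print('Delete succeeded but returned unexpected content.')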
def watch(self, resource, namespace=None, name=None, label_selector=None, field_selector=None, resource_version=None, timeout=None):
"""
Stream events for a resource from the Kubernetes API
:param resource: The API resource object that will be used to query the API
:param namespace: The namespace to query
:param name: The name of the resource instance to query
:param label_selector: The label selector with which to filter results
:param field_selector: The field selector with which to filter results
:param resource_version: The version with which to filter results. Only events with
a resource_version greater than this value will be returned
:param timeout: The amount of time in seconds to wait before terminating the stream
:return: Event object with these keys:
'type': The type of event such as "ADDED", "DELETED", etc.
'raw_object': a dict representing the watched object.
'object': A ResourceInstance wrapping raw_object.
Example:
client = DynamicClient(k8s_client)
v1_pods = client.resources.get(api_version='v1', kind='Pod')
for e in v1_pods.watch(resource_version=0, namespace=default, timeout=5):
print(e['type'])
print(e['object'].metadata)
"""
watcher = watch.Watch()
for event in watcher.stream(
resource.get,
namespace=namespace,
name=name,
field_selector=field_selector,
label_selector=label_selector,
resource_version=resource_version,
serialize=False,
timeout_seconds=timeout
):
event['object'] = ResourceInstance(resource, event['object'])
yield event | Stream events for a resource from the Kubernetes API
:param resource: The API resource object that will be used to query the API
:param namespace: The namespace to query
:param name: The name of the resource instance to query
:param label_selector: The label selector with which to filter results
:param field_selector: The field selector with which to filter results
:param resource_version: The version with which to filter results. Only events with
a resource_version greater than this value will be returned
:param timeout: The amount of time in seconds to wait before terminating the stream
:return: Event object with these keys:
'type': The type of event such as "ADDED", "DELETED", etc.
'raw_object': a dict representing the watched object.
'object': A ResourceInstance wrapping raw_object.
Example:
client = DynamicClient(k8s_client)
v1_pods = client.resources.get(api_version='v1', kind='Pod')
for e in v1_pods.watch(resource_version=0, namespace=default, timeout=5):
print(e['type'])
print(e['object'].metadata) | Below is the the instruction that describes the task:
### Input:
Stream events for a resource from the Kubernetes API
:param resource: The API resource object that will be used to query the API
:param namespace: The namespace to query
:param name: The name of the resource instance to query
:param label_selector: The label selector with which to filter results
:param field_selector: The field selector with which to filter results
:param resource_version: The version with which to filter results. Only events with
a resource_version greater than this value will be returned
:param timeout: The amount of time in seconds to wait before terminating the stream
:return: Event object with these keys:
'type': The type of event such as "ADDED", "DELETED", etc.
'raw_object': a dict representing the watched object.
'object': A ResourceInstance wrapping raw_object.
Example:
client = DynamicClient(k8s_client)
v1_pods = client.resources.get(api_version='v1', kind='Pod')
for e in v1_pods.watch(resource_version=0, namespace=default, timeout=5):
print(e['type'])
print(e['object'].metadata)
### Response:
def watch(self, resource, namespace=None, name=None, label_selector=None, field_selector=None, resource_version=None, timeout=None):
"""
Stream events for a resource from the Kubernetes API
:param resource: The API resource object that will be used to query the API
:param namespace: The namespace to query
:param name: The name of the resource instance to query
:param label_selector: The label selector with which to filter results
:param field_selector: The field selector with which to filter results
:param resource_version: The version with which to filter results. Only events with
a resource_version greater than this value will be returned
:param timeout: The amount of time in seconds to wait before terminating the stream
:return: Event object with these keys:
'type': The type of event such as "ADDED", "DELETED", etc.
'raw_object': a dict representing the watched object.
'object': A ResourceInstance wrapping raw_object.
Example:
client = DynamicClient(k8s_client)
v1_pods = client.resources.get(api_version='v1', kind='Pod')
for e in v1_pods.watch(resource_version=0, namespace=default, timeout=5):
print(e['type'])
print(e['object'].metadata)
"""
watcher = watch.Watch()
for event in watcher.stream(
resource.get,
namespace=namespace,
name=name,
field_selector=field_selector,
label_selector=label_selector,
resource_version=resource_version,
serialize=False,
timeout_seconds=timeout
):
event['object'] = ResourceInstance(resource, event['object'])
yield event |
async def _connect_and_read(self):
"""Retreives and connects to Slack's RTM API.
Makes an authenticated call to Slack's RTM API to retrieve
a websocket URL. Then connects to the message server and
reads event messages as they come in.
If 'auto_reconnect' is specified we
retrieve a new url and reconnect any time the connection
is lost unintentionally or an exception is thrown.
Raises:
            SlackApiError: Unable to retrieve RTM URL from Slack.
websockets.exceptions: Errors thrown by the 'websockets' library.
"""
while not self._stopped:
try:
self._connection_attempts += 1
async with aiohttp.ClientSession(
loop=self._event_loop,
timeout=aiohttp.ClientTimeout(total=self.timeout),
) as session:
self._session = session
url, data = await self._retreive_websocket_info()
async with session.ws_connect(
url,
heartbeat=self.ping_interval,
ssl=self.ssl,
proxy=self.proxy,
) as websocket:
self._logger.debug("The Websocket connection has been opened.")
self._websocket = websocket
self._dispatch_event(event="open", data=data)
await self._read_messages()
except (
client_err.SlackClientNotConnectedError,
client_err.SlackApiError,
# TODO: Catch websocket exceptions thrown by aiohttp.
) as exception:
self._logger.debug(str(exception))
self._dispatch_event(event="error", data=exception)
if self.auto_reconnect and not self._stopped:
await self._wait_exponentially(exception)
continue
self._logger.exception(
"The Websocket encountered an error. Closing the connection..."
)
self._close_websocket()
            raise | Retrieves and connects to Slack's RTM API.
Makes an authenticated call to Slack's RTM API to retrieve
a websocket URL. Then connects to the message server and
reads event messages as they come in.
If 'auto_reconnect' is specified we
retrieve a new url and reconnect any time the connection
is lost unintentionally or an exception is thrown.
Raises:
            SlackApiError: Unable to retrieve RTM URL from Slack.
websockets.exceptions: Errors thrown by the 'websockets' library. | Below is the the instruction that describes the task:
### Input:
Retrieves and connects to Slack's RTM API.
Makes an authenticated call to Slack's RTM API to retrieve
a websocket URL. Then connects to the message server and
reads event messages as they come in.
If 'auto_reconnect' is specified we
retrieve a new url and reconnect any time the connection
is lost unintentionally or an exception is thrown.
Raises:
    SlackApiError: Unable to retrieve RTM URL from Slack.
websockets.exceptions: Errors thrown by the 'websockets' library.
### Response:
async def _connect_and_read(self):
"""Retreives and connects to Slack's RTM API.
Makes an authenticated call to Slack's RTM API to retrieve
a websocket URL. Then connects to the message server and
reads event messages as they come in.
If 'auto_reconnect' is specified we
retrieve a new url and reconnect any time the connection
is lost unintentionally or an exception is thrown.
Raises:
            SlackApiError: Unable to retrieve RTM URL from Slack.
websockets.exceptions: Errors thrown by the 'websockets' library.
"""
while not self._stopped:
try:
self._connection_attempts += 1
async with aiohttp.ClientSession(
loop=self._event_loop,
timeout=aiohttp.ClientTimeout(total=self.timeout),
) as session:
self._session = session
url, data = await self._retreive_websocket_info()
async with session.ws_connect(
url,
heartbeat=self.ping_interval,
ssl=self.ssl,
proxy=self.proxy,
) as websocket:
self._logger.debug("The Websocket connection has been opened.")
self._websocket = websocket
self._dispatch_event(event="open", data=data)
await self._read_messages()
except (
client_err.SlackClientNotConnectedError,
client_err.SlackApiError,
# TODO: Catch websocket exceptions thrown by aiohttp.
) as exception:
self._logger.debug(str(exception))
self._dispatch_event(event="error", data=exception)
if self.auto_reconnect and not self._stopped:
await self._wait_exponentially(exception)
continue
self._logger.exception(
"The Websocket encountered an error. Closing the connection..."
)
self._close_websocket()
raise |
def adjoint(self):
"""Adjoint of this operator.
The adjoint is given by taking the transpose of the matrix
and the adjoint of each component operator.
In weighted product spaces, the adjoint needs to take the
weightings into account. This is currently not supported.
Returns
-------
adjoint : `ProductSpaceOperator`
The adjoint
Examples
--------
>>> r3 = odl.rn(3)
>>> pspace = odl.ProductSpace(r3, r3)
>>> I = odl.IdentityOperator(r3)
>>> x = pspace.element([[1, 2, 3],
... [4, 5, 6]])
Matrix is transposed:
>>> prod_op = ProductSpaceOperator([[0, I], [0, 0]],
... domain=pspace, range=pspace)
>>> prod_op(x)
ProductSpace(rn(3), 2).element([
[ 4., 5., 6.],
[ 0., 0., 0.]
])
>>> prod_op.adjoint(x)
ProductSpace(rn(3), 2).element([
[ 0., 0., 0.],
[ 1., 2., 3.]
])
"""
# Lazy import to improve `import odl` time
import scipy.sparse
adjoint_ops = [op.adjoint for op in self.ops.data]
data = np.empty(len(adjoint_ops), dtype=object)
data[:] = adjoint_ops
indices = [self.ops.col, self.ops.row] # Swap col/row -> transpose
shape = (self.ops.shape[1], self.ops.shape[0])
adj_matrix = scipy.sparse.coo_matrix((data, indices), shape)
return ProductSpaceOperator(adj_matrix, self.range, self.domain) | Adjoint of this operator.
The adjoint is given by taking the transpose of the matrix
and the adjoint of each component operator.
In weighted product spaces, the adjoint needs to take the
weightings into account. This is currently not supported.
Returns
-------
adjoint : `ProductSpaceOperator`
The adjoint
Examples
--------
>>> r3 = odl.rn(3)
>>> pspace = odl.ProductSpace(r3, r3)
>>> I = odl.IdentityOperator(r3)
>>> x = pspace.element([[1, 2, 3],
... [4, 5, 6]])
Matrix is transposed:
>>> prod_op = ProductSpaceOperator([[0, I], [0, 0]],
... domain=pspace, range=pspace)
>>> prod_op(x)
ProductSpace(rn(3), 2).element([
[ 4., 5., 6.],
[ 0., 0., 0.]
])
>>> prod_op.adjoint(x)
ProductSpace(rn(3), 2).element([
[ 0., 0., 0.],
[ 1., 2., 3.]
]) | Below is the the instruction that describes the task:
### Input:
Adjoint of this operator.
The adjoint is given by taking the transpose of the matrix
and the adjoint of each component operator.
In weighted product spaces, the adjoint needs to take the
weightings into account. This is currently not supported.
Returns
-------
adjoint : `ProductSpaceOperator`
The adjoint
Examples
--------
>>> r3 = odl.rn(3)
>>> pspace = odl.ProductSpace(r3, r3)
>>> I = odl.IdentityOperator(r3)
>>> x = pspace.element([[1, 2, 3],
... [4, 5, 6]])
Matrix is transposed:
>>> prod_op = ProductSpaceOperator([[0, I], [0, 0]],
... domain=pspace, range=pspace)
>>> prod_op(x)
ProductSpace(rn(3), 2).element([
[ 4., 5., 6.],
[ 0., 0., 0.]
])
>>> prod_op.adjoint(x)
ProductSpace(rn(3), 2).element([
[ 0., 0., 0.],
[ 1., 2., 3.]
])
### Response:
def adjoint(self):
"""Adjoint of this operator.
The adjoint is given by taking the transpose of the matrix
and the adjoint of each component operator.
In weighted product spaces, the adjoint needs to take the
weightings into account. This is currently not supported.
Returns
-------
adjoint : `ProductSpaceOperator`
The adjoint
Examples
--------
>>> r3 = odl.rn(3)
>>> pspace = odl.ProductSpace(r3, r3)
>>> I = odl.IdentityOperator(r3)
>>> x = pspace.element([[1, 2, 3],
... [4, 5, 6]])
Matrix is transposed:
>>> prod_op = ProductSpaceOperator([[0, I], [0, 0]],
... domain=pspace, range=pspace)
>>> prod_op(x)
ProductSpace(rn(3), 2).element([
[ 4., 5., 6.],
[ 0., 0., 0.]
])
>>> prod_op.adjoint(x)
ProductSpace(rn(3), 2).element([
[ 0., 0., 0.],
[ 1., 2., 3.]
])
"""
# Lazy import to improve `import odl` time
import scipy.sparse
adjoint_ops = [op.adjoint for op in self.ops.data]
data = np.empty(len(adjoint_ops), dtype=object)
data[:] = adjoint_ops
indices = [self.ops.col, self.ops.row] # Swap col/row -> transpose
shape = (self.ops.shape[1], self.ops.shape[0])
adj_matrix = scipy.sparse.coo_matrix((data, indices), shape)
return ProductSpaceOperator(adj_matrix, self.range, self.domain) |
def copies(mapping, s2bins, rna, min_rna = 800, mismatches = 0):
"""
1. determine bin coverage
2. determine rRNA gene coverage
3. compare
"""
cov = {} # cov[scaffold] = [bases, length]
s2bins, bins2s = parse_s2bins(s2bins)
rna_cov = parse_rna(rna, s2bins, min_rna)
s2bins, bins2s = filter_missing_rna(s2bins, bins2s, rna_cov)
# count bases mapped to scaffolds and rRNA gene regions
for line in mapping:
line = line.strip().split()
# get scaffold lengths
if line[0].startswith('@'):
if line[0].startswith('@SQ') is False:
continue
s = line[1].split(':')[1]
l = int(line[2].split(':')[1])
# check if scaffold is binned
if s not in s2bins:
continue
if s not in cov:
cov[s] = [0, l]
# check mismatch threshold
mm = count_mismatches(line)
if mm is False or mm > mismatches:
continue
# check that scaffold is in bin
s, bases = line[2], len(line[9])
if s not in cov:
continue
cov[s][0] += bases
rna_cov = rna_bases(rna_cov, s, bases, line)
print('# mismatches threshold: %s' % (mismatches))
header = ['#rRNA scaffold', 'rRNA genes >=%sbp on scaffold' % (min_rna), \
'rRNA coverage', \
'bin', 'bin info', 'bin coverage', \
'rRNAs >=%sbp in bin' % (min_rna), \
'rRNA coverage/bin coverage', \
'estimated number of copies']
print('\t'.join(header))
for bin, scaffolds in list(bins2s.items()):
rna_count = sum([len(rna_cov[s][2]) for s in scaffolds if s in rna_cov])
for s in scaffolds:
if s not in rna_cov:
continue
out = []
counts = rna_cov[s]
bin_cov = calc_bin_cov(bins2s[bin], cov)
num_genes = len(counts[2])
rna_coverage = float(float(counts[0])/float(counts[1]))
if bin_cov == 0:
rna_div_bin = 0
else:
rna_div_bin = float(rna_coverage/bin_cov)
est = int(max([rna_count, counts, rna_div_bin]))
out = [s, num_genes, rna_coverage, bin, bin_cov, rna_count, rna_div_bin, est]
print('\t'.join([str(i) for i in out])) | 1. determine bin coverage
2. determine rRNA gene coverage
3. compare | Below is the the instruction that describes the task:
### Input:
1. determine bin coverage
2. determine rRNA gene coverage
3. compare
### Response:
def copies(mapping, s2bins, rna, min_rna = 800, mismatches = 0):
"""
1. determine bin coverage
2. determine rRNA gene coverage
3. compare
"""
cov = {} # cov[scaffold] = [bases, length]
s2bins, bins2s = parse_s2bins(s2bins)
rna_cov = parse_rna(rna, s2bins, min_rna)
s2bins, bins2s = filter_missing_rna(s2bins, bins2s, rna_cov)
# count bases mapped to scaffolds and rRNA gene regions
for line in mapping:
line = line.strip().split()
# get scaffold lengths
if line[0].startswith('@'):
if line[0].startswith('@SQ') is False:
continue
s = line[1].split(':')[1]
l = int(line[2].split(':')[1])
# check if scaffold is binned
if s not in s2bins:
continue
if s not in cov:
cov[s] = [0, l]
# check mismatch threshold
mm = count_mismatches(line)
if mm is False or mm > mismatches:
continue
# check that scaffold is in bin
s, bases = line[2], len(line[9])
if s not in cov:
continue
cov[s][0] += bases
rna_cov = rna_bases(rna_cov, s, bases, line)
print('# mismatches threshold: %s' % (mismatches))
header = ['#rRNA scaffold', 'rRNA genes >=%sbp on scaffold' % (min_rna), \
'rRNA coverage', \
'bin', 'bin info', 'bin coverage', \
'rRNAs >=%sbp in bin' % (min_rna), \
'rRNA coverage/bin coverage', \
'estimated number of copies']
print('\t'.join(header))
for bin, scaffolds in list(bins2s.items()):
rna_count = sum([len(rna_cov[s][2]) for s in scaffolds if s in rna_cov])
for s in scaffolds:
if s not in rna_cov:
continue
out = []
counts = rna_cov[s]
bin_cov = calc_bin_cov(bins2s[bin], cov)
num_genes = len(counts[2])
rna_coverage = float(float(counts[0])/float(counts[1]))
if bin_cov == 0:
rna_div_bin = 0
else:
rna_div_bin = float(rna_coverage/bin_cov)
est = int(max([rna_count, counts, rna_div_bin]))
out = [s, num_genes, rna_coverage, bin, bin_cov, rna_count, rna_div_bin, est]
print('\t'.join([str(i) for i in out])) |
def index_with_dupes(values_list, unique_together=2, model_number_i=0, serial_number_i=1, verbosity=1):
'''Create dict from values_list with first N values as a compound key.
    Default N (number of columns assumed to be "unique_together") is 2.
>>> index_with_dupes([(1,2,3), (5,6,7), (5,6,8), (2,1,3)]) == ({(1, 2): (1, 2, 3), (2, 1): (2, 1, 3), (5, 6): (5, 6, 7)}, {(5, 6): [(5, 6, 7), (5, 6, 8)]})
True
'''
try:
N = values_list.count()
except:
N = len(values_list)
if verbosity > 0:
print 'Indexing %d values_lists in a queryset or a sequence of Django model instances (database table rows).' % N
index, dupes = {}, {}
pbar = None
if verbosity and N > min(1000000, max(0, 100000**(1./verbosity))):
widgets = [pb.Counter(), '%d rows: ' % N, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
pbar = pb.ProgressBar(widgets=widgets, maxval=N).start()
rownum = 0
for row in values_list:
normalized_key = [str(row[model_number_i]).strip(), str(row[serial_number_i]).strip()]
normalized_key += [i for i in range(unique_together) if i not in (serial_number_i, model_number_i)]
normalized_key = tuple(normalized_key)
if normalized_key in index:
# need to add the first nondupe before we add the dupes to the list
if normalized_key not in dupes:
dupes[normalized_key] = [index[normalized_key]]
dupes[normalized_key] = dupes[normalized_key] + [row]
if verbosity > 2:
print 'Duplicate "unique_together" tuple found. Here are all the rows that match this key:'
print dupes[normalized_key]
else:
index[normalized_key] = row
if pbar:
pbar.update(rownum)
rownum += 1
if pbar:
pbar.finish()
if verbosity > 0:
print 'Found %d duplicate model-serial pairs in the %d records or %g%%' % (len(dupes), len(index), len(dupes)*100./(len(index) or 1.))
return index, dupes | Create dict from values_list with first N values as a compound key.
Default N (number of columns assumed to be "unique_together") is 2.
>>> index_with_dupes([(1,2,3), (5,6,7), (5,6,8), (2,1,3)]) == ({(1, 2): (1, 2, 3), (2, 1): (2, 1, 3), (5, 6): (5, 6, 7)}, {(5, 6): [(5, 6, 7), (5, 6, 8)]})
True | Below is the the instruction that describes the task:
### Input:
Create dict from values_list with first N values as a compound key.
Default N (number of columns assumed to be "unique_together") is 2.
>>> index_with_dupes([(1,2,3), (5,6,7), (5,6,8), (2,1,3)]) == ({(1, 2): (1, 2, 3), (2, 1): (2, 1, 3), (5, 6): (5, 6, 7)}, {(5, 6): [(5, 6, 7), (5, 6, 8)]})
True
### Response:
def index_with_dupes(values_list, unique_together=2, model_number_i=0, serial_number_i=1, verbosity=1):
'''Create dict from values_list with first N values as a compound key.
Default N (number of columns assumed to be "unique_together") is 2.
>>> index_with_dupes([(1,2,3), (5,6,7), (5,6,8), (2,1,3)]) == ({(1, 2): (1, 2, 3), (2, 1): (2, 1, 3), (5, 6): (5, 6, 7)}, {(5, 6): [(5, 6, 7), (5, 6, 8)]})
True
'''
try:
N = values_list.count()
except:
N = len(values_list)
if verbosity > 0:
print 'Indexing %d values_lists in a queryset or a sequence of Django model instances (database table rows).' % N
index, dupes = {}, {}
pbar = None
if verbosity and N > min(1000000, max(0, 100000**(1./verbosity))):
widgets = [pb.Counter(), '%d rows: ' % N, pb.Percentage(), ' ', pb.RotatingMarker(), ' ', pb.Bar(),' ', pb.ETA()]
pbar = pb.ProgressBar(widgets=widgets, maxval=N).start()
rownum = 0
for row in values_list:
normalized_key = [str(row[model_number_i]).strip(), str(row[serial_number_i]).strip()]
normalized_key += [i for i in range(unique_together) if i not in (serial_number_i, model_number_i)]
normalized_key = tuple(normalized_key)
if normalized_key in index:
# need to add the first nondupe before we add the dupes to the list
if normalized_key not in dupes:
dupes[normalized_key] = [index[normalized_key]]
dupes[normalized_key] = dupes[normalized_key] + [row]
if verbosity > 2:
print 'Duplicate "unique_together" tuple found. Here are all the rows that match this key:'
print dupes[normalized_key]
else:
index[normalized_key] = row
if pbar:
pbar.update(rownum)
rownum += 1
if pbar:
pbar.finish()
if verbosity > 0:
print 'Found %d duplicate model-serial pairs in the %d records or %g%%' % (len(dupes), len(index), len(dupes)*100./(len(index) or 1.))
return index, dupes |
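A usage sketch with a Django queryset; the Device model and its field names are hypothetical, and the print statement follows the function's own Python 2 style:
# collect (model, serial, extra...) rows and index them
rows = Device.objects.values_list('model_number', 'serial_number', 'price')
index, dupes = index_with_dupes(rows, verbosity=0)
for key, matching_rows in dupes.items():
    print 'duplicate model/serial pair %r occurs %d times' % (key, len(matching_rows))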
def experiment(parser, token):
"""
Split Testing experiment tag has the following syntax :
{% experiment <experiment_name> <alternative> %}
experiment content goes here
{% endexperiment %}
If the alternative name is neither 'test' nor 'control' an exception is raised
during rendering.
"""
try:
token_contents = token.split_contents()
experiment_name, alternative, weight, user_variable = _parse_token_contents(token_contents)
node_list = parser.parse(('endexperiment', ))
parser.delete_first_token()
except ValueError:
raise template.TemplateSyntaxError("Syntax should be like :"
"{% experiment experiment_name alternative [weight=val] [user=val] %}")
return ExperimentNode(node_list, experiment_name, alternative, weight, user_variable) | Split Testing experiment tag has the following syntax :
{% experiment <experiment_name> <alternative> %}
experiment content goes here
{% endexperiment %}
If the alternative name is neither 'test' nor 'control' an exception is raised
during rendering. | Below is the the instruction that describes the task:
### Input:
Split Testing experiment tag has the following syntax :
{% experiment <experiment_name> <alternative> %}
experiment content goes here
{% endexperiment %}
If the alternative name is neither 'test' nor 'control' an exception is raised
during rendering.
### Response:
def experiment(parser, token):
"""
Split Testing experiment tag has the following syntax :
{% experiment <experiment_name> <alternative> %}
experiment content goes here
{% endexperiment %}
If the alternative name is neither 'test' nor 'control' an exception is raised
during rendering.
"""
try:
token_contents = token.split_contents()
experiment_name, alternative, weight, user_variable = _parse_token_contents(token_contents)
node_list = parser.parse(('endexperiment', ))
parser.delete_first_token()
except ValueError:
raise template.TemplateSyntaxError("Syntax should be like :"
"{% experiment experiment_name alternative [weight=val] [user=val] %}")
return ExperimentNode(node_list, experiment_name, alternative, weight, user_variable) |
def set_mode(self, mode):
"""Set Abode alarm mode."""
if not mode:
raise AbodeException(ERROR.MISSING_ALARM_MODE)
elif mode.lower() not in CONST.ALL_MODES:
raise AbodeException(ERROR.INVALID_ALARM_MODE, CONST.ALL_MODES)
mode = mode.lower()
response = self._abode.send_request(
"put", CONST.get_panel_mode_url(self._area, mode))
_LOGGER.debug("Set Alarm Home Response: %s", response.text)
response_object = json.loads(response.text)
if response_object['area'] != self._area:
raise AbodeException(ERROR.SET_MODE_AREA)
if response_object['mode'] != mode:
raise AbodeException(ERROR.SET_MODE_MODE)
self._json_state['mode'][(self.device_id)] = response_object['mode']
_LOGGER.info("Set alarm %s mode to: %s",
self._device_id, response_object['mode'])
return True | Set Abode alarm mode. | Below is the the instruction that describes the task:
### Input:
Set Abode alarm mode.
### Response:
def set_mode(self, mode):
"""Set Abode alarm mode."""
if not mode:
raise AbodeException(ERROR.MISSING_ALARM_MODE)
elif mode.lower() not in CONST.ALL_MODES:
raise AbodeException(ERROR.INVALID_ALARM_MODE, CONST.ALL_MODES)
mode = mode.lower()
response = self._abode.send_request(
"put", CONST.get_panel_mode_url(self._area, mode))
_LOGGER.debug("Set Alarm Home Response: %s", response.text)
response_object = json.loads(response.text)
if response_object['area'] != self._area:
raise AbodeException(ERROR.SET_MODE_AREA)
if response_object['mode'] != mode:
raise AbodeException(ERROR.SET_MODE_MODE)
self._json_state['mode'][(self.device_id)] = response_object['mode']
_LOGGER.info("Set alarm %s mode to: %s",
self._device_id, response_object['mode'])
return True |
def acme_sign_certificate(common_name, size=DEFAULT_KEY_SIZE):
'''
Sign certificate with acme_tiny for let's encrypt
'''
private_key_path = '{}/{}.key'.format(CERTIFICATES_PATH, common_name)
certificate_path = '{}/{}.crt'.format(CERTIFICATES_PATH, common_name)
certificate_request_path = '{}/{}.csr'.format(CERTIFICATES_PATH,
common_name)
signed_cert = '{certificates_path}/{common_name}-signed.crt'.format(
certificates_path=CERTIFICATES_PATH,
common_name=common_name)
generate_certificate(common_name, size)
cmd = 'openssl req -new -sha256 -key {private_key_path}'
cmd += ' -subj "/CN={common_name}" -out {certificate_request_path}'
cmd = cmd.format(
private_key_path=private_key_path,
common_name=common_name,
certificate_request_path=certificate_request_path
)
p = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
close_fds=True)
p.communicate()
_internal_sign_certificate(certificate_path, certificate_request_path,
signed_cert)
cron = "/etc/cron.monthly/acme-renew"
if not os.path.exists(cron):
with open(cron, "w") as file:
file.write("#!/bin/bash\ncozy_management renew_certificates\n")
st = os.stat(cron)
os.chmod(cron, st.st_mode | S_IXUSR) | Sign certificate with acme_tiny for let's encrypt | Below is the the instruction that describes the task:
### Input:
Sign certificate with acme_tiny for let's encrypt
### Response:
def acme_sign_certificate(common_name, size=DEFAULT_KEY_SIZE):
'''
Sign certificate with acme_tiny for let's encrypt
'''
private_key_path = '{}/{}.key'.format(CERTIFICATES_PATH, common_name)
certificate_path = '{}/{}.crt'.format(CERTIFICATES_PATH, common_name)
certificate_request_path = '{}/{}.csr'.format(CERTIFICATES_PATH,
common_name)
signed_cert = '{certificates_path}/{common_name}-signed.crt'.format(
certificates_path=CERTIFICATES_PATH,
common_name=common_name)
generate_certificate(common_name, size)
cmd = 'openssl req -new -sha256 -key {private_key_path}'
cmd += ' -subj "/CN={common_name}" -out {certificate_request_path}'
cmd = cmd.format(
private_key_path=private_key_path,
common_name=common_name,
certificate_request_path=certificate_request_path
)
p = subprocess.Popen(cmd,
shell=True,
stdout=subprocess.PIPE,
close_fds=True)
p.communicate()
_internal_sign_certificate(certificate_path, certificate_request_path,
signed_cert)
cron = "/etc/cron.monthly/acme-renew"
if not os.path.exists(cron):
with open(cron, "w") as file:
file.write("#!/bin/bash\ncozy_management renew_certificates\n")
st = os.stat(cron)
os.chmod(cron, st.st_mode | S_IXUSR) |
def move_active_window(x, y):
"""
Moves the active window to a given position given the window_id and absolute coordinates,
--sync option auto passed in, will wait until actually moved before giving control back to us
will do nothing if the window is maximized
"""
window_id = get_window_id()
cmd=['xdotool','windowmove', window_id, str(x), str(y)]
subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr= subprocess.PIPE).communicate() | Moves the active window to a given position given the window_id and absolute coordinates,
--sync option auto passed in, will wait until actually moved before giving control back to us
will do nothing if the window is maximized | Below is the the instruction that describes the task:
### Input:
Moves the active window to a given position given the window_id and absolute coordinates,
--sync option auto passed in, will wait until actually moved before giving control back to us
will do nothing if the window is maximized
### Response:
def move_active_window(x, y):
"""
Moves the active window to a given position given the window_id and absolute coordinates,
--sync option auto passed in, will wait until actually moved before giving control back to us
will do nothing if the window is maximized
"""
window_id = get_window_id()
cmd=['xdotool','windowmove', window_id, str(x), str(y)]
subprocess.Popen(cmd, stdout = subprocess.PIPE, stderr= subprocess.PIPE).communicate() |
def _eq(self, other):
"""Compare two nodes for equality."""
return (self.type, self.children) == (other.type, other.children) | Compare two nodes for equality. | Below is the the instruction that describes the task:
### Input:
Compare two nodes for equality.
### Response:
def _eq(self, other):
"""Compare two nodes for equality."""
return (self.type, self.children) == (other.type, other.children) |
def _to_chi(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the Chi representation."""
if rep == 'Chi':
return data
# Check valid n-qubit input
_check_nqubit_dim(input_dim, output_dim)
if rep == 'Operator':
return _from_operator('Chi', data, input_dim, output_dim)
# Convert via Choi representation
if rep != 'Choi':
data = _to_choi(rep, data, input_dim, output_dim)
return _choi_to_chi(data, input_dim, output_dim) | Transform a QuantumChannel to the Chi representation. | Below is the the instruction that describes the task:
### Input:
Transform a QuantumChannel to the Chi representation.
### Response:
def _to_chi(rep, data, input_dim, output_dim):
"""Transform a QuantumChannel to the Chi representation."""
if rep == 'Chi':
return data
# Check valid n-qubit input
_check_nqubit_dim(input_dim, output_dim)
if rep == 'Operator':
return _from_operator('Chi', data, input_dim, output_dim)
# Convert via Choi representation
if rep != 'Choi':
data = _to_choi(rep, data, input_dim, output_dim)
return _choi_to_chi(data, input_dim, output_dim) |
def save_session(zap_helper, file_path):
"""Save the session."""
console.debug('Saving the session to "{0}"'.format(file_path))
zap_helper.zap.core.save_session(file_path, overwrite='true') | Save the session. | Below is the the instruction that describes the task:
### Input:
Save the session.
### Response:
def save_session(zap_helper, file_path):
"""Save the session."""
console.debug('Saving the session to "{0}"'.format(file_path))
zap_helper.zap.core.save_session(file_path, overwrite='true') |
def sanitize_url(url: str) -> str:
"""
Sanitize the given url so that it can be used as a valid filename.
:param url: url to create filename from
:raise ValueError: when the given url can not be sanitized
:return: created filename
"""
for part in reversed(url.split('/')):
filename = re.sub(r'[^a-zA-Z0-9_.\-]', '', part)
if len(filename) > 0:
break
else:
raise ValueError('Could not create reasonable name for file from url %s', url)
return filename | Sanitize the given url so that it can be used as a valid filename.
:param url: url to create filename from
:raise ValueError: when the given url can not be sanitized
:return: created filename | Below is the the instruction that describes the task:
### Input:
Sanitize the given url so that it can be used as a valid filename.
:param url: url to create filename from
:raise ValueError: when the given url can not be sanitized
:return: created filename
### Response:
def sanitize_url(url: str) -> str:
"""
Sanitize the given url so that it can be used as a valid filename.
:param url: url to create filename from
:raise ValueError: when the given url can not be sanitized
:return: created filename
"""
for part in reversed(url.split('/')):
filename = re.sub(r'[^a-zA-Z0-9_.\-]', '', part)
if len(filename) > 0:
break
else:
raise ValueError('Could not create reasonable name for file from url %s', url)
return filename |
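Two illustrative calls (the URLs are made up):
print(sanitize_url('https://example.com/images/photo 1.png'))  # -> photo1.png
print(sanitize_url('https://example.com/download/'))           # -> download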
def start(configfile=None, daemonize=False, environment=None,
fastcgi=False, scgi=False, pidfile=None,
cgi=False, debug=False):
"""Subscribe all engine plugins and start the engine."""
sys.path = [''] + sys.path
# monkey patching cherrypy to disable config interpolation
def new_as_dict(self, raw=True, vars=None):
"""Convert an INI file to a dictionary"""
# Load INI file into a dict
result = {}
for section in self.sections():
if section not in result:
result[section] = {}
for option in self.options(section):
value = self.get(section, option, raw=raw, vars=vars)
try:
value = cherrypy.lib.reprconf.unrepr(value)
except Exception:
x = sys.exc_info()[1]
msg = ("Config error in section: %r, option: %r, "
"value: %r. Config values must be valid Python." %
(section, option, value))
raise ValueError(msg, x.__class__.__name__, x.args)
result[section][option] = value
return result
cherrypy.lib.reprconf.Parser.as_dict = new_as_dict
instance = LdapCherry()
app = cherrypy.tree.mount(instance, '/', configfile)
cherrypy.config.update(configfile)
instance.reload(app.config, debug)
engine = cherrypy.engine
# Turn off autoreload
cherrypy.config.update({'engine.autoreload.on': False})
if environment is not None:
cherrypy.config.update({'environment': environment})
# Only daemonize if asked to.
if daemonize:
# Don't print anything to stdout/stderr.
cherrypy.config.update({'log.screen': False})
plugins.Daemonizer(engine).subscribe()
if pidfile:
plugins.PIDFile(engine, pidfile).subscribe()
if hasattr(engine, "signal_handler"):
engine.signal_handler.subscribe()
if hasattr(engine, "console_control_handler"):
engine.console_control_handler.subscribe()
if (fastcgi and (scgi or cgi)) or (scgi and cgi):
cherrypy.log.error("You may only specify one of the cgi, fastcgi, and "
"scgi options.", 'ENGINE')
sys.exit(1)
elif fastcgi or scgi or cgi:
# Turn off the default HTTP server (which is subscribed by default).
cherrypy.server.unsubscribe()
addr = cherrypy.server.bind_addr
if fastcgi:
f = servers.FlupFCGIServer(application=cherrypy.tree,
bindAddress=addr)
elif scgi:
f = servers.FlupSCGIServer(application=cherrypy.tree,
bindAddress=addr)
else:
f = servers.FlupCGIServer(application=cherrypy.tree,
bindAddress=addr)
s = servers.ServerAdapter(engine, httpserver=f, bind_addr=addr)
s.subscribe()
# Always start the engine; this will start all other services
try:
engine.start()
except Exception as e:
# Assume the error has been logged already via bus.log.
sys.exit(1)
else:
engine.block() | Subscribe all engine plugins and start the engine. | Below is the the instruction that describes the task:
### Input:
Subscribe all engine plugins and start the engine.
### Response:
def start(configfile=None, daemonize=False, environment=None,
fastcgi=False, scgi=False, pidfile=None,
cgi=False, debug=False):
"""Subscribe all engine plugins and start the engine."""
sys.path = [''] + sys.path
# monkey patching cherrypy to disable config interpolation
def new_as_dict(self, raw=True, vars=None):
"""Convert an INI file to a dictionary"""
# Load INI file into a dict
result = {}
for section in self.sections():
if section not in result:
result[section] = {}
for option in self.options(section):
value = self.get(section, option, raw=raw, vars=vars)
try:
value = cherrypy.lib.reprconf.unrepr(value)
except Exception:
x = sys.exc_info()[1]
msg = ("Config error in section: %r, option: %r, "
"value: %r. Config values must be valid Python." %
(section, option, value))
raise ValueError(msg, x.__class__.__name__, x.args)
result[section][option] = value
return result
cherrypy.lib.reprconf.Parser.as_dict = new_as_dict
instance = LdapCherry()
app = cherrypy.tree.mount(instance, '/', configfile)
cherrypy.config.update(configfile)
instance.reload(app.config, debug)
engine = cherrypy.engine
# Turn off autoreload
cherrypy.config.update({'engine.autoreload.on': False})
if environment is not None:
cherrypy.config.update({'environment': environment})
# Only daemonize if asked to.
if daemonize:
# Don't print anything to stdout/stderr.
cherrypy.config.update({'log.screen': False})
plugins.Daemonizer(engine).subscribe()
if pidfile:
plugins.PIDFile(engine, pidfile).subscribe()
if hasattr(engine, "signal_handler"):
engine.signal_handler.subscribe()
if hasattr(engine, "console_control_handler"):
engine.console_control_handler.subscribe()
if (fastcgi and (scgi or cgi)) or (scgi and cgi):
cherrypy.log.error("You may only specify one of the cgi, fastcgi, and "
"scgi options.", 'ENGINE')
sys.exit(1)
elif fastcgi or scgi or cgi:
# Turn off the default HTTP server (which is subscribed by default).
cherrypy.server.unsubscribe()
addr = cherrypy.server.bind_addr
if fastcgi:
f = servers.FlupFCGIServer(application=cherrypy.tree,
bindAddress=addr)
elif scgi:
f = servers.FlupSCGIServer(application=cherrypy.tree,
bindAddress=addr)
else:
f = servers.FlupCGIServer(application=cherrypy.tree,
bindAddress=addr)
s = servers.ServerAdapter(engine, httpserver=f, bind_addr=addr)
s.subscribe()
# Always start the engine; this will start all other services
try:
engine.start()
except Exception as e:
# Assume the error has been logged already via bus.log.
sys.exit(1)
else:
engine.block() |
def combine_lists_reducer(
key: str,
merged_list: list,
component: COMPONENT
) -> list:
"""
Reducer function to combine the lists for the specified key into a
single, flat list
:param key:
The key on the COMPONENT instances to operate upon
:param merged_list:
The accumulated list of values populated by previous calls to this
reducer function
:param component:
The COMPONENT instance from which to append values to the
merged_list
:return:
The updated merged_list with the values for the COMPONENT added
onto it
"""
merged_list.extend(getattr(component, key))
return merged_list | Reducer function to combine the lists for the specified key into a
single, flat list
:param key:
The key on the COMPONENT instances to operate upon
:param merged_list:
The accumulated list of values populated by previous calls to this
reducer function
:param component:
The COMPONENT instance from which to append values to the
merged_list
:return:
The updated merged_list with the values for the COMPONENT added
onto it | Below is the the instruction that describes the task:
### Input:
Reducer function to combine the lists for the specified key into a
single, flat list
:param key:
The key on the COMPONENT instances to operate upon
:param merged_list:
The accumulated list of values populated by previous calls to this
reducer function
:param component:
The COMPONENT instance from which to append values to the
merged_list
:return:
The updated merged_list with the values for the COMPONENT added
onto it
### Response:
def combine_lists_reducer(
key: str,
merged_list: list,
component: COMPONENT
) -> list:
"""
Reducer function to combine the lists for the specified key into a
single, flat list
:param key:
The key on the COMPONENT instances to operate upon
:param merged_list:
The accumulated list of values populated by previous calls to this
reducer function
:param component:
The COMPONENT instance from which to append values to the
merged_list
:return:
The updated merged_list with the values for the COMPONENT added
onto it
"""
merged_list.extend(getattr(component, key))
return merged_list |
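A small sketch of how this reducer is typically driven with functools.reduce; SimpleNamespace stands in for the COMPONENT type and the "includes" attribute name is illustrative:
from functools import partial, reduce
from types import SimpleNamespace

# toy components, each carrying a list attribute named "includes"
components = [
    SimpleNamespace(includes=['a.h', 'b.h']),
    SimpleNamespace(includes=['c.h']),
]
merged = reduce(partial(combine_lists_reducer, 'includes'), components, [])
print(merged)  # ['a.h', 'b.h', 'c.h']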
def chrome_tracing_object_transfer_dump(self, filename=None):
"""Return a list of transfer events that can viewed as a timeline.
To view this information as a timeline, simply dump it as a json file
by passing in "filename" or using using json.dump, and then load go to
chrome://tracing in the Chrome web browser and load the dumped file.
Make sure to enable "Flow events" in the "View Options" menu.
Args:
filename: If a filename is provided, the timeline is dumped to that
file.
Returns:
If filename is not provided, this returns a list of profiling
events. Each profile event is a dictionary.
"""
client_id_to_address = {}
for client_info in ray.global_state.client_table():
client_id_to_address[client_info["ClientID"]] = "{}:{}".format(
client_info["NodeManagerAddress"],
client_info["ObjectManagerPort"])
all_events = []
for key, items in self.profile_table().items():
# Only consider object manager events.
if items[0]["component_type"] != "object_manager":
continue
for event in items:
if event["event_type"] == "transfer_send":
object_id, remote_client_id, _, _ = event["extra_data"]
elif event["event_type"] == "transfer_receive":
object_id, remote_client_id, _, _ = event["extra_data"]
elif event["event_type"] == "receive_pull_request":
object_id, remote_client_id = event["extra_data"]
else:
assert False, "This should be unreachable."
# Choose a color by reading the first couple of hex digits of
# the object ID as an integer and turning that into a color.
object_id_int = int(object_id[:2], 16)
color = self._chrome_tracing_colors[object_id_int % len(
self._chrome_tracing_colors)]
new_event = {
# The category of the event.
"cat": event["event_type"],
# The string displayed on the event.
"name": event["event_type"],
# The identifier for the group of rows that the event
# appears in.
"pid": client_id_to_address[key],
# The identifier for the row that the event appears in.
"tid": client_id_to_address[remote_client_id],
# The start time in microseconds.
"ts": self._seconds_to_microseconds(event["start_time"]),
# The duration in microseconds.
"dur": self._seconds_to_microseconds(event["end_time"] -
event["start_time"]),
# What is this?
"ph": "X",
# This is the name of the color to display the box in.
"cname": color,
# The extra user-defined data.
"args": event["extra_data"],
}
all_events.append(new_event)
# Add another box with a color indicating whether it was a send
# or a receive event.
if event["event_type"] == "transfer_send":
additional_event = new_event.copy()
additional_event["cname"] = "black"
all_events.append(additional_event)
elif event["event_type"] == "transfer_receive":
additional_event = new_event.copy()
additional_event["cname"] = "grey"
all_events.append(additional_event)
else:
pass
if filename is not None:
with open(filename, "w") as outfile:
json.dump(all_events, outfile)
else:
return all_events | Return a list of transfer events that can be viewed as a timeline.
To view this information as a timeline, simply dump it as a json file
by passing in "filename" or using json.dump, and then go to
chrome://tracing in the Chrome web browser and load the dumped file.
Make sure to enable "Flow events" in the "View Options" menu.
Args:
filename: If a filename is provided, the timeline is dumped to that
file.
Returns:
If filename is not provided, this returns a list of profiling
events. Each profile event is a dictionary. | Below is the the instruction that describes the task:
### Input:
Return a list of transfer events that can be viewed as a timeline.
To view this information as a timeline, simply dump it as a json file
by passing in "filename" or using json.dump, and then go to
chrome://tracing in the Chrome web browser and load the dumped file.
Make sure to enable "Flow events" in the "View Options" menu.
Args:
filename: If a filename is provided, the timeline is dumped to that
file.
Returns:
If filename is not provided, this returns a list of profiling
events. Each profile event is a dictionary.
### Response:
def chrome_tracing_object_transfer_dump(self, filename=None):
"""Return a list of transfer events that can viewed as a timeline.
To view this information as a timeline, simply dump it as a json file
by passing in "filename" or using using json.dump, and then load go to
chrome://tracing in the Chrome web browser and load the dumped file.
Make sure to enable "Flow events" in the "View Options" menu.
Args:
filename: If a filename is provided, the timeline is dumped to that
file.
Returns:
If filename is not provided, this returns a list of profiling
events. Each profile event is a dictionary.
"""
client_id_to_address = {}
for client_info in ray.global_state.client_table():
client_id_to_address[client_info["ClientID"]] = "{}:{}".format(
client_info["NodeManagerAddress"],
client_info["ObjectManagerPort"])
all_events = []
for key, items in self.profile_table().items():
# Only consider object manager events.
if items[0]["component_type"] != "object_manager":
continue
for event in items:
if event["event_type"] == "transfer_send":
object_id, remote_client_id, _, _ = event["extra_data"]
elif event["event_type"] == "transfer_receive":
object_id, remote_client_id, _, _ = event["extra_data"]
elif event["event_type"] == "receive_pull_request":
object_id, remote_client_id = event["extra_data"]
else:
assert False, "This should be unreachable."
# Choose a color by reading the first couple of hex digits of
# the object ID as an integer and turning that into a color.
object_id_int = int(object_id[:2], 16)
color = self._chrome_tracing_colors[object_id_int % len(
self._chrome_tracing_colors)]
new_event = {
# The category of the event.
"cat": event["event_type"],
# The string displayed on the event.
"name": event["event_type"],
# The identifier for the group of rows that the event
# appears in.
"pid": client_id_to_address[key],
# The identifier for the row that the event appears in.
"tid": client_id_to_address[remote_client_id],
# The start time in microseconds.
"ts": self._seconds_to_microseconds(event["start_time"]),
# The duration in microseconds.
"dur": self._seconds_to_microseconds(event["end_time"] -
event["start_time"]),
# What is this?
"ph": "X",
# This is the name of the color to display the box in.
"cname": color,
# The extra user-defined data.
"args": event["extra_data"],
}
all_events.append(new_event)
# Add another box with a color indicating whether it was a send
# or a receive event.
if event["event_type"] == "transfer_send":
additional_event = new_event.copy()
additional_event["cname"] = "black"
all_events.append(additional_event)
elif event["event_type"] == "transfer_receive":
additional_event = new_event.copy()
additional_event["cname"] = "grey"
all_events.append(additional_event)
else:
pass
if filename is not None:
with open(filename, "w") as outfile:
json.dump(all_events, outfile)
else:
return all_events |
def get(cls, id, api=None):
"""
Fetches the resource from the server.
:param id: Resource identifier
:param api: sevenbridges Api instance.
:return: Resource object.
"""
id = Transform.to_resource(id)
api = api if api else cls._API
if 'get' in cls._URL:
extra = {'resource': cls.__name__, 'query': {'id': id}}
logger.info('Fetching {} resource'.format(cls), extra=extra)
resource = api.get(url=cls._URL['get'].format(id=id)).json()
return cls(api=api, **resource)
else:
raise SbgError('Unable to fetch resource!') | Fetches the resource from the server.
:param id: Resource identifier
:param api: sevenbridges Api instance.
:return: Resource object. | Below is the the instruction that describes the task:
### Input:
Fetches the resource from the server.
:param id: Resource identifier
:param api: sevenbridges Api instance.
:return: Resource object.
### Response:
def get(cls, id, api=None):
"""
Fetches the resource from the server.
:param id: Resource identifier
:param api: sevenbridges Api instance.
:return: Resource object.
"""
id = Transform.to_resource(id)
api = api if api else cls._API
if 'get' in cls._URL:
extra = {'resource': cls.__name__, 'query': {'id': id}}
logger.info('Fetching {} resource'.format(cls), extra=extra)
resource = api.get(url=cls._URL['get'].format(id=id)).json()
return cls(api=api, **resource)
else:
raise SbgError('Unable to fetch resource!') |
def make_encoder(self,formula_dict,inter_list,param_dict):
"""
make the encoder function
"""
X_dict = {}
Xcol_dict = {}
encoder_dict = {}
# first, replace param_dict[key] = values, with param_dict[key] = dmatrix
for key in formula_dict:
encoding,arg = formula_dict[key]
if 'Dev' in encoding:
# make deviation encoded design matrix
drop_name = arg
# encode
deviation_encoder,X_sub,colnames_sub = _dev_encode(param_dict,drop_name,key)
# additionally, store in dictionary for use by interactions
X_dict[key] = X_sub
Xcol_dict[key] = colnames_sub
# store dictionary of encoder functions to keep for prediction
encoder_dict[key] = deviation_encoder
elif 'Dum' in encoding:
# make dummy variable encoding design mat
ref_name = arg
dummy_encoder,X_sub,colnames_sub = _dum_encode(param_dict,ref_name,key)
# additionally, store in dictionary for use by interactions
X_dict[key] = X_sub
Xcol_dict[key] = colnames_sub
# store dictionary of encoder functions to keep for prediction
encoder_dict[key] = dummy_encoder
elif 'Poly' in encoding:
# make polynomial encoding design mat
degree = arg
polynomial_encoder,X_sub,colnames_sub = _poly_encode(param_dict,degree,key)
# additionally, store in dictionary for use by interactions
X_dict[key] = X_sub
Xcol_dict[key] = colnames_sub
# store dictionary of encoder functions to keep for prediction
encoder_dict[key] = polynomial_encoder
else:
print encoding
raise Exception("Encoding name error")
# now compute interaction designmatrices
for interaction in inter_list:
if len(interaction) >= 3:
raise Exception("Doesn't allow 4-way or higher interaction terms")
elif len(interaction) == 3:
param_name1 = interaction[0]
param_name2 = interaction[1]
param_name3 = interaction[2]
col_names1 = Xcol_dict[param_name1]
col_names2 = Xcol_dict[param_name2]
col_names3 = Xcol_dict[param_name3]
# make 3-way encoder function
def threeway_encoder(param_name1,param_name2,param_name3, \
col_names1, col_names2, col_names3, X_dict):
"""
needs the three names of the parameters to be encoded, as well as
a dictionary containing the already encoded single parameter
design matrices, keyed by name
"""
X1 = X_dict[param_name1]
X2 = X_dict[param_name2]
X3 = X_dict[param_name3]
X_int = []
names_int = []
for i in np.arange(0,X1.shape[1]):
for j in np.arange(0,X2.shape[1]):
for k in np.arange(0,X3.shape[1]):
X_int.append(X1[:,i]*X2[:,j]*X3[:,k])
names_int.append(col_names1[i] + "*" + \
col_names2[j] + "*" + col_names3[k])
# make X_int from lists to np array
X_int = np.array(X_int).T
return X_int, names_int
encoder_dict['threeway'] = threeway_encoder
elif len(interaction) == 2:
# there are two interaction terms (A*B)
param_name1 = interaction[0]
param_name2 = interaction[1]
col_names1 = Xcol_dict[param_name1]
col_names2 = Xcol_dict[param_name2]
# make twoway_encoder function
def twoway_encoder(param_name1,param_name2, col_names1, col_names2, X_dict):
X1 = X_dict[param_name1]
X2 = X_dict[param_name2]
X_int = []
names_int = []
for i in np.arange(0,X1.shape[1]):
for j in np.arange(0,X2.shape[1]):
X_int.append(X1[:,i]*X2[:,j])
names_int.append(col_names1[i] + "*" + col_names2[j])
X_int = np.array(X_int).T
return X_int, names_int
encoder_dict['twoway'] = twoway_encoder
else:
raise Exception("Error while evaluating meaning of interaction term")
# make key in encoder to specify which columns are active
encoder_dict['trimmed_columns'] = self._trimmed_columns
return encoder_dict | make the encoder function | Below is the the instruction that describes the task:
### Input:
make the encoder function
### Response:
def make_encoder(self,formula_dict,inter_list,param_dict):
"""
make the encoder function
"""
X_dict = {}
Xcol_dict = {}
encoder_dict = {}
# first, replace param_dict[key] = values, with param_dict[key] = dmatrix
for key in formula_dict:
encoding,arg = formula_dict[key]
if 'Dev' in encoding:
# make deviation encoded design matrix
drop_name = arg
# encode
deviation_encoder,X_sub,colnames_sub = _dev_encode(param_dict,drop_name,key)
# additionally, store in dictionary for use by interactions
X_dict[key] = X_sub
Xcol_dict[key] = colnames_sub
# store dictionary of encoder functions to keep for prediction
encoder_dict[key] = deviation_encoder
elif 'Dum' in encoding:
# make dummy variable encoding design mat
ref_name = arg
dummy_encoder,X_sub,colnames_sub = _dum_encode(param_dict,ref_name,key)
# additionally, store in dictionary for use by interactions
X_dict[key] = X_sub
Xcol_dict[key] = colnames_sub
# store dictionary of encoder functions to keep for prediction
encoder_dict[key] = dummy_encoder
elif 'Poly' in encoding:
# make polynomial encoding design mat
degree = arg
polynomial_encoder,X_sub,colnames_sub = _poly_encode(param_dict,degree,key)
# additionally, store in dictionary for use by interactions
X_dict[key] = X_sub
Xcol_dict[key] = colnames_sub
# store dictionary of encoder functions to keep for prediction
encoder_dict[key] = polynomial_encoder
else:
print encoding
raise Exception("Encoding name error")
# now compute interaction designmatrices
for interaction in inter_list:
if len(interaction) >= 3:
raise Exception("Doesn't allow 4-way or higher interaction terms")
elif len(interaction) == 3:
param_name1 = interaction[0]
param_name2 = interaction[1]
param_name3 = interaction[2]
col_names1 = Xcol_dict[param_name1]
col_names2 = Xcol_dict[param_name2]
col_names3 = Xcol_dict[param_name3]
# make 3-way encoder function
def threeway_encoder(param_name1,param_name2,param_name3, \
col_names1, col_names2, col_names3, X_dict):
"""
needs the three names of the parameters to be encoded, as well as
a dictionary containing the already encoded single parameter
design matrices, keyed by name
"""
X1 = X_dict[param_name1]
X2 = X_dict[param_name2]
X3 = X_dict[param_name3]
X_int = []
names_int = []
for i in np.arange(0,X1.shape[1]):
for j in np.arange(0,X2.shape[1]):
for k in np.arange(0,X3.shape[1]):
X_int.append(X1[:,i]*X2[:,j]*X3[:,k])
names_int.append(col_names1[i] + "*" + \
col_names2[j] + "*" + col_names3[k])
# make X_int from lists to np array
X_int = np.array(X_int).T
return X_int, names_int
encoder_dict['threeway'] = threeway_encoder
elif len(interaction) == 2:
# there are two interaction terms (A*B)
param_name1 = interaction[0]
param_name2 = interaction[1]
col_names1 = Xcol_dict[param_name1]
col_names2 = Xcol_dict[param_name2]
# make twoway_encoder function
def twoway_encoder(param_name1,param_name2, col_names1, col_names2, X_dict):
X1 = X_dict[param_name1]
X2 = X_dict[param_name2]
X_int = []
names_int = []
for i in np.arange(0,X1.shape[1]):
for j in np.arange(0,X2.shape[1]):
X_int.append(X1[:,i]*X2[:,j])
names_int.append(col_names1[i] + "*" + col_names2[j])
X_int = np.array(X_int).T
return X_int, names_int
encoder_dict['twoway'] = twoway_encoder
else:
raise Exception("Error while evaluating meaning of interaction term")
# make key in encoder to specify which columns are active
encoder_dict['trimmed_columns'] = self._trimmed_columns
return encoder_dict |
def get_text_classifier(arch:Callable, vocab_sz:int, n_class:int, bptt:int=70, max_len:int=20*70, config:dict=None,
drop_mult:float=1., lin_ftrs:Collection[int]=None, ps:Collection[float]=None,
pad_idx:int=1) -> nn.Module:
"Create a text classifier from `arch` and its `config`, maybe `pretrained`."
meta = _model_meta[arch]
config = ifnone(config, meta['config_clas'].copy())
for k in config.keys():
if k.endswith('_p'): config[k] *= drop_mult
if lin_ftrs is None: lin_ftrs = [50]
if ps is None: ps = [0.1]*len(lin_ftrs)
layers = [config[meta['hid_name']] * 3] + lin_ftrs + [n_class]
ps = [config.pop('output_p')] + ps
init = config.pop('init') if 'init' in config else None
encoder = MultiBatchEncoder(bptt, max_len, arch(vocab_sz, **config), pad_idx=pad_idx)
model = SequentialRNN(encoder, PoolingLinearClassifier(layers, ps))
return model if init is None else model.apply(init) | Create a text classifier from `arch` and its `config`, maybe `pretrained`. | Below is the the instruction that describes the task:
### Input:
Create a text classifier from `arch` and its `config`, maybe `pretrained`.
### Response:
def get_text_classifier(arch:Callable, vocab_sz:int, n_class:int, bptt:int=70, max_len:int=20*70, config:dict=None,
drop_mult:float=1., lin_ftrs:Collection[int]=None, ps:Collection[float]=None,
pad_idx:int=1) -> nn.Module:
"Create a text classifier from `arch` and its `config`, maybe `pretrained`."
meta = _model_meta[arch]
config = ifnone(config, meta['config_clas'].copy())
for k in config.keys():
if k.endswith('_p'): config[k] *= drop_mult
if lin_ftrs is None: lin_ftrs = [50]
if ps is None: ps = [0.1]*len(lin_ftrs)
layers = [config[meta['hid_name']] * 3] + lin_ftrs + [n_class]
ps = [config.pop('output_p')] + ps
init = config.pop('init') if 'init' in config else None
encoder = MultiBatchEncoder(bptt, max_len, arch(vocab_sz, **config), pad_idx=pad_idx)
model = SequentialRNN(encoder, PoolingLinearClassifier(layers, ps))
return model if init is None else model.apply(init) |
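An illustrative call using fastai v1's AWD_LSTM architecture; the vocabulary size and class count are placeholder values:
from fastai.text import AWD_LSTM

model = get_text_classifier(AWD_LSTM, vocab_sz=10000, n_class=2, drop_mult=0.5)
# model is a SequentialRNN wrapping a MultiBatchEncoder and a PoolingLinearClassifier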
def sign(self, method, params):
"""Calculate signature with the SIG_METHOD(HMAC-SHA1)
Returns a base64 encoded string of the signature
:param method: the http verb
:param params: the params needed to calculate the signature
"""
query_str = utils.percent_encode(params.items(), True)
str_to_sign = "{0}&%2F&{1}".format(
method, utils.percent_quote(query_str)
)
sig = hmac.new(
utils.to_bytes(self._secret_key + "&"),
utils.to_bytes(str_to_sign),
hashlib.sha1
)
return base64.b64encode(sig.digest()) | Calculate signature with the SIG_METHOD(HMAC-SHA1)
Returns a base64 encoded string of the signature
:param method: the http verb
:param params: the params needed to calculate the signature | Below is the the instruction that describes the task:
### Input:
Calculate signature with the SIG_METHOD(HMAC-SHA1)
Returns a base64 encoded string of the signature
:param method: the http verb
:param params: the params needed to calculate the signature
### Response:
def sign(self, method, params):
"""Calculate signature with the SIG_METHOD(HMAC-SHA1)
Returns a base64 encoded string of the signature
:param method: the http verb
:param params: the params needed to calculate the signature
"""
query_str = utils.percent_encode(params.items(), True)
str_to_sign = "{0}&%2F&{1}".format(
method, utils.percent_quote(query_str)
)
sig = hmac.new(
utils.to_bytes(self._secret_key + "&"),
utils.to_bytes(str_to_sign),
hashlib.sha1
)
return base64.b64encode(sig.digest()) |
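A hypothetical call, assuming `client` is an instance of the API client class that holds `_secret_key` and exposes this `sign()` method; the parameter names are illustrative:
params = {
    'Action': 'DescribeInstances',
    'Format': 'JSON',
    'Timestamp': '2020-01-01T12:00:00Z',
}
signature = client.sign('GET', params)  # base64-encoded HMAC-SHA1 digest
params['Signature'] = signature         # attach before sending the request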
def get_bids(session, project_ids=[], bid_ids=[], limit=10, offset=0):
"""
Get the list of bids
"""
get_bids_data = {}
if bid_ids:
get_bids_data['bids[]'] = bid_ids
if project_ids:
get_bids_data['projects[]'] = project_ids
get_bids_data['limit'] = limit
get_bids_data['offset'] = offset
# GET /api/projects/0.1/bids/
response = make_get_request(session, 'bids', params_data=get_bids_data)
json_data = response.json()
if response.status_code == 200:
return json_data['result']
else:
raise BidsNotFoundException(
message=json_data['message'], error_code=json_data['error_code'],
request_id=json_data['request_id']
) | Get the list of bids | Below is the the instruction that describes the task:
### Input:
Get the list of bids
### Response:
def get_bids(session, project_ids=[], bid_ids=[], limit=10, offset=0):
"""
Get the list of bids
"""
get_bids_data = {}
if bid_ids:
get_bids_data['bids[]'] = bid_ids
if project_ids:
get_bids_data['projects[]'] = project_ids
get_bids_data['limit'] = limit
get_bids_data['offset'] = offset
# GET /api/projects/0.1/bids/
response = make_get_request(session, 'bids', params_data=get_bids_data)
json_data = response.json()
if response.status_code == 200:
return json_data['result']
else:
raise BidsNotFoundException(
message=json_data['message'], error_code=json_data['error_code'],
request_id=json_data['request_id']
) |
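A usage sketch, assuming `session` is an authenticated SDK session; the project ids and the 'bids' key in the returned result dict are illustrative assumptions:
result = get_bids(session, project_ids=[101, 102], limit=5, offset=0)
for bid in result.get('bids', []):
    print(bid.get('id'), bid.get('amount'))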
def acquire_lock(cls, mode, user=None):
"""Set a context manager to lock the whole storage.
``mode`` must either be "r" for shared access or "w" for exclusive
access.
``user`` is the name of the logged in user or empty.
"""
if not user:
return
with EteSyncCache.lock:
cls.user = user
cls.etesync = cls._get_etesync_for_user(cls.user)
if cls._should_sync():
cls._mark_sync()
cls.etesync.get_or_create_user_info(force_fetch=True)
cls.etesync.sync_journal_list()
for journal in cls.etesync.list():
cls.etesync.pull_journal(journal.uid)
yield
if cls.etesync.journal_list_is_dirty():
cls.etesync.sync_journal_list()
for journal in cls.etesync.list():
if cls.etesync.journal_is_dirty(journal.uid):
cls.etesync.sync_journal(journal.uid)
cls.etesync = None
cls.user = None | Set a context manager to lock the whole storage.
``mode`` must either be "r" for shared access or "w" for exclusive
access.
``user`` is the name of the logged in user or empty. | Below is the the instruction that describes the task:
### Input:
Set a context manager to lock the whole storage.
``mode`` must either be "r" for shared access or "w" for exclusive
access.
``user`` is the name of the logged in user or empty.
### Response:
def acquire_lock(cls, mode, user=None):
"""Set a context manager to lock the whole storage.
``mode`` must either be "r" for shared access or "w" for exclusive
access.
``user`` is the name of the logged in user or empty.
"""
if not user:
return
with EteSyncCache.lock:
cls.user = user
cls.etesync = cls._get_etesync_for_user(cls.user)
if cls._should_sync():
cls._mark_sync()
cls.etesync.get_or_create_user_info(force_fetch=True)
cls.etesync.sync_journal_list()
for journal in cls.etesync.list():
cls.etesync.pull_journal(journal.uid)
yield
if cls.etesync.journal_list_is_dirty():
cls.etesync.sync_journal_list()
for journal in cls.etesync.list():
if cls.etesync.journal_is_dirty(journal.uid):
cls.etesync.sync_journal(journal.uid)
cls.etesync = None
cls.user = None |
def get_next_invalid_time_from_t(self, timestamp):
# pylint: disable=too-many-branches
"""
Get the next invalid time
:param timestamp: timestamp in seconds (of course)
:type timestamp: int or float
:return: timestamp of next invalid time
:rtype: int or float
"""
timestamp = int(timestamp)
original_t = timestamp
dr_mins = []
for daterange in self.dateranges:
timestamp = original_t
cont = True
while cont:
start = daterange.get_next_valid_time_from_t(timestamp)
if start is not None:
end = daterange.get_next_invalid_time_from_t(start)
dr_mins.append((start, end))
timestamp = end
else:
cont = False
if timestamp > original_t + (3600 * 24 * 365):
cont = False
periods = merge_periods(dr_mins)
# manage exclude periods
dr_mins = []
for exclude in self.exclude:
for daterange in exclude.dateranges:
timestamp = original_t
cont = True
while cont:
start = daterange.get_next_valid_time_from_t(timestamp)
if start is not None:
end = daterange.get_next_invalid_time_from_t(start)
dr_mins.append((start, end))
timestamp = end
else:
cont = False
if timestamp > original_t + (3600 * 24 * 365):
cont = False
if not dr_mins:
periods_exclude = []
else:
periods_exclude = merge_periods(dr_mins)
if len(periods) >= 1:
# if first valid period is after original timestamp, the first invalid time
# is the original timestamp
if periods[0][0] > original_t:
return original_t
# check the first period + first period of exclude
if len(periods_exclude) >= 1:
if periods_exclude[0][0] < periods[0][1]:
return periods_exclude[0][0]
return periods[0][1]
return original_t | Get the next invalid time
:param timestamp: timestamp in seconds (of course)
:type timestamp: int or float
:return: timestamp of next invalid time
:rtype: int or float | Below is the the instruction that describes the task:
### Input:
Get the next invalid time
:param timestamp: timestamp in seconds (of course)
:type timestamp: int or float
:return: timestamp of next invalid time
:rtype: int or float
### Response:
def get_next_invalid_time_from_t(self, timestamp):
# pylint: disable=too-many-branches
"""
Get the next invalid time
:param timestamp: timestamp in seconds (of course)
:type timestamp: int or float
:return: timestamp of next invalid time
:rtype: int or float
"""
timestamp = int(timestamp)
original_t = timestamp
dr_mins = []
for daterange in self.dateranges:
timestamp = original_t
cont = True
while cont:
start = daterange.get_next_valid_time_from_t(timestamp)
if start is not None:
end = daterange.get_next_invalid_time_from_t(start)
dr_mins.append((start, end))
timestamp = end
else:
cont = False
if timestamp > original_t + (3600 * 24 * 365):
cont = False
periods = merge_periods(dr_mins)
# manage exclude periods
dr_mins = []
for exclude in self.exclude:
for daterange in exclude.dateranges:
timestamp = original_t
cont = True
while cont:
start = daterange.get_next_valid_time_from_t(timestamp)
if start is not None:
end = daterange.get_next_invalid_time_from_t(start)
dr_mins.append((start, end))
timestamp = end
else:
cont = False
if timestamp > original_t + (3600 * 24 * 365):
cont = False
if not dr_mins:
periods_exclude = []
else:
periods_exclude = merge_periods(dr_mins)
if len(periods) >= 1:
# if first valid period is after original timestamp, the first invalid time
# is the original timestamp
if periods[0][0] > original_t:
return original_t
# check the first period + first period of exclude
if len(periods_exclude) >= 1:
if periods_exclude[0][0] < periods[0][1]:
return periods_exclude[0][0]
return periods[0][1]
return original_t |
def _subset(self, subset):
"""Return a new pipeline with a subset of the sections"""
pl = Pipeline(bundle=self.bundle)
for group_name, pl_segment in iteritems(self):
if group_name not in subset:
continue
pl[group_name] = pl_segment
return pl | Return a new pipeline with a subset of the sections | Below is the the instruction that describes the task:
### Input:
Return a new pipeline with a subset of the sections
### Response:
def _subset(self, subset):
"""Return a new pipeline with a subset of the sections"""
pl = Pipeline(bundle=self.bundle)
for group_name, pl_segment in iteritems(self):
if group_name not in subset:
continue
pl[group_name] = pl_segment
return pl |
def hide(self, selections):
'''Hide objects in this representation. BallAndStickRepresentation
support selections of atoms and bonds.
To hide the first atom and the first bond you can use the
following code::
from chemlab.mviewer.state import Selection
representation.hide({'atoms': Selection([0], system.n_atoms),
'bonds': Selection([0], system.n_bonds)})
Returns the current Selection of hidden atoms and bonds.
'''
if 'atoms' in selections:
self.hidden_state['atoms'] = selections['atoms']
self.on_atom_hidden_changed()
if 'bonds' in selections:
self.hidden_state['bonds'] = selections['bonds']
self.on_bond_hidden_changed()
if 'box' in selections:
self.hidden_state['box'] = box_s = selections['box']
if box_s.mask[0]:
if self.viewer.has_renderer(self.box_renderer):
self.viewer.remove_renderer(self.box_renderer)
else:
if not self.viewer.has_renderer(self.box_renderer):
self.viewer.add_renderer(self.box_renderer)
return self.hidden_state | Hide objects in this representation. BallAndStickRepresentation
support selections of atoms and bonds.
To hide the first atom and the first bond you can use the
following code::
from chemlab.mviewer.state import Selection
representation.hide({'atoms': Selection([0], system.n_atoms),
'bonds': Selection([0], system.n_bonds)})
Returns the current Selection of hidden atoms and bonds. | Below is the the instruction that describes the task:
### Input:
Hide objects in this representation. BallAndStickRepresentation
support selections of atoms and bonds.
To hide the first atom and the first bond you can use the
following code::
from chemlab.mviewer.state import Selection
representation.hide({'atoms': Selection([0], system.n_atoms),
'bonds': Selection([0], system.n_bonds)})
Returns the current Selection of hidden atoms and bonds.
### Response:
def hide(self, selections):
'''Hide objects in this representation. BallAndStickRepresentation
support selections of atoms and bonds.
To hide the first atom and the first bond you can use the
following code::
from chemlab.mviewer.state import Selection
representation.hide({'atoms': Selection([0], system.n_atoms),
'bonds': Selection([0], system.n_bonds)})
Returns the current Selection of hidden atoms and bonds.
'''
if 'atoms' in selections:
self.hidden_state['atoms'] = selections['atoms']
self.on_atom_hidden_changed()
if 'bonds' in selections:
self.hidden_state['bonds'] = selections['bonds']
self.on_bond_hidden_changed()
if 'box' in selections:
self.hidden_state['box'] = box_s = selections['box']
if box_s.mask[0]:
if self.viewer.has_renderer(self.box_renderer):
self.viewer.remove_renderer(self.box_renderer)
else:
if not self.viewer.has_renderer(self.box_renderer):
self.viewer.add_renderer(self.box_renderer)
return self.hidden_state |
def variableMissingValue(ncVar):
""" Returns the missingData given a NetCDF variable
Looks for one of the following attributes: _FillValue, missing_value, MissingValue,
missingValue. Returns None if these attributes are not found.
"""
attributes = ncVarAttributes(ncVar)
if not attributes:
return None # a premature optimization :-)
for key in ('missing_value', 'MissingValue', 'missingValue', 'FillValue', '_FillValue'):
if key in attributes:
missingDataValue = attributes[key]
return missingDataValue
return None | Returns the missingData given a NetCDF variable
Looks for one of the following attributes: _FillValue, missing_value, MissingValue,
missingValue. Returns None if these attributes are not found. | Below is the the instruction that describes the task:
### Input:
Returns the missingData given a NetCDF variable
Looks for one of the following attributes: _FillValue, missing_value, MissingValue,
missingValue. Returns None if these attributes are not found.
### Response:
def variableMissingValue(ncVar):
""" Returns the missingData given a NetCDF variable
Looks for one of the following attributes: _FillValue, missing_value, MissingValue,
missingValue. Returns None if these attributes are not found.
"""
attributes = ncVarAttributes(ncVar)
if not attributes:
return None # a premature optimization :-)
for key in ('missing_value', 'MissingValue', 'missingValue', 'FillValue', '_FillValue'):
if key in attributes:
missingDataValue = attributes[key]
return missingDataValue
return None |
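A hypothetical usage with the netCDF4 library; the file and variable names are made up, and it is assumed that the ncVarAttributes() helper accepts a netCDF4 variable:
from netCDF4 import Dataset

ds = Dataset('ocean_temps.nc')
fill = variableMissingValue(ds.variables['sst'])
if fill is not None:
    print('sst uses {} as its missing-data marker'.format(fill))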
def process_cancel(self):
"""
Process the incoming token stream until it finds
an end token DONE with the cancel flag set.
At that point the connection should be ready to handle a new query.
If no cancel request is pending, this function does nothing.
"""
self.log_response_message('got CANCEL message')
# silly cases, nothing to do
if not self.in_cancel:
return
while True:
token_id = self.get_token_id()
self.process_token(token_id)
if not self.in_cancel:
return | Process the incoming token stream until it finds
an end token DONE with the cancel flag set.
At that point the connection should be ready to handle a new query.
If no cancel request is pending, this function does nothing. | Below is the the instruction that describes the task:
### Input:
Process the incoming token stream until it finds
an end token DONE with the cancel flag set.
At that point the connection should be ready to handle a new query.
If no cancel request is pending, this function does nothing.
### Response:
def process_cancel(self):
"""
Process the incoming token stream until it finds
an end token DONE with the cancel flag set.
At that point the connection should be ready to handle a new query.
If no cancel request is pending, this function does nothing.
"""
self.log_response_message('got CANCEL message')
# silly cases, nothing to do
if not self.in_cancel:
return
while True:
token_id = self.get_token_id()
self.process_token(token_id)
if not self.in_cancel:
return |
def deserialize_by_field(value, field):
"""
Some types get serialized to JSON, as strings.
If we know what they are supposed to be, we can deserialize them
"""
if isinstance(field, forms.DateTimeField):
value = parse_datetime(value)
elif isinstance(field, forms.DateField):
value = parse_date(value)
elif isinstance(field, forms.TimeField):
value = parse_time(value)
return value | Some types get serialized to JSON, as strings.
If we know what they are supposed to be, we can deserialize them | Below is the the instruction that describes the task:
### Input:
Some types get serialized to JSON, as strings.
If we know what they are supposed to be, we can deserialize them
### Response:
def deserialize_by_field(value, field):
"""
Some types get serialized to JSON, as strings.
If we know what they are supposed to be, we can deserialize them
"""
if isinstance(field, forms.DateTimeField):
value = parse_datetime(value)
elif isinstance(field, forms.DateField):
value = parse_date(value)
elif isinstance(field, forms.TimeField):
value = parse_time(value)
return value |
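An illustrative round-trip inside a configured Django project:
from django import forms

deserialize_by_field('2021-06-01T09:30:00', forms.DateTimeField())
# -> datetime.datetime(2021, 6, 1, 9, 30)
deserialize_by_field('09:30:00', forms.TimeField())
# -> datetime.time(9, 30)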
def write_data(hyper_params,
mode,
sequence,
num_threads):
"""
Write a tf record containing a feature dict and a label dict.
:param hyper_params: The hyper parameters required for writing {"problem": {"augmentation": {"steps": Int}}}
:param mode: The mode specifies the purpose of the data. Typically it is either "train" or "validation".
:param sequence: A tf.keras.utils.sequence.
:param num_threads: The number of threads. (Recommended: 4 for training and 2 for validation seems to works nice)
:return:
"""
if not isinstance(sequence, Sequence) and not (callable(getattr(sequence, "__getitem__", None)) and callable(getattr(sequence, "__len__", None))):
raise ValueError("sequence must be tf.keras.utils.Sequence or a subtype or implement __len__(self) and __getitem__(self, idx)")
prefix = os.path.join(hyper_params.train.get("tf_records_path", "tfrecords"), mode)
prefix = prefix.replace("\\", "/")
data_tmp_folder = "/".join(prefix.split("/")[:-1])
if not os.path.exists(data_tmp_folder):
os.makedirs(data_tmp_folder)
args = [(hyper_params, sequence, num_threads, i, (prefix + "_%d.tfrecords") % i) for i in range(num_threads)]
# Retrieve a single batch
sample_feature, sample_label = sequence[0]
config = {"num_threads": num_threads}
for k in sample_feature.keys():
config["feature_" + k] = {"shape": sample_feature[k].shape[1:], "dtype": sample_feature[k].dtype.name}
for k in sample_label.keys():
config["label_" + k] = {"shape": sample_label[k].shape[1:], "dtype": sample_label[k].dtype.name}
with open(prefix + '_config.json', 'w') as outfile:
json.dump(config, outfile)
pool = Pool(processes=num_threads)
pool.map(_write_tf_record_pool_helper, args) | Write a tf record containing a feature dict and a label dict.
:param hyper_params: The hyper parameters required for writing {"problem": {"augmentation": {"steps": Int}}}
:param mode: The mode specifies the purpose of the data. Typically it is either "train" or "validation".
:param sequence: A tf.keras.utils.sequence.
:param num_threads: The number of threads. (Recommended: 4 for training and 2 for validation seems to works nice)
:return: | Below is the the instruction that describes the task:
### Input:
Write a tf record containing a feature dict and a label dict.
:param hyper_params: The hyper parameters required for writing {"problem": {"augmentation": {"steps": Int}}}
:param mode: The mode specifies the purpose of the data. Typically it is either "train" or "validation".
:param sequence: A tf.keras.utils.sequence.
:param num_threads: The number of threads. (Recommended: 4 for training and 2 for validation seems to works nice)
:return:
### Response:
def write_data(hyper_params,
mode,
sequence,
num_threads):
"""
Write a tf record containing a feature dict and a label dict.
:param hyper_params: The hyper parameters required for writing {"problem": {"augmentation": {"steps": Int}}}
:param mode: The mode specifies the purpose of the data. Typically it is either "train" or "validation".
    :param sequence: A tf.keras.utils.Sequence.
    :param num_threads: The number of threads. (Recommended: 4 for training and 2 for validation seems to work nicely)
:return:
"""
if not isinstance(sequence, Sequence) and not (callable(getattr(sequence, "__getitem__", None)) and callable(getattr(sequence, "__len__", None))):
raise ValueError("sequence must be tf.keras.utils.Sequence or a subtype or implement __len__(self) and __getitem__(self, idx)")
prefix = os.path.join(hyper_params.train.get("tf_records_path", "tfrecords"), mode)
prefix = prefix.replace("\\", "/")
data_tmp_folder = "/".join(prefix.split("/")[:-1])
if not os.path.exists(data_tmp_folder):
os.makedirs(data_tmp_folder)
args = [(hyper_params, sequence, num_threads, i, (prefix + "_%d.tfrecords") % i) for i in range(num_threads)]
# Retrieve a single batch
sample_feature, sample_label = sequence[0]
config = {"num_threads": num_threads}
for k in sample_feature.keys():
config["feature_" + k] = {"shape": sample_feature[k].shape[1:], "dtype": sample_feature[k].dtype.name}
for k in sample_label.keys():
config["label_" + k] = {"shape": sample_label[k].shape[1:], "dtype": sample_label[k].dtype.name}
with open(prefix + '_config.json', 'w') as outfile:
json.dump(config, outfile)
pool = Pool(processes=num_threads)
pool.map(_write_tf_record_pool_helper, args) |
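A hedged sketch of the duck-typed sequence contract and of the config-derivation step above, runnable with NumPy alone; ToySequence and its shapes are invented for illustration, and write_data itself is not called here because it needs the module's private _write_tf_record_pool_helper.

import numpy as np

class ToySequence:                        # satisfies the __len__/__getitem__ duck-typing check above
    def __len__(self):
        return 4                          # number of batches
    def __getitem__(self, idx):
        features = {"image": np.zeros((8, 32, 32, 3), dtype=np.float32)}   # batch of 8
        labels = {"class_id": np.zeros((8, 1), dtype=np.uint8)}
        return features, labels

sample_feature, sample_label = ToySequence()[0]
config = {"num_threads": 2}
for k in sample_feature:
    config["feature_" + k] = {"shape": sample_feature[k].shape[1:], "dtype": sample_feature[k].dtype.name}
for k in sample_label:
    config["label_" + k] = {"shape": sample_label[k].shape[1:], "dtype": sample_label[k].dtype.name}
print(config)   # per-sample shapes (batch dimension stripped) and dtypes, as written to *_config.json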
def _api_put(self, url, **kwargs):
"""
A convenience wrapper for _put. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
self._put(**kwargs) | A convenience wrapper for _put. Adds headers, auth and base url by
        default | Below is the instruction that describes the task:
### Input:
A convenience wrapper for _put. Adds headers, auth and base url by
default
### Response:
def _api_put(self, url, **kwargs):
"""
A convenience wrapper for _put. Adds headers, auth and base url by
default
"""
kwargs['url'] = self.url + url
kwargs['auth'] = self.auth
headers = deepcopy(self.headers)
headers.update(kwargs.get('headers', {}))
kwargs['headers'] = headers
self._put(**kwargs) |
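A self-contained sketch of the same wrapper pattern; TinyClient, its base URL, and the stubbed _put are assumptions made so the example runs without a real HTTP backend.

from copy import deepcopy

class TinyClient:                                  # hypothetical client illustrating the wrapper
    def __init__(self):
        self.url = "https://api.example.com"
        self.auth = ("user", "secret")
        self.headers = {"Content-Type": "application/json"}
    def _put(self, **kwargs):
        print("PUT", kwargs["url"], kwargs["headers"])   # a real client would hand kwargs to requests.put
    def _api_put(self, url, **kwargs):             # same body as the record above
        kwargs["url"] = self.url + url
        kwargs["auth"] = self.auth
        headers = deepcopy(self.headers)
        headers.update(kwargs.get("headers", {}))
        kwargs["headers"] = headers
        self._put(**kwargs)

TinyClient()._api_put("/items/1", headers={"X-Trace": "abc"})
# -> PUT https://api.example.com/items/1 {'Content-Type': 'application/json', 'X-Trace': 'abc'}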
def send_audio_packet(self, data, *, encode=True):
"""Sends an audio packet composed of the data.
You must be connected to play audio.
Parameters
----------
data: bytes
The :term:`py:bytes-like object` denoting PCM or Opus voice data.
encode: bool
Indicates if ``data`` should be encoded into Opus.
Raises
-------
ClientException
You are not connected.
OpusError
Encoding the data failed.
"""
self.checked_add('sequence', 1, 65535)
if encode:
encoded_data = self.encoder.encode(data, self.encoder.SAMPLES_PER_FRAME)
else:
encoded_data = data
packet = self._get_voice_packet(encoded_data)
try:
self.socket.sendto(packet, (self.endpoint_ip, self.voice_port))
except BlockingIOError:
log.warning('A packet has been dropped (seq: %s, timestamp: %s)', self.sequence, self.timestamp)
self.checked_add('timestamp', self.encoder.SAMPLES_PER_FRAME, 4294967295) | Sends an audio packet composed of the data.
You must be connected to play audio.
Parameters
----------
data: bytes
The :term:`py:bytes-like object` denoting PCM or Opus voice data.
encode: bool
Indicates if ``data`` should be encoded into Opus.
Raises
-------
ClientException
You are not connected.
OpusError
            Encoding the data failed. | Below is the instruction that describes the task:
### Input:
Sends an audio packet composed of the data.
You must be connected to play audio.
Parameters
----------
data: bytes
The :term:`py:bytes-like object` denoting PCM or Opus voice data.
encode: bool
Indicates if ``data`` should be encoded into Opus.
Raises
-------
ClientException
You are not connected.
OpusError
Encoding the data failed.
### Response:
def send_audio_packet(self, data, *, encode=True):
"""Sends an audio packet composed of the data.
You must be connected to play audio.
Parameters
----------
data: bytes
The :term:`py:bytes-like object` denoting PCM or Opus voice data.
encode: bool
Indicates if ``data`` should be encoded into Opus.
Raises
-------
ClientException
You are not connected.
OpusError
Encoding the data failed.
"""
self.checked_add('sequence', 1, 65535)
if encode:
encoded_data = self.encoder.encode(data, self.encoder.SAMPLES_PER_FRAME)
else:
encoded_data = data
packet = self._get_voice_packet(encoded_data)
try:
self.socket.sendto(packet, (self.endpoint_ip, self.voice_port))
except BlockingIOError:
log.warning('A packet has been dropped (seq: %s, timestamp: %s)', self.sequence, self.timestamp)
self.checked_add('timestamp', self.encoder.SAMPLES_PER_FRAME, 4294967295) |
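send_audio_packet relies on checked_add, which is not part of this record; the sketch below assumes it wraps a counter back to zero once it would exceed its limit, and shows why the 16-bit RTP sequence number and 32-bit timestamp need that treatment. The concrete values are illustrative only.

def checked_add(value, amount, limit):
    # assumed behaviour: wrap back to 0 once the counter would exceed its limit
    value += amount
    return 0 if value > limit else value

sequence = 65534                      # 16-bit RTP sequence number near its limit
for _ in range(3):
    sequence = checked_add(sequence, 1, 65535)
    print(sequence)                   # 65535, 0, 1

timestamp = 4294966000                # 32-bit timestamp advanced by SAMPLES_PER_FRAME (960 for 48 kHz / 20 ms frames)
timestamp = checked_add(timestamp, 960, 4294967295)
print(timestamp)                      # 4294966960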
def get_objects(self):
"""
Returns dictionary with the instance objects for each form. Keys should match the
corresponding form.
"""
objects = {}
for key in six.iterkeys(self.form_classes):
objects[key] = None
return objects | Returns dictionary with the instance objects for each form. Keys should match the
        corresponding form. | Below is the instruction that describes the task:
### Input:
Returns dictionary with the instance objects for each form. Keys should match the
corresponding form.
### Response:
def get_objects(self):
"""
Returns dictionary with the instance objects for each form. Keys should match the
corresponding form.
"""
objects = {}
for key in six.iterkeys(self.form_classes):
objects[key] = None
return objects |
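A minimal sketch of how a subclass would typically override this hook; MultiFormStub, the form keys, and the returned instances are hypothetical.

class MultiFormStub:                               # stand-in carrying the method above
    form_classes = {}
    def get_objects(self):
        return {key: None for key in self.form_classes}

class ProfileView(MultiFormStub):                  # hypothetical subclass supplying real instances
    form_classes = {"user": "UserForm", "address": "AddressForm"}   # real form classes in an actual view
    def get_objects(self):
        objects = super().get_objects()
        objects["user"] = {"pk": 1}                # normally a model instance fetched for editing
        return objects

print(ProfileView().get_objects())    # {'user': {'pk': 1}, 'address': None}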
def process(self, frames, eod):
"""Returns an iterator over tuples of the form (buffer, eod)
where buffer is a fixed-sized block of data, and eod indicates whether
this is the last block.
In case padding is deactivated the last block may be smaller than
the buffer size.
"""
src_index = 0
remaining = len(frames)
while remaining:
space = self.buffer_size - self.len
copylen = remaining < space and remaining or space
src = frames[src_index:src_index + copylen]
if self.len == 0 and copylen == self.buffer_size:
# avoid unnecessary copy
buffer = src
else:
buffer = self.buffer
buffer[self.len:self.len + copylen] = src
remaining -= copylen
src_index += copylen
self.len += copylen
if self.len == self.buffer_size:
yield buffer, (eod and not remaining)
self.len = 0
if eod and self.len:
block = self.buffer
if self.pad:
self.buffer[self.len:self.buffer_size] = 0
else:
block = self.buffer[0:self.len]
yield block, True
self.len = 0 | Returns an iterator over tuples of the form (buffer, eod)
where buffer is a fixed-sized block of data, and eod indicates whether
this is the last block.
In case padding is deactivated the last block may be smaller than
        the buffer size. | Below is the instruction that describes the task:
### Input:
Returns an iterator over tuples of the form (buffer, eod)
where buffer is a fixed-sized block of data, and eod indicates whether
this is the last block.
In case padding is deactivated the last block may be smaller than
the buffer size.
### Response:
def process(self, frames, eod):
"""Returns an iterator over tuples of the form (buffer, eod)
where buffer is a fixed-sized block of data, and eod indicates whether
this is the last block.
In case padding is deactivated the last block may be smaller than
the buffer size.
"""
src_index = 0
remaining = len(frames)
while remaining:
space = self.buffer_size - self.len
copylen = remaining < space and remaining or space
src = frames[src_index:src_index + copylen]
if self.len == 0 and copylen == self.buffer_size:
# avoid unnecessary copy
buffer = src
else:
buffer = self.buffer
buffer[self.len:self.len + copylen] = src
remaining -= copylen
src_index += copylen
self.len += copylen
if self.len == self.buffer_size:
yield buffer, (eod and not remaining)
self.len = 0
if eod and self.len:
block = self.buffer
if self.pad:
self.buffer[self.len:self.buffer_size] = 0
else:
block = self.buffer[0:self.len]
yield block, True
self.len = 0 |
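A condensed, standalone rewrite of the same re-blocking idea for 1-D frames (the original is a stateful processor method, so this is a sketch of the logic, not the class's actual API); the chunk sizes and dtype below are arbitrary.

import numpy as np

def reblock(chunks, block_size, pad=True):
    # accumulate incoming frames and emit fixed-size blocks; eod marks the final block
    buf = np.zeros((0,), dtype=np.float32)
    for i, frames in enumerate(chunks):
        buf = np.concatenate([buf, frames])
        while len(buf) >= block_size:
            yield buf[:block_size], i == len(chunks) - 1 and len(buf) == block_size
            buf = buf[block_size:]
    if len(buf):
        if pad:
            buf = np.pad(buf, (0, block_size - len(buf)))   # zero-pad the tail, as self.pad does above
        yield buf, True

chunks = [np.arange(5, dtype=np.float32), np.arange(9, dtype=np.float32)]
for block, eod in reblock(chunks, 4):
    print(len(block), eod)       # 4 False, 4 False, 4 False, 4 True (last block zero-padded from 2 frames)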
def parameters_to_datetime(self, p):
"""
Given a dictionary of parameters, will extract the ranged task parameter value
"""
dt = p[self._param_name]
return datetime(dt.year, dt.month, dt.day) | Given a dictionary of parameters, will extract the ranged task parameter value | Below is the the instruction that describes the task:
### Input:
Given a dictionary of parameters, will extract the ranged task parameter value
### Response:
def parameters_to_datetime(self, p):
"""
Given a dictionary of parameters, will extract the ranged task parameter value
"""
dt = p[self._param_name]
return datetime(dt.year, dt.month, dt.day) |
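A small runnable sketch; the DailyRange holder and the parameter name are hypothetical stand-ins for the task-range class this method belongs to.

from datetime import date, datetime

class DailyRange:                             # hypothetical holder for the method above
    def __init__(self, param_name):
        self._param_name = param_name
    def parameters_to_datetime(self, p):
        dt = p[self._param_name]
        return datetime(dt.year, dt.month, dt.day)

r = DailyRange("day")
print(r.parameters_to_datetime({"day": date(2021, 3, 14), "other": 42}))   # 2021-03-14 00:00:00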
def mget(self, *keys):
""" -> #list of values at the specified @keys """
keys = list(map(self.get_key, keys))
    return list(map(self._loads, self._client.mget(*keys))) | -> #list of values at the specified @keys | Below is the instruction that describes the task:
### Input:
-> #list of values at the specified @keys
### Response:
def mget(self, *keys):
""" -> #list of values at the specified @keys """
keys = list(map(self.get_key, keys))
return list(map(self._loads, self._client.mget(*keys))) |
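A runnable sketch with an in-memory stand-in for the Redis client; the prefixing get_key and the JSON-based _loads are assumptions, since the record only shows mget itself.

import json

class FakeRedis:                                   # in-memory stand-in so no server is needed
    def __init__(self, data):
        self._data = data
    def mget(self, *keys):
        return [self._data.get(k) for k in keys]

class Cache:
    def __init__(self, client, prefix="cache"):
        self._client = client
        self._prefix = prefix
    def get_key(self, key):                        # assumed key-prefixing helper
        return "%s:%s" % (self._prefix, key)
    def _loads(self, raw):                         # assumed deserializer
        return None if raw is None else json.loads(raw)
    def mget(self, *keys):                         # same body as the record above
        keys = list(map(self.get_key, keys))
        return list(map(self._loads, self._client.mget(*keys)))

store = FakeRedis({"cache:a": "1", "cache:b": "[2, 3]"})
print(Cache(store).mget("a", "b", "missing"))      # [1, [2, 3], None]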
def deleteMetadata(self, remote, address, key):
"""Delete metadata of device"""
try:
return self.proxies["%s-%s" % (self._interface_id, remote)].deleteMetadata(address, key)
except Exception as err:
LOG.debug("ServerThread.deleteMetadata: Exception: %s" % str(err)) | Delete metadata of device | Below is the the instruction that describes the task:
### Input:
Delete metadata of device
### Response:
def deleteMetadata(self, remote, address, key):
"""Delete metadata of device"""
try:
return self.proxies["%s-%s" % (self._interface_id, remote)].deleteMetadata(address, key)
except Exception as err:
LOG.debug("ServerThread.deleteMetadata: Exception: %s" % str(err)) |
def parse_refresh(text):
'''Parses text for HTTP Refresh URL.
Returns:
str, None
'''
match = re.search(r'url\s*=(.+)', text, re.IGNORECASE)
if match:
url = match.group(1)
if url.startswith('"'):
url = url.strip('"')
elif url.startswith("'"):
url = url.strip("'")
return clean_link_soup(url) | Parses text for HTTP Refresh URL.
Returns:
            str, None | Below is the instruction that describes the task:
### Input:
Parses text for HTTP Refresh URL.
Returns:
str, None
### Response:
def parse_refresh(text):
'''Parses text for HTTP Refresh URL.
Returns:
str, None
'''
match = re.search(r'url\s*=(.+)', text, re.IGNORECASE)
if match:
url = match.group(1)
if url.startswith('"'):
url = url.strip('"')
elif url.startswith("'"):
url = url.strip("'")
return clean_link_soup(url) |
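A usage sketch, assuming parse_refresh as defined above is in scope; clean_link_soup is not part of this record, so a simple whitespace-trimming stand-in is used here (the real helper also normalises the URL further).

import re

def clean_link_soup(url):          # stand-in: assumed to trim surrounding whitespace
    return url.strip()

print(parse_refresh("3; url=http://example.com/next"))    # http://example.com/next
print(parse_refresh("0; URL='/relative/path'"))           # /relative/path
print(parse_refresh("no redirect here"))                  # None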
def from_dict(self, description):
"""Configures the task store to be the task_store described
in description"""
assert(self.ident == description['ident'])
self.partitions = description['partitions']
self.indices = description['indices'] | Configures the task store to be the task_store described
        in description | Below is the instruction that describes the task:
### Input:
Configures the task store to be the task_store described
in description
### Response:
def from_dict(self, description):
"""Configures the task store to be the task_store described
in description"""
assert(self.ident == description['ident'])
self.partitions = description['partitions']
self.indices = description['indices'] |
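A runnable sketch around the method; the TaskStoreStub shell and the description dict are hypothetical.

class TaskStoreStub:                              # minimal shell around the method above
    def __init__(self, ident):
        self.ident = ident
        self.partitions = []
        self.indices = {}
    def from_dict(self, description):
        assert self.ident == description['ident']
        self.partitions = description['partitions']
        self.indices = description['indices']

store = TaskStoreStub("jobs")
store.from_dict({"ident": "jobs", "partitions": ["p0", "p1"], "indices": {"by_user": {}}})
print(store.partitions, store.indices)            # ['p0', 'p1'] {'by_user': {}}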