code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k)
---|---|---|
def calculate_check_digit(gtin):
'''Given a GTIN (8-14) or SSCC, calculate its appropriate check digit'''
reverse_gtin = gtin[::-1]
total = 0
count = 0
for char in reverse_gtin:
digit = int(char)
if count % 2 == 0:
digit = digit * 3
total = total + digit
count = count + 1
nearest_multiple_of_ten = int(math.ceil(total / 10.0) * 10)
return nearest_multiple_of_ten - total | Given a GTIN (8-14) or SSCC, calculate its appropriate check digit | Below is the instruction that describes the task:
### Input:
Given a GTIN (8-14) or SSCC, calculate its appropriate check digit
### Response:
def calculate_check_digit(gtin):
'''Given a GTIN (8-14) or SSCC, calculate its appropriate check digit'''
reverse_gtin = gtin[::-1]
total = 0
count = 0
for char in reverse_gtin:
digit = int(char)
if count % 2 == 0:
digit = digit * 3
total = total + digit
count = count + 1
nearest_multiple_of_ten = int(math.ceil(total / 10.0) * 10)
return nearest_multiple_of_ten - total |
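A minimal usage sketch, assuming the host module's import math (calculate_check_digit calls math.ceil); the sample value is the familiar EAN-13 example:

import math  # required by math.ceil inside calculate_check_digit

print(calculate_check_digit('629104150021'))  # -> 3, completing EAN-13 6291041500213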
def _combined_regex(regexes, flags=re.IGNORECASE, use_re2=False, max_mem=None):
"""
Return a compiled regex combined (using OR) from a list of ``regexes``.
If there is nothing to combine, None is returned.
re2 library (https://github.com/axiak/pyre2) often can match and compile
large regexes much faster than stdlib re module (10x is not uncommon),
but there are some gotchas:
* in case of "DFA out of memory" errors use ``max_mem`` argument
to increase the amount of memory re2 is allowed to use.
"""
joined_regexes = "|".join(r for r in regexes if r)
if not joined_regexes:
return None
if use_re2:
import re2
return re2.compile(joined_regexes, flags=flags, max_mem=max_mem)
return re.compile(joined_regexes, flags=flags) | Return a compiled regex combined (using OR) from a list of ``regexes``.
If there is nothing to combine, None is returned.
re2 library (https://github.com/axiak/pyre2) often can match and compile
large regexes much faster than stdlib re module (10x is not uncommon),
but there are some gotchas:
* in case of "DFA out of memory" errors use ``max_mem`` argument
to increase the amount of memory re2 is allowed to use. | Below is the instruction that describes the task:
### Input:
Return a compiled regex combined (using OR) from a list of ``regexes``.
If there is nothing to combine, None is returned.
re2 library (https://github.com/axiak/pyre2) often can match and compile
large regexes much faster than stdlib re module (10x is not uncommon),
but there are some gotchas:
* in case of "DFA out of memory" errors use ``max_mem`` argument
to increase the amount of memory re2 is allowed to use.
### Response:
def _combined_regex(regexes, flags=re.IGNORECASE, use_re2=False, max_mem=None):
"""
Return a compiled regex combined (using OR) from a list of ``regexes``.
If there is nothing to combine, None is returned.
re2 library (https://github.com/axiak/pyre2) often can match and compile
large regexes much faster than stdlib re module (10x is not uncommon),
but there are some gotchas:
* in case of "DFA out of memory" errors use ``max_mem`` argument
to increase the amount of memory re2 is allowed to use.
"""
joined_regexes = "|".join(r for r in regexes if r)
if not joined_regexes:
return None
if use_re2:
import re2
return re2.compile(joined_regexes, flags=flags, max_mem=max_mem)
return re.compile(joined_regexes, flags=flags) |
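A quick usage sketch, assuming the host module's import re (the function reads re.IGNORECASE and re.compile); the patterns are illustrative:

import re  # _combined_regex relies on the stdlib re module

rx = _combined_regex(['foo', '', r'bar\d+'])  # empty entries are skipped
print(rx.pattern)                # foo|bar\d+
print(bool(rx.match('BAR42')))   # True, via the re.IGNORECASE default
print(_combined_regex([]))       # None -- nothing to combine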
def validate_instance_size(self, size):
''' integer between 5-1024 (inclusive) '''
try:
int(size)
except ValueError:
return '*** Error: size must be a whole number between 5 and 1024.'
if int(size) < 5 or int(size) > 1024:
return '*** Error: size must be between 5-1024 GB.'
return True | integer between 5-1024 (inclusive) | Below is the instruction that describes the task:
### Input:
integer between 5-1024 (inclusive)
### Response:
def validate_instance_size(self, size):
''' integer between 5-1024 (inclusive) '''
try:
int(size)
except ValueError:
return '*** Error: size must be a whole number between 5 and 1024.'
if int(size) < 5 or int(size) > 1024:
return '*** Error: size must be between 5-1024 GB.'
return True |
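Since self is never used, the validator can be exercised with a placeholder receiver; a sketch with illustrative inputs:

print(validate_instance_size(None, '20'))    # True
print(validate_instance_size(None, '2000'))  # '*** Error: size must be between 5-1024 GB.'
print(validate_instance_size(None, 'abc'))   # '*** Error: size must be a whole number between 5 and 1024.'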
def get_gdb_response(
self, timeout_sec=DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True
):
"""Get response from GDB, and block while doing so. If GDB does not have any response ready to be read
by timeout_sec, an exception is raised.
Args:
timeout_sec (float): Maximum time to wait for response. Must be >= 0. Will return after timeout_sec elapses if no response arrives sooner.
raise_error_on_timeout (bool): Whether an exception should be raised if no response was found
after timeout_sec
Returns:
List of parsed GDB responses, returned from gdbmiparser.parse_response, with the
additional key 'stream' which is either 'stdout' or 'stderr'
Raises:
GdbTimeoutError if response is not received within timeout_sec
ValueError if select returned unexpected file number
NoGdbProcessError if there is no gdb subprocess running
"""
self.verify_valid_gdb_subprocess()
if timeout_sec < 0:
self.logger.warning("timeout_sec was negative, replacing with 0")
timeout_sec = 0
if USING_WINDOWS:
retval = self._get_responses_windows(timeout_sec)
else:
retval = self._get_responses_unix(timeout_sec)
if not retval and raise_error_on_timeout:
raise GdbTimeoutError(
"Did not get response from gdb after %s seconds" % timeout_sec
)
else:
return retval | Get response from GDB, and block while doing so. If GDB does not have any response ready to be read
by timeout_sec, an exception is raised.
Args:
timeout_sec (float): Maximum time to wait for response. Must be >= 0. Will return after timeout_sec elapses if no response arrives sooner.
raise_error_on_timeout (bool): Whether an exception should be raised if no response was found
after timeout_sec
Returns:
List of parsed GDB responses, returned from gdbmiparser.parse_response, with the
additional key 'stream' which is either 'stdout' or 'stderr'
Raises:
GdbTimeoutError if response is not received within timeout_sec
ValueError if select returned unexpected file number
NoGdbProcessError if there is no gdb subprocess running | Below is the instruction that describes the task:
### Input:
Get response from GDB, and block while doing so. If GDB does not have any response ready to be read
by timeout_sec, an exception is raised.
Args:
timeout_sec (float): Maximum time to wait for response. Must be >= 0. Will return after timeout_sec elapses if no response arrives sooner.
raise_error_on_timeout (bool): Whether an exception should be raised if no response was found
after timeout_sec
Returns:
List of parsed GDB responses, returned from gdbmiparser.parse_response, with the
additional key 'stream' which is either 'stdout' or 'stderr'
Raises:
GdbTimeoutError if response is not received within timeout_sec
ValueError if select returned unexpected file number
NoGdbProcessError if there is no gdb subprocess running
### Response:
def get_gdb_response(
self, timeout_sec=DEFAULT_GDB_TIMEOUT_SEC, raise_error_on_timeout=True
):
"""Get response from GDB, and block while doing so. If GDB does not have any response ready to be read
by timeout_sec, an exception is raised.
Args:
timeout_sec (float): Maximum time to wait for response. Must be >= 0. Will return after timeout_sec elapses if no response arrives sooner.
raise_error_on_timeout (bool): Whether an exception should be raised if no response was found
after timeout_sec
Returns:
List of parsed GDB responses, returned from gdbmiparser.parse_response, with the
additional key 'stream' which is either 'stdout' or 'stderr'
Raises:
GdbTimeoutError if response is not received within timeout_sec
ValueError if select returned unexpected file number
NoGdbProcessError if there is no gdb subprocess running
"""
self.verify_valid_gdb_subprocess()
if timeout_sec < 0:
self.logger.warning("timeout_sec was negative, replacing with 0")
timeout_sec = 0
if USING_WINDOWS:
retval = self._get_responses_windows(timeout_sec)
else:
retval = self._get_responses_unix(timeout_sec)
if not retval and raise_error_on_timeout:
raise GdbTimeoutError(
"Did not get response from gdb after %s seconds" % timeout_sec
)
else:
return retval |
def are_flags_valid(packet_type, flags):
"""True when flags comply with [MQTT-2.2.2-1] requirements based on
packet_type; False otherwise.
Parameters
----------
packet_type: MqttControlPacketType
flags: int
Integer representation of 4-bit MQTT header flags field.
Values outside of the range [0, 15] will certainly cause the
function to return False.
Returns
-------
bool
"""
if packet_type == MqttControlPacketType.publish:
rv = 0 <= flags <= 15
elif packet_type in (MqttControlPacketType.pubrel,
MqttControlPacketType.subscribe,
MqttControlPacketType.unsubscribe):
rv = flags == 2
elif packet_type in (MqttControlPacketType.connect,
MqttControlPacketType.connack,
MqttControlPacketType.puback,
MqttControlPacketType.pubrec,
MqttControlPacketType.pubcomp,
MqttControlPacketType.suback,
MqttControlPacketType.unsuback,
MqttControlPacketType.pingreq,
MqttControlPacketType.pingresp,
MqttControlPacketType.disconnect):
rv = flags == 0
else:
raise NotImplementedError(packet_type)
return rv | True when flags comply with [MQTT-2.2.2-1] requirements based on
packet_type; False otherwise.
Parameters
----------
packet_type: MqttControlPacketType
flags: int
Integer representation of 4-bit MQTT header flags field.
Values outside of the range [0, 15] will certainly cause the
function to return False.
Returns
-------
bool | Below is the instruction that describes the task:
### Input:
True when flags comply with [MQTT-2.2.2-1] requirements based on
packet_type; False otherwise.
Parameters
----------
packet_type: MqttControlPacketType
flags: int
Integer representation of 4-bit MQTT header flags field.
Values outside of the range [0, 15] will certainly cause the
function to return False.
Returns
-------
bool
### Response:
def are_flags_valid(packet_type, flags):
"""True when flags comply with [MQTT-2.2.2-1] requirements based on
packet_type; False otherwise.
Parameters
----------
packet_type: MqttControlPacketType
flags: int
Integer representation of 4-bit MQTT header flags field.
Values outside of the range [0, 15] will certainly cause the
function to return False.
Returns
-------
bool
"""
if packet_type == MqttControlPacketType.publish:
rv = 0 <= flags <= 15
elif packet_type in (MqttControlPacketType.pubrel,
MqttControlPacketType.subscribe,
MqttControlPacketType.unsubscribe):
rv = flags == 2
elif packet_type in (MqttControlPacketType.connect,
MqttControlPacketType.connack,
MqttControlPacketType.puback,
MqttControlPacketType.pubrec,
MqttControlPacketType.pubcomp,
MqttControlPacketType.suback,
MqttControlPacketType.unsuback,
MqttControlPacketType.pingreq,
MqttControlPacketType.pingresp,
MqttControlPacketType.disconnect):
rv = flags == 0
else:
raise NotImplementedError(packet_type)
return rv |
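A self-contained sketch of the dispatch logic; the stand-in enum below is hypothetical (the real MqttControlPacketType ships with the function's own package), but pasting it above the function lets every branch run:

import enum

# Hypothetical stand-in; member values do not matter to are_flags_valid
MqttControlPacketType = enum.Enum('MqttControlPacketType',
    'connect connack publish puback pubrec pubrel pubcomp '
    'subscribe suback unsubscribe unsuback pingreq pingresp disconnect')

print(are_flags_valid(MqttControlPacketType.publish, 9))    # True (0 <= 9 <= 15)
print(are_flags_valid(MqttControlPacketType.subscribe, 2))  # True (must be exactly 2)
print(are_flags_valid(MqttControlPacketType.connect, 1))    # False (must be 0)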
def parse_declaration_expressn_memberaccess(self, lhsAST, rhsAST, es):
"""
Instead of "Class.variablename", use "Class.rv('variablename')".
:param lhsAST:
:param rhsAST:
:param es:
:return:
"""
if isinstance(lhsAST, wdl_parser.Terminal):
es = es + lhsAST.source_string
elif isinstance(lhsAST, wdl_parser.Ast):
raise NotImplementedError
elif isinstance(lhsAST, wdl_parser.AstList):
raise NotImplementedError
es = es + '_'
if isinstance(rhsAST, wdl_parser.Terminal):
es = es + rhsAST.source_string
elif isinstance(rhsAST, wdl_parser.Ast):
raise NotImplementedError
elif isinstance(rhsAST, wdl_parser.AstList):
raise NotImplementedError
return es | Instead of "Class.variablename", use "Class.rv('variablename')".
:param lhsAST:
:param rhsAST:
:param es:
:return: | Below is the instruction that describes the task:
### Input:
Instead of "Class.variablename", use "Class.rv('variablename')".
:param lhsAST:
:param rhsAST:
:param es:
:return:
### Response:
def parse_declaration_expressn_memberaccess(self, lhsAST, rhsAST, es):
"""
Instead of "Class.variablename", use "Class.rv('variablename')".
:param lhsAST:
:param rhsAST:
:param es:
:return:
"""
if isinstance(lhsAST, wdl_parser.Terminal):
es = es + lhsAST.source_string
elif isinstance(lhsAST, wdl_parser.Ast):
raise NotImplementedError
elif isinstance(lhsAST, wdl_parser.AstList):
raise NotImplementedError
es = es + '_'
if isinstance(rhsAST, wdl_parser.Terminal):
es = es + rhsAST.source_string
elif isinstance(rhsAST, wdl_parser.Ast):
raise NotImplementedError
elif isinstance(rhsAST, wdl_parser.AstList):
raise NotImplementedError
return es |
def envvar_constructor(loader, node):
"""Tag constructor to use environment variables in YAML files. Usage:
- !TAG VARIABLE
raises while loading the document if the variable does not exist
- !TAG VARIABLE:=DEFAULT_VALUE
For instance:
credentials:
user: !env USER:=root
group: !env GROUP:= root
"""
value = loader.construct_python_unicode(node)
data = value.split(':=', 1)
if len(data) == 2:
var, default = data
return os.environ.get(var, default)
else:
return os.environ[value] | Tag constructor to use environment variables in YAML files. Usage:
- !TAG VARIABLE
raises while loading the document if the variable does not exist
- !TAG VARIABLE:=DEFAULT_VALUE
For instance:
credentials:
user: !env USER:=root
group: !env GROUP:= root | Below is the instruction that describes the task:
### Input:
Tag constructor to use environment variables in YAML files. Usage:
- !TAG VARIABLE
raises while loading the document if the variable does not exist
- !TAG VARIABLE:=DEFAULT_VALUE
For instance:
credentials:
user: !env USER:=root
group: !env GROUP:= root
### Response:
def envvar_constructor(loader, node):
"""Tag constructor to use environment variables in YAML files. Usage:
- !TAG VARIABLE
raises while loading the document if the variable does not exist
- !TAG VARIABLE:=DEFAULT_VALUE
For instance:
credentials:
user: !env USER:=root
group: !env GROUP:= root
"""
value = loader.construct_python_unicode(node)
data = value.split(':=', 1)
if len(data) == 2:
var, default = data
return os.environ.get(var, default)
else:
return os.environ[value] |
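A hedged registration sketch: the !env tag mirrors the docstring example, and PyYAML's full Loader is assumed to provide construct_python_unicode:

import os
import yaml

yaml.add_constructor('!env', envvar_constructor)  # tag name is our choice

os.environ.setdefault('USER', 'alice')  # illustrative value
print(yaml.load('user: !env USER:=root', Loader=yaml.Loader))
# {'user': 'alice'} -- falls back to 'root' when USER is unset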
def on_close(self, stats, previous_stats):
"""Print the extended JSON report to reporter's output.
:param dict stats: Metrics for the current pylint run
:param dict previous_stats: Metrics for the previous pylint run
"""
reports = {
'messages': self._messages,
'stats': stats,
'previous': previous_stats,
}
print(json.dumps(reports, cls=JSONSetEncoder, indent=4), file=self.out) | Print the extended JSON report to reporter's output.
:param dict stats: Metrics for the current pylint run
:param dict previous_stats: Metrics for the previous pylint run | Below is the instruction that describes the task:
### Input:
Print the extended JSON report to reporter's output.
:param dict stats: Metrics for the current pylint run
:param dict previous_stats: Metrics for the previous pylint run
### Response:
def on_close(self, stats, previous_stats):
"""Print the extended JSON report to reporter's output.
:param dict stats: Metrics for the current pylint run
:param dict previous_stats: Metrics for the previous pylint run
"""
reports = {
'messages': self._messages,
'stats': stats,
'previous': previous_stats,
}
print(json.dumps(reports, cls=JSONSetEncoder, indent=4), file=self.out) |
def skew_y(self, y):
"""Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees
"""
self.root.set("transform", "%s skewY(%f)" %
(self.root.get("transform") or '', y))
return self | Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees | Below is the instruction that describes the task:
### Input:
Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees
### Response:
def skew_y(self, y):
"""Skew element along the y-axis by the given angle.
Parameters
----------
y : float
y-axis skew angle in degrees
"""
self.root.set("transform", "%s skewY(%f)" %
(self.root.get("transform") or '', y))
return self |
def update(self, id, **kwargs):
"""
Updates an existing License
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: License id (required)
:param LicenseRest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_with_http_info(id, **kwargs)
else:
(data) = self.update_with_http_info(id, **kwargs)
return data | Updates an existing License
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: License id (required)
:param LicenseRest body:
:return: None
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Updates an existing License
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: License id (required)
:param LicenseRest body:
:return: None
If the method is called asynchronously,
returns the request thread.
### Response:
def update(self, id, **kwargs):
"""
Updates an existing License
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.update(id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int id: License id (required)
:param LicenseRest body:
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.update_with_http_info(id, **kwargs)
else:
(data) = self.update_with_http_info(id, **kwargs)
return data |
def calc_nfalse(d):
""" Calculate the number of thermal-noise false positives per segment.
"""
dtfactor = n.sum([1./i for i in d['dtarr']]) # assumes dedisperse-all algorithm
ntrials = d['readints'] * dtfactor * len(d['dmarr']) * d['npixx'] * d['npixy']
qfrac = 1 - (erf(d['sigma_image1']/n.sqrt(2)) + 1)/2.
nfalse = int(qfrac*ntrials)
return nfalse | Calculate the number of thermal-noise false positives per segment. | Below is the instruction that describes the task:
### Input:
Calculate the number of thermal-noise false positives per segment.
### Response:
def calc_nfalse(d):
""" Calculate the number of thermal-noise false positives per segment.
"""
dtfactor = n.sum([1./i for i in d['dtarr']]) # assumes dedisperse-all algorithm
ntrials = d['readints'] * dtfactor * len(d['dmarr']) * d['npixx'] * d['npixy']
qfrac = 1 - (erf(d['sigma_image1']/n.sqrt(2)) + 1)/2.
nfalse = int(qfrac*ntrials)
return nfalse |
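A hedged input sketch, assuming (as the code suggests) that n is numpy and erf comes from scipy.special; every dictionary value below is made up purely for illustration:

import numpy as n
from scipy.special import erf  # assumed source of erf

d = {'dtarr': [1, 2, 4], 'dmarr': range(100), 'readints': 1024,
     'npixx': 512, 'npixy': 512, 'sigma_image1': 6.5}  # illustrative only
print(calc_nfalse(d))  # expected thermal-noise false positives per segment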
def toDict(self):
"""
Get information about a title alignment as a dictionary.
@return: A C{dict} representation of the title alignment.
"""
return {
'hsps': [hsp.toDict() for hsp in self.hsps],
'read': self.read.toDict(),
} | Get information about a title alignment as a dictionary.
@return: A C{dict} representation of the title alignment. | Below is the instruction that describes the task:
### Input:
Get information about a title alignment as a dictionary.
@return: A C{dict} representation of the title alignment.
### Response:
def toDict(self):
"""
Get information about a title alignment as a dictionary.
@return: A C{dict} representation of the title alignment.
"""
return {
'hsps': [hsp.toDict() for hsp in self.hsps],
'read': self.read.toDict(),
} |
def cluster_path(cls, project, instance, cluster):
"""Return a fully-qualified cluster string."""
return google.api_core.path_template.expand(
"projects/{project}/instances/{instance}/clusters/{cluster}",
project=project,
instance=instance,
cluster=cluster,
) | Return a fully-qualified cluster string. | Below is the instruction that describes the task:
### Input:
Return a fully-qualified cluster string.
### Response:
def cluster_path(cls, project, instance, cluster):
"""Return a fully-qualified cluster string."""
return google.api_core.path_template.expand(
"projects/{project}/instances/{instance}/clusters/{cluster}",
project=project,
instance=instance,
cluster=cluster,
) |
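For example, the template expands to a plain resource string (the names below are placeholders):

import google.api_core.path_template

print(google.api_core.path_template.expand(
    'projects/{project}/instances/{instance}/clusters/{cluster}',
    project='my-project', instance='my-instance', cluster='my-cluster'))
# projects/my-project/instances/my-instance/clusters/my-cluster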
def listing(
source: list,
ordered: bool = False,
expand_full: bool = False
):
"""
An unordered or ordered list of the specified *source* iterable where
each element is converted to a string representation for display.
:param source:
The iterable to display as a list.
:param ordered:
Whether or not the list should be ordered. If False, which is the
default, an unordered bulleted list is created.
:param expand_full:
Whether or not the list should expand to fill the screen horizontally.
When defaulted to False, the list is constrained to the center view
area of the screen along with other text. This can be useful to keep
lists aligned with the text flow.
"""
r = _get_report()
r.append_body(render.listing(
source=source,
ordered=ordered,
expand_full=expand_full
))
r.stdout_interceptor.write_source('[ADDED] Listing\n') | An unordered or ordered list of the specified *source* iterable where
each element is converted to a string representation for display.
:param source:
The iterable to display as a list.
:param ordered:
Whether or not the list should be ordered. If False, which is the
default, an unordered bulleted list is created.
:param expand_full:
Whether or not the list should expand to fill the screen horizontally.
When defaulted to False, the list is constrained to the center view
area of the screen along with other text. This can be useful to keep
lists aligned with the text flow. | Below is the instruction that describes the task:
### Input:
An unordered or ordered list of the specified *source* iterable where
each element is converted to a string representation for display.
:param source:
The iterable to display as a list.
:param ordered:
Whether or not the list should be ordered. If False, which is the
default, an unordered bulleted list is created.
:param expand_full:
Whether or not the list should expand to fill the screen horizontally.
When defaulted to False, the list is constrained to the center view
area of the screen along with other text. This can be useful to keep
lists aligned with the text flow.
### Response:
def listing(
source: list,
ordered: bool = False,
expand_full: bool = False
):
"""
An unordered or ordered list of the specified *source* iterable where
each element is converted to a string representation for display.
:param source:
The iterable to display as a list.
:param ordered:
Whether or not the list should be ordered. If False, which is the
default, an unordered bulleted list is created.
:param expand_full:
Whether or not the list should expand to fill the screen horizontally.
When defaulted to False, the list is constrained to the center view
area of the screen along with other text. This can be useful to keep
lists aligned with the text flow.
"""
r = _get_report()
r.append_body(render.listing(
source=source,
ordered=ordered,
expand_full=expand_full
))
r.stdout_interceptor.write_source('[ADDED] Listing\n') |
def titled_box(self, titles, contents, tdir='h', cdir='h'):
"""
Helper function to build a box containing a list of elements,
with a title above and/or below, or left and/or right of the
box. (e.g. a class name on top, or brackets on both sides.)
The elements given must already have been transformed into
Tag instances.
Arguments:
titles: A pair of strings to display on top and bottom
(if tdir=='v') or left and right (if tdir=='h').
If either or both titles are None, they will be
omitted.
contents: A list of Tags.
tdir: tdir=='h' (default) means the titles will be on
the left and right. tdir=='v' means they will be
on top and bottom.
cdir: cdir=='h' (default) means the contents will be
stacked horizontally. cdir=='v' means they will
be stacked vertically.
"""
H = self.H
def wrapt(x):
return H.div['hrepr-title'](x)
rval = H.div[f'hrepr-titled-{tdir}']
contents = H.div[f'hrepr-contents-{cdir}'].fill(contents)
if isinstance(titles, tuple) and len(titles) == 2:
open, close = titles
else:
open, close = titles, None
if open:
rval = rval(wrapt(open))
rval = rval(contents)
if close:
rval = rval(wrapt(close))
return rval | Helper function to build a box containing a list of elements,
with a title above and/or below, or left and/or right of the
box. (e.g. a class name on top, or brackets on both sides.)
The elements given must already have been transformed into
Tag instances.
Arguments:
titles: A pair of strings to display on top and bottom
(if tdir=='v') or left and right (if tdir=='h').
If either or both titles are None, they will be
omitted.
contents: A list of Tags.
tdir: tdir=='h' (default) means the titles will be on
the left and right. tdir=='v' means they will be
on top and bottom.
cdir: cdir=='h' (default) means the contents will be
stacked horizontally. cdir=='v' means they will
be stacked vertically. | Below is the instruction that describes the task:
### Input:
Helper function to build a box containing a list of elements,
with a title above and/or below, or left and/or right of the
box. (e.g. a class name on top, or brackets on both sides.)
The elements given must already have been transformed into
Tag instances.
Arguments:
titles: A pair of strings to display on top and bottom
(if tdir=='v') or left and right (if tdir=='h').
If either or both titles are None, they will be
omitted.
contents: A list of Tags.
tdir: tdir=='h' (default) means the titles will be on
the left and right. tdir=='v' means they will be
on top and bottom.
cdir: cdir=='h' (default) means the contents will be
stacked horizontally. cdir=='v' means they will
be stacked vertically.
### Response:
def titled_box(self, titles, contents, tdir='h', cdir='h'):
"""
Helper function to build a box containing a list of elements,
with a title above and/or below, or left and/or right of the
box. (e.g. a class name on top, or brackets on both sides.)
The elements given must already have been transformed into
Tag instances.
Arguments:
titles: A pair of strings to display on top and bottom
(if tdir=='v') or left and right (if tdir=='h').
If either or both titles are None, they will be
omitted.
contents: A list of Tags.
tdir: tdir=='h' (default) means the titles will be on
the left and right. tdir=='v' means they will be
on top and bottom.
cdir: cdir=='h' (default) means the contents will be
stacked horizontally. cdir=='v' means they will
be stacked vertically.
"""
H = self.H
def wrapt(x):
return H.div['hrepr-title'](x)
rval = H.div[f'hrepr-titled-{tdir}']
contents = H.div[f'hrepr-contents-{cdir}'].fill(contents)
if isinstance(titles, tuple) and len(titles) == 2:
open, close = titles
else:
open, close = titles, None
if open:
rval = rval(wrapt(open))
rval = rval(contents)
if close:
rval = rval(wrapt(close))
return rval |
def speak(self, message):
""" Post a message.
Args:
message (:class:`Message` or string): Message
Returns:
bool. Success
"""
campfire = self.get_campfire()
if not isinstance(message, Message):
message = Message(campfire, message)
result = self._connection.post(
"room/%s/speak" % self.id,
{"message": message.get_data()},
parse_data=True,
key="message"
)
if result["success"]:
return Message(campfire, result["data"])
return result["success"] | Post a message.
Args:
message (:class:`Message` or string): Message
Returns:
bool. Success | Below is the instruction that describes the task:
### Input:
Post a message.
Args:
message (:class:`Message` or string): Message
Returns:
bool. Success
### Response:
def speak(self, message):
""" Post a message.
Args:
message (:class:`Message` or string): Message
Returns:
bool. Success
"""
campfire = self.get_campfire()
if not isinstance(message, Message):
message = Message(campfire, message)
result = self._connection.post(
"room/%s/speak" % self.id,
{"message": message.get_data()},
parse_data=True,
key="message"
)
if result["success"]:
return Message(campfire, result["data"])
return result["success"] |
def showEvent(self, event):
"""When this widget is shown it has an effect of putting
other widgets in the parent widget into different editing modes, emits
signal to notify other widgets. Restores the previous selection the last
time this widget was visible"""
selected = self.paramList.selectedIndexes()
model = self.paramList.model()
self.visibilityChanged.emit(1)
if len(selected) > 0:
# select the correct components in the StimulusView
self.paramList.parameterChanged.emit(model.selection(selected[0]))
self.hintRequested.emit('Select parameter to edit. \n\nParameter must have selected components in order to edit fields')
elif model.rowCount() > 0:
# just select first item
self.paramList.selectRow(0)
self.paramList.parameterChanged.emit(model.selection(model.index(0,0)))
self.hintRequested.emit('Select parameter to edit. \n\nParameter must have selected components in order to edit fields')
else:
model.emptied.emit(True)
self.hintRequested.emit('To add a parameter, Drag "Add" onto empty auto-parameter table') | When this widget is shown it has an effect of putting
other widgets in the parent widget into different editing modes, emits
signal to notify other widgets. Restores the previous selection the last
time this widget was visible | Below is the instruction that describes the task:
### Input:
When this widget is shown it has an effect of putting
other widgets in the parent widget into different editing modes, emits
signal to notify other widgets. Restores the previous selection the last
time this widget was visible
### Response:
def showEvent(self, event):
"""When this widget is shown it has an effect of putting
other widgets in the parent widget into different editing modes, emits
signal to notify other widgets. Restores the previous selection the last
time this widget was visible"""
selected = self.paramList.selectedIndexes()
model = self.paramList.model()
self.visibilityChanged.emit(1)
if len(selected) > 0:
# select the correct components in the StimulusView
self.paramList.parameterChanged.emit(model.selection(selected[0]))
self.hintRequested.emit('Select parameter to edit. \n\nParameter must have selected components in order to edit fields')
elif model.rowCount() > 0:
# just select first item
self.paramList.selectRow(0)
self.paramList.parameterChanged.emit(model.selection(model.index(0,0)))
self.hintRequested.emit('Select parameter to edit. \n\nParameter must have selected components in order to edit fields')
else:
model.emptied.emit(True)
self.hintRequested.emit('To add a parameter, Drag "Add" onto empty auto-parameter table') |
def get_syslog(self, service_id, version_number, name):
"""Get the Syslog for a particular service and version."""
content = self._fetch("/service/%s/version/%d/syslog/%s" % (service_id, version_number, name))
return FastlySyslog(self, content) | Get the Syslog for a particular service and version. | Below is the instruction that describes the task:
### Input:
Get the Syslog for a particular service and version.
### Response:
def get_syslog(self, service_id, version_number, name):
"""Get the Syslog for a particular service and version."""
content = self._fetch("/service/%s/version/%d/syslog/%s" % (service_id, version_number, name))
return FastlySyslog(self, content) |
def Join(self):
"""Waits until all outstanding tasks are completed."""
for _ in range(self.JOIN_TIMEOUT_DECISECONDS):
if self._queue.empty() and not self.busy_threads:
return
time.sleep(0.1)
raise ValueError("Timeout during Join() for threadpool %s." % self.name) | Waits until all outstanding tasks are completed. | Below is the the instruction that describes the task:
### Input:
Waits until all outstanding tasks are completed.
### Response:
def Join(self):
"""Waits until all outstanding tasks are completed."""
for _ in range(self.JOIN_TIMEOUT_DECISECONDS):
if self._queue.empty() and not self.busy_threads:
return
time.sleep(0.1)
raise ValueError("Timeout during Join() for threadpool %s." % self.name) |
def select_seqs(ol,seqs):
'''
from elist.elist import *
ol = ['a','b','c','d']
select_seqs(ol,[1,2])
'''
rslt =copy.deepcopy(ol)
rslt = itemgetter(*seqs)(ol)
if(seqs.__len__()==0):
rslt = []
elif(seqs.__len__()==1):
rslt = [rslt]
else:
rslt = list(rslt)
return(rslt) | from elist.elist import *
ol = ['a','b','c','d']
select_seqs(ol,[1,2]) | Below is the instruction that describes the task:
### Input:
from elist.elist import *
ol = ['a','b','c','d']
select_seqs(ol,[1,2])
### Response:
def select_seqs(ol,seqs):
'''
from elist.elist import *
ol = ['a','b','c','d']
select_seqs(ol,[1,2])
'''
rslt =copy.deepcopy(ol)
rslt = itemgetter(*seqs)(ol)
if(seqs.__len__()==0):
rslt = []
elif(seqs.__len__()==1):
rslt = [rslt]
else:
rslt = list(rslt)
return(rslt) |
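Completing the docstring example, with the imports the function's host module is assumed to provide:

import copy
from operator import itemgetter  # both assumed by select_seqs

ol = ['a', 'b', 'c', 'd']
print(select_seqs(ol, [1, 2]))  # ['b', 'c']
print(select_seqs(ol, [3]))     # ['d']
# Note: seqs=[] raises TypeError at itemgetter(*seqs) before the empty-list
# guard runs, so the rslt = [] branch is effectively unreachable.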
def apply_customization(self, serializer, customization):
"""
Applies fields customization to a nested or embedded DocumentSerializer.
"""
# apply fields or exclude
if customization.fields is not None:
if len(customization.fields) == 0:
# customization fields are empty, set Meta.fields to '__all__'
serializer.Meta.fields = ALL_FIELDS
else:
serializer.Meta.fields = customization.fields
if customization.exclude is not None:
serializer.Meta.exclude = customization.exclude
# apply extra_kwargs
if customization.extra_kwargs is not None:
serializer.Meta.extra_kwargs = customization.extra_kwargs
# apply validate_methods
for method_name, method in customization.validate_methods.items():
setattr(serializer, method_name, method) | Applies fields customization to a nested or embedded DocumentSerializer. | Below is the instruction that describes the task:
### Input:
Applies fields customization to a nested or embedded DocumentSerializer.
### Response:
def apply_customization(self, serializer, customization):
"""
Applies fields customization to a nested or embedded DocumentSerializer.
"""
# apply fields or exclude
if customization.fields is not None:
if len(customization.fields) == 0:
# customization fields are empty, set Meta.fields to '__all__'
serializer.Meta.fields = ALL_FIELDS
else:
serializer.Meta.fields = customization.fields
if customization.exclude is not None:
serializer.Meta.exclude = customization.exclude
# apply extra_kwargs
if customization.extra_kwargs is not None:
serializer.Meta.extra_kwargs = customization.extra_kwargs
# apply validate_methods
for method_name, method in customization.validate_methods.items():
setattr(serializer, method_name, method) |
def _match_dfs_expr(lo_meta, expr, tt):
"""
Use the given expression to get all data frames that match the criteria (i.e. "paleo measurement tables")
:param dict lo_meta: Lipd object metadata
:param str expr: Search expression
:param str tt: Table type (chron or paleo)
:return list: All filenames that match the expression
"""
logger_dataframes.info("enter match_dfs_expr")
filenames = []
s = "{}Data".format(tt)
# Top table level. Going through all tables of certain type (i.e. chron or paleo)
for k, v in lo_meta["{}Data".format(tt)].items():
# Inner table level. Get data from one specific table
if "measurement" in expr:
for k1, v1 in v["{}MeasurementTable".format(tt)].items():
try:
f = v1["filename"]
if f.endswith(".csv"):
filenames.append(f)
except KeyError:
# Not concerned if the key wasn't found.
logger_dataframes.info("match_dfs_expr: KeyError: filename not found in: {} {}".format(tt, "measurement"))
elif "ensemble" in expr:
for k1, v1 in v["{}Model".format(tt)].items():
try:
f = v1["ensembleTable"]["filename"]
if f.endswith(".csv"):
filenames.append(f)
except KeyError:
# Not concerned if the key wasn't found.
logger_dataframes.info("match_dfs_expr: KeyError: filename not found in: {} {}".format(tt, "ensemble"))
elif "model" in expr:
for k1, v1 in v["{}Model".format(tt)].items():
try:
f = v1["{}ModelTable".format(tt)]["filename"]
if f.endswith(".csv"):
filenames.append(f)
except KeyError:
# Not concerned if the key wasn't found.
logger_dataframes.info("match_dfs_expr: KeyError: filename not found in: {} {}".format(tt, "model"))
elif "dist" in expr:
for k1, v1 in v["{}Model".format(tt)].items():
for k2, v2 in v1["distribution"].items():
try:
f = v2["filename"]
if f.endswith(".csv"):
filenames.append(f)
except KeyError:
# Not concerned if the key wasn't found.
logger_dataframes.info(
"match_dfs_expr: KeyError: filename not found in: {} {}".format(tt, "dist"))
logger_dataframes.info("exit match_dfs_expr")
return filenames | Use the given expression to get all data frames that match the criteria (i.e. "paleo measurement tables")
:param dict lo_meta: Lipd object metadata
:param str expr: Search expression
:param str tt: Table type (chron or paleo)
:return list: All filenames that match the expression | Below is the instruction that describes the task:
### Input:
Use the given expression to get all data frames that match the criteria (i.e. "paleo measurement tables")
:param dict lo_meta: Lipd object metadata
:param str expr: Search expression
:param str tt: Table type (chron or paleo)
:return list: All filenames that match the expression
### Response:
def _match_dfs_expr(lo_meta, expr, tt):
"""
Use the given expression to get all data frames that match the criteria (i.e. "paleo measurement tables")
:param dict lo_meta: Lipd object metadata
:param str expr: Search expression
:param str tt: Table type (chron or paleo)
:return list: All filenames that match the expression
"""
logger_dataframes.info("enter match_dfs_expr")
filenames = []
s = "{}Data".format(tt)
# Top table level. Going through all tables of certain type (i.e. chron or paleo)
for k, v in lo_meta["{}Data".format(tt)].items():
# Inner table level. Get data from one specific table
if "measurement" in expr:
for k1, v1 in v["{}MeasurementTable".format(tt)].items():
try:
f = v1["filename"]
if f.endswith(".csv"):
filenames.append(f)
except KeyError:
# Not concerned if the key wasn't found.
logger_dataframes.info("match_dfs_expr: KeyError: filename not found in: {} {}".format(tt, "measurement"))
elif "ensemble" in expr:
for k1, v1 in v["{}Model".format(tt)].items():
try:
f = v1["ensembleTable"]["filename"]
if f.endswith(".csv"):
filenames.append(f)
except KeyError:
# Not concerned if the key wasn't found.
logger_dataframes.info("match_dfs_expr: KeyError: filename not found in: {} {}".format(tt, "ensemble"))
elif "model" in expr:
for k1, v1 in v["{}Model".format(tt)].items():
try:
f = v1["{}ModelTable".format(tt)]["filename"]
if f.endswith(".csv"):
filenames.append(f)
except KeyError:
# Not concerned if the key wasn't found.
logger_dataframes.info("match_dfs_expr: KeyError: filename not found in: {} {}".format(tt, "model"))
elif "dist" in expr:
for k1, v1 in v["{}Model".format(tt)].items():
for k2, v2 in v1["distribution"].items():
try:
f = v2["filename"]
if f.endswith(".csv"):
filenames.append(f)
except KeyError:
# Not concerned if the key wasn't found.
logger_dataframes.info(
"match_dfs_expr: KeyError: filename not found in: {} {}".format(tt, "dist"))
logger_dataframes.info("exit match_dfs_expr")
return filenames |
def _serialize_lnk(lnk):
"""Serialize a predication lnk to surface form into the SimpleMRS
encoding."""
s = ""
if lnk is not None:
s = '<'
if lnk.type == Lnk.CHARSPAN:
cfrom, cto = lnk.data
s += ''.join([str(cfrom), ':', str(cto)])
elif lnk.type == Lnk.CHARTSPAN:
cfrom, cto = lnk.data
s += ''.join([str(cfrom), '#', str(cto)])
elif lnk.type == Lnk.TOKENS:
s += ' '.join([str(t) for t in lnk.data])
elif lnk.type == Lnk.EDGE:
s += ''.join(['@', str(lnk.data)])
s += '>'
return s | Serialize a predication lnk to surface form into the SimpleMRS
encoding. | Below is the instruction that describes the task:
### Input:
Serialize a predication lnk to surface form into the SimpleMRS
encoding.
### Response:
def _serialize_lnk(lnk):
"""Serialize a predication lnk to surface form into the SimpleMRS
encoding."""
s = ""
if lnk is not None:
s = '<'
if lnk.type == Lnk.CHARSPAN:
cfrom, cto = lnk.data
s += ''.join([str(cfrom), ':', str(cto)])
elif lnk.type == Lnk.CHARTSPAN:
cfrom, cto = lnk.data
s += ''.join([str(cfrom), '#', str(cto)])
elif lnk.type == Lnk.TOKENS:
s += ' '.join([str(t) for t in lnk.data])
elif lnk.type == Lnk.EDGE:
s += ''.join(['@', str(lnk.data)])
s += '>'
return s |
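A hedged example, assuming pydelphin's Lnk class (the import path below matches recent pydelphin releases and is an assumption here):

from delphin.lnk import Lnk

print(_serialize_lnk(Lnk.charspan(0, 5)))  # <0:5>
print(repr(_serialize_lnk(None)))          # '' -- no lnk, empty string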
def _set_pspf_timer(self, v, load=False):
"""
Setter method for pspf_timer, mapped from YANG variable /isis_state/router_isis_config/pspf_timer (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_pspf_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pspf_timer() directly.
YANG Description: Timer for IS-IS Partial SPF calculation for IPv4
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=pspf_timer.pspf_timer, is_container='container', presence=False, yang_name="pspf-timer", rest_name="pspf-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-pspf-timer', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """pspf_timer must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=pspf_timer.pspf_timer, is_container='container', presence=False, yang_name="pspf-timer", rest_name="pspf-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-pspf-timer', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
})
self.__pspf_timer = t
if hasattr(self, '_set'):
self._set() | Setter method for pspf_timer, mapped from YANG variable /isis_state/router_isis_config/pspf_timer (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_pspf_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pspf_timer() directly.
YANG Description: Timer for IS-IS Partial SPF calculation for IPv4 | Below is the instruction that describes the task:
### Input:
Setter method for pspf_timer, mapped from YANG variable /isis_state/router_isis_config/pspf_timer (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_pspf_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pspf_timer() directly.
YANG Description: Timer for IS-IS Partial SPF calculation for IPv4
### Response:
def _set_pspf_timer(self, v, load=False):
"""
Setter method for pspf_timer, mapped from YANG variable /isis_state/router_isis_config/pspf_timer (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_pspf_timer is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_pspf_timer() directly.
YANG Description: Timer for IS-IS Partial SPF calculation for IPv4
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=pspf_timer.pspf_timer, is_container='container', presence=False, yang_name="pspf-timer", rest_name="pspf-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-pspf-timer', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)
except (TypeError, ValueError):
raise ValueError({
'error-string': """pspf_timer must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=pspf_timer.pspf_timer, is_container='container', presence=False, yang_name="pspf-timer", rest_name="pspf-timer", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'isis-pspf-timer', u'cli-suppress-show-path': None}}, namespace='urn:brocade.com:mgmt:brocade-isis-operational', defining_module='brocade-isis-operational', yang_type='container', is_config=False)""",
})
self.__pspf_timer = t
if hasattr(self, '_set'):
self._set() |
def process_sentence(sentence, start_word="<S>", end_word="</S>"):
"""Seperate a sentence string into a list of string words, add start_word and end_word,
see ``create_vocab()`` and ``tutorial_tfrecord3.py``.
Parameters
----------
sentence : str
A sentence.
start_word : str or None
The start word. If None, no start word will be appended.
end_word : str or None
The end word. If None, no end word will be appended.
Returns
---------
list of str
A list of strings that separated into words.
Examples
-----------
>>> c = "how are you?"
>>> c = tl.nlp.process_sentence(c)
>>> print(c)
['<S>', 'how', 'are', 'you', '?', '</S>']
Notes
-------
- You have to install the following package.
- `Installing NLTK <http://www.nltk.org/install.html>`__
- `Installing NLTK data <http://www.nltk.org/data.html>`__
"""
if start_word is not None:
process_sentence = [start_word]
else:
process_sentence = []
process_sentence.extend(nltk.tokenize.word_tokenize(sentence.lower()))
if end_word is not None:
process_sentence.append(end_word)
return process_sentence | Separate a sentence string into a list of string words, add start_word and end_word,
see ``create_vocab()`` and ``tutorial_tfrecord3.py``.
Parameters
----------
sentence : str
A sentence.
start_word : str or None
The start word. If None, no start word will be appended.
end_word : str or None
The end word. If None, no end word will be appended.
Returns
---------
list of str
A list of strings that separated into words.
Examples
-----------
>>> c = "how are you?"
>>> c = tl.nlp.process_sentence(c)
>>> print(c)
['<S>', 'how', 'are', 'you', '?', '</S>']
Notes
-------
- You have to install the following package.
- `Installing NLTK <http://www.nltk.org/install.html>`__
- `Installing NLTK data <http://www.nltk.org/data.html>`__ | Below is the instruction that describes the task:
### Input:
Separate a sentence string into a list of string words, add start_word and end_word,
see ``create_vocab()`` and ``tutorial_tfrecord3.py``.
Parameters
----------
sentence : str
A sentence.
start_word : str or None
The start word. If None, no start word will be appended.
end_word : str or None
The end word. If None, no end word will be appended.
Returns
---------
list of str
A list of strings that separated into words.
Examples
-----------
>>> c = "how are you?"
>>> c = tl.nlp.process_sentence(c)
>>> print(c)
['<S>', 'how', 'are', 'you', '?', '</S>']
Notes
-------
- You have to install the following package.
- `Installing NLTK <http://www.nltk.org/install.html>`__
- `Installing NLTK data <http://www.nltk.org/data.html>`__
### Response:
def process_sentence(sentence, start_word="<S>", end_word="</S>"):
"""Seperate a sentence string into a list of string words, add start_word and end_word,
see ``create_vocab()`` and ``tutorial_tfrecord3.py``.
Parameters
----------
sentence : str
A sentence.
start_word : str or None
The start word. If None, no start word will be appended.
end_word : str or None
The end word. If None, no end word will be appended.
Returns
---------
list of str
A list of strings that separated into words.
Examples
-----------
>>> c = "how are you?"
>>> c = tl.nlp.process_sentence(c)
>>> print(c)
['<S>', 'how', 'are', 'you', '?', '</S>']
Notes
-------
- You have to install the following package.
- `Installing NLTK <http://www.nltk.org/install.html>`__
- `Installing NLTK data <http://www.nltk.org/data.html>`__
"""
if start_word is not None:
process_sentence = [start_word]
else:
process_sentence = []
process_sentence.extend(nltk.tokenize.word_tokenize(sentence.lower()))
if end_word is not None:
process_sentence.append(end_word)
return process_sentence |
def _recover_shape_information(self, inputs, outputs):
"""Recover output tensor shape value to enable shape inference.
The batch size of `inputs` isn't preserved by the convolution op. Calculate
what the proper output shape will be for `outputs`.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`.
outputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`. The output of `inputs` from a transpose
convolution op.
Returns:
outputs: The passed-in `outputs` with all shape information filled in.
"""
batch_size_value = inputs.get_shape()[0]
if self._data_format.startswith("NC"):
output_shape_value = ((batch_size_value, self.output_channels) +
self.output_shape)
elif self._data_format.startswith("N") and self._data_format.endswith("C"):
output_shape_value = ((batch_size_value,) + self.output_shape +
(self.output_channels,))
outputs.set_shape(output_shape_value)
return outputs | Recover output tensor shape value to enable shape inference.
The batch size of `inputs` isn't preserved by the convolution op. Calculate
what the proper output shape will be for `outputs`.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`.
outputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`. The output of `inputs` from a transpose
convolution op.
Returns:
outputs: The passed-in `outputs` with all shape information filled in. | Below is the instruction that describes the task:
### Input:
Recover output tensor shape value to enable shape inference.
The batch size of `inputs` isn't preserved by the convolution op. Calculate
what the proper output shape will be for `outputs`.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`.
outputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`. The output of `inputs` from a transpose
convolution op.
Returns:
outputs: The passed-in `outputs` with all shape information filled in.
### Response:
def _recover_shape_information(self, inputs, outputs):
"""Recover output tensor shape value to enable shape inference.
The batch size of `inputs` isn't preserved by the convolution op. Calculate
what the proper output shape will be for `outputs`.
Args:
inputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`.
outputs: A Tensor of shape `data_format` and of type `tf.float16`,
`tf.bfloat16` or `tf.float32`. The output of `inputs` from a transpose
convolution op.
Returns:
outputs: The passed-in `outputs` with all shape information filled in.
"""
batch_size_value = inputs.get_shape()[0]
if self._data_format.startswith("NC"):
output_shape_value = ((batch_size_value, self.output_channels) +
self.output_shape)
elif self._data_format.startswith("N") and self._data_format.endswith("C"):
output_shape_value = ((batch_size_value,) + self.output_shape +
(self.output_channels,))
outputs.set_shape(output_shape_value)
return outputs |
def status(self):
"""
Get server status. Uses GET to /status interface.
:Returns: (dict) Server status as described `here <https://cloud.knuverse.com/docs/api/#api-General-Status>`_.
"""
response = self._get(url.status)
self._check_response(response, 200)
return self._create_response(response) | Get server status. Uses GET to /status interface.
:Returns: (dict) Server status as described `here <https://cloud.knuverse.com/docs/api/#api-General-Status>`_. | Below is the instruction that describes the task:
### Input:
Get server status. Uses GET to /status interface.
:Returns: (dict) Server status as described `here <https://cloud.knuverse.com/docs/api/#api-General-Status>`_.
### Response:
def status(self):
"""
Get server status. Uses GET to /status interface.
:Returns: (dict) Server status as described `here <https://cloud.knuverse.com/docs/api/#api-General-Status>`_.
"""
response = self._get(url.status)
self._check_response(response, 200)
return self._create_response(response) |
def _fulfill(self, bits, ignore_nonpromised_bits=False):
"""Supply the promise with the bits from its associated primitive's execution.
The fulfillment process must walk the promise chain backwards
until it reaches the original promise and can supply the final
value.
The data that comes in can either be all a bit read for every
bit written by the associated primitive, or (if the primitive
supports it), only the bits that are used by promises. The
ignore_nonpromised_bits flag specifies which format the
incoming data is in.
Args:
bits: A bitarray (or compatible) containing the data read from the jtag controller's TDO pin.
ignore_nonpromised_bits: A boolean specifying if only promised bits are being returned (and thus the 2nd index of the promise must be used for slicing the incoming data).
"""
if self._allsubsfulfilled():
if not self._components:
if ignore_nonpromised_bits:
self._value = bits[self._bitstartselective:
self._bitstartselective +
self._bitlength]
else:
self._value = bits[self._bitstart:self._bitend]
else:
self._value = self._components[0][0]._value
for sub, offset in self._components[1:]:
self._value += sub._value
if self._parent is not None:
self._parent._fulfill(None) | Supply the promise with the bits from its associated primitive's execution.
The fulfillment process must walk the promise chain backwards
until it reaches the original promise and can supply the final
value.
The data that comes in can either be all a bit read for every
bit written by the associated primitive, or (if the primitive
supports it), only the bits that are used by promises. The
ignore_nonpromised_bits flag specifies which format the
incoming data is in.
Args:
bits: A bitarray (or compatible) containing the data read from the jtag controller's TDO pin.
ignore_nonpromised_bits: A boolean specifying if only promised bits are being returned (and thus the 2nd index of the promise must be used for slicing the incoming data). | Below is the instruction that describes the task:
### Input:
Supply the promise with the bits from its associated primitive's execution.
The fulfillment process must walk the promise chain backwards
until it reaches the original promise and can supply the final
value.
The data that comes in can either be all a bit read for every
bit written by the associated primitive, or (if the primitive
supports it), only the bits that are used by promises. The
ignore_nonpromised_bits flag specifies which format the
incoming data is in.
Args:
bits: A bitarray (or compatible) containing the data read from the jtag controller's TDO pin.
ignore_nonpromised_bits: A boolean specifying if only promised bits are being returned (and thus the 2nd index of the promise must be used for slicing the incoming data).
### Response:
def _fulfill(self, bits, ignore_nonpromised_bits=False):
"""Supply the promise with the bits from its associated primitive's execution.
The fulfillment process must walk the promise chain backwards
until it reaches the original promise and can supply the final
value.
The data that comes in can either be all a bit read for every
bit written by the associated primitive, or (if the primitive
supports it), only the bits that are used by promises. The
ignore_nonpromised_bits flag specifies which format the
incoming data is in.
Args:
bits: A bitarray (or compatible) containing the data read from the jtag controller's TDO pin.
ignore_nonpromised_bits: A boolean specifying if only promised bits are being returned (and thus the 2nd index of the promise must be used for slicing the incoming data).
"""
if self._allsubsfulfilled():
if not self._components:
if ignore_nonpromised_bits:
self._value = bits[self._bitstartselective:
self._bitstartselective +
self._bitlength]
else:
self._value = bits[self._bitstart:self._bitend]
else:
self._value = self._components[0][0]._value
for sub, offset in self._components[1:]:
self._value += sub._value
if self._parent is not None:
self._parent._fulfill(None) |
def coupling(self, source_y, target_y, weight):
"""How to couple the output of one subsystem to the input of another.
This is a fallback default coupling function that should usually be
replaced with your own.
This example coupling function takes the mean of all variables of the
source subsystem and uses that value weighted by the connection
strength to drive all variables of the target subsystem.
Arguments:
source_y (array of shape (d,)): State of the source subsystem.
target_y (array of shape (d,)): State of target subsystem.
weight (float): the connection strength for this connection.
Returns:
input (array of shape (d,)): Values to drive each variable of the
target system.
"""
return np.ones_like(target_y)*np.mean(source_y)*weight | How to couple the output of one subsystem to the input of another.
This is a fallback default coupling function that should usually be
replaced with your own.
This example coupling function takes the mean of all variables of the
source subsystem and uses that value weighted by the connection
strength to drive all variables of the target subsystem.
Arguments:
source_y (array of shape (d,)): State of the source subsystem.
target_y (array of shape (d,)): State of target subsystem.
weight (float): the connection strength for this connection.
Returns:
input (array of shape (d,)): Values to drive each variable of the
        target system. | Below is the instruction that describes the task:
### Input:
How to couple the output of one subsystem to the input of another.
This is a fallback default coupling function that should usually be
replaced with your own.
This example coupling function takes the mean of all variables of the
source subsystem and uses that value weighted by the connection
strength to drive all variables of the target subsystem.
Arguments:
source_y (array of shape (d,)): State of the source subsystem.
target_y (array of shape (d,)): State of target subsystem.
weight (float): the connection strength for this connection.
Returns:
input (array of shape (d,)): Values to drive each variable of the
target system.
### Response:
def coupling(self, source_y, target_y, weight):
"""How to couple the output of one subsystem to the input of another.
This is a fallback default coupling function that should usually be
replaced with your own.
This example coupling function takes the mean of all variables of the
source subsystem and uses that value weighted by the connection
strength to drive all variables of the target subsystem.
Arguments:
source_y (array of shape (d,)): State of the source subsystem.
target_y (array of shape (d,)): State of target subsystem.
weight (float): the connection strength for this connection.
Returns:
input (array of shape (d,)): Values to drive each variable of the
target system.
"""
return np.ones_like(target_y)*np.mean(source_y)*weight |
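
A quick numpy check of what this default coupling produces; the inputs here are made up for illustration:

import numpy as np

source_y = np.array([0.2, 0.8, 1.0])   # hypothetical source state
target_y = np.zeros(4)                 # hypothetical target state
weight = 0.5
# mean(source) * weight, broadcast across every target variable
drive = np.ones_like(target_y) * np.mean(source_y) * weight
print(drive)  # [0.33333333 0.33333333 0.33333333 0.33333333]
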
def add_virtual_columns_cartesian_velocities_to_polar(self, x="x", y="y", vx="vx", radius_polar=None, vy="vy", vr_out="vr_polar", vazimuth_out="vphi_polar",
propagate_uncertainties=False,):
"""Convert cartesian to polar velocities.
:param x:
:param y:
:param vx:
    :param radius_polar: Optional expression for the radius, may lead to better performance when given.
:param vy:
:param vr_out:
:param vazimuth_out:
:param propagate_uncertainties: {propagate_uncertainties}
:return:
"""
x = self._expr(x)
y = self._expr(y)
vx = self._expr(vx)
vy = self._expr(vy)
if radius_polar is None:
radius_polar = np.sqrt(x**2 + y**2)
radius_polar = self._expr(radius_polar)
self[vr_out] = (x*vx + y*vy) / radius_polar
self[vazimuth_out] = (x*vy - y*vx) / radius_polar
if propagate_uncertainties:
self.propagate_uncertainties([self[vr_out], self[vazimuth_out]]) | Convert cartesian to polar velocities.
:param x:
:param y:
:param vx:
    :param radius_polar: Optional expression for the radius, may lead to better performance when given.
:param vy:
:param vr_out:
:param vazimuth_out:
:param propagate_uncertainties: {propagate_uncertainties}
    :return: | Below is the instruction that describes the task:
### Input:
Convert cartesian to polar velocities.
:param x:
:param y:
:param vx:
    :param radius_polar: Optional expression for the radius, may lead to better performance when given.
:param vy:
:param vr_out:
:param vazimuth_out:
:param propagate_uncertainties: {propagate_uncertainties}
:return:
### Response:
def add_virtual_columns_cartesian_velocities_to_polar(self, x="x", y="y", vx="vx", radius_polar=None, vy="vy", vr_out="vr_polar", vazimuth_out="vphi_polar",
propagate_uncertainties=False,):
"""Convert cartesian to polar velocities.
:param x:
:param y:
:param vx:
    :param radius_polar: Optional expression for the radius, may lead to better performance when given.
:param vy:
:param vr_out:
:param vazimuth_out:
:param propagate_uncertainties: {propagate_uncertainties}
:return:
"""
x = self._expr(x)
y = self._expr(y)
vx = self._expr(vx)
vy = self._expr(vy)
if radius_polar is None:
radius_polar = np.sqrt(x**2 + y**2)
radius_polar = self._expr(radius_polar)
self[vr_out] = (x*vx + y*vy) / radius_polar
self[vazimuth_out] = (x*vy - y*vx) / radius_polar
if propagate_uncertainties:
self.propagate_uncertainties([self[vr_out], self[vazimuth_out]]) |
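
A small standalone check of the formulas above (pure numpy, no vaex objects): a particle on the x-axis moving in +y should have zero radial velocity and a purely azimuthal one.

import numpy as np

x, y = 2.0, 0.0      # position on the x-axis
vx, vy = 0.0, 3.0    # velocity in +y
r = np.sqrt(x**2 + y**2)
vr = (x * vx + y * vy) / r    # -> 0.0
vphi = (x * vy - y * vx) / r  # -> 3.0
print(vr, vphi)
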
def system(session, py):
"""Run the system test suite."""
# Sanity check: Only run system tests if the environment variable is set.
if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
session.skip('Credentials must be set via environment variable.')
# Run the system tests against latest Python 2 and Python 3 only.
session.interpreter = 'python{}'.format(py)
# Set the virtualenv dirname.
session.virtualenv_dirname = 'sys-' + py
# Install all test dependencies.
session.install('-r', 'requirements-test.txt')
# Install dev packages into the virtualenv's dist-packages.
_install_dev_packages(session)
# Run py.test against the system tests.
session.run(
'py.test',
'-s',
'tests/system/',
*session.posargs
    ) | Run the system test suite. | Below is the instruction that describes the task:
### Input:
Run the system test suite.
### Response:
def system(session, py):
"""Run the system test suite."""
# Sanity check: Only run system tests if the environment variable is set.
if not os.environ.get('GOOGLE_APPLICATION_CREDENTIALS', ''):
session.skip('Credentials must be set via environment variable.')
# Run the system tests against latest Python 2 and Python 3 only.
session.interpreter = 'python{}'.format(py)
# Set the virtualenv dirname.
session.virtualenv_dirname = 'sys-' + py
# Install all test dependencies.
session.install('-r', 'requirements-test.txt')
# Install dev packages into the virtualenv's dist-packages.
_install_dev_packages(session)
# Run py.test against the system tests.
session.run(
'py.test',
'-s',
'tests/system/',
*session.posargs
) |
def formatTime(self, record, datefmt=None):
"""Format the log timestamp."""
_seconds_fraction = record.created - int(record.created)
_datetime_utc = time.mktime(time.gmtime(record.created))
_datetime_utc += _seconds_fraction
_created = self.converter(_datetime_utc)
if datefmt:
time_string = _created.strftime(datefmt)
else:
time_string = _created.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
time_string = "%s,%03d" % (time_string, record.msecs)
    return time_string | Format the log timestamp. | Below is the instruction that describes the task:
### Input:
Format the log timestamp.
### Response:
def formatTime(self, record, datefmt=None):
"""Format the log timestamp."""
_seconds_fraction = record.created - int(record.created)
_datetime_utc = time.mktime(time.gmtime(record.created))
_datetime_utc += _seconds_fraction
_created = self.converter(_datetime_utc)
if datefmt:
time_string = _created.strftime(datefmt)
else:
time_string = _created.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
time_string = "%s,%03d" % (time_string, record.msecs)
return time_string |
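
A standalone sketch of the same idea, with datetime.fromtimestamp standing in for the formatter's converter: keep the fractional seconds that time.gmtime() would otherwise drop.

import time
from datetime import datetime

created = time.time()
seconds_fraction = created - int(created)
utc_seconds = time.mktime(time.gmtime(created)) + seconds_fraction
stamp = datetime.fromtimestamp(utc_seconds)   # converter stand-in
print(stamp.strftime('%Y-%m-%dT%H:%M:%S.%fZ'))
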
def quadratic_forms(h1, h2):
r"""
    Quadratic forms metric.
Notes
-----
UNDER DEVELOPMENT
This distance measure shows very strange behaviour. The expression
    transpose(h1-h2) * A * (h1-h2) yields negative values that cannot be processed by the
square root. Some examples::
h1 h2 transpose(h1-h2) * A * (h1-h2)
[1, 0] to [0.0, 1.0] : -2.0
[1, 0] to [0.5, 0.5] : 0.0
[1, 0] to [0.6666666666666667, 0.3333333333333333] : 0.111111111111
[1, 0] to [0.75, 0.25] : 0.0833333333333
[1, 0] to [0.8, 0.2] : 0.06
[1, 0] to [0.8333333333333334, 0.16666666666666666] : 0.0444444444444
[1, 0] to [0.8571428571428572, 0.14285714285714285] : 0.0340136054422
[1, 0] to [0.875, 0.125] : 0.0267857142857
[1, 0] to [0.8888888888888888, 0.1111111111111111] : 0.0216049382716
[1, 0] to [0.9, 0.1] : 0.0177777777778
[1, 0] to [1, 0]: 0.0
    It is clearly undesirable to receive negative values and even worse to get a value
    of zero in cases other than identical histograms.
"""
h1, h2 = __prepare_histogram(h1, h2)
A = __quadratic_forms_matrix_euclidean(h1, h2)
return math.sqrt((h1-h2).dot(A.dot(h1-h2))) | r"""
    Quadratic forms metric.
Notes
-----
UNDER DEVELOPMENT
This distance measure shows very strange behaviour. The expression
    transpose(h1-h2) * A * (h1-h2) yields negative values that cannot be processed by the
square root. Some examples::
h1 h2 transpose(h1-h2) * A * (h1-h2)
[1, 0] to [0.0, 1.0] : -2.0
[1, 0] to [0.5, 0.5] : 0.0
[1, 0] to [0.6666666666666667, 0.3333333333333333] : 0.111111111111
[1, 0] to [0.75, 0.25] : 0.0833333333333
[1, 0] to [0.8, 0.2] : 0.06
[1, 0] to [0.8333333333333334, 0.16666666666666666] : 0.0444444444444
[1, 0] to [0.8571428571428572, 0.14285714285714285] : 0.0340136054422
[1, 0] to [0.875, 0.125] : 0.0267857142857
[1, 0] to [0.8888888888888888, 0.1111111111111111] : 0.0216049382716
[1, 0] to [0.9, 0.1] : 0.0177777777778
[1, 0] to [1, 0]: 0.0
    It is clearly undesirable to receive negative values and even worse to get a value
    of zero in cases other than identical histograms. | Below is the instruction that describes the task:
### Input:
r"""
    Quadratic forms metric.
Notes
-----
UNDER DEVELOPMENT
This distance measure shows very strange behaviour. The expression
    transpose(h1-h2) * A * (h1-h2) yields negative values that cannot be processed by the
square root. Some examples::
h1 h2 transpose(h1-h2) * A * (h1-h2)
[1, 0] to [0.0, 1.0] : -2.0
[1, 0] to [0.5, 0.5] : 0.0
[1, 0] to [0.6666666666666667, 0.3333333333333333] : 0.111111111111
[1, 0] to [0.75, 0.25] : 0.0833333333333
[1, 0] to [0.8, 0.2] : 0.06
[1, 0] to [0.8333333333333334, 0.16666666666666666] : 0.0444444444444
[1, 0] to [0.8571428571428572, 0.14285714285714285] : 0.0340136054422
[1, 0] to [0.875, 0.125] : 0.0267857142857
[1, 0] to [0.8888888888888888, 0.1111111111111111] : 0.0216049382716
[1, 0] to [0.9, 0.1] : 0.0177777777778
[1, 0] to [1, 0]: 0.0
    It is clearly undesirable to receive negative values and even worse to get a value
    of zero in cases other than identical histograms.
### Response:
def quadratic_forms(h1, h2):
r"""
    Quadratic forms metric.
Notes
-----
UNDER DEVELOPMENT
This distance measure shows very strange behaviour. The expression
    transpose(h1-h2) * A * (h1-h2) yields negative values that cannot be processed by the
square root. Some examples::
h1 h2 transpose(h1-h2) * A * (h1-h2)
[1, 0] to [0.0, 1.0] : -2.0
[1, 0] to [0.5, 0.5] : 0.0
[1, 0] to [0.6666666666666667, 0.3333333333333333] : 0.111111111111
[1, 0] to [0.75, 0.25] : 0.0833333333333
[1, 0] to [0.8, 0.2] : 0.06
[1, 0] to [0.8333333333333334, 0.16666666666666666] : 0.0444444444444
[1, 0] to [0.8571428571428572, 0.14285714285714285] : 0.0340136054422
[1, 0] to [0.875, 0.125] : 0.0267857142857
[1, 0] to [0.8888888888888888, 0.1111111111111111] : 0.0216049382716
[1, 0] to [0.9, 0.1] : 0.0177777777778
[1, 0] to [1, 0]: 0.0
    It is clearly undesirable to receive negative values and even worse to get a value
    of zero in cases other than identical histograms.
"""
h1, h2 = __prepare_histogram(h1, h2)
A = __quadratic_forms_matrix_euclidean(h1, h2)
return math.sqrt((h1-h2).dot(A.dot(h1-h2))) |
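
For illustration, a self-contained version of the transpose(h1-h2) * A * (h1-h2) computation; the similarity matrix here is a guess, not the module's private __quadratic_forms_matrix_euclidean, which evidently yields different (sometimes negative) values:

import numpy as np

h1 = np.array([1.0, 0.0])
h2 = np.array([0.0, 1.0])
A = np.array([[1.0, 0.5],      # hypothetical bin-similarity matrix;
              [0.5, 1.0]])     # the docstring reports -2.0 with the real one
d = h1 - h2
print(d.dot(A.dot(d)))          # 1.0 with this A
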
def get_all_instances(include_fastboot=False):
"""Create AndroidDevice instances for all attached android devices.
Args:
include_fastboot: Whether to include devices in bootloader mode or not.
Returns:
A list of AndroidDevice objects each representing an android device
attached to the computer.
"""
if include_fastboot:
serial_list = list_adb_devices() + list_fastboot_devices()
return get_instances(serial_list)
return get_instances(list_adb_devices()) | Create AndroidDevice instances for all attached android devices.
Args:
include_fastboot: Whether to include devices in bootloader mode or not.
Returns:
A list of AndroidDevice objects each representing an android device
        attached to the computer. | Below is the instruction that describes the task:
### Input:
Create AndroidDevice instances for all attached android devices.
Args:
include_fastboot: Whether to include devices in bootloader mode or not.
Returns:
A list of AndroidDevice objects each representing an android device
attached to the computer.
### Response:
def get_all_instances(include_fastboot=False):
"""Create AndroidDevice instances for all attached android devices.
Args:
include_fastboot: Whether to include devices in bootloader mode or not.
Returns:
A list of AndroidDevice objects each representing an android device
attached to the computer.
"""
if include_fastboot:
serial_list = list_adb_devices() + list_fastboot_devices()
return get_instances(serial_list)
return get_instances(list_adb_devices()) |
def gist(self, id_num):
"""Gets the gist using the specified id number.
:param int id_num: (required), unique id of the gist
:returns: :class:`Gist <github3.gists.Gist>`
"""
url = self._build_url('gists', str(id_num))
json = self._json(self._get(url), 200)
return Gist(json, self) if json else None | Gets the gist using the specified id number.
:param int id_num: (required), unique id of the gist
    :returns: :class:`Gist <github3.gists.Gist>` | Below is the instruction that describes the task:
### Input:
Gets the gist using the specified id number.
:param int id_num: (required), unique id of the gist
:returns: :class:`Gist <github3.gists.Gist>`
### Response:
def gist(self, id_num):
"""Gets the gist using the specified id number.
:param int id_num: (required), unique id of the gist
:returns: :class:`Gist <github3.gists.Gist>`
"""
url = self._build_url('gists', str(id_num))
json = self._json(self._get(url), 200)
return Gist(json, self) if json else None |
def _get_segmentation_id(self, netid, segid, source):
"""Allocate segmentation id. """
return self.seg_drvr.allocate_segmentation_id(netid, seg_id=segid,
                                                      source=source) | Allocate segmentation id. | Below is the instruction that describes the task:
### Input:
Allocate segmentation id.
### Response:
def _get_segmentation_id(self, netid, segid, source):
"""Allocate segmentation id. """
return self.seg_drvr.allocate_segmentation_id(netid, seg_id=segid,
source=source) |
def ack(self):
"""Acknowledge Message.
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
if not self._method:
raise AMQPMessageError(
'Message.ack only available on incoming messages'
)
self._channel.basic.ack(delivery_tag=self.delivery_tag) | Acknowledge Message.
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
    :return: | Below is the instruction that describes the task:
### Input:
Acknowledge Message.
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
### Response:
def ack(self):
"""Acknowledge Message.
:raises AMQPInvalidArgument: Invalid Parameters
:raises AMQPChannelError: Raises if the channel encountered an error.
:raises AMQPConnectionError: Raises if the connection
encountered an error.
:return:
"""
if not self._method:
raise AMQPMessageError(
'Message.ack only available on incoming messages'
)
self._channel.basic.ack(delivery_tag=self.delivery_tag) |
def read_alignment(out_sam, loci, seqs, out_file):
"""read which seqs map to which loci and
return a tab separated file"""
hits = defaultdict(list)
with open(out_file, "w") as out_handle:
samfile = pysam.Samfile(out_sam, "r")
for a in samfile.fetch():
if not a.is_unmapped:
nm = int([t[1] for t in a.tags if t[0] == "NM"][0])
a = makeBED(a)
if not a:
continue
ref, locus = get_loci(samfile.getrname(int(a.chr)), loci)
hits[a.name].append((nm, "%s %s %s %s %s %s" % (a.name, a.name.split("-")[0], locus, ref, a.start, a.end)))
for hit in hits.values():
nm = hit[0][0]
for l in hit:
if nm == l[0]:
print(l[1], file=out_handle)
    return out_file | Read which seqs map to which loci and
    return a tab-separated file | Below is the instruction that describes the task:
### Input:
Read which seqs map to which loci and
return a tab-separated file
### Response:
def read_alignment(out_sam, loci, seqs, out_file):
"""read which seqs map to which loci and
return a tab separated file"""
hits = defaultdict(list)
with open(out_file, "w") as out_handle:
samfile = pysam.Samfile(out_sam, "r")
for a in samfile.fetch():
if not a.is_unmapped:
nm = int([t[1] for t in a.tags if t[0] == "NM"][0])
a = makeBED(a)
if not a:
continue
ref, locus = get_loci(samfile.getrname(int(a.chr)), loci)
hits[a.name].append((nm, "%s %s %s %s %s %s" % (a.name, a.name.split("-")[0], locus, ref, a.start, a.end)))
for hit in hits.values():
nm = hit[0][0]
for l in hit:
if nm == l[0]:
print(l[1], file=out_handle)
return out_file |
def take_screen_shot_to_array(self, screen_id, width, height, bitmap_format):
"""Takes a guest screen shot of the requested size and format
and returns it as an array of bytes.
in screen_id of type int
The guest monitor to take screenshot from.
in width of type int
Desired image width.
in height of type int
Desired image height.
in bitmap_format of type :class:`BitmapFormat`
The requested format.
return screen_data of type str
Array with resulting screen data.
"""
if not isinstance(screen_id, baseinteger):
raise TypeError("screen_id can only be an instance of type baseinteger")
if not isinstance(width, baseinteger):
raise TypeError("width can only be an instance of type baseinteger")
if not isinstance(height, baseinteger):
raise TypeError("height can only be an instance of type baseinteger")
if not isinstance(bitmap_format, BitmapFormat):
raise TypeError("bitmap_format can only be an instance of type BitmapFormat")
screen_data = self._call("takeScreenShotToArray",
in_p=[screen_id, width, height, bitmap_format])
return screen_data | Takes a guest screen shot of the requested size and format
and returns it as an array of bytes.
in screen_id of type int
The guest monitor to take screenshot from.
in width of type int
Desired image width.
in height of type int
Desired image height.
in bitmap_format of type :class:`BitmapFormat`
The requested format.
return screen_data of type str
        Array with resulting screen data. | Below is the instruction that describes the task:
### Input:
Takes a guest screen shot of the requested size and format
and returns it as an array of bytes.
in screen_id of type int
The guest monitor to take screenshot from.
in width of type int
Desired image width.
in height of type int
Desired image height.
in bitmap_format of type :class:`BitmapFormat`
The requested format.
return screen_data of type str
Array with resulting screen data.
### Response:
def take_screen_shot_to_array(self, screen_id, width, height, bitmap_format):
"""Takes a guest screen shot of the requested size and format
and returns it as an array of bytes.
in screen_id of type int
The guest monitor to take screenshot from.
in width of type int
Desired image width.
in height of type int
Desired image height.
in bitmap_format of type :class:`BitmapFormat`
The requested format.
return screen_data of type str
Array with resulting screen data.
"""
if not isinstance(screen_id, baseinteger):
raise TypeError("screen_id can only be an instance of type baseinteger")
if not isinstance(width, baseinteger):
raise TypeError("width can only be an instance of type baseinteger")
if not isinstance(height, baseinteger):
raise TypeError("height can only be an instance of type baseinteger")
if not isinstance(bitmap_format, BitmapFormat):
raise TypeError("bitmap_format can only be an instance of type BitmapFormat")
screen_data = self._call("takeScreenShotToArray",
in_p=[screen_id, width, height, bitmap_format])
return screen_data |
def get_scene(self):
"""
    - get_scene(): It returns the x and y position, the smoothing length
of the particles and the index of the particles that are active in
the scene. In principle this is an internal function and you don't
need this data.
"""
        return self._x, self._y, self._hsml, self._m, self._kview | - get_scene(): It returns the x and y position, the smoothing length
of the particles and the index of the particles that are active in
the scene. In principle this is an internal function and you don't
    need this data. | Below is the instruction that describes the task:
### Input:
- get_scene(): It returns the x and y position, the smoothing length
of the particles and the index of the particles that are active in
the scene. In principle this is an internal function and you don't
need this data.
### Response:
def get_scene(self):
"""
    - get_scene(): It returns the x and y position, the smoothing length
of the particles and the index of the particles that are active in
the scene. In principle this is an internal function and you don't
need this data.
"""
return self._x, self._y, self._hsml, self._m, self._kview |
def fromxml(node):
"""Create a Parameter instance (of any class derived from AbstractParameter!) given its XML description. Node can be a string containing XML or an lxml _Element"""
if not isinstance(node,ElementTree._Element): #pylint: disable=protected-access
node = ElementTree.parse(StringIO(node)).getroot()
if node.tag in globals():
id = ''
paramflag = ''
name = ''
description = ''
kwargs = {}
error = None
for attrib, value in node.attrib.items():
if attrib == 'id':
id = value
elif attrib == 'paramflag':
paramflag = value
elif attrib == 'name':
name = value
elif attrib == 'description':
description = value
elif attrib == 'error':
error = value
else:
kwargs[attrib] = value
#extra parsing for choice parameter (TODO: put in a better spot)
if 'multi' in kwargs and (kwargs['multi'] == 'yes' or kwargs['multi'] == '1' or kwargs['multi'] == 'true'):
kwargs['value'] = []
for subtag in node: #parse possible subtags
if subtag.tag == 'choice': #extra parsing for choice parameter (TODO: put in a better spot)
if 'choices' not in kwargs: kwargs['choices'] = {}
kwargs['choices'][subtag.attrib['id']] = subtag.text
if 'selected' in subtag.attrib and (subtag.attrib['selected'] == '1' or subtag.attrib['selected'] == 'yes'):
if 'multi' in kwargs and (kwargs['multi'] == 'yes' or kwargs['multi'] == '1' or kwargs['multi'] == 'true'):
kwargs['value'].append(subtag.attrib['id'])
else:
kwargs['value'] = subtag.attrib['id']
parameter = globals()[node.tag](id, name, description, **kwargs) #return parameter instance
if error:
parameter.error = error #prevent error from getting reset
return parameter
else:
raise Exception("No such parameter exists: " + node.tag) | Create a Parameter instance (of any class derived from AbstractParameter!) given its XML description. Node can be a string containing XML or an lxml _Element | Below is the the instruction that describes the task:
### Input:
Create a Parameter instance (of any class derived from AbstractParameter!) given its XML description. Node can be a string containing XML or an lxml _Element
### Response:
def fromxml(node):
"""Create a Parameter instance (of any class derived from AbstractParameter!) given its XML description. Node can be a string containing XML or an lxml _Element"""
if not isinstance(node,ElementTree._Element): #pylint: disable=protected-access
node = ElementTree.parse(StringIO(node)).getroot()
if node.tag in globals():
id = ''
paramflag = ''
name = ''
description = ''
kwargs = {}
error = None
for attrib, value in node.attrib.items():
if attrib == 'id':
id = value
elif attrib == 'paramflag':
paramflag = value
elif attrib == 'name':
name = value
elif attrib == 'description':
description = value
elif attrib == 'error':
error = value
else:
kwargs[attrib] = value
#extra parsing for choice parameter (TODO: put in a better spot)
if 'multi' in kwargs and (kwargs['multi'] == 'yes' or kwargs['multi'] == '1' or kwargs['multi'] == 'true'):
kwargs['value'] = []
for subtag in node: #parse possible subtags
if subtag.tag == 'choice': #extra parsing for choice parameter (TODO: put in a better spot)
if 'choices' not in kwargs: kwargs['choices'] = {}
kwargs['choices'][subtag.attrib['id']] = subtag.text
if 'selected' in subtag.attrib and (subtag.attrib['selected'] == '1' or subtag.attrib['selected'] == 'yes'):
if 'multi' in kwargs and (kwargs['multi'] == 'yes' or kwargs['multi'] == '1' or kwargs['multi'] == 'true'):
kwargs['value'].append(subtag.attrib['id'])
else:
kwargs['value'] = subtag.attrib['id']
parameter = globals()[node.tag](id, name, description, **kwargs) #return parameter instance
if error:
parameter.error = error #prevent error from getting reset
return parameter
else:
raise Exception("No such parameter exists: " + node.tag) |
def cyclic(self):
"Returns True if the options cycle, otherwise False"
        return any(isinstance(val, Cycle) for val in self.kwargs.values()) | Returns True if the options cycle, otherwise False | Below is the instruction that describes the task:
### Input:
Returns True if the options cycle, otherwise False
### Response:
def cyclic(self):
"Returns True if the options cycle, otherwise False"
return any(isinstance(val, Cycle) for val in self.kwargs.values()) |
def nodePop(ctxt):
"""Pops the top element node from the node stack """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.nodePop(ctxt__o)
if ret is None:raise treeError('nodePop() failed')
    return xmlNode(_obj=ret) | Pops the top element node from the node stack | Below is the instruction that describes the task:
### Input:
Pops the top element node from the node stack
### Response:
def nodePop(ctxt):
"""Pops the top element node from the node stack """
if ctxt is None: ctxt__o = None
else: ctxt__o = ctxt._o
ret = libxml2mod.nodePop(ctxt__o)
if ret is None:raise treeError('nodePop() failed')
return xmlNode(_obj=ret) |
def json_loads(cls, s, **kwargs):
"""
A rewrap of json.loads done for one reason - to inject a custom `cls` kwarg
:param s:
:param kwargs:
:return:
:rtype: dict
"""
if 'cls' not in kwargs:
kwargs['cls'] = cls.json_decoder
return json.loads(s, **kwargs) | A rewrap of json.loads done for one reason - to inject a custom `cls` kwarg
:param s:
:param kwargs:
:return:
    :rtype: dict | Below is the instruction that describes the task:
### Input:
A rewrap of json.loads done for one reason - to inject a custom `cls` kwarg
:param s:
:param kwargs:
:return:
:rtype: dict
### Response:
def json_loads(cls, s, **kwargs):
"""
A rewrap of json.loads done for one reason - to inject a custom `cls` kwarg
:param s:
:param kwargs:
:return:
:rtype: dict
"""
if 'cls' not in kwargs:
kwargs['cls'] = cls.json_decoder
return json.loads(s, **kwargs) |
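
A minimal sketch of the pattern this classmethod enables, with a hypothetical PointDecoder standing in for cls.json_decoder:

import json

class PointDecoder(json.JSONDecoder):
    # Hypothetical decoder: turns {"x": .., "y": ..} objects into tuples.
    def __init__(self, **kwargs):
        kwargs.setdefault('object_hook', self.to_point)
        super().__init__(**kwargs)

    @staticmethod
    def to_point(obj):
        return (obj['x'], obj['y']) if set(obj) == {'x', 'y'} else obj

class Api:
    json_decoder = PointDecoder   # what `cls.json_decoder` refers to here

    @classmethod
    def json_loads(cls, s, **kwargs):
        if 'cls' not in kwargs:
            kwargs['cls'] = cls.json_decoder
        return json.loads(s, **kwargs)

print(Api.json_loads('{"x": 1, "y": 2}'))   # (1, 2)
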
def _get_nets_radb(self, *args, **kwargs):
"""
Deprecated. This will be removed in a future release.
"""
from warnings import warn
warn('ASNOrigin._get_nets_radb() has been deprecated and will be '
'removed. You should now use ASNOrigin.get_nets_radb().')
        return self.get_nets_radb(*args, **kwargs) | Deprecated. This will be removed in a future release. | Below is the instruction that describes the task:
### Input:
Deprecated. This will be removed in a future release.
### Response:
def _get_nets_radb(self, *args, **kwargs):
"""
Deprecated. This will be removed in a future release.
"""
from warnings import warn
warn('ASNOrigin._get_nets_radb() has been deprecated and will be '
'removed. You should now use ASNOrigin.get_nets_radb().')
return self.get_nets_radb(*args, **kwargs) |
def stSpectralEntropy(X, n_short_blocks=10):
"""Computes the spectral entropy"""
L = len(X) # number of frame samples
Eol = numpy.sum(X ** 2) # total spectral energy
sub_win_len = int(numpy.floor(L / n_short_blocks)) # length of sub-frame
if L != sub_win_len * n_short_blocks:
X = X[0:sub_win_len * n_short_blocks]
sub_wins = X.reshape(sub_win_len, n_short_blocks, order='F').copy() # define sub-frames (using matrix reshape)
s = numpy.sum(sub_wins ** 2, axis=0) / (Eol + eps) # compute spectral sub-energies
En = -numpy.sum(s*numpy.log2(s + eps)) # compute spectral entropy
    return En | Computes the spectral entropy | Below is the instruction that describes the task:
### Input:
Computes the spectral entropy
### Response:
def stSpectralEntropy(X, n_short_blocks=10):
"""Computes the spectral entropy"""
L = len(X) # number of frame samples
Eol = numpy.sum(X ** 2) # total spectral energy
sub_win_len = int(numpy.floor(L / n_short_blocks)) # length of sub-frame
if L != sub_win_len * n_short_blocks:
X = X[0:sub_win_len * n_short_blocks]
sub_wins = X.reshape(sub_win_len, n_short_blocks, order='F').copy() # define sub-frames (using matrix reshape)
s = numpy.sum(sub_wins ** 2, axis=0) / (Eol + eps) # compute spectral sub-energies
En = -numpy.sum(s*numpy.log2(s + eps)) # compute spectral entropy
return En |
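
Assuming the function above is in scope together with a module-level eps (not shown in the snippet; 1e-8 is a guess), a flat spectrum should maximize the entropy and a single spike should minimize it:

import numpy
eps = 1e-8                       # assumed value of the unshown constant

flat = numpy.ones(100)
spike = numpy.zeros(100)
spike[0] = 1.0
print(stSpectralEntropy(flat))   # ~log2(10): uniform over the 10 sub-blocks
print(stSpectralEntropy(spike))  # ~0: all energy in one sub-block
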
def _resolve_by_callback(request, url, urlconf=None):
"""
Finds a view function by urlconf. If the function has attribute
'navigation', it is used as breadcrumb title. Such title can be either a
callable or an object with `__unicode__` attribute. If it is callable, it
must follow the views API (i.e. the only required argument is request
object). It is also expected to return a `unicode` value.
"""
try:
callback, args, kwargs = _resolve_url(url, request, urlconf=urlconf)
except urlresolvers.Resolver404:
return None
bc = getattr(callback, 'breadcrumb', None)
if bc is None:
bc = getattr(callback, 'navigation', None)
if bc is not None: # pragma: nocover
import warnings
warnings.warn('The "navigation" attribute is deprecated, use '
'"breadcrumb" instead.')
if bc is None:
return None
if hasattr(bc, '__call__'):
# the breadcrumb is a function with an API identical to that of views.
try:
title = bc(request, *args, **kwargs)
except http.Http404:
return None
assert isinstance(title, basestring), (
'Breadcrumb function must return Unicode, not %s' % title)
else:
title = unicode(bc) # handle i18n proxy objects
return Crumb(url, title) | Finds a view function by urlconf. If the function has attribute
'navigation', it is used as breadcrumb title. Such title can be either a
callable or an object with `__unicode__` attribute. If it is callable, it
must follow the views API (i.e. the only required argument is request
    object). It is also expected to return a `unicode` value. | Below is the instruction that describes the task:
### Input:
Finds a view function by urlconf. If the function has attribute
'navigation', it is used as breadcrumb title. Such title can be either a
callable or an object with `__unicode__` attribute. If it is callable, it
must follow the views API (i.e. the only required argument is request
object). It is also expected to return a `unicode` value.
### Response:
def _resolve_by_callback(request, url, urlconf=None):
"""
Finds a view function by urlconf. If the function has attribute
'navigation', it is used as breadcrumb title. Such title can be either a
callable or an object with `__unicode__` attribute. If it is callable, it
must follow the views API (i.e. the only required argument is request
object). It is also expected to return a `unicode` value.
"""
try:
callback, args, kwargs = _resolve_url(url, request, urlconf=urlconf)
except urlresolvers.Resolver404:
return None
bc = getattr(callback, 'breadcrumb', None)
if bc is None:
bc = getattr(callback, 'navigation', None)
if bc is not None: # pragma: nocover
import warnings
warnings.warn('The "navigation" attribute is deprecated, use '
'"breadcrumb" instead.')
if bc is None:
return None
if hasattr(bc, '__call__'):
# the breadcrumb is a function with an API identical to that of views.
try:
title = bc(request, *args, **kwargs)
except http.Http404:
return None
assert isinstance(title, basestring), (
'Breadcrumb function must return Unicode, not %s' % title)
else:
title = unicode(bc) # handle i18n proxy objects
return Crumb(url, title) |
def correct_bounding_box_list_for_nonzero_origin(bbox_list, full_box_list):
"""The bounding box calculated from an image has coordinates relative to the
lower-left point in the PDF being at zero. Similarly, Ghostscript reports a
bounding box relative to a zero lower-left point. If the MediaBox (or full
page box) has been shifted, like when cropping a previously cropped
document, then we need to correct the bounding box by an additive
translation on all the points."""
corrected_box_list = []
for bbox, full_box in zip(bbox_list, full_box_list):
left_x = full_box[0]
lower_y = full_box[1]
corrected_box_list.append([bbox[0]+left_x, bbox[1]+lower_y,
bbox[2]+left_x, bbox[3]+lower_y])
return corrected_box_list | The bounding box calculated from an image has coordinates relative to the
lower-left point in the PDF being at zero. Similarly, Ghostscript reports a
bounding box relative to a zero lower-left point. If the MediaBox (or full
page box) has been shifted, like when cropping a previously cropped
document, then we need to correct the bounding box by an additive
    translation on all the points. | Below is the instruction that describes the task:
### Input:
The bounding box calculated from an image has coordinates relative to the
lower-left point in the PDF being at zero. Similarly, Ghostscript reports a
bounding box relative to a zero lower-left point. If the MediaBox (or full
page box) has been shifted, like when cropping a previously cropped
document, then we need to correct the bounding box by an additive
translation on all the points.
### Response:
def correct_bounding_box_list_for_nonzero_origin(bbox_list, full_box_list):
"""The bounding box calculated from an image has coordinates relative to the
lower-left point in the PDF being at zero. Similarly, Ghostscript reports a
bounding box relative to a zero lower-left point. If the MediaBox (or full
page box) has been shifted, like when cropping a previously cropped
document, then we need to correct the bounding box by an additive
translation on all the points."""
corrected_box_list = []
for bbox, full_box in zip(bbox_list, full_box_list):
left_x = full_box[0]
lower_y = full_box[1]
corrected_box_list.append([bbox[0]+left_x, bbox[1]+lower_y,
bbox[2]+left_x, bbox[3]+lower_y])
return corrected_box_list |
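
A tiny worked example: a page whose full box starts at (10, 20) shifts every bounding box by that offset.

bbox_list = [[0, 0, 100, 50]]
full_box_list = [[10, 20, 612, 792]]
print(correct_bounding_box_list_for_nonzero_origin(bbox_list, full_box_list))
# -> [[10, 20, 110, 70]]
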
def config_to_string(config):
"""Nice output string for the config, which is a nested defaultdict.
Args:
config (defaultdict(defaultdict)): The configuration information.
Returns:
str: A human-readable output string detailing the contents of the config.
"""
output = []
for section, section_content in config.items():
output.append("[{}]".format(section))
for option, option_value in section_content.items():
output.append("{} = {}".format(option, option_value))
return "\n".join(output) | Nice output string for the config, which is a nested defaultdict.
Args:
config (defaultdict(defaultdict)): The configuration information.
Returns:
        str: A human-readable output string detailing the contents of the config. | Below is the instruction that describes the task:
### Input:
Nice output string for the config, which is a nested defaultdict.
Args:
config (defaultdict(defaultdict)): The configuration information.
Returns:
str: A human-readable output string detailing the contents of the config.
### Response:
def config_to_string(config):
"""Nice output string for the config, which is a nested defaultdict.
Args:
config (defaultdict(defaultdict)): The configuration information.
Returns:
str: A human-readable output string detailing the contents of the config.
"""
output = []
for section, section_content in config.items():
output.append("[{}]".format(section))
for option, option_value in section_content.items():
output.append("{} = {}".format(option, option_value))
return "\n".join(output) |
def zthread_fork(ctx, func, *args, **kwargs):
"""
Create an attached thread. An attached thread gets a ctx and a PAIR
pipe back to its parent. It must monitor its pipe, and exit if the
pipe becomes unreadable. Returns pipe, or NULL if there was an error.
"""
a = ctx.socket(zmq.PAIR)
a.setsockopt(zmq.LINGER, 0)
a.setsockopt(zmq.RCVHWM, 100)
a.setsockopt(zmq.SNDHWM, 100)
a.setsockopt(zmq.SNDTIMEO, 5000)
a.setsockopt(zmq.RCVTIMEO, 5000)
b = ctx.socket(zmq.PAIR)
b.setsockopt(zmq.LINGER, 0)
b.setsockopt(zmq.RCVHWM, 100)
b.setsockopt(zmq.SNDHWM, 100)
b.setsockopt(zmq.SNDTIMEO, 5000)
    b.setsockopt(zmq.RCVTIMEO, 5000)
iface = "inproc://%s" % binascii.hexlify(os.urandom(8))
a.bind(iface)
b.connect(iface)
thread = threading.Thread(target=func, args=((ctx, b) + args), kwargs=kwargs)
thread.daemon = False
thread.start()
return a | Create an attached thread. An attached thread gets a ctx and a PAIR
pipe back to its parent. It must monitor its pipe, and exit if the
    pipe becomes unreadable. Returns pipe, or NULL if there was an error. | Below is the instruction that describes the task:
### Input:
Create an attached thread. An attached thread gets a ctx and a PAIR
pipe back to its parent. It must monitor its pipe, and exit if the
pipe becomes unreadable. Returns pipe, or NULL if there was an error.
### Response:
def zthread_fork(ctx, func, *args, **kwargs):
"""
Create an attached thread. An attached thread gets a ctx and a PAIR
pipe back to its parent. It must monitor its pipe, and exit if the
pipe becomes unreadable. Returns pipe, or NULL if there was an error.
"""
a = ctx.socket(zmq.PAIR)
a.setsockopt(zmq.LINGER, 0)
a.setsockopt(zmq.RCVHWM, 100)
a.setsockopt(zmq.SNDHWM, 100)
a.setsockopt(zmq.SNDTIMEO, 5000)
a.setsockopt(zmq.RCVTIMEO, 5000)
b = ctx.socket(zmq.PAIR)
b.setsockopt(zmq.LINGER, 0)
b.setsockopt(zmq.RCVHWM, 100)
b.setsockopt(zmq.SNDHWM, 100)
b.setsockopt(zmq.SNDTIMEO, 5000)
    b.setsockopt(zmq.RCVTIMEO, 5000)
iface = "inproc://%s" % binascii.hexlify(os.urandom(8))
a.bind(iface)
b.connect(iface)
thread = threading.Thread(target=func, args=((ctx, b) + args), kwargs=kwargs)
thread.daemon = False
thread.start()
return a |
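
A usage sketch, assuming zthread_fork and its imports (zmq, os, binascii, threading) are in scope; the worker function and message are invented:

import zmq

def worker(ctx, pipe):
    # Echo one message back to the parent, then let the thread exit.
    msg = pipe.recv()
    pipe.send(b'ack: ' + msg)

ctx = zmq.Context()
pipe = zthread_fork(ctx, worker)
pipe.send(b'ping')
print(pipe.recv())   # b'ack: ping'
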
def set_header(self,header):
"""
Sets the header of the object
@type header: L{CHeader}
@param header: the header object
"""
self.header = header
self.root.insert(0,header.get_node()) | Sets the header of the object
@type header: L{CHeader}
        @param header: the header object | Below is the instruction that describes the task:
### Input:
Sets the header of the object
@type header: L{CHeader}
@param header: the header object
### Response:
def set_header(self,header):
"""
Sets the header of the object
@type header: L{CHeader}
@param header: the header object
"""
self.header = header
self.root.insert(0,header.get_node()) |
def requiv_to_pot_contact(requiv, q, sma, compno=1):
"""
:param requiv: user-provided equivalent radius
:param q: mass ratio
:param sma: semi-major axis (d = sma because we explicitly assume circular orbits for contacts)
:param compno: 1 for primary, 2 for secondary
:return: potential and fillout factor
"""
logger.debug("requiv_to_pot_contact(requiv={}, q={}, sma={}, compno={})".format(requiv, q, sma, compno))
# since the functions called here work with normalized r, we need to set d=D=sma=1.
# or provide sma as a function parameter and normalize r here as requiv = requiv/sma
requiv = requiv/sma
vequiv = 4./3*np.pi*requiv**3
d = 1.
F = 1.
logger.debug("libphoebe.roche_contact_Omega_at_partial_vol(vol={}, phi=pi/2, q={}, d={}, choice={})".format(vequiv, q, d, compno-1))
return libphoebe.roche_contact_Omega_at_partial_vol(vequiv, np.pi/2, q, d, choice=compno-1) | :param requiv: user-provided equivalent radius
:param q: mass ratio
:param sma: semi-major axis (d = sma because we explicitly assume circular orbits for contacts)
:param compno: 1 for primary, 2 for secondary
    :return: potential and fillout factor | Below is the instruction that describes the task:
### Input:
:param requiv: user-provided equivalent radius
:param q: mass ratio
:param sma: semi-major axis (d = sma because we explicitly assume circular orbits for contacts)
:param compno: 1 for primary, 2 for secondary
:return: potential and fillout factor
### Response:
def requiv_to_pot_contact(requiv, q, sma, compno=1):
"""
:param requiv: user-provided equivalent radius
:param q: mass ratio
:param sma: semi-major axis (d = sma because we explicitly assume circular orbits for contacts)
:param compno: 1 for primary, 2 for secondary
:return: potential and fillout factor
"""
logger.debug("requiv_to_pot_contact(requiv={}, q={}, sma={}, compno={})".format(requiv, q, sma, compno))
# since the functions called here work with normalized r, we need to set d=D=sma=1.
# or provide sma as a function parameter and normalize r here as requiv = requiv/sma
requiv = requiv/sma
vequiv = 4./3*np.pi*requiv**3
d = 1.
F = 1.
logger.debug("libphoebe.roche_contact_Omega_at_partial_vol(vol={}, phi=pi/2, q={}, d={}, choice={})".format(vequiv, q, d, compno-1))
return libphoebe.roche_contact_Omega_at_partial_vol(vequiv, np.pi/2, q, d, choice=compno-1) |
def check_keepalive(self):
"""Send keepalive/PING if necessary."""
if self.sock != NC.INVALID_SOCKET and time.time() - self.last_msg_out >= self.keep_alive:
if self.state == NC.CS_CONNECTED:
self.send_pingreq()
else:
            self.socket_close() | Send keepalive/PING if necessary. | Below is the instruction that describes the task:
### Input:
Send keepalive/PING if necessary.
### Response:
def check_keepalive(self):
"""Send keepalive/PING if necessary."""
if self.sock != NC.INVALID_SOCKET and time.time() - self.last_msg_out >= self.keep_alive:
if self.state == NC.CS_CONNECTED:
self.send_pingreq()
else:
self.socket_close() |
def query_bypass(self, query, raw_output=True):
    ''' Bypass query, meaning that field checks and validation are skipped and the query object is executed directly by pymongo.
:param raw_output: Skip OmMongo ORM layer (default: True)
'''
if not isinstance(query, dict):
raise BadQueryException('Query must be dict.')
self.__query = query
if raw_output:
self._raw_output = True
return self.__get_query_result().cursor
else:
            return self | Bypass query, meaning that field checks and validation are skipped and the query object is executed directly by pymongo.
        :param raw_output: Skip OmMongo ORM layer (default: True) | Below is the instruction that describes the task:
### Input:
Bypass query meaning that field check and validation is skipped, then query object directly executed by pymongo.
:param raw_output: Skip OmMongo ORM layer (default: True)
### Response:
def query_bypass(self, query, raw_output=True):
    ''' Bypass query, meaning that field checks and validation are skipped and the query object is executed directly by pymongo.
:param raw_output: Skip OmMongo ORM layer (default: True)
'''
if not isinstance(query, dict):
raise BadQueryException('Query must be dict.')
self.__query = query
if raw_output:
self._raw_output = True
return self.__get_query_result().cursor
else:
return self |
def _load(self, scale=1.0):
"""Load the SLSTR relative spectral responses
"""
LOG.debug("File: %s", str(self.requested_band_filename))
ncf = Dataset(self.requested_band_filename, 'r')
wvl = ncf.variables['wavelength'][:] * scale
resp = ncf.variables['response'][:]
        self.rsr = {'wavelength': wvl, 'response': resp} | Load the SLSTR relative spectral responses | Below is the instruction that describes the task:
### Input:
Load the SLSTR relative spectral responses
### Response:
def _load(self, scale=1.0):
"""Load the SLSTR relative spectral responses
"""
LOG.debug("File: %s", str(self.requested_band_filename))
ncf = Dataset(self.requested_band_filename, 'r')
wvl = ncf.variables['wavelength'][:] * scale
resp = ncf.variables['response'][:]
self.rsr = {'wavelength': wvl, 'response': resp} |
def proba2onehot(proba: [list, np.ndarray], confident_threshold: float, classes: [list, np.ndarray]) -> np.ndarray:
"""
Convert vectors of probabilities to one-hot representations using confident threshold
Args:
        proba: samples where each sample is a vector of probabilities of belonging to the given classes
        confident_threshold: probability threshold for belonging to a class
classes: array of classes' names
Returns:
2d array with one-hot representation of given samples
"""
return labels2onehot(proba2labels(proba, confident_threshold, classes), classes) | Convert vectors of probabilities to one-hot representations using confident threshold
Args:
        proba: samples where each sample is a vector of probabilities of belonging to the given classes
        confident_threshold: probability threshold for belonging to a class
classes: array of classes' names
Returns:
        2d array with one-hot representation of given samples | Below is the instruction that describes the task:
### Input:
Convert vectors of probabilities to one-hot representations using confident threshold
Args:
        proba: samples where each sample is a vector of probabilities of belonging to the given classes
        confident_threshold: probability threshold for belonging to a class
classes: array of classes' names
Returns:
2d array with one-hot representation of given samples
### Response:
def proba2onehot(proba: [list, np.ndarray], confident_threshold: float, classes: [list, np.ndarray]) -> np.ndarray:
"""
Convert vectors of probabilities to one-hot representations using confident threshold
Args:
        proba: samples where each sample is a vector of probabilities of belonging to the given classes
        confident_threshold: probability threshold for belonging to a class
classes: array of classes' names
Returns:
2d array with one-hot representation of given samples
"""
return labels2onehot(proba2labels(proba, confident_threshold, classes), classes) |
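
proba2labels and labels2onehot are not shown in this snippet; a minimal sketch of what the composition plausibly does (both helpers below are hypothetical reconstructions, not the library's code):

import numpy as np

def proba2labels(proba, confident_threshold, classes):
    # Hypothetical: keep every class whose probability clears the threshold.
    classes = np.asarray(classes)
    return [classes[row >= confident_threshold] for row in np.asarray(proba)]

def labels2onehot(labels, classes):
    index = {c: i for i, c in enumerate(classes)}
    onehot = np.zeros((len(labels), len(classes)), dtype=int)
    for i, row in enumerate(labels):
        for label in row:
            onehot[i, index[label]] = 1
    return onehot

print(labels2onehot(proba2labels([[0.7, 0.6, 0.1]], 0.5, ['a', 'b', 'c']),
                    ['a', 'b', 'c']))   # [[1 1 0]]
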
def update(self, ell, k):
"""Update the posterior and estimates after a label is sampled
Parameters
----------
ell : int
sampled label: 0 or 1
k : int
index of stratum where label was sampled
"""
self.alpha_[k] += ell
self.beta_[k] += 1 - ell
self._calc_theta()
if self.store_variance:
self._calc_var_theta() | Update the posterior and estimates after a label is sampled
Parameters
----------
ell : int
sampled label: 0 or 1
k : int
        index of stratum where label was sampled | Below is the instruction that describes the task:
### Input:
Update the posterior and estimates after a label is sampled
Parameters
----------
ell : int
sampled label: 0 or 1
k : int
index of stratum where label was sampled
### Response:
def update(self, ell, k):
"""Update the posterior and estimates after a label is sampled
Parameters
----------
ell : int
sampled label: 0 or 1
k : int
index of stratum where label was sampled
"""
self.alpha_[k] += ell
self.beta_[k] += 1 - ell
self._calc_theta()
if self.store_variance:
self._calc_var_theta() |
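
The private _calc_theta is not shown; a standalone sketch of the Beta-Bernoulli bookkeeping this update performs, where the posterior mean alpha / (alpha + beta) is the usual estimate:

alpha, beta = 1.0, 1.0           # uniform Beta(1, 1) prior for one stratum
for ell in [1, 1, 0, 1]:         # sampled labels
    alpha += ell
    beta += 1 - ell
print(alpha / (alpha + beta))    # posterior mean theta = 4/6 ~ 0.667
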
def get_metric_group_definitions(self):
"""
Get the faked metric group definitions for this context object
that are to be returned from its create operation.
If a 'metric-groups' property had been specified for this context,
only those faked metric group definitions of its manager object that
are in that list, are included in the result. Otherwise, all metric
group definitions of its manager are included in the result.
Returns:
        iterable of :class:`~zhmcclient.FakedMetricGroupDefinition`: The faked
metric group definitions, in the order they had been added.
"""
group_names = self.properties.get('metric-groups', None)
if not group_names:
group_names = self.manager.get_metric_group_definition_names()
mg_defs = []
for group_name in group_names:
try:
mg_def = self.manager.get_metric_group_definition(group_name)
mg_defs.append(mg_def)
except ValueError:
pass # ignore metric groups without metric group defs
return mg_defs | Get the faked metric group definitions for this context object
that are to be returned from its create operation.
If a 'metric-groups' property had been specified for this context,
only those faked metric group definitions of its manager object that
are in that list, are included in the result. Otherwise, all metric
group definitions of its manager are included in the result.
Returns:
        iterable of :class:`~zhmcclient.FakedMetricGroupDefinition`: The faked
        metric group definitions, in the order they had been added. | Below is the instruction that describes the task:
### Input:
Get the faked metric group definitions for this context object
that are to be returned from its create operation.
If a 'metric-groups' property had been specified for this context,
only those faked metric group definitions of its manager object that
are in that list, are included in the result. Otherwise, all metric
group definitions of its manager are included in the result.
Returns:
        iterable of :class:`~zhmcclient.FakedMetricGroupDefinition`: The faked
metric group definitions, in the order they had been added.
### Response:
def get_metric_group_definitions(self):
"""
Get the faked metric group definitions for this context object
that are to be returned from its create operation.
If a 'metric-groups' property had been specified for this context,
only those faked metric group definitions of its manager object that
are in that list, are included in the result. Otherwise, all metric
group definitions of its manager are included in the result.
Returns:
        iterable of :class:`~zhmcclient.FakedMetricGroupDefinition`: The faked
metric group definitions, in the order they had been added.
"""
group_names = self.properties.get('metric-groups', None)
if not group_names:
group_names = self.manager.get_metric_group_definition_names()
mg_defs = []
for group_name in group_names:
try:
mg_def = self.manager.get_metric_group_definition(group_name)
mg_defs.append(mg_def)
except ValueError:
pass # ignore metric groups without metric group defs
return mg_defs |
def restore_instances(self, instances):
"""Restore a set of instances into the CLIPS data base.
The Python equivalent of the CLIPS restore-instances command.
Instances can be passed as a set of strings or as a file.
"""
instances = instances.encode()
if os.path.exists(instances):
ret = lib.EnvRestoreInstances(self._env, instances)
if ret == -1:
raise CLIPSError(self._env)
else:
ret = lib.EnvRestoreInstancesFromString(self._env, instances, -1)
if ret == -1:
raise CLIPSError(self._env)
        return ret | Restore a set of instances into the CLIPS database.
The Python equivalent of the CLIPS restore-instances command.
    Instances can be passed as a set of strings or as a file. | Below is the instruction that describes the task:
### Input:
Restore a set of instances into the CLIPS database.
The Python equivalent of the CLIPS restore-instances command.
Instances can be passed as a set of strings or as a file.
### Response:
def restore_instances(self, instances):
"""Restore a set of instances into the CLIPS data base.
The Python equivalent of the CLIPS restore-instances command.
Instances can be passed as a set of strings or as a file.
"""
instances = instances.encode()
if os.path.exists(instances):
ret = lib.EnvRestoreInstances(self._env, instances)
if ret == -1:
raise CLIPSError(self._env)
else:
ret = lib.EnvRestoreInstancesFromString(self._env, instances, -1)
if ret == -1:
raise CLIPSError(self._env)
return ret |
def sanitize_args(cmd: List[str]) -> List[str]:
""" Filter the command so that it no longer contains passwords
"""
sanitized = []
for idx, fieldname in enumerate(cmd):
def _is_password(cmdstr):
return 'wifi-sec.psk' in cmdstr\
or 'password' in cmdstr.lower()
if idx > 0 and _is_password(cmd[idx-1]):
sanitized.append('****')
else:
sanitized.append(fieldname)
    return sanitized | Filter the command so that it no longer contains passwords | Below is the instruction that describes the task:
### Input:
Filter the command so that it no longer contains passwords
### Response:
def sanitize_args(cmd: List[str]) -> List[str]:
""" Filter the command so that it no longer contains passwords
"""
sanitized = []
for idx, fieldname in enumerate(cmd):
def _is_password(cmdstr):
return 'wifi-sec.psk' in cmdstr\
or 'password' in cmdstr.lower()
if idx > 0 and _is_password(cmd[idx-1]):
sanitized.append('****')
else:
sanitized.append(fieldname)
return sanitized |
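
Usage sketch with an invented nmcli-style command line:

cmd = ['nmcli', 'dev', 'wifi', 'connect', 'MyNet',
       'password', 'hunter2', 'wifi-sec.psk', 'hunter2']
print(sanitize_args(cmd))
# ['nmcli', 'dev', 'wifi', 'connect', 'MyNet',
#  'password', '****', 'wifi-sec.psk', '****']
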
def get_spectrum(self, nr_id=None, abmn=None, plot_filename=None):
"""Return a spectrum and its reciprocal counter part, if present in the
dataset. Optimally, refer to the spectrum by its normal-reciprocal id.
Returns
-------
spectrum_nor : :py:class:`reda.eis.plots.sip_response`
Normal spectrum. None if no normal spectrum is available
spectrum_rec : :py:class:`reda.eis.plots.sip_response` or None
Reciprocal spectrum. None if no reciprocal spectrum is available
fig : :py:class:`matplotlib.Figure.Figure` , optional
Figure object (only if plot_filename is set)
"""
assert nr_id is None or abmn is None
# determine nr_id for given abmn tuple
if abmn is not None:
subdata = self.data.query(
'a == {} and b == {} and m == {} and n == {}'.format(*abmn)
).sort_values('frequency')
if subdata.shape[0] == 0:
return None, None
# determine the norrec-id of this spectrum
nr_id = subdata['id'].iloc[0]
# get spectra
subdata_nor = self.data.query(
'id == {} and norrec=="nor"'.format(nr_id)
).sort_values('frequency')
subdata_rec = self.data.query(
'id == {} and norrec=="rec"'.format(nr_id)
).sort_values('frequency')
# create spectrum objects
spectrum_nor = None
spectrum_rec = None
if subdata_nor.shape[0] > 0:
spectrum_nor = eis_plot.sip_response(
frequencies=subdata_nor['frequency'].values,
rmag=subdata_nor['r'],
rpha=subdata_nor['rpha'],
)
if subdata_rec.shape[0] > 0:
spectrum_rec = eis_plot.sip_response(
frequencies=subdata_rec['frequency'].values,
rmag=subdata_rec['r'],
rpha=subdata_rec['rpha'],
)
if plot_filename is not None:
if spectrum_nor is not None:
fig = spectrum_nor.plot(
plot_filename,
reciprocal=spectrum_rec,
return_fig=True,
title='a: {} b: {} m: {}: n: {}'.format(
*subdata_nor[['a', 'b', 'm', 'n']].values[0, :]
)
)
return spectrum_nor, spectrum_rec, fig
        return spectrum_nor, spectrum_rec | Return a spectrum and its reciprocal counterpart, if present in the
dataset. Optimally, refer to the spectrum by its normal-reciprocal id.
Returns
-------
spectrum_nor : :py:class:`reda.eis.plots.sip_response`
Normal spectrum. None if no normal spectrum is available
spectrum_rec : :py:class:`reda.eis.plots.sip_response` or None
Reciprocal spectrum. None if no reciprocal spectrum is available
fig : :py:class:`matplotlib.Figure.Figure` , optional
        Figure object (only if plot_filename is set) | Below is the instruction that describes the task:
### Input:
Return a spectrum and its reciprocal counterpart, if present in the
dataset. Optimally, refer to the spectrum by its normal-reciprocal id.
Returns
-------
spectrum_nor : :py:class:`reda.eis.plots.sip_response`
Normal spectrum. None if no normal spectrum is available
spectrum_rec : :py:class:`reda.eis.plots.sip_response` or None
Reciprocal spectrum. None if no reciprocal spectrum is available
fig : :py:class:`matplotlib.Figure.Figure` , optional
Figure object (only if plot_filename is set)
### Response:
def get_spectrum(self, nr_id=None, abmn=None, plot_filename=None):
"""Return a spectrum and its reciprocal counter part, if present in the
dataset. Optimally, refer to the spectrum by its normal-reciprocal id.
Returns
-------
spectrum_nor : :py:class:`reda.eis.plots.sip_response`
Normal spectrum. None if no normal spectrum is available
spectrum_rec : :py:class:`reda.eis.plots.sip_response` or None
Reciprocal spectrum. None if no reciprocal spectrum is available
fig : :py:class:`matplotlib.Figure.Figure` , optional
Figure object (only if plot_filename is set)
"""
assert nr_id is None or abmn is None
# determine nr_id for given abmn tuple
if abmn is not None:
subdata = self.data.query(
'a == {} and b == {} and m == {} and n == {}'.format(*abmn)
).sort_values('frequency')
if subdata.shape[0] == 0:
return None, None
# determine the norrec-id of this spectrum
nr_id = subdata['id'].iloc[0]
# get spectra
subdata_nor = self.data.query(
'id == {} and norrec=="nor"'.format(nr_id)
).sort_values('frequency')
subdata_rec = self.data.query(
'id == {} and norrec=="rec"'.format(nr_id)
).sort_values('frequency')
# create spectrum objects
spectrum_nor = None
spectrum_rec = None
if subdata_nor.shape[0] > 0:
spectrum_nor = eis_plot.sip_response(
frequencies=subdata_nor['frequency'].values,
rmag=subdata_nor['r'],
rpha=subdata_nor['rpha'],
)
if subdata_rec.shape[0] > 0:
spectrum_rec = eis_plot.sip_response(
frequencies=subdata_rec['frequency'].values,
rmag=subdata_rec['r'],
rpha=subdata_rec['rpha'],
)
if plot_filename is not None:
if spectrum_nor is not None:
fig = spectrum_nor.plot(
plot_filename,
reciprocal=spectrum_rec,
return_fig=True,
title='a: {} b: {} m: {}: n: {}'.format(
*subdata_nor[['a', 'b', 'm', 'n']].values[0, :]
)
)
return spectrum_nor, spectrum_rec, fig
return spectrum_nor, spectrum_rec |
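A minimal usage sketch for get_spectrum(); the container name `seit` and the quadrupole/id values are placeholders invented for this note, while the keyword arguments and return values come from the function above.

# Hypothetical usage; `seit` stands for any object exposing get_spectrum()
# together with the .data DataFrame queried above.
spec_nor, spec_rec = seit.get_spectrum(abmn=(1, 2, 4, 3))
if spec_rec is None:
    print('no reciprocal measurement for this quadrupole')
# with a plot filename, the figure is returned as a third value:
spec_nor, spec_rec, fig = seit.get_spectrum(nr_id=5, plot_filename='spectrum.png')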
def create_project(self, project_path):
"""
Create Trionyx project in given path
:param str project_path: path to create project in.
:raises FileExistsError:
"""
shutil.copytree(self.project_path, project_path)
self.update_file(project_path, 'requirements.txt', {
'trionyx_version': trionyx.__version__
})
self.update_file(project_path, 'config/local_settings.py', {
'secret_key': utils.random_string(32)
}) | Create Trionyx project in given path
:param str project_path: path to create project in.
:raises FileExistsError: | Below is the instruction that describes the task:
### Input:
Create Trionyx project in given path
:param str project_path: path to create project in.
:raises FileExistsError:
### Response:
def create_project(self, project_path):
"""
Create Trionyx project in given path
:param str project_path: path to create project in.
:raises FileExistsError:
"""
shutil.copytree(self.project_path, project_path)
self.update_file(project_path, 'requirements.txt', {
'trionyx_version': trionyx.__version__
})
self.update_file(project_path, 'config/local_settings.py', {
'secret_key': utils.random_string(32)
}) |
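As a quick illustration (the builder object is hypothetical; only create_project() and its FileExistsError behaviour come from the code above):

builder = ProjectBuilder()  # hypothetical holder of self.project_path (the template tree)
builder.create_project('/tmp/demo_project')  # copies the template; raises FileExistsError if the path exists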
def query_one(self, *args, **kwargs):
"""Return first document from :meth:`query`, with same parameters.
"""
for r in self.query(*args, **kwargs):
return r
return None | Return first document from :meth:`query`, with same parameters. | Below is the instruction that describes the task:
### Input:
Return first document from :meth:`query`, with same parameters.
### Response:
def query_one(self, *args, **kwargs):
"""Return first document from :meth:`query`, with same parameters.
"""
for r in self.query(*args, **kwargs):
return r
return None |
def recalculate_current_specimen_interpreatations(self):
"""
recalculates all interpretations on all specimens for all coordinate
systems. Does not display recalcuated data.
"""
self.initialize_CART_rot(self.s)
if str(self.s) in self.pmag_results_data['specimens']:
for fit in self.pmag_results_data['specimens'][self.s]:
if fit.get('specimen') and 'calculation_type' in fit.get('specimen'):
fit.put(self.s, 'specimen', self.get_PCA_parameters(
self.s, fit, fit.tmin, fit.tmax, 'specimen', fit.get('specimen')['calculation_type']))
if len(self.Data[self.s]['zijdblock_geo']) > 0 and fit.get('geographic') and 'calculation_type' in fit.get('geographic'):
fit.put(self.s, 'geographic', self.get_PCA_parameters(
self.s, fit, fit.tmin, fit.tmax, 'geographic', fit.get('geographic')['calculation_type']))
if len(self.Data[self.s]['zijdblock_tilt']) > 0 and fit.get('tilt-corrected') and 'calculation_type' in fit.get('tilt-corrected'):
fit.put(self.s, 'tilt-corrected', self.get_PCA_parameters(self.s, fit, fit.tmin,
fit.tmax, 'tilt-corrected', fit.get('tilt-corrected')['calculation_type'])) | recalculates all interpretations on all specimens for all coordinate
systems. Does not display recalculated data. | Below is the instruction that describes the task:
### Input:
recalculates all interpretations on all specimens for all coordinate
systems. Does not display recalculated data.
### Response:
def recalculate_current_specimen_interpreatations(self):
"""
recalculates all interpretations on all specimens for all coordinate
systems. Does not display recalculated data.
"""
self.initialize_CART_rot(self.s)
if str(self.s) in self.pmag_results_data['specimens']:
for fit in self.pmag_results_data['specimens'][self.s]:
if fit.get('specimen') and 'calculation_type' in fit.get('specimen'):
fit.put(self.s, 'specimen', self.get_PCA_parameters(
self.s, fit, fit.tmin, fit.tmax, 'specimen', fit.get('specimen')['calculation_type']))
if len(self.Data[self.s]['zijdblock_geo']) > 0 and fit.get('geographic') and 'calculation_type' in fit.get('geographic'):
fit.put(self.s, 'geographic', self.get_PCA_parameters(
self.s, fit, fit.tmin, fit.tmax, 'geographic', fit.get('geographic')['calculation_type']))
if len(self.Data[self.s]['zijdblock_tilt']) > 0 and fit.get('tilt-corrected') and 'calculation_type' in fit.get('tilt-corrected'):
fit.put(self.s, 'tilt-corrected', self.get_PCA_parameters(self.s, fit, fit.tmin,
fit.tmax, 'tilt-corrected', fit.get('tilt-corrected')['calculation_type'])) |
def sync_entities_watching(instance):
"""
Syncs entities watching changes of a model instance.
"""
for entity_model, entity_model_getter in entity_registry.entity_watching[instance.__class__]:
model_objs = list(entity_model_getter(instance))
if model_objs:
sync_entities(*model_objs) | Syncs entities watching changes of a model instance. | Below is the instruction that describes the task:
### Input:
Syncs entities watching changes of a model instance.
### Response:
def sync_entities_watching(instance):
"""
Syncs entities watching changes of a model instance.
"""
for entity_model, entity_model_getter in entity_registry.entity_watching[instance.__class__]:
model_objs = list(entity_model_getter(instance))
if model_objs:
sync_entities(*model_objs) |
def _set_ipv6_interface(self, v, load=False):
"""
Setter method for ipv6_interface, mapped from YANG variable /rbridge_id/interface/ve/ipv6/ipv6_local_anycast_gateway/ipv6_track/ipv6_interface (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_interface() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("ipv6_interface_type ipv6_interface_name",ipv6_interface.ipv6_interface, yang_name="ipv6-interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ipv6-interface-type ipv6-interface-name', extensions={u'tailf-common': {u'callpoint': u'AnycastGatewayLocalIpv6TrackInterfaceConfig', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-incomplete-command': None, u'cli-no-match-completion': None, u'cli-full-no': None}}), is_container='list', yang_name="ipv6-interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'AnycastGatewayLocalIpv6TrackInterfaceConfig', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-incomplete-command': None, u'cli-no-match-completion': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipv6_interface must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("ipv6_interface_type ipv6_interface_name",ipv6_interface.ipv6_interface, yang_name="ipv6-interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ipv6-interface-type ipv6-interface-name', extensions={u'tailf-common': {u'callpoint': u'AnycastGatewayLocalIpv6TrackInterfaceConfig', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-incomplete-command': None, u'cli-no-match-completion': None, u'cli-full-no': None}}), is_container='list', yang_name="ipv6-interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'AnycastGatewayLocalIpv6TrackInterfaceConfig', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-incomplete-command': None, u'cli-no-match-completion': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='list', is_config=True)""",
})
self.__ipv6_interface = t
if hasattr(self, '_set'):
self._set() | Setter method for ipv6_interface, mapped from YANG variable /rbridge_id/interface/ve/ipv6/ipv6_local_anycast_gateway/ipv6_track/ipv6_interface (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_interface() directly. | Below is the instruction that describes the task:
### Input:
Setter method for ipv6_interface, mapped from YANG variable /rbridge_id/interface/ve/ipv6/ipv6_local_anycast_gateway/ipv6_track/ipv6_interface (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_interface() directly.
### Response:
def _set_ipv6_interface(self, v, load=False):
"""
Setter method for ipv6_interface, mapped from YANG variable /rbridge_id/interface/ve/ipv6/ipv6_local_anycast_gateway/ipv6_track/ipv6_interface (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_ipv6_interface is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_ipv6_interface() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("ipv6_interface_type ipv6_interface_name",ipv6_interface.ipv6_interface, yang_name="ipv6-interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ipv6-interface-type ipv6-interface-name', extensions={u'tailf-common': {u'callpoint': u'AnycastGatewayLocalIpv6TrackInterfaceConfig', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-incomplete-command': None, u'cli-no-match-completion': None, u'cli-full-no': None}}), is_container='list', yang_name="ipv6-interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'AnycastGatewayLocalIpv6TrackInterfaceConfig', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-incomplete-command': None, u'cli-no-match-completion': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """ipv6_interface must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("ipv6_interface_type ipv6_interface_name",ipv6_interface.ipv6_interface, yang_name="ipv6-interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ipv6-interface-type ipv6-interface-name', extensions={u'tailf-common': {u'callpoint': u'AnycastGatewayLocalIpv6TrackInterfaceConfig', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-incomplete-command': None, u'cli-no-match-completion': None, u'cli-full-no': None}}), is_container='list', yang_name="ipv6-interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'AnycastGatewayLocalIpv6TrackInterfaceConfig', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-incomplete-command': None, u'cli-no-match-completion': None, u'cli-full-no': None}}, namespace='urn:brocade.com:mgmt:brocade-anycast-gateway', defining_module='brocade-anycast-gateway', yang_type='list', is_config=True)""",
})
self.__ipv6_interface = t
if hasattr(self, '_set'):
self._set() |
def stop():
'''
Stop Riak
.. versionchanged:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.stop
'''
ret = {'comment': '', 'success': False}
cmd = __execute_cmd('riak', 'stop')
if cmd['retcode'] != 0:
ret['comment'] = cmd['stderr']
else:
ret['comment'] = cmd['stdout']
ret['success'] = True
return ret | Stop Riak
.. versionchanged:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.stop | Below is the instruction that describes the task:
### Input:
Stop Riak
.. versionchanged:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.stop
### Response:
def stop():
'''
Stop Riak
.. versionchanged:: 2015.8.0
CLI Example:
.. code-block:: bash
salt '*' riak.stop
'''
ret = {'comment': '', 'success': False}
cmd = __execute_cmd('riak', 'stop')
if cmd['retcode'] != 0:
ret['comment'] = cmd['stderr']
else:
ret['comment'] = cmd['stdout']
ret['success'] = True
return ret |
def user_admin_view(model, login_view="Login", template_dir=None):
"""
:param model: The User model structure containing other classes
:param login_view: The login view interface
:param template_dir: The directory containing the view pages
:return: UserAdmin
Doc:
User Admin is a view that allows you to admin users.
You must create a Pylot view called `UserAdmin` to activate it
UserAdmin = app.views.user_admin(User, Login)
class UserAdmin(UserAdmin, Pylot):
pass
The user admin creates some globally available vars under '__.user_admin'
It's also best to add some security access on it
class UserAdmin(UserAdmin, Pylot):
decorators = [login_required]
You can customize the user info page (::get) by creating the directory in your
templates dir, and include the get.html inside of it
ie:
>/admin/templates/UserAdmin/get.html
<div>
{% include "Pylot/UserAdmin/get.html" %}
</div>
<div>Hello {{ __.user_admin.user.name }}</div>
"""
Pylot.context_(COMPONENT_USER_ADMIN=True)
User = model.UserStruct.User
LoginView = login_view
if not template_dir:
template_dir = "Pylot/UserAdmin"
template_page = template_dir + "/%s.html"
class UserAdmin(object):
route_base = "user-admin"
@classmethod
def _options(cls):
return {
"user_role": [("Rol", "Role")], #[(role, role) for i, role in enumerate(.all_roles)],
"user_status": [("Sta", "Stat")] #[(status, status) for i, status in enumerate(User.all_status)]
}
@classmethod
def search_handler(cls, per_page=20):
"""
To initiate a search
"""
page = request.args.get("page", 1)
show_deleted = True if request.args.get("show-deleted") else False
name = request.args.get("name")
email = request.args.get("email")
users = User.all(include_deleted=show_deleted)
users = users.order_by(User.name.asc())
if name:
users = users.filter(User.name.contains(name))
if email:
users = users.filter(User.email.contains(email))
users = users.paginate(page=page, per_page=per_page)
cls.__(user_admin=dict(
options=cls._options(),
users=users,
search_query={
"excluded_deleted": request.args.get("show-deleted"),
"role": request.args.get("role"),
"status": request.args.get("status"),
"name": request.args.get("name"),
"email": request.args.get("email")
}
))
return users
@classmethod
def get_user_handler(cls, id):
"""
Get a user
"""
user = User.get(id, include_deleted=True)
if not user:
abort(404, "User doesn't exist")
cls.__(user_admin=dict(user=user, options=cls._options()))
return user
def index(self):
self.search_handler()
return self.render(view_template=template_page % "index")
def get(self, id):
self.get_user_handler(id)
return self.render(view_template=template_page % "get")
def post(self):
try:
id = request.form.get("id")
user = User.get(id, include_deleted=True)
if not user:
self.error_("Can't change user info. Invalid user")
return redirect(url_for("UserAdmin:index"))
delete_entry = True if request.form.get("delete-entry") else False
if delete_entry:
user.update(status=user.STATUS_SUSPENDED)
user.delete()
self.success_("User DELETED Successfully!")
return redirect(url_for("UserAdmin:get", id=id))
email = request.form.get("email")
password = request.form.get("password")
password2 = request.form.get("password2")
name = request.form.get("name")
role = request.form.get("user_role")
status = request.form.get("user_status")
upd = {}
if email and email != user.email:
LoginView.change_login_handler(user_context=user)
if password and password2:
LoginView.change_password_handler(user_context=user)
if name != user.name:
upd.update({"name": name})
if role and role != user.role:
upd.update({"role": role})
if status and status != user.status:
if user.is_deleted and status == user.STATUS_ACTIVE:
user.delete(False)
upd.update({"status": status})
if upd:
user.update(**upd)
self.success_("User's Info updated successfully!")
except Exception as ex:
self.error_("Error: %s " % ex.message)
return redirect(url_for("UserAdmin:get", id=id))
@route("reset-password", methods=["POST"])
def reset_password(self):
try:
id = request.form.get("id")
user = User.get(id)
if not user:
self.error_("Can't reset password. Invalid user")
return redirect(url_for("User:index"))
password = LoginView.reset_password_handler(user_context=user)
self.success_("User's password reset successfully!")
except Exception as ex:
self.error_("Error: %s " % ex.message)
return redirect(url_for("UserAdmin:get", id=id))
@route("create", methods=["POST"])
def create(self):
try:
account = LoginView.signup_handler()
account.set_role(request.form.get("role", "USER"))
self.success_("User created successfully!")
return redirect(url_for("UserAdmin:get", id=account.id))
except Exception as ex:
self.error_("Error: %s" % ex.message)
return redirect(url_for("UserAdmin:index"))
return UserAdmin | :param model: The User model structure containing other classes
:param login_view: The login view interface
:param template_dir: The directory containing the view pages
:return: UserAdmin
Doc:
User Admin is a view that allows you to admin users.
You must create a Pylot view called `UserAdmin` to activate it
UserAdmin = app.views.user_admin(User, Login)
class UserAdmin(UserAdmin, Pylot):
pass
The user admin creates some globally available vars under '__.user_admin'
It's also best to add some security access on it
class UserAdmin(UserAdmin, Pylot):
decorators = [login_required]
You can customize the user info page (::get) by creating the directory in your
templates dir, and include the get.html inside of it
ie:
>/admin/templates/UserAdmin/get.html
<div>
{% include "Pylot/UserAdmin/get.html" %}
</div>
<div>Hello {{ __.user_admin.user.name }}</div> | Below is the instruction that describes the task:
### Input:
:param model: The User model structure containing other classes
:param login_view: The login view interface
:param template_dir: The directory containing the view pages
:return: UserAdmin
Doc:
User Admin is a view that allows you to admin users.
You must create a Pylot view called `UserAdmin` to activate it
UserAdmin = app.views.user_admin(User, Login)
class UserAdmin(UserAdmin, Pylot):
pass
The user admin creates some globally available vars under '__.user_admin'
It's also best to add some security access on it
class UserAdmin(UserAdmin, Pylot):
decorators = [login_required]
You can customize the user info page (::get) by creating the directory in your
templates dir, and include the get.html inside of it
ie:
>/admin/templates/UserAdmin/get.html
<div>
{% include "Pylot/UserAdmin/get.html" %}
</div>
<div>Hello {{ __.user_admin.user.name }}</div>
### Response:
def user_admin_view(model, login_view="Login", template_dir=None):
"""
:param model: The User model structure containing other classes
:param login_view: The login view interface
:param template_dir: The directory containing the view pages
:return: UserAdmin
Doc:
User Admin is a view that allows you to admin users.
You must create a Pylot view called `UserAdmin` to activate it
UserAdmin = app.views.user_admin(User, Login)
class UserAdmin(UserAdmin, Pylot):
pass
The user admin creates some globally available vars under '__.user_admin'
It's also best to add some security access on it
class UserAdmin(UserAdmin, Pylot):
decorators = [login_required]
You can customize the user info page (::get) by creating the directory in your
templates dir, and include the get.html inside of it
ie:
>/admin/templates/UserAdmin/get.html
<div>
{% include "Pylot/UserAdmin/get.html" %}
</div>
<div>Hello {{ __.user_admin.user.name }}</div>
"""
Pylot.context_(COMPONENT_USER_ADMIN=True)
User = model.UserStruct.User
LoginView = login_view
if not template_dir:
template_dir = "Pylot/UserAdmin"
template_page = template_dir + "/%s.html"
class UserAdmin(object):
route_base = "user-admin"
@classmethod
def _options(cls):
return {
"user_role": [("Rol", "Role")], #[(role, role) for i, role in enumerate(.all_roles)],
"user_status": [("Sta", "Stat")] #[(status, status) for i, status in enumerate(User.all_status)]
}
@classmethod
def search_handler(cls, per_page=20):
"""
To initiate a search
"""
page = request.args.get("page", 1)
show_deleted = True if request.args.get("show-deleted") else False
name = request.args.get("name")
email = request.args.get("email")
users = User.all(include_deleted=show_deleted)
users = users.order_by(User.name.asc())
if name:
users = users.filter(User.name.contains(name))
if email:
users = users.filter(User.email.contains(email))
users = users.paginate(page=page, per_page=per_page)
cls.__(user_admin=dict(
options=cls._options(),
users=users,
search_query={
"excluded_deleted": request.args.get("show-deleted"),
"role": request.args.get("role"),
"status": request.args.get("status"),
"name": request.args.get("name"),
"email": request.args.get("email")
}
))
return users
@classmethod
def get_user_handler(cls, id):
"""
Get a user
"""
user = User.get(id, include_deleted=True)
if not user:
abort(404, "User doesn't exist")
cls.__(user_admin=dict(user=user, options=cls._options()))
return user
def index(self):
self.search_handler()
return self.render(view_template=template_page % "index")
def get(self, id):
self.get_user_handler(id)
return self.render(view_template=template_page % "get")
def post(self):
try:
id = request.form.get("id")
user = User.get(id, include_deleted=True)
if not user:
self.error_("Can't change user info. Invalid user")
return redirect(url_for("UserAdmin:index"))
delete_entry = True if request.form.get("delete-entry") else False
if delete_entry:
user.update(status=user.STATUS_SUSPENDED)
user.delete()
self.success_("User DELETED Successfully!")
return redirect(url_for("UserAdmin:get", id=id))
email = request.form.get("email")
password = request.form.get("password")
password2 = request.form.get("password2")
name = request.form.get("name")
role = request.form.get("user_role")
status = request.form.get("user_status")
upd = {}
if email and email != user.email:
LoginView.change_login_handler(user_context=user)
if password and password2:
LoginView.change_password_handler(user_context=user)
if name != user.name:
upd.update({"name": name})
if role and role != user.role:
upd.update({"role": role})
if status and status != user.status:
if user.is_deleted and status == user.STATUS_ACTIVE:
user.delete(False)
upd.update({"status": status})
if upd:
user.update(**upd)
self.success_("User's Info updated successfully!")
except Exception as ex:
self.error_("Error: %s " % ex.message)
return redirect(url_for("UserAdmin:get", id=id))
@route("reset-password", methods=["POST"])
def reset_password(self):
try:
id = request.form.get("id")
user = User.get(id)
if not user:
self.error_("Can't reset password. Invalid user")
return redirect(url_for("User:index"))
password = LoginView.reset_password_handler(user_context=user)
self.success_("User's password reset successfully!")
except Exception as ex:
self.error_("Error: %s " % ex.message)
return redirect(url_for("UserAdmin:get", id=id))
@route("create", methods=["POST"])
def create(self):
try:
account = LoginView.signup_handler()
account.set_role(request.form.get("role", "USER"))
self.success_("User created successfully!")
return redirect(url_for("UserAdmin:get", id=account.id))
except Exception as ex:
self.error_("Error: %s" % ex.message)
return redirect(url_for("UserAdmin:index"))
return UserAdmin |
def minimum_enclosing_circle(labels, indexes = None,
hull_and_point_count = None):
"""Find the location of the minimum enclosing circle and its radius
labels - a labels matrix
indexes - an array giving the label indexes to be processed
hull_and_point_count - convex_hull output if already done. None = calculate
returns an Nx2 array of the i,j centers and a length-N array of radii
Algorithm from
http://www.personal.kent.edu/~rmuhamma/Compgeometry/MyCG/CG-Applets/Center/centercli.htm
who calls it the Applet's Algorithm and ascribes it to Pr. Chrystal
The original citation is Professor Chrystal, "On the problem to construct
the minimum circle enclosing n given points in a plane", Proceedings of
the Edinburgh Mathematical Society, vol 3, 1884
"""
if indexes is None:
if hull_and_point_count is not None:
indexes = np.array(np.unique(hull_and_point_count[0][:,0]),dtype=np.int32)
else:
max_label = np.max(labels)
indexes = np.array(list(range(1,max_label+1)),dtype=np.int32)
else:
indexes = np.array(indexes,dtype=np.int32)
if indexes.shape[0] == 0:
return np.zeros((0,2)),np.zeros((0,))
if hull_and_point_count is None:
hull, point_count = convex_hull(labels, indexes)
else:
hull, point_count = hull_and_point_count
centers = np.zeros((len(indexes),2))
radii = np.zeros((len(indexes),))
#
# point_index is the index to the first point in "hull" for a label
#
point_index = np.zeros((indexes.shape[0],),int)
point_index[1:] = np.cumsum(point_count[:-1])
#########################################################################
#
# The algorithm is this:
# * Choose a line S from S0 to S1 at random from the set of adjacent
# S0 and S1
# * For every vertex (V) other than S, compute the angle from S0
# to V to S1. If this angle is obtuse, the vertex V lies within the
# minimum enclosing circle and can be ignored.
# * Find the minimum angle for all V.
# If the minimum angle is obtuse, stop and accept S as the diameter of
# the circle.
# * If the vertex with the minimum angle makes angles S0-S1-V and
# S1-S0-V that are acute or right, then take S0, S1 and V as the
# triangle within the circumscribed minimum enclosing circle.
# * Otherwise, find the largest obtuse angle among S0-S1-V and
# S1-S0-V (V is the vertex with the minimum angle, not all of them).
# If S0-S1-V is obtuse, make V the new S1, otherwise make V the new S0
#
##########################################################################
#
# anti_indexes is used to transform a label # into an index in the above array
# anti_indexes_per_point gives the label index of any vertex
#
anti_indexes=np.zeros((np.max(indexes)+1,),int)
anti_indexes[indexes] = list(range(indexes.shape[0]))
anti_indexes_per_point = anti_indexes[hull[:,0]]
#
# Start out by eliminating the degenerate cases: 0, 1 and 2
#
centers[point_count==0,:]= np.NaN
if np.all(point_count == 0):
# Bail if there are no points in any hull to prevent
# index failures below.
return centers,radii
centers[point_count==1,:]=hull[point_index[point_count==1],1:]
radii[point_count < 2]=0
centers[point_count==2,:]=(hull[point_index[point_count==2],1:]+
hull[point_index[point_count==2]+1,1:])/2
distance = centers[point_count==2,:] - hull[point_index[point_count==2],1:]
radii[point_count==2]=np.sqrt(distance[:,0]**2+distance[:,1]**2)
#
# Get rid of the degenerate points
#
keep_me = point_count > 2
#
# Pick S0 as the first point in each label
# and S1 as the second.
#
s0_idx = point_index.copy()
s1_idx = s0_idx+1
#
# number each of the points in a label with an index # which gives
# the order in which we'll get their angles. We use this to pick out
# points # 2 to N which are the candidate vertices to S
#
within_label_indexes = (np.array(list(range(hull.shape[0])),int) -
point_index[anti_indexes_per_point])
while(np.any(keep_me)):
#############################################################
# Label indexing for active labels
#############################################################
#
# labels_to_consider contains the labels of the objects which
# have not been completed
#
labels_to_consider = indexes[keep_me]
#
# anti_indexes_to_consider gives the index into any vector
# shaped similarly to labels_to_consider (for instance, min_angle
# below) for every label in labels_to_consider.
#
anti_indexes_to_consider =\
np.zeros((np.max(labels_to_consider)+1,),int)
anti_indexes_to_consider[labels_to_consider] = \
np.array(list(range(labels_to_consider.shape[0])))
##############################################################
# Vertex indexing for active vertexes other than S0 and S1
##############################################################
#
# The vertices are hull-points with indexes of 2 or more
# keep_me_vertices is a mask of the vertices to operate on
# during this iteration
#
keep_me_vertices = np.logical_and(keep_me[anti_indexes_per_point],
within_label_indexes >= 2)
#
# v is the vertex coordinates for each vertex considered
#
v = hull[keep_me_vertices,1:]
#
# v_labels is the label from the label matrix for each vertex
#
v_labels = hull[keep_me_vertices,0]
#
# v_indexes is the index into "hull" for each vertex (and similarly
# shaped vectors such as within_label_indexes
#
v_indexes=np.argwhere(keep_me_vertices).flatten().astype(np.int32)
#
# anti_indexes_per_vertex gives the index into "indexes" and
# any similarly shaped array of per-label values
# (for instance s0_idx) for each vertex being considered
#
anti_indexes_per_vertex = anti_indexes_per_point[keep_me_vertices]
#
# anti_indexes_to_consider_per_vertex gives the index into any
# vector shaped similarly to labels_to_consider for each
# vertex being analyzed
#
anti_indexes_to_consider_per_vertex = anti_indexes_to_consider[v_labels]
#
# Get S0 and S1 per vertex
#
s0 = hull[s0_idx[keep_me],1:]
s1 = hull[s1_idx[keep_me],1:]
s0 = s0[anti_indexes_to_consider_per_vertex]
s1 = s1[anti_indexes_to_consider_per_vertex]
#
# Compute the angle S0-S1-V
#
# the first vector of the angles is between S0 and S1
#
s01 = (s0 - s1).astype(float)
#
# compute V-S1 and V-S0 at each of the vertices to be considered
#
vs0 = (v - s0).astype(float)
vs1 = (v - s1).astype(float)
#
# Take the dot product of s01 and vs1 divided by the length of s01 *
# the length of vs1. This gives the cosine of the angle between.
#
dot_vs1s0 = (np.sum(s01*vs1,1) /
np.sqrt(np.sum(s01**2,1)*np.sum(vs1**2,1)))
angle_vs1s0 = np.abs(np.arccos(dot_vs1s0))
s10 = -s01
dot_vs0s1 = (np.sum(s10*vs0,1) /
np.sqrt(np.sum(s01**2,1)*np.sum(vs0**2,1)))
angle_vs0s1 = np.abs(np.arccos(dot_vs0s1))
#
# S0-V-S1 is pi - the other two
#
angle_s0vs1 = np.pi - angle_vs1s0 - angle_vs0s1
assert np.all(angle_s0vs1 >= 0)
#
# Now we find the minimum angle per label
#
min_angle = scind.minimum(angle_s0vs1,v_labels,
labels_to_consider)
min_angle = fixup_scipy_ndimage_result(min_angle)
min_angle_per_vertex = min_angle[anti_indexes_to_consider_per_vertex]
#
# Calculate the index into V of the minimum angle per label.
# Use "indexes" instead of labels_to_consider so we get something
# with the same shape as keep_me
#
min_position = scind.minimum_position(angle_s0vs1,v_labels,
indexes)
min_position = fixup_scipy_ndimage_result(min_position).astype(int)
min_position = min_position.flatten()
#
# Case 1: minimum angle is obtuse or right. Accept S as the diameter.
# Case 1a: there are no vertices. Accept S as the diameter.
#
vertex_counts = scind.sum(keep_me_vertices,
hull[:,0],
labels_to_consider)
vertex_counts = fixup_scipy_ndimage_result(vertex_counts)
case_1 = np.logical_or(min_angle >= np.pi / 2,
vertex_counts == 0)
if np.any(case_1):
# convert from a boolean over indexes_to_consider to a boolean
# over indexes
finish_me = np.zeros((indexes.shape[0],),bool)
finish_me[anti_indexes[labels_to_consider[case_1]]] = True
s0_finish_me = hull[s0_idx[finish_me],1:].astype(float)
s1_finish_me = hull[s1_idx[finish_me],1:].astype(float)
centers[finish_me] = (s0_finish_me + s1_finish_me)/2
radii[finish_me] = np.sqrt(np.sum((s0_finish_me -
s1_finish_me)**2,1))/2
keep_me[finish_me] = False
#
# Case 2: all angles for the minimum angle vertex are acute
# or right.
# Pick S0, S1 and the vertex with the
# smallest angle as 3 points on the circle. If you look at the
# geometry, the diameter is the length of S0-S1 divided by
# the cosine of 1/2 of the angle. The center of the circle
# is at the circumcenter of the triangle formed by S0, S1 and
# V.
case_2 = keep_me.copy()
case_2[angle_vs1s0[min_position] > np.pi/2] = False
case_2[angle_vs0s1[min_position] > np.pi/2] = False
case_2[angle_s0vs1[min_position] > np.pi/2] = False
if np.any(case_2):
#
# Wikipedia (http://en.wikipedia.org/wiki/Circumcircle#Cartesian_coordinates)
# gives the following:
# D = 2(S0y Vx + S1y S0x - S1y Vx - S0y S1x - S0x Vy + S1x Vy)
# D = 2(S0x (S1y-Vy) + S1x(Vy-S0y) + Vx(S0y-S1y))
# x = ((S0x**2+S0y**2)(S1y-Vy)+(S1x**2+S1y**2)(Vy-S0y)+(Vx**2+Vy**2)(S0y-S1y)) / D
# y = ((S0x**2+S0y**2)(Vx-S1x)+(S1x**2+S1y**2)(S0x-Vx)+(Vx**2+Vy**2)(S1x-S0x)) / D
#
ss0 = hull[s0_idx[case_2],1:].astype(float)
ss1 = hull[s1_idx[case_2],1:].astype(float)
vv = v[min_position[case_2]].astype(float)
Y = 0
X = 1
D = 2*(ss0[:,X] * (ss1[:,Y] - vv[:,Y]) +
ss1[:,X] * (vv[:,Y] - ss0[:,Y]) +
vv[:,X] * (ss0[:,Y] - ss1[:,Y]))
x = (np.sum(ss0**2,1)*(ss1[:,Y] - vv[:,Y]) +
np.sum(ss1**2,1)*(vv[:,Y] - ss0[:,Y]) +
np.sum(vv**2,1) *(ss0[:,Y] - ss1[:,Y])) / D
y = (np.sum(ss0**2,1)*(vv[:,X] - ss1[:,X]) +
np.sum(ss1**2,1)*(ss0[:,X] - vv[:,X]) +
np.sum(vv**2,1) *(ss1[:,X] - ss0[:,X])) / D
centers[case_2,X] = x
centers[case_2,Y] = y
distances = ss0-centers[case_2]
radii[case_2] = np.sqrt(np.sum(distances**2,1))
keep_me[case_2] = False
#
# Finally, for anybody who's left, for each of S0-S1-V and
# S1-S0-V, for V, the vertex with the minimum angle,
# find the largest obtuse angle. The vertex of this
# angle (S0 or S1) is inside the enclosing circle, so take V
# and either S1 or S0 as the new S.
#
# This involves a relabeling of within_label_indexes. We replace
# either S0 or S1 with V and assign V either 0 or 1
#
if np.any(keep_me):
labels_to_consider = indexes[keep_me]
indexes_to_consider = anti_indexes[labels_to_consider]
#
# Index into within_label_indexes for each V with the
# smallest angle
#
v_obtuse_indexes = v_indexes[min_position[keep_me]]
angle_vs0s1_to_consider = angle_vs0s1[min_position[keep_me]]
angle_vs1s0_to_consider = angle_vs1s0[min_position[keep_me]]
#
# Do the cases where S0 is larger
#
s0_is_obtuse = angle_vs0s1_to_consider > np.pi/2
if np.any(s0_is_obtuse):
#
# The index of the obtuse S0
#
v_obtuse_s0_indexes = v_obtuse_indexes[s0_is_obtuse]
obtuse_s0_idx = s0_idx[indexes_to_consider[s0_is_obtuse]]
#
# S0 gets the within_label_index of the vertex
#
within_label_indexes[obtuse_s0_idx] = \
within_label_indexes[v_obtuse_s0_indexes]
#
# Assign V as the new S0
#
s0_idx[indexes_to_consider[s0_is_obtuse]] = v_obtuse_s0_indexes
within_label_indexes[v_obtuse_s0_indexes] = 0
#
# Do the cases where S1 is larger
#
s1_is_obtuse = np.logical_not(s0_is_obtuse)
if np.any(s1_is_obtuse):
#
# The index of the obtuse S1
#
v_obtuse_s1_indexes = v_obtuse_indexes[s1_is_obtuse]
obtuse_s1_idx = s1_idx[indexes_to_consider[s1_is_obtuse]]
#
# S1 gets V's within_label_index and goes onto the list
# of considered vertices.
#
within_label_indexes[obtuse_s1_idx] = \
within_label_indexes[v_obtuse_s1_indexes]
#
# Assign V as the new S1
#
s1_idx[indexes_to_consider[s1_is_obtuse]] = v_obtuse_s1_indexes
within_label_indexes[v_obtuse_s1_indexes] = 1
return centers, radii | Find the location of the minimum enclosing circle and its radius
labels - a labels matrix
indexes - an array giving the label indexes to be processed
hull_and_point_count - convex_hull output if already done. None = calculate
returns an Nx2 array of the i,j centers and a length-N array of radii
Algorithm from
http://www.personal.kent.edu/~rmuhamma/Compgeometry/MyCG/CG-Applets/Center/centercli.htm
who calls it the Applet's Algorithm and ascribes it to Pr. Chrystal
The original citation is Professor Chrystal, "On the problem to construct
the minimum circle enclosing n given points in a plane", Proceedings of
the Edinburgh Mathematical Society, vol 3, 1884 | Below is the instruction that describes the task:
### Input:
Find the location of the minimum enclosing circle and its radius
labels - a labels matrix
indexes - an array giving the label indexes to be processed
hull_and_point_count - convex_hull output if already done. None = calculate
returns an Nx2 array of the i,j centers and a length-N array of radii
Algorithm from
http://www.personal.kent.edu/~rmuhamma/Compgeometry/MyCG/CG-Applets/Center/centercli.htm
who calls it the Applet's Algorithm and ascribes it to Pr. Chrystal
The original citation is Professor Chrystal, "On the problem to construct
the minimum circle enclosing n given points in a plane", Proceedings of
the Edinburgh Mathematical Society, vol 3, 1884
### Response:
def minimum_enclosing_circle(labels, indexes = None,
hull_and_point_count = None):
"""Find the location of the minimum enclosing circle and its radius
labels - a labels matrix
indexes - an array giving the label indexes to be processed
hull_and_point_count - convex_hull output if already done. None = calculate
returns an Nx2 array of the i,j centers and a length-N array of radii
Algorithm from
http://www.personal.kent.edu/~rmuhamma/Compgeometry/MyCG/CG-Applets/Center/centercli.htm
who calls it the Applet's Algorithm and ascribes it to Pr. Chrystal
The original citation is Professor Chrystal, "On the problem to construct
the minimum circle enclosing n given points in a plane", Proceedings of
the Edinburgh Mathematical Society, vol 3, 1884
"""
if indexes is None:
if hull_and_point_count is not None:
indexes = np.array(np.unique(hull_and_point_count[0][:,0]),dtype=np.int32)
else:
max_label = np.max(labels)
indexes = np.array(list(range(1,max_label+1)),dtype=np.int32)
else:
indexes = np.array(indexes,dtype=np.int32)
if indexes.shape[0] == 0:
return np.zeros((0,2)),np.zeros((0,))
if hull_and_point_count is None:
hull, point_count = convex_hull(labels, indexes)
else:
hull, point_count = hull_and_point_count
centers = np.zeros((len(indexes),2))
radii = np.zeros((len(indexes),))
#
# point_index is the index to the first point in "hull" for a label
#
point_index = np.zeros((indexes.shape[0],),int)
point_index[1:] = np.cumsum(point_count[:-1])
#########################################################################
#
# The algorithm is this:
# * Choose a line S from S0 to S1 at random from the set of adjacent
# S0 and S1
# * For every vertex (V) other than S, compute the angle from S0
# to V to S1. If this angle is obtuse, the vertex V lies within the
# minimum enclosing circle and can be ignored.
# * Find the minimum angle for all V.
# If the minimum angle is obtuse, stop and accept S as the diameter of
# the circle.
# * If the vertex with the minimum angle makes angles S0-S1-V and
# S1-S0-V that are acute or right, then take S0, S1 and V as the
# triangle within the circumscribed minimum enclosing circle.
# * Otherwise, find the largest obtuse angle among S0-S1-V and
# S1-S0-V (V is the vertex with the minimum angle, not all of them).
# If S0-S1-V is obtuse, make V the new S1, otherwise make V the new S0
#
##########################################################################
#
# anti_indexes is used to transform a label # into an index in the above array
# anti_indexes_per_point gives the label index of any vertex
#
anti_indexes=np.zeros((np.max(indexes)+1,),int)
anti_indexes[indexes] = list(range(indexes.shape[0]))
anti_indexes_per_point = anti_indexes[hull[:,0]]
#
# Start out by eliminating the degenerate cases: 0, 1 and 2
#
centers[point_count==0,:]= np.NaN
if np.all(point_count == 0):
# Bail if there are no points in any hull to prevent
# index failures below.
return centers,radii
centers[point_count==1,:]=hull[point_index[point_count==1],1:]
radii[point_count < 2]=0
centers[point_count==2,:]=(hull[point_index[point_count==2],1:]+
hull[point_index[point_count==2]+1,1:])/2
distance = centers[point_count==2,:] - hull[point_index[point_count==2],1:]
radii[point_count==2]=np.sqrt(distance[:,0]**2+distance[:,1]**2)
#
# Get rid of the degenerate points
#
keep_me = point_count > 2
#
# Pick S0 as the first point in each label
# and S1 as the second.
#
s0_idx = point_index.copy()
s1_idx = s0_idx+1
#
# number each of the points in a label with an index # which gives
# the order in which we'll get their angles. We use this to pick out
# points # 2 to N which are the candidate vertices to S
#
within_label_indexes = (np.array(list(range(hull.shape[0])),int) -
point_index[anti_indexes_per_point])
while(np.any(keep_me)):
#############################################################
# Label indexing for active labels
#############################################################
#
# labels_to_consider contains the labels of the objects which
# have not been completed
#
labels_to_consider = indexes[keep_me]
#
# anti_indexes_to_consider gives the index into any vector
# shaped similarly to labels_to_consider (for instance, min_angle
# below) for every label in labels_to_consider.
#
anti_indexes_to_consider =\
np.zeros((np.max(labels_to_consider)+1,),int)
anti_indexes_to_consider[labels_to_consider] = \
np.array(list(range(labels_to_consider.shape[0])))
##############################################################
# Vertex indexing for active vertexes other than S0 and S1
##############################################################
#
# The vertices are hull-points with indexes of 2 or more
# keep_me_vertices is a mask of the vertices to operate on
# during this iteration
#
keep_me_vertices = np.logical_and(keep_me[anti_indexes_per_point],
within_label_indexes >= 2)
#
# v is the vertex coordinates for each vertex considered
#
v = hull[keep_me_vertices,1:]
#
# v_labels is the label from the label matrix for each vertex
#
v_labels = hull[keep_me_vertices,0]
#
# v_indexes is the index into "hull" for each vertex (and similarly
# shaped vectors such as within_label_indexes
#
v_indexes=np.argwhere(keep_me_vertices).flatten().astype(np.int32)
#
# anti_indexes_per_vertex gives the index into "indexes" and
# any similarly shaped array of per-label values
# (for instance s0_idx) for each vertex being considered
#
anti_indexes_per_vertex = anti_indexes_per_point[keep_me_vertices]
#
# anti_indexes_to_consider_per_vertex gives the index into any
# vector shaped similarly to labels_to_consider for each
# vertex being analyzed
#
anti_indexes_to_consider_per_vertex = anti_indexes_to_consider[v_labels]
#
# Get S0 and S1 per vertex
#
s0 = hull[s0_idx[keep_me],1:]
s1 = hull[s1_idx[keep_me],1:]
s0 = s0[anti_indexes_to_consider_per_vertex]
s1 = s1[anti_indexes_to_consider_per_vertex]
#
# Compute the angle S0-S1-V
#
# the first vector of the angles is between S0 and S1
#
s01 = (s0 - s1).astype(float)
#
# compute V-S1 and V-S0 at each of the vertices to be considered
#
vs0 = (v - s0).astype(float)
vs1 = (v - s1).astype(float)
#
# Take the dot product of s01 and vs1 divided by the length of s01 *
# the length of vs1. This gives the cosine of the angle between.
#
dot_vs1s0 = (np.sum(s01*vs1,1) /
np.sqrt(np.sum(s01**2,1)*np.sum(vs1**2,1)))
angle_vs1s0 = np.abs(np.arccos(dot_vs1s0))
s10 = -s01
dot_vs0s1 = (np.sum(s10*vs0,1) /
np.sqrt(np.sum(s01**2,1)*np.sum(vs0**2,1)))
angle_vs0s1 = np.abs(np.arccos(dot_vs0s1))
#
# S0-V-S1 is pi - the other two
#
angle_s0vs1 = np.pi - angle_vs1s0 - angle_vs0s1
assert np.all(angle_s0vs1 >= 0)
#
# Now we find the minimum angle per label
#
min_angle = scind.minimum(angle_s0vs1,v_labels,
labels_to_consider)
min_angle = fixup_scipy_ndimage_result(min_angle)
min_angle_per_vertex = min_angle[anti_indexes_to_consider_per_vertex]
#
# Calculate the index into V of the minimum angle per label.
# Use "indexes" instead of labels_to_consider so we get something
# with the same shape as keep_me
#
min_position = scind.minimum_position(angle_s0vs1,v_labels,
indexes)
min_position = fixup_scipy_ndimage_result(min_position).astype(int)
min_position = min_position.flatten()
#
# Case 1: minimum angle is obtuse or right. Accept S as the diameter.
# Case 1a: there are no vertices. Accept S as the diameter.
#
vertex_counts = scind.sum(keep_me_vertices,
hull[:,0],
labels_to_consider)
vertex_counts = fixup_scipy_ndimage_result(vertex_counts)
case_1 = np.logical_or(min_angle >= np.pi / 2,
vertex_counts == 0)
if np.any(case_1):
# convert from a boolean over indexes_to_consider to a boolean
# over indexes
finish_me = np.zeros((indexes.shape[0],),bool)
finish_me[anti_indexes[labels_to_consider[case_1]]] = True
s0_finish_me = hull[s0_idx[finish_me],1:].astype(float)
s1_finish_me = hull[s1_idx[finish_me],1:].astype(float)
centers[finish_me] = (s0_finish_me + s1_finish_me)/2
radii[finish_me] = np.sqrt(np.sum((s0_finish_me -
s1_finish_me)**2,1))/2
keep_me[finish_me] = False
#
# Case 2: all angles for the minimum angle vertex are acute
# or right.
# Pick S0, S1 and the vertex with the
# smallest angle as 3 points on the circle. If you look at the
# geometry, the diameter is the length of S0-S1 divided by
# the cosine of 1/2 of the angle. The center of the circle
# is at the circumcenter of the triangle formed by S0, S1 and
# V.
case_2 = keep_me.copy()
case_2[angle_vs1s0[min_position] > np.pi/2] = False
case_2[angle_vs0s1[min_position] > np.pi/2] = False
case_2[angle_s0vs1[min_position] > np.pi/2] = False
if np.any(case_2):
#
# Wikipedia (http://en.wikipedia.org/wiki/Circumcircle#Cartesian_coordinates)
# gives the following:
# D = 2(S0y Vx + S1y S0x - S1y Vx - S0y S1x - S0x Vy + S1x Vy)
# D = 2(S0x (S1y-Vy) + S1x(Vy-S0y) + Vx(S0y-S1y))
# x = ((S0x**2+S0y**2)(S1y-Vy)+(S1x**2+S1y**2)(Vy-S0y)+(Vx**2+Vy**2)(S0y-S1y)) / D
# y = ((S0x**2+S0y**2)(Vx-S1x)+(S1x**2+S1y**2)(S0x-Vx)+(Vx**2+Vy**2)(S1x-S0x)) / D
#
ss0 = hull[s0_idx[case_2],1:].astype(float)
ss1 = hull[s1_idx[case_2],1:].astype(float)
vv = v[min_position[case_2]].astype(float)
Y = 0
X = 1
D = 2*(ss0[:,X] * (ss1[:,Y] - vv[:,Y]) +
ss1[:,X] * (vv[:,Y] - ss0[:,Y]) +
vv[:,X] * (ss0[:,Y] - ss1[:,Y]))
x = (np.sum(ss0**2,1)*(ss1[:,Y] - vv[:,Y]) +
np.sum(ss1**2,1)*(vv[:,Y] - ss0[:,Y]) +
np.sum(vv**2,1) *(ss0[:,Y] - ss1[:,Y])) / D
y = (np.sum(ss0**2,1)*(vv[:,X] - ss1[:,X]) +
np.sum(ss1**2,1)*(ss0[:,X] - vv[:,X]) +
np.sum(vv**2,1) *(ss1[:,X] - ss0[:,X])) / D
centers[case_2,X] = x
centers[case_2,Y] = y
distances = ss0-centers[case_2]
radii[case_2] = np.sqrt(np.sum(distances**2,1))
keep_me[case_2] = False
#
# Finally, for anybody who's left, for each of S0-S1-V and
# S1-S0-V, for V, the vertex with the minimum angle,
# find the largest obtuse angle. The vertex of this
# angle (S0 or S1) is inside the enclosing circle, so take V
# and either S1 or S0 as the new S.
#
# This involves a relabeling of within_label_indexes. We replace
# either S0 or S1 with V and assign V either 0 or 1
#
if np.any(keep_me):
labels_to_consider = indexes[keep_me]
indexes_to_consider = anti_indexes[labels_to_consider]
#
# Index into within_label_indexes for each V with the
# smallest angle
#
v_obtuse_indexes = v_indexes[min_position[keep_me]]
angle_vs0s1_to_consider = angle_vs0s1[min_position[keep_me]]
angle_vs1s0_to_consider = angle_vs1s0[min_position[keep_me]]
#
# Do the cases where S0 is larger
#
s0_is_obtuse = angle_vs0s1_to_consider > np.pi/2
if np.any(s0_is_obtuse):
#
# The index of the obtuse S0
#
v_obtuse_s0_indexes = v_obtuse_indexes[s0_is_obtuse]
obtuse_s0_idx = s0_idx[indexes_to_consider[s0_is_obtuse]]
#
# S0 gets the within_label_index of the vertex
#
within_label_indexes[obtuse_s0_idx] = \
within_label_indexes[v_obtuse_s0_indexes]
#
# Assign V as the new S0
#
s0_idx[indexes_to_consider[s0_is_obtuse]] = v_obtuse_s0_indexes
within_label_indexes[v_obtuse_s0_indexes] = 0
#
# Do the cases where S1 is larger
#
s1_is_obtuse = np.logical_not(s0_is_obtuse)
if np.any(s1_is_obtuse):
#
# The index of the obtuse S1
#
v_obtuse_s1_indexes = v_obtuse_indexes[s1_is_obtuse]
obtuse_s1_idx = s1_idx[indexes_to_consider[s1_is_obtuse]]
#
# S1 gets V's within_label_index and goes onto the list
# of considered vertices.
#
within_label_indexes[obtuse_s1_idx] = \
within_label_indexes[v_obtuse_s1_indexes]
#
# Assign V as the new S1
#
s1_idx[indexes_to_consider[s1_is_obtuse]] = v_obtuse_s1_indexes
within_label_indexes[v_obtuse_s1_indexes] = 1
return centers, radii |
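To make the case-2 circumcenter step concrete, here is a minimal scalar sketch of the same Cartesian formula for a single triangle; it was written for this note and is not part of the original module.

import numpy as np

def circumcircle(s0, s1, v):
    # scalar version of the vectorized case-2 computation above
    (ax, ay), (bx, by), (cx, cy) = s0, s1, v
    d = 2.0 * (ax * (by - cy) + bx * (cy - ay) + cx * (ay - by))
    ux = ((ax ** 2 + ay ** 2) * (by - cy) + (bx ** 2 + by ** 2) * (cy - ay)
          + (cx ** 2 + cy ** 2) * (ay - by)) / d
    uy = ((ax ** 2 + ay ** 2) * (cx - bx) + (bx ** 2 + by ** 2) * (ax - cx)
          + (cx ** 2 + cy ** 2) * (bx - ax)) / d
    return (ux, uy), np.hypot(ax - ux, ay - uy)

# right triangle: the hypotenuse is a diameter, so center (2, 1.5) and radius 2.5
center, radius = circumcircle((0, 0), (4, 0), (0, 3))
assert np.allclose(center, (2, 1.5)) and np.isclose(radius, 2.5)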
def unplug(self):
'''Remove the actor's methods from the callback registry.'''
if not self.__plugged:
return
members = set([method for _, method
in inspect.getmembers(self, predicate=inspect.ismethod)])
for message in global_callbacks:
global_callbacks[message] -= members
self.__plugged = False | Remove the actor's methods from the callback registry. | Below is the instruction that describes the task:
### Input:
Remove the actor's methods from the callback registry.
### Response:
def unplug(self):
'''Remove the actor's methods from the callback registry.'''
if not self.__plugged:
return
members = set([method for _, method
in inspect.getmembers(self, predicate=inspect.ismethod)])
for message in global_callbacks:
global_callbacks[message] -= members
self.__plugged = False |
def read_analogy_file(filename):
"""
Read the analogy task test set from a file.
"""
section = None
with open(filename, 'r') as questions_file:
for line in questions_file:
if line.startswith(':'):
section = line[2:].replace('\n', '')
continue
else:
words = line.replace('\n', '').split(' ')
yield section, words | Read the analogy task test set from a file. | Below is the instruction that describes the task:
### Input:
Read the analogy task test set from a file.
### Response:
def read_analogy_file(filename):
"""
Read the analogy task test set from a file.
"""
section = None
with open(filename, 'r') as questions_file:
for line in questions_file:
if line.startswith(':'):
section = line[2:].replace('\n', '')
continue
else:
words = line.replace('\n', '').split(' ')
yield section, words |
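A small round trip showing the file layout the generator expects (': <section>' header lines followed by space-separated word rows); the file name is arbitrary:

# write a two-line analogy file, then parse it back
with open('questions.txt', 'w') as f:
    f.write(': capital-common-countries\n')
    f.write('Athens Greece Berlin Germany\n')
for section, words in read_analogy_file('questions.txt'):
    print(section, words)  # capital-common-countries ['Athens', 'Greece', 'Berlin', 'Germany']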
def dump_t_coords(dataset_dir, data_dir, dataset, root=None, compress=True):
"""dump vtkjs texture coordinates"""
if root is None:
root = {'pointData': {'arrays': []}}  # the keys accessed below must exist
tcoords = dataset.GetPointData().GetTCoords()
if tcoords:
dumped_array = dump_data_array(dataset_dir, data_dir, tcoords, {}, compress)
root['pointData']['activeTCoords'] = len(root['pointData']['arrays'])
root['pointData']['arrays'].append({'data': dumped_array}) | dump vtkjs texture coordinates | Below is the instruction that describes the task:
### Input:
dump vtkjs texture coordinates
### Response:
def dump_t_coords(dataset_dir, data_dir, dataset, root=None, compress=True):
"""dump vtkjs texture coordinates"""
if root is None:
root = {'pointData': {'arrays': []}}  # the keys accessed below must exist
tcoords = dataset.GetPointData().GetTCoords()
if tcoords:
dumped_array = dump_data_array(dataset_dir, data_dir, tcoords, {}, compress)
root['pointData']['activeTCoords'] = len(root['pointData']['arrays'])
root['pointData']['arrays'].append({'data': dumped_array}) |
def versions(self):
"""Return all version changes."""
versions = []
for v, _ in self.restarts:
if len(versions) == 0 or v != versions[-1]:
versions.append(v)
return versions | Return all version changes. | Below is the instruction that describes the task:
### Input:
Return all version changes.
### Response:
def versions(self):
"""Return all version changes."""
versions = []
for v, _ in self.restarts:
if len(versions) == 0 or v != versions[-1]:
versions.append(v)
return versions |
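A minimal sketch of the deduplication behaviour, assuming `restarts` holds (version, timestamp) pairs as the loop implies; the wrapper class exists only for this example:

class _Log(object):
    def __init__(self, restarts):
        self.restarts = restarts
    versions = versions  # reuse the function above as a method

log = _Log([('1.0', 10), ('1.0', 20), ('1.1', 30), ('1.0', 40)])
assert log.versions() == ['1.0', '1.1', '1.0']  # only consecutive repeats collapse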
def get_ties(G):
"""
If you specify a target that shares a dependency with another target,
both targets need to be updated. This is because running one will resolve
the sha mismatch and sake will think that the other one doesn't have to
run. This is called a "tie". This function will find such ties.
"""
# we are going to make a dictionary whose keys are every dependency
# and whose values are a list of all targets that use that dependency.
# after making the dictionary, values whose length is above one will
# be called "ties"
ties = []
dep_dict = {}
for node in G.nodes(data=True):
if 'dependencies' in node[1]:
for item in node[1]['dependencies']:
if item not in dep_dict:
dep_dict[item] = []
dep_dict[item].append(node[0])
for item in dep_dict:
if len(list(set(dep_dict[item]))) > 1:
ties.append(list(set(dep_dict[item])))
return ties | If you specify a target that shares a dependency with another target,
both targets need to be updated. This is because running one will resolve
the sha mismatch and sake will think that the other one doesn't have to
run. This is called a "tie". This function will find such ties. | Below is the instruction that describes the task:
### Input:
If you specify a target that shares a dependency with another target,
both targets need to be updated. This is because running one will resolve
the sha mismatch and sake will think that the other one doesn't have to
run. This is called a "tie". This function will find such ties.
### Response:
def get_ties(G):
"""
If you specify a target that shares a dependency with another target,
both targets need to be updated. This is because running one will resolve
the sha mismatch and sake will think that the other one doesn't have to
run. This is called a "tie". This function will find such ties.
"""
# we are going to make a dictionary whose keys are every dependency
# and whose values are a list of all targets that use that dependency.
# after making the dictionary, values whose length is above one will
# be called "ties"
ties = []
dep_dict = {}
for node in G.nodes(data=True):
if 'dependencies' in node[1]:
for item in node[1]['dependencies']:
if item not in dep_dict:
dep_dict[item] = []
dep_dict[item].append(node[0])
for item in dep_dict:
if len(list(set(dep_dict[item]))) > 1:
ties.append(list(set(dep_dict[item])))
return ties |
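A toy build graph showing a tie; networkx is assumed here because `G.nodes(data=True)` above is its API:

import networkx as nx

G = nx.DiGraph()
G.add_node('data.csv')                            # the shared dependency
G.add_node('report', dependencies=['data.csv'])
G.add_node('plots', dependencies=['data.csv'])
print(get_ties(G))  # [['report', 'plots']] -- order within a tie is unspecified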
def join(self, _id):
""" Join a room """
if not SockJSRoomHandler._room.has_key(self._gcls() + _id):
SockJSRoomHandler._room[self._gcls() + _id] = set()
SockJSRoomHandler._room[self._gcls() + _id].add(self) | Join a room | Below is the instruction that describes the task:
### Input:
Join a room
### Response:
def join(self, _id):
""" Join a room """
if not SockJSRoomHandler._room.has_key(self._gcls() + _id):
SockJSRoomHandler._room[self._gcls() + _id] = set()
SockJSRoomHandler._room[self._gcls() + _id].add(self) |
def addDEX(self, filename, data, dx=None):
"""
Add a DEX file to the Session and run analysis.
:param filename: the (file)name of the DEX file
:param data: binary data of the dex file
:param dx: an existing Analysis Object (optional)
:return: A tuple of SHA256 Hash, DalvikVMFormat Object and Analysis object
"""
digest = hashlib.sha256(data).hexdigest()
log.debug("add DEX:%s" % digest)
log.debug("Parsing format ...")
d = DalvikVMFormat(data)
log.debug("added DEX:%s" % digest)
self.analyzed_files[filename].append(digest)
self.analyzed_digest[digest] = filename
self.analyzed_dex[digest] = d
if dx is None:
dx = Analysis()
dx.add(d)
dx.create_xref()
# TODO: If multidex: this will called many times per dex, even if already set
for d in dx.vms:
# TODO: allow different decompiler here!
d.set_decompiler(DecompilerDAD(d, dx))
d.set_vmanalysis(dx)
self.analyzed_vms[digest] = dx
if self.export_ipython:
log.debug("Exporting in ipython")
d.create_python_export()
return digest, d, dx | Add a DEX file to the Session and run analysis.
:param filename: the (file)name of the DEX file
:param data: binary data of the dex file
:param dx: an existing Analysis Object (optional)
:return: A tuple of SHA256 Hash, DalvikVMFormat Object and Analysis object | Below is the instruction that describes the task:
### Input:
Add a DEX file to the Session and run analysis.
:param filename: the (file)name of the DEX file
:param data: binary data of the dex file
:param dx: an existing Analysis Object (optional)
:return: A tuple of SHA256 Hash, DalvikVMFormat Object and Analysis object
### Response:
def addDEX(self, filename, data, dx=None):
"""
Add a DEX file to the Session and run analysis.
:param filename: the (file)name of the DEX file
:param data: binary data of the dex file
:param dx: an existing Analysis Object (optional)
:return: A tuple of SHA256 Hash, DalvikVMFormat Object and Analysis object
"""
digest = hashlib.sha256(data).hexdigest()
log.debug("add DEX:%s" % digest)
log.debug("Parsing format ...")
d = DalvikVMFormat(data)
log.debug("added DEX:%s" % digest)
self.analyzed_files[filename].append(digest)
self.analyzed_digest[digest] = filename
self.analyzed_dex[digest] = d
if dx is None:
dx = Analysis()
dx.add(d)
dx.create_xref()
# TODO: If multidex: this will called many times per dex, even if already set
for d in dx.vms:
# TODO: allow different decompiler here!
d.set_decompiler(DecompilerDAD(d, dx))
d.set_vmanalysis(dx)
self.analyzed_vms[digest] = dx
if self.export_ipython:
log.debug("Exporting in ipython")
d.create_python_export()
return digest, d, dx |
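A hedged usage sketch built only from the signature above; the bare Session() construction and the dex path are assumptions:

s = Session()  # hypothetical: the class defining addDEX above
with open('classes.dex', 'rb') as f:
    digest, d, dx = s.addDEX('classes.dex', f.read())
print(digest)  # SHA256 of the raw dex bytes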
def add(setname=None, entry=None, family='ipv4', **kwargs):
'''
Append an entry to the specified set.
CLI Example:
.. code-block:: bash
salt '*' ipset.add setname 192.168.1.26
salt '*' ipset.add setname 192.168.0.3,AA:BB:CC:DD:EE:FF
'''
if not setname:
return 'Error: Set needs to be specified'
if not entry:
return 'Error: Entry needs to be specified'
setinfo = _find_set_info(setname)
if not setinfo:
return 'Error: Set {0} does not exist'.format(setname)
settype = setinfo['Type']
cmd = '{0}'.format(entry)
if 'timeout' in kwargs:
if 'timeout' not in setinfo['Header']:
return 'Error: Set {0} not created with timeout support'.format(setname)
if 'packets' in kwargs or 'bytes' in kwargs:
if 'counters' not in setinfo['Header']:
return 'Error: Set {0} not created with counters support'.format(setname)
if 'comment' in kwargs:
if 'comment' not in setinfo['Header']:
return 'Error: Set {0} not created with comment support'.format(setname)
if 'comment' not in entry:
cmd = '{0} comment "{1}"'.format(cmd, kwargs['comment'])
if set(['skbmark', 'skbprio', 'skbqueue']) & set(kwargs):
if 'skbinfo' not in setinfo['Header']:
return 'Error: Set {0} not created with skbinfo support'.format(setname)
for item in _ADD_OPTIONS[settype]:
if item in kwargs:
cmd = '{0} {1} {2}'.format(cmd, item, kwargs[item])
current_members = _find_set_members(setname)
if cmd in current_members:
return 'Warn: Entry {0} already exists in set {1}'.format(cmd, setname)
# Using -exist to ensure entries are updated if the comment changes
cmd = '{0} add -exist {1} {2}'.format(_ipset_cmd(), setname, cmd)
out = __salt__['cmd.run'](cmd, python_shell=False)
if not out:
return 'Success'
return 'Error: {0}'.format(out) | Append an entry to the specified set.
CLI Example:
.. code-block:: bash
salt '*' ipset.add setname 192.168.1.26
salt '*' ipset.add setname 192.168.0.3,AA:BB:CC:DD:EE:FF | Below is the instruction that describes the task:
### Input:
Append an entry to the specified set.
CLI Example:
.. code-block:: bash
salt '*' ipset.add setname 192.168.1.26
salt '*' ipset.add setname 192.168.0.3,AA:BB:CC:DD:EE:FF
### Response:
def add(setname=None, entry=None, family='ipv4', **kwargs):
'''
Append an entry to the specified set.
CLI Example:
.. code-block:: bash
salt '*' ipset.add setname 192.168.1.26
salt '*' ipset.add setname 192.168.0.3,AA:BB:CC:DD:EE:FF
'''
if not setname:
return 'Error: Set needs to be specified'
if not entry:
return 'Error: Entry needs to be specified'
setinfo = _find_set_info(setname)
if not setinfo:
return 'Error: Set {0} does not exist'.format(setname)
settype = setinfo['Type']
cmd = '{0}'.format(entry)
if 'timeout' in kwargs:
if 'timeout' not in setinfo['Header']:
return 'Error: Set {0} not created with timeout support'.format(setname)
if 'packets' in kwargs or 'bytes' in kwargs:
if 'counters' not in setinfo['Header']:
return 'Error: Set {0} not created with counters support'.format(setname)
if 'comment' in kwargs:
if 'comment' not in setinfo['Header']:
return 'Error: Set {0} not created with comment support'.format(setname)
if 'comment' not in entry:
cmd = '{0} comment "{1}"'.format(cmd, kwargs['comment'])
if set(['skbmark', 'skbprio', 'skbqueue']) & set(kwargs):
if 'skbinfo' not in setinfo['Header']:
return 'Error: Set {0} not created with skbinfo support'.format(setname)
for item in _ADD_OPTIONS[settype]:
if item in kwargs:
cmd = '{0} {1} {2}'.format(cmd, item, kwargs[item])
current_members = _find_set_members(setname)
if cmd in current_members:
return 'Warn: Entry {0} already exists in set {1}'.format(cmd, setname)
# Using -exist to ensure entries are updated if the comment changes
cmd = '{0} add -exist {1} {2}'.format(_ipset_cmd(), setname, cmd)
out = __salt__['cmd.run'](cmd, python_shell=False)
if not out:
return 'Success'
return 'Error: {0}'.format(out) |
def _create_binary_trigger(trigger):
"""Create an 8-bit binary trigger from an InputTrigger, TrueTrigger, FalseTrigger."""
ops = {
0: ">",
1: "<",
2: ">=",
3: "<=",
4: "==",
5: 'always'
}
op_codes = {y: x for x, y in ops.items()}
source = 0
if isinstance(trigger, TrueTrigger):
op_code = op_codes['always']
elif isinstance(trigger, FalseTrigger):
raise ArgumentError("Cannot express a never trigger in binary descriptor", trigger=trigger)
else:
op_code = op_codes[trigger.comp_string]
if trigger.use_count:
source = 1
return (op_code << 1) | source | Create an 8-bit binary trigger from an InputTrigger, TrueTrigger, FalseTrigger. | Below is the instruction that describes the task:
### Input:
Create an 8-bit binary trigger from an InputTrigger, TrueTrigger, FalseTrigger.
### Response:
def _create_binary_trigger(trigger):
"""Create an 8-bit binary trigger from an InputTrigger, TrueTrigger, FalseTrigger."""
ops = {
0: ">",
1: "<",
2: ">=",
3: "<=",
4: "==",
5: 'always'
}
op_codes = {y: x for x, y in ops.items()}
source = 0
if isinstance(trigger, TrueTrigger):
op_code = op_codes['always']
elif isinstance(trigger, FalseTrigger):
raise ArgumentError("Cannot express a never trigger in binary descriptor", trigger=trigger)
else:
op_code = op_codes[trigger.comp_string]
if trigger.use_count:
source = 1
return (op_code << 1) | source |
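A sketch of the resulting bit layout, using a hypothetical stand-in object with the two attributes the encoder reads (the real InputTrigger/TrueTrigger/FalseTrigger classes live in the surrounding module):

# Hypothetical stand-in; it is not a TrueTrigger/FalseTrigger instance,
# so the else branch reads comp_string and use_count.
class FakeInputTrigger:
    comp_string = ">="  # op_code 2 in the table above
    use_count = True    # source bit set to 1

# With the real module imported, _create_binary_trigger(FakeInputTrigger())
# returns (2 << 1) | 1 == 5, i.e. 0b101.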
def build_dummy_request(newsitem):
"""
Construct a HttpRequest object that is, as far as possible,
representative of ones that would receive this page as a response. Used
for previewing / moderation and any other place where we want to
display a view of this page in the admin interface without going
through the regular page routing logic.
"""
url = newsitem.full_url
if url:
url_info = urlparse(url)
hostname = url_info.hostname
path = url_info.path
port = url_info.port or 80
else:
# Cannot determine a URL to this page - cobble one together based on
# whatever we find in ALLOWED_HOSTS
try:
hostname = settings.ALLOWED_HOSTS[0]
except IndexError:
hostname = 'localhost'
path = '/'
port = 80
request = WSGIRequest({
'REQUEST_METHOD': 'GET',
'PATH_INFO': path,
'SERVER_NAME': hostname,
'SERVER_PORT': port,
'HTTP_HOST': hostname,
'wsgi.input': StringIO(),
})
# Apply middleware to the request - see http://www.mellowmorning.com/2011/04/18/mock-django-request-for-testing/
handler = BaseHandler()
handler.load_middleware()
# call each middleware in turn and throw away any responses that they might return
if hasattr(handler, '_request_middleware'):
for middleware_method in handler._request_middleware:
middleware_method(request)
else:
handler.get_response(request)
return request | Construct a HttpRequest object that is, as far as possible,
representative of ones that would receive this page as a response. Used
for previewing / moderation and any other place where we want to
display a view of this page in the admin interface without going
through the regular page routing logic. | Below is the instruction that describes the task:
### Input:
Construct a HttpRequest object that is, as far as possible,
representative of ones that would receive this page as a response. Used
for previewing / moderation and any other place where we want to
display a view of this page in the admin interface without going
through the regular page routing logic.
### Response:
def build_dummy_request(newsitem):
"""
Construct a HttpRequest object that is, as far as possible,
representative of ones that would receive this page as a response. Used
for previewing / moderation and any other place where we want to
display a view of this page in the admin interface without going
through the regular page routing logic.
"""
url = newsitem.full_url
if url:
url_info = urlparse(url)
hostname = url_info.hostname
path = url_info.path
port = url_info.port or 80
else:
# Cannot determine a URL to this page - cobble one together based on
# whatever we find in ALLOWED_HOSTS
try:
hostname = settings.ALLOWED_HOSTS[0]
except IndexError:
hostname = 'localhost'
path = '/'
port = 80
request = WSGIRequest({
'REQUEST_METHOD': 'GET',
'PATH_INFO': path,
'SERVER_NAME': hostname,
'SERVER_PORT': port,
'HTTP_HOST': hostname,
'wsgi.input': StringIO(),
})
# Apply middleware to the request - see http://www.mellowmorning.com/2011/04/18/mock-django-request-for-testing/
handler = BaseHandler()
handler.load_middleware()
# call each middleware in turn and throw away any responses that they might return
if hasattr(handler, '_request_middleware'):
for middleware_method in handler._request_middleware:
middleware_method(request)
else:
handler.get_response(request)
return request |
def tzinfo_eq(tzinfo1, tzinfo2, startYear = 2000, endYear=2020):
"""
Compare offsets and DST transitions from startYear to endYear.
"""
if tzinfo1 == tzinfo2:
return True
elif tzinfo1 is None or tzinfo2 is None:
return False
def dt_test(dt):
if dt is None:
return True
return tzinfo1.utcoffset(dt) == tzinfo2.utcoffset(dt)
if not dt_test(datetime.datetime(startYear, 1, 1)):
return False
for year in range(startYear, endYear):
for transitionTo in 'daylight', 'standard':
t1=getTransition(transitionTo, year, tzinfo1)
t2=getTransition(transitionTo, year, tzinfo2)
if t1 != t2 or not dt_test(t1):
return False
return True | Compare offsets and DST transitions from startYear to endYear. | Below is the instruction that describes the task:
### Input:
Compare offsets and DST transitions from startYear to endYear.
### Response:
def tzinfo_eq(tzinfo1, tzinfo2, startYear = 2000, endYear=2020):
"""
Compare offsets and DST transitions from startYear to endYear.
"""
if tzinfo1 == tzinfo2:
return True
elif tzinfo1 is None or tzinfo2 is None:
return False
def dt_test(dt):
if dt is None:
return True
return tzinfo1.utcoffset(dt) == tzinfo2.utcoffset(dt)
if not dt_test(datetime.datetime(startYear, 1, 1)):
return False
for year in range(startYear, endYear):
for transitionTo in 'daylight', 'standard':
t1=getTransition(transitionTo, year, tzinfo1)
t2=getTransition(transitionTo, year, tzinfo2)
if t1 != t2 or not dt_test(t1):
return False
return True |
def resolve_metric_as_tuple(metric):
"""
Resolve metric key to a given target.
:param metric: the metric name.
:type metric: ``str``
:rtype: :class:`Metric`
"""
if "." in metric:
_, metric = metric.split(".")
r = [
(operator, match) for operator, match in ALL_METRICS if match[0] == metric
]
if not r:
raise ValueError(f"Metric {metric} not recognised.")
else:
return r[0] | Resolve metric key to a given target.
:param metric: the metric name.
:type metric: ``str``
:rtype: :class:`Metric` | Below is the instruction that describes the task:
### Input:
Resolve metric key to a given target.
:param metric: the metric name.
:type metric: ``str``
:rtype: :class:`Metric`
### Response:
def resolve_metric_as_tuple(metric):
"""
Resolve metric key to a given target.
:param metric: the metric name.
:type metric: ``str``
:rtype: :class:`Metric`
"""
if "." in metric:
_, metric = metric.split(".")
r = [
(operator, match) for operator, match in ALL_METRICS if match[0] == metric
]
if not r:
raise ValueError(f"Metric {metric} not recognised.")
else:
return r[0] |
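A sketch of the registry shape this lookup implies: ALL_METRICS as an iterable of (operator, match) pairs with the metric name at match[0]. The entries below are illustrative assumptions, not the library's actual table:

import operator

# Hypothetical registry shaped the way the comprehension above expects.
ALL_METRICS = [
    (operator.gt, ("complexity", "Cyclomatic complexity")),
    (operator.lt, ("maintainability", "Maintainability index")),
]

# resolve_metric_as_tuple("raw.complexity") strips the "raw." prefix and
# would return (operator.gt, ("complexity", "Cyclomatic complexity")).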
def versions_information(include_salt_cloud=False):
'''
Report the versions of dependent software.
'''
salt_info = list(salt_information())
lib_info = list(dependency_information(include_salt_cloud))
sys_info = list(system_information())
return {'Salt Version': dict(salt_info),
'Dependency Versions': dict(lib_info),
'System Versions': dict(sys_info)} | Report the versions of dependent software. | Below is the instruction that describes the task:
### Input:
Report the versions of dependent software.
### Response:
def versions_information(include_salt_cloud=False):
'''
Report the versions of dependent software.
'''
salt_info = list(salt_information())
lib_info = list(dependency_information(include_salt_cloud))
sys_info = list(system_information())
return {'Salt Version': dict(salt_info),
'Dependency Versions': dict(lib_info),
'System Versions': dict(sys_info)} |
def from_hsv(cls, h, s, v):
"""Constructs a :class:`Colour` from an HSV tuple."""
rgb = colorsys.hsv_to_rgb(h, s, v)
return cls.from_rgb(*(int(x * 255) for x in rgb)) | Constructs a :class:`Colour` from an HSV tuple. | Below is the instruction that describes the task:
### Input:
Constructs a :class:`Colour` from an HSV tuple.
### Response:
def from_hsv(cls, h, s, v):
"""Constructs a :class:`Colour` from an HSV tuple."""
rgb = colorsys.hsv_to_rgb(h, s, v)
return cls.from_rgb(*(int(x * 255) for x in rgb)) |
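A self-contained sketch of the classmethod pattern with a minimal stand-in Colour class; only a from_rgb packing 24-bit RGB is assumed here, the real class carries more machinery:

import colorsys

class Colour:
    # Minimal stand-in: packs RGB into a single 24-bit integer value.
    def __init__(self, value):
        self.value = value

    @classmethod
    def from_rgb(cls, r, g, b):
        return cls((r << 16) + (g << 8) + b)

    @classmethod
    def from_hsv(cls, h, s, v):
        rgb = colorsys.hsv_to_rgb(h, s, v)
        return cls.from_rgb(*(int(x * 255) for x in rgb))

print(hex(Colour.from_hsv(0.0, 1.0, 1.0).value))  # 0xff0000, pure red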
def generate_name(length=15, not_in=None):
"""
Generates a random string of lowercase letters with the given length.
Parameters:
length (int): Length of the string to output.
not_in (list): Only return a string not in the given iterator.
Returns:
str: A new name that's not in the given list.
"""
value = ''.join(random.choice(string.ascii_lowercase) for i in range(length))
while (not_in is not None) and (value in not_in):
value = ''.join(random.choice(string.ascii_lowercase) for i in range(length))
return value | Generates a random string of lowercase letters with the given length.
Parameters:
length (int): Length of the string to output.
not_in (list): Only return a string not in the given iterator.
Returns:
str: A new name that's not in the given list. | Below is the instruction that describes the task:
### Input:
Generates a random string of lowercase letters with the given length.
Parameters:
length (int): Length of the string to output.
not_in (list): Only return a string not in the given iterator.
Returns:
str: A new name that's not in the given list.
### Response:
def generate_name(length=15, not_in=None):
"""
Generates a random string of lowercase letters with the given length.
Parameters:
length (int): Length of the string to output.
not_in (list): Only return a string not in the given iterator.
Returns:
str: A new name that's not in the given list.
"""
value = ''.join(random.choice(string.ascii_lowercase) for i in range(length))
while (not_in is not None) and (value in not_in):
value = ''.join(random.choice(string.ascii_lowercase) for i in range(length))
return value |
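A quick usage sketch, assuming generate_name from the record above is in scope together with its random and string imports:

taken = {"alice", "bob"}
name = generate_name(length=5, not_in=taken)
assert len(name) == 5 and name not in taken and name.islower()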
def telemetry_client(self, value: BotTelemetryClient) -> None:
"""
Sets the telemetry client for logging events.
"""
if value is None:
self._telemetry_client = NullTelemetryClient()
else:
self._telemetry_client = value | Sets the telemetry client for logging events. | Below is the instruction that describes the task:
### Input:
Sets the telemetry client for logging events.
### Response:
def telemetry_client(self, value: BotTelemetryClient) -> None:
"""
Sets the telemetry client for logging events.
"""
if value is None:
self._telemetry_client = NullTelemetryClient()
else:
self._telemetry_client = value |
def wrap_callable(cls, uri, methods, callable_obj):
"""Wraps function-based callable_obj into a `Route` instance, else
proxies a `bottle_neck.handlers.BaseHandler` subclass instance.
Args:
uri (str): The uri relative path.
methods (tuple): A tuple of valid method strings.
callable_obj (instance): The callable object.
Returns:
A route instance.
Raises:
RouteError for invalid callable object type.
"""
if isinstance(callable_obj, HandlerMeta):
callable_obj.base_endpoint = uri
callable_obj.is_valid = True
return callable_obj
if isinstance(callable_obj, types.FunctionType):
return cls(uri=uri, methods=methods, callable_obj=callable_obj)
raise RouteError("Invalid handler type.") | Wraps function-based callable_obj into a `Route` instance, else
proxies a `bottle_neck.handlers.BaseHandler` subclass instance.
Args:
uri (str): The uri relative path.
methods (tuple): A tuple of valid method strings.
callable_obj (instance): The callable object.
Returns:
A route instance.
Raises:
RouteError for invalid callable object type. | Below is the instruction that describes the task:
### Input:
Wraps function-based callable_obj into a `Route` instance, else
proxies a `bottle_neck.handlers.BaseHandler` subclass instance.
Args:
uri (str): The uri relative path.
methods (tuple): A tuple of valid method strings.
callable_obj (instance): The callable object.
Returns:
A route instance.
Raises:
RouteError for invalid callable object type.
### Response:
def wrap_callable(cls, uri, methods, callable_obj):
"""Wraps function-based callable_obj into a `Route` instance, else
proxies a `bottle_neck.handlers.BaseHandler` subclass instance.
Args:
uri (str): The uri relative path.
methods (tuple): A tuple of valid method strings.
callable_obj (instance): The callable object.
Returns:
A route instance.
Raises:
RouteError for invalid callable object type.
"""
if isinstance(callable_obj, HandlerMeta):
callable_obj.base_endpoint = uri
callable_obj.is_valid = True
return callable_obj
if isinstance(callable_obj, types.FunctionType):
return cls(uri=uri, methods=methods, callable_obj=callable_obj)
raise RouteError("Invalid handler type.") |
def modules_directory():
"""
Get the core modules directory.
"""
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "modules") | Get the core modules directory. | Below is the instruction that describes the task:
### Input:
Get the core modules directory.
### Response:
def modules_directory():
"""
Get the core modules directory.
"""
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "modules") |
def resource_path(relative_path=None, expect=None):
"""Return the absolute path to a resource in iotile-build.
This method finds the path to the `config` folder inside
iotile-build, appends `relative_path` to it and then
checks to make sure the desired file or directory exists.
You can specify expect=(None, 'file', or 'folder') for
what you expect to find at the given path.
Args:
relative_path (str): The relative_path from the config
folder to the resource in question. This path can
be specified using / characters on all operating
systems since it will be normalized before usage.
If None is passed, the based config folder will
be returned.
expect (str): What the path should resolve to, which is
checked before returning, raising a DataError if
the check fails. You can pass None for no checking,
file for checking `os.path.isfile`, or folder for
checking `os.path.isdir`. Default: None
Returns:
str: The normalized absolute path to the resource.
"""
if expect not in (None, 'file', 'folder'):
raise ArgumentError("Invalid expect parameter, must be None, 'file' or 'folder'",
expect=expect)
this_dir = os.path.dirname(__file__)
_resource_path = os.path.join(this_dir, '..', 'config')
if relative_path is not None:
path = os.path.normpath(relative_path)
_resource_path = os.path.join(_resource_path, path)
if expect == 'file' and not os.path.isfile(_resource_path):
raise DataError("Expected resource %s to be a file and it wasn't" % _resource_path)
elif expect == 'folder' and not os.path.isdir(_resource_path):
raise DataError("Expected resource %s to be a folder and it wasn't" % _resource_path)
return os.path.abspath(_resource_path) | Return the absolute path to a resource in iotile-build.
This method finds the path to the `config` folder inside
iotile-build, appends `relative_path` to it and then
checks to make sure the desired file or directory exists.
You can specify expect=(None, 'file', or 'folder') for
what you expect to find at the given path.
Args:
relative_path (str): The relative_path from the config
folder to the resource in question. This path can
be specified using / characters on all operating
systems since it will be normalized before usage.
If None is passed, the base config folder will
be returned.
expect (str): What the path should resolve to, which is
checked before returning, raising a DataError if
the check fails. You can pass None for no checking,
file for checking `os.path.isfile`, or folder for
checking `os.path.isdir`. Default: None
Returns:
str: The normalized absolute path to the resource. | Below is the instruction that describes the task:
### Input:
Return the absolute path to a resource in iotile-build.
This method finds the path to the `config` folder inside
iotile-build, appends `relative_path` to it and then
checks to make sure the desired file or directory exists.
You can specify expect=(None, 'file', or 'folder') for
what you expect to find at the given path.
Args:
relative_path (str): The relative_path from the config
folder to the resource in question. This path can
be specified using / characters on all operating
systems since it will be normalized before usage.
If None is passed, the base config folder will
be returned.
expect (str): What the path should resolve to, which is
checked before returning, raising a DataError if
the check fails. You can pass None for no checking,
file for checking `os.path.isfile`, or folder for
checking `os.path.isdir`. Default: None
Returns:
str: The normalized absolute path to the resource.
### Response:
def resource_path(relative_path=None, expect=None):
"""Return the absolute path to a resource in iotile-build.
This method finds the path to the `config` folder inside
iotile-build, appends `relative_path` to it and then
checks to make sure the desired file or directory exists.
You can specify expect=(None, 'file', or 'folder') for
what you expect to find at the given path.
Args:
relative_path (str): The relative_path from the config
folder to the resource in question. This path can
be specified using / characters on all operating
systems since it will be normalized before usage.
If None is passed, the base config folder will
be returned.
expect (str): What the path should resolve to, which is
checked before returning, raising a DataError if
the check fails. You can pass None for no checking,
file for checking `os.path.isfile`, or folder for
checking `os.path.isdir`. Default: None
Returns:
str: The normalized absolute path to the resource.
"""
if expect not in (None, 'file', 'folder'):
raise ArgumentError("Invalid expect parameter, must be None, 'file' or 'folder'",
expect=expect)
this_dir = os.path.dirname(__file__)
_resource_path = os.path.join(this_dir, '..', 'config')
if relative_path is not None:
path = os.path.normpath(relative_path)
_resource_path = os.path.join(_resource_path, path)
if expect == 'file' and not os.path.isfile(_resource_path):
raise DataError("Expected resource %s to be a file and it wasn't" % _resource_path)
elif expect == 'folder' and not os.path.isdir(_resource_path):
raise DataError("Expected resource %s to be a folder and it wasn't" % _resource_path)
return os.path.abspath(_resource_path) |
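A usage sketch; "templates" below is an illustrative resource name, not a documented resource of iotile-build:

# Run inside the package's context.
config_dir = resource_path()  # absolute path of the bundled config folder
# Hypothetical resource name, for illustration only:
# resource_path("templates", expect="folder")  # raises DataError if missing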
def _get_not_annotated(func, annotations=None):
"""Return non-optional parameters that are not annotated."""
argspec = inspect.getfullargspec(func)
args = argspec.args
if argspec.defaults is not None:
args = args[:-len(argspec.defaults)]
if inspect.isclass(func) or inspect.ismethod(func):
args = args[1:] # Strip off ``cls`` or ``self``.
kwonlyargs = argspec.kwonlyargs
if argspec.kwonlydefaults is not None:
kwonlyargs = kwonlyargs[:-len(argspec.kwonlydefaults)]
annotations = annotations or argspec.annotations
return [arg for arg in args + kwonlyargs if arg not in annotations] | Return non-optional parameters that are not annotated. | Below is the instruction that describes the task:
### Input:
Return non-optional parameters that are not annotated.
### Response:
def _get_not_annotated(func, annotations=None):
"""Return non-optional parameters that are not annotated."""
argspec = inspect.getfullargspec(func)
args = argspec.args
if argspec.defaults is not None:
args = args[:-len(argspec.defaults)]
if inspect.isclass(func) or inspect.ismethod(func):
args = args[1:] # Strip off ``cls`` or ``self``.
kwonlyargs = argspec.kwonlyargs
if argspec.kwonlydefaults is not None:
kwonlyargs = kwonlyargs[:-len(argspec.kwonlydefaults)]
annotations = annotations or argspec.annotations
return [arg for arg in args + kwonlyargs if arg not in annotations] |
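A small demo of what the helper returns, using a throwaway function:

def sample(a, b: int, c=0, *, d, e: str = "x"):
    pass

# 'a' is positional with neither annotation nor default; 'd' is keyword-only
# with neither, so: _get_not_annotated(sample) -> ['a', 'd']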
def parse_motion_state(val):
"""Convert motion state byte to seconds."""
number = val & 0b00111111
unit = (val & 0b11000000) >> 6
if unit == 1:
number *= 60 # minutes
elif unit == 2:
number *= 60 * 60 # hours
elif unit == 3 and number < 32:
number *= 60 * 60 * 24 # days
elif unit == 3:
number -= 32
number *= 60 * 60 * 24 * 7 # weeks
return number | Convert motion state byte to seconds. | Below is the instruction that describes the task:
### Input:
Convert motion state byte to seconds.
### Response:
def parse_motion_state(val):
"""Convert motion state byte to seconds."""
number = val & 0b00111111
unit = (val & 0b11000000) >> 6
if unit == 1:
number *= 60 # minutes
elif unit == 2:
number *= 60 * 60 # hours
elif unit == 3 and number < 32:
number *= 60 * 60 * 24 # days
elif unit == 3:
number -= 32
number *= 60 * 60 * 24 * 7 # weeks
return number |
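Worked values for the bit layout above (unit field in the top two bits: 0 = seconds, 1 = minutes, 2 = hours, 3 = days or weeks), assuming the function is in scope:

assert parse_motion_state(0b00000010) == 2     # 2 seconds
assert parse_motion_state(0b01000010) == 120   # 2 minutes
assert parse_motion_state(0b10000001) == 3600  # 1 hour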
def old(self):
"""Assess to the state value(s) at beginning of the time step, which
has been processed most recently. When using *HydPy* in the
normal manner. But it can be helpful for demonstration and debugging
purposes.
"""
value = getattr(self.fastaccess_old, self.name, None)
if value is None:
raise RuntimeError(
'No value/values of sequence %s has/have '
'been defined so far.'
% objecttools.elementphrase(self))
else:
if self.NDIM:
value = numpy.asarray(value)
return value | Access to the state value(s) at the beginning of the time step that
has been processed most recently. This is rarely needed when using
*HydPy* in the normal manner, but it can be helpful for demonstration
and debugging purposes. | Below is the instruction that describes the task:
### Input:
Access to the state value(s) at the beginning of the time step that
has been processed most recently. This is rarely needed when using
*HydPy* in the normal manner, but it can be helpful for demonstration
and debugging purposes.
### Response:
def old(self):
"""Assess to the state value(s) at beginning of the time step, which
has been processed most recently. When using *HydPy* in the
normal manner. But it can be helpful for demonstration and debugging
purposes.
"""
value = getattr(self.fastaccess_old, self.name, None)
if value is None:
raise RuntimeError(
'No value/values of sequence %s has/have '
'been defined so far.'
% objecttools.elementphrase(self))
else:
if self.NDIM:
value = numpy.asarray(value)
return value |
def parse(self, p_todo):
"""
Returns fully parsed string from 'format_string' attribute with all
placeholders properly substituted by content obtained from p_todo.
It uses preprocessed form of 'format_string' (result of
ListFormatParser._preprocess_format) stored in 'format_list'
attribute.
"""
parsed_list = []
repl_trunc = None
for substr, placeholder, getter in self.format_list:
repl = getter(p_todo) if getter else ''
pattern = MAIN_PATTERN.format(ph=placeholder)
if placeholder == 'S':
repl_trunc = repl
try:
if repl == '':
substr = re.sub(pattern, '', substr)
else:
substr = re.sub(pattern, _strip_placeholder_braces, substr)
substr = re.sub(r'(?<!\\)%({ph}|\[{ph}\])'.format(ph=placeholder), repl, substr)
except re.error:
raise ListFormatError
parsed_list.append(substr)
parsed_str = _unescape_percent_sign(''.join(parsed_list))
parsed_str = _remove_redundant_spaces(parsed_str)
if self.one_line and len(escape_ansi(parsed_str)) >= _columns():
parsed_str = _truncate(parsed_str, repl_trunc)
if re.search('.*\t', parsed_str):
parsed_str = _right_align(parsed_str)
return parsed_str.rstrip() | Returns fully parsed string from 'format_string' attribute with all
placeholders properly substituted by content obtained from p_todo.
It uses preprocessed form of 'format_string' (result of
ListFormatParser._preprocess_format) stored in 'format_list'
attribute. | Below is the instruction that describes the task:
### Input:
Returns fully parsed string from 'format_string' attribute with all
placeholders properly substituted by content obtained from p_todo.
It uses preprocessed form of 'format_string' (result of
ListFormatParser._preprocess_format) stored in 'format_list'
attribute.
### Response:
def parse(self, p_todo):
"""
Returns fully parsed string from 'format_string' attribute with all
placeholders properly substituted by content obtained from p_todo.
It uses preprocessed form of 'format_string' (result of
ListFormatParser._preprocess_format) stored in 'format_list'
attribute.
"""
parsed_list = []
repl_trunc = None
for substr, placeholder, getter in self.format_list:
repl = getter(p_todo) if getter else ''
pattern = MAIN_PATTERN.format(ph=placeholder)
if placeholder == 'S':
repl_trunc = repl
try:
if repl == '':
substr = re.sub(pattern, '', substr)
else:
substr = re.sub(pattern, _strip_placeholder_braces, substr)
substr = re.sub(r'(?<!\\)%({ph}|\[{ph}\])'.format(ph=placeholder), repl, substr)
except re.error:
raise ListFormatError
parsed_list.append(substr)
parsed_str = _unescape_percent_sign(''.join(parsed_list))
parsed_str = _remove_redundant_spaces(parsed_str)
if self.one_line and len(escape_ansi(parsed_str)) >= _columns():
parsed_str = _truncate(parsed_str, repl_trunc)
if re.search('.*\t', parsed_str):
parsed_str = _right_align(parsed_str)
return parsed_str.rstrip() |
def session_info(consul_url=None, token=None, session=None, **kwargs):
'''
Information about a session
:param consul_url: The Consul server URL.
:param session: The ID of the session to return information about.
:param dc: By default, the datacenter of the agent is queried;
however, the dc can be provided using the "dc" parameter.
:return: Boolean & message of success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.session_info session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716'
'''
ret = {}
if not consul_url:
consul_url = _get_config()
if not consul_url:
log.error('No Consul URL found.')
ret['message'] = 'No Consul URL found.'
ret['res'] = False
return ret
if not session:
raise SaltInvocationError('Required argument "session" is missing.')
query_params = {}
if 'dc' in kwargs:
query_params['dc'] = kwargs['dc']
function = 'session/info/{0}'.format(session)
ret = _query(consul_url=consul_url,
function=function,
token=token,
query_params=query_params)
return ret | Information about a session
:param consul_url: The Consul server URL.
:param session: The ID of the session to return information about.
:param dc: By default, the datacenter of the agent is queried;
however, the dc can be provided using the "dc" parameter.
:return: Boolean & message of success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.session_info session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716' | Below is the instruction that describes the task:
### Input:
Information about a session
:param consul_url: The Consul server URL.
:param session: The ID of the session to return information about.
:param dc: By default, the datacenter of the agent is queried;
however, the dc can be provided using the "dc" parameter.
:return: Boolean & message of success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.session_info session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716'
### Response:
def session_info(consul_url=None, token=None, session=None, **kwargs):
'''
Information about a session
:param consul_url: The Consul server URL.
:param session: The ID of the session to return information about.
:param dc: By default, the datacenter of the agent is queried;
however, the dc can be provided using the "dc" parameter.
:return: Boolean & message of success or failure.
CLI Example:
.. code-block:: bash
salt '*' consul.session_info session='c1c4d223-91cb-3d1f-1ee8-f2af9e7b6716'
'''
ret = {}
if not consul_url:
consul_url = _get_config()
if not consul_url:
log.error('No Consul URL found.')
ret['message'] = 'No Consul URL found.'
ret['res'] = False
return ret
if not session:
raise SaltInvocationError('Required argument "session" is missing.')
query_params = {}
if 'dc' in kwargs:
query_params['dc'] = kwargs['dc']
function = 'session/info/{0}'.format(session)
ret = _query(consul_url=consul_url,
function=function,
token=token,
query_params=query_params)
return ret |
def add_and_get(self, delta):
"""
Adds the given value to the current value and returns the updated value.
:raises NoDataMemberInClusterError: if the cluster does not contain any data members.
:raises UnsupportedOperationError: if the cluster version is less than 3.10.
:raises ConsistencyLostError: if the session guarantees have been lost.
:param delta: (int), the value to add.
:return: (int), the updated value.
"""
return self._invoke_internal(pn_counter_add_codec, delta=delta, get_before_update=False) | Adds the given value to the current value and returns the updated value.
:raises NoDataMemberInClusterError: if the cluster does not contain any data members.
:raises UnsupportedOperationError: if the cluster version is less than 3.10.
:raises ConsistencyLostError: if the session guarantees have been lost.
:param delta: (int), the value to add.
:return: (int), the updated value. | Below is the instruction that describes the task:
### Input:
Adds the given value to the current value and returns the updated value.
:raises NoDataMemberInClusterError: if the cluster does not contain any data members.
:raises UnsupportedOperationError: if the cluster version is less than 3.10.
:raises ConsistencyLostError: if the session guarantees have been lost.
:param delta: (int), the value to add.
:return: (int), the updated value.
### Response:
def add_and_get(self, delta):
"""
Adds the given value to the current value and returns the updated value.
:raises NoDataMemberInClusterError: if the cluster does not contain any data members.
:raises UnsupportedOperationError: if the cluster version is less than 3.10.
:raises ConsistencyLostError: if the session guarantees have been lost.
:param delta: (int), the value to add.
:return: (int), the updated value.
"""
return self._invoke_internal(pn_counter_add_codec, delta=delta, get_before_update=False) |
def getHeaders(self):
"""
Get common headers
:return:
"""
basicauth = base64.encodestring(b(self.user + ':' + self.password)).strip()
return {
"Depth": "1",
"Authorization": 'Basic ' + _decode_utf8(basicauth),
"Accept": "*/*"
} | Get common headers
:return: | Below is the instruction that describes the task:
### Input:
Get common headers
:return:
### Response:
def getHeaders(self):
"""
Get common headers
:return:
"""
basicauth = base64.encodestring(b(self.user + ':' + self.password)).strip()
return {
"Depth": "1",
"Authorization": 'Basic ' + _decode_utf8(basicauth),
"Accept": "*/*"
} |
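base64.encodestring was deprecated in Python 3.1 and removed in 3.9; a sketch of a Python 3 equivalent of the header builder above, taking plain-string arguments instead of the instance attributes:

import base64

def get_headers(user, password):
    # b64encode returns the token without a trailing newline, so no strip() needed.
    token = base64.b64encode("{0}:{1}".format(user, password).encode("utf-8"))
    return {
        "Depth": "1",
        "Authorization": "Basic " + token.decode("ascii"),
        "Accept": "*/*",
    }

print(get_headers("alice", "secret")["Authorization"])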
def place_instruction(order_type, selection_id, side, handicap=None, limit_order=None, limit_on_close_order=None,
market_on_close_order=None, customer_order_ref=None):
"""
Create order instructions to place an order at exchange.
:param str order_type: define type of order to place.
:param int selection_id: selection on which to place order
:param float handicap: handicap if placing order on asianhandicap type market
:param str side: side of order
:param resources.LimitOrder limit_order: if orderType is a limitOrder structure details of the order.
:param resources.LimitOnCloseOrder limit_on_close_order: if orderType is a
limitOnCloseOrder structure details of the order.
:param resources.MarketOnCloseOrder market_on_close_order: if orderType is
a marketOnCloseOrder structure details of the order.
:param str customer_order_ref: an optional reference customers can set to identify instructions.
:return: orders to place.
:rtype: dict
"""
args = locals()
return {
to_camel_case(k): v for k, v in args.items() if v is not None
} | Create order instructions to place an order at exchange.
:param str order_type: define type of order to place.
:param int selection_id: selection on which to place order
:param float handicap: handicap if placing order on asianhandicap type market
:param str side: side of order
:param resources.LimitOrder limit_order: if orderType is a limitOrder structure details of the order.
:param resources.LimitOnCloseOrder limit_on_close_order: if orderType is a
limitOnCloseOrder structure details of the order.
:param resources.MarketOnCloseOrder market_on_close_order: if orderType is
a marketOnCloseOrder structure details of the order.
:param str customer_order_ref: an optional reference customers can set to identify instructions.
:return: orders to place.
:rtype: dict | Below is the instruction that describes the task:
### Input:
Create order instructions to place an order at exchange.
:param str order_type: define type of order to place.
:param int selection_id: selection on which to place order
:param float handicap: handicap if placing order on asianhandicap type market
:param str side: side of order
:param resources.LimitOrder limit_order: if orderType is a limitOrder structure details of the order.
:param resources.LimitOnCloseOrder limit_on_close_order: if orderType is a
limitOnCloseOrder structure details of the order.
:param resources.MarketOnCloseOrder market_on_close_order: if orderType is
a marketOnCloseOrder structure details of the order.
:param str customer_order_ref: an optional reference customers can set to identify instructions.
:return: orders to place.
:rtype: dict
### Response:
def place_instruction(order_type, selection_id, side, handicap=None, limit_order=None, limit_on_close_order=None,
market_on_close_order=None, customer_order_ref=None):
"""
Create order instructions to place an order at exchange.
:param str order_type: define type of order to place.
:param int selection_id: selection on which to place order
:param float handicap: handicap if placing order on asianhandicap type market
:param str side: side of order
:param resources.LimitOrder limit_order: if orderType is a limitOrder structure details of the order.
:param resources.LimitOnCloseOrder limit_on_close_order: if orderType is a
limitOnCloseOrder structure details of the order.
:param resources.MarketOnCloseOrder market_on_close_order: if orderType is
a marketOnCloseOrder structure details of the order.
:param str customer_order_ref: an optional reference customers can set to identify instructions.
:return: orders to place.
:rtype: dict
"""
args = locals()
return {
to_camel_case(k): v for k, v in args.items() if v is not None
} |
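A sketch of the camel-casing step; the to_camel_case below is a hypothetical stand-in assumed to behave like the module's helper:

def to_camel_case(snake):
    head, *rest = snake.split("_")
    return head + "".join(word.title() for word in rest)

# With that helper, place_instruction("LIMIT", 12345, "BACK") drops the
# None-valued arguments and returns:
# {'orderType': 'LIMIT', 'selectionId': 12345, 'side': 'BACK'}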
def patch_custom_resource_definition_status(self, name, body, **kwargs): # noqa: E501
"""patch_custom_resource_definition_status # noqa: E501
partially update status of the specified CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_custom_resource_definition_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CustomResourceDefinition (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1CustomResourceDefinition
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_custom_resource_definition_status_with_http_info(name, body, **kwargs) # noqa: E501
else:
(data) = self.patch_custom_resource_definition_status_with_http_info(name, body, **kwargs) # noqa: E501
return data | patch_custom_resource_definition_status # noqa: E501
partially update status of the specified CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_custom_resource_definition_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CustomResourceDefinition (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1CustomResourceDefinition
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
patch_custom_resource_definition_status # noqa: E501
partially update status of the specified CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_custom_resource_definition_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CustomResourceDefinition (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1CustomResourceDefinition
If the method is called asynchronously,
returns the request thread.
### Response:
def patch_custom_resource_definition_status(self, name, body, **kwargs): # noqa: E501
"""patch_custom_resource_definition_status # noqa: E501
partially update status of the specified CustomResourceDefinition # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_custom_resource_definition_status(name, body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the CustomResourceDefinition (required)
:param UNKNOWN_BASE_TYPE body: (required)
:param str pretty: If 'true', then the output is pretty printed.
:param str dry_run: When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed
:return: V1beta1CustomResourceDefinition
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_custom_resource_definition_status_with_http_info(name, body, **kwargs) # noqa: E501
else:
(data) = self.patch_custom_resource_definition_status_with_http_info(name, body, **kwargs) # noqa: E501
return data |
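A hypothetical usage sketch with the official kubernetes Python client, assuming the v1beta1 apiextensions API is available in the installed version; the CRD name and patch body are illustrative only:

from kubernetes import client, config

config.load_kube_config()
api = client.ApiextensionsV1beta1Api()
patch = {"status": {"conditions": []}}  # illustrative body only
api.patch_custom_resource_definition_status("widgets.example.com", patch)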
def parse_bismark_mbias(self, f):
""" Parse the Bismark M-Bias plot data """
s = f['s_name']
self.bismark_mbias_data['meth']['CpG_R1'][s] = {}
self.bismark_mbias_data['meth']['CHG_R1'][s] = {}
self.bismark_mbias_data['meth']['CHH_R1'][s] = {}
self.bismark_mbias_data['cov']['CpG_R1'][s] = {}
self.bismark_mbias_data['cov']['CHG_R1'][s] = {}
self.bismark_mbias_data['cov']['CHH_R1'][s] = {}
self.bismark_mbias_data['meth']['CpG_R2'][s] = {}
self.bismark_mbias_data['meth']['CHG_R2'][s] = {}
self.bismark_mbias_data['meth']['CHH_R2'][s] = {}
self.bismark_mbias_data['cov']['CpG_R2'][s] = {}
self.bismark_mbias_data['cov']['CHG_R2'][s] = {}
self.bismark_mbias_data['cov']['CHH_R2'][s] = {}
key = None
for l in f['f']:
if 'context' in l:
if 'CpG' in l:
key = 'CpG'
elif 'CHG' in l:
key = 'CHG'
elif 'CHH' in l:
key = 'CHH'
if '(R1)' in l:
key += '_R1'
elif '(R2)' in l:
key += '_R2'
else:
key += '_R1'
if key is not None:
sections = l.split()
try:
pos = int(sections[0])
self.bismark_mbias_data['meth'][key][s][pos] = float(sections[3])
self.bismark_mbias_data['cov'][key][s][pos] = int(sections[4])
except (IndexError, ValueError):
continue
# Remove empty dicts (eg. R2 for SE data)
for t in self.bismark_mbias_data:
for k in self.bismark_mbias_data[t]:
self.bismark_mbias_data[t][k] = {
s_name: self.bismark_mbias_data[t][k][s_name]
for s_name in self.bismark_mbias_data[t][k]
if len(self.bismark_mbias_data[t][k][s_name]) > 0
} | Parse the Bismark M-Bias plot data | Below is the instruction that describes the task:
### Input:
Parse the Bismark M-Bias plot data
### Response:
def parse_bismark_mbias(self, f):
""" Parse the Bismark M-Bias plot data """
s = f['s_name']
self.bismark_mbias_data['meth']['CpG_R1'][s] = {}
self.bismark_mbias_data['meth']['CHG_R1'][s] = {}
self.bismark_mbias_data['meth']['CHH_R1'][s] = {}
self.bismark_mbias_data['cov']['CpG_R1'][s] = {}
self.bismark_mbias_data['cov']['CHG_R1'][s] = {}
self.bismark_mbias_data['cov']['CHH_R1'][s] = {}
self.bismark_mbias_data['meth']['CpG_R2'][s] = {}
self.bismark_mbias_data['meth']['CHG_R2'][s] = {}
self.bismark_mbias_data['meth']['CHH_R2'][s] = {}
self.bismark_mbias_data['cov']['CpG_R2'][s] = {}
self.bismark_mbias_data['cov']['CHG_R2'][s] = {}
self.bismark_mbias_data['cov']['CHH_R2'][s] = {}
key = None
for l in f['f']:
if 'context' in l:
if 'CpG' in l:
key = 'CpG'
elif 'CHG' in l:
key = 'CHG'
elif 'CHH' in l:
key = 'CHH'
if '(R1)' in l:
key += '_R1'
elif '(R2)' in l:
key += '_R2'
else:
key += '_R1'
if key is not None:
sections = l.split()
try:
pos = int(sections[0])
self.bismark_mbias_data['meth'][key][s][pos] = float(sections[3])
self.bismark_mbias_data['cov'][key][s][pos] = int(sections[4])
except (IndexError, ValueError):
continue
# Remove empty dicts (eg. R2 for SE data)
for t in self.bismark_mbias_data:
for k in self.bismark_mbias_data[t]:
self.bismark_mbias_data[t][k] = {
s_name: self.bismark_mbias_data[t][k][s_name]
for s_name in self.bismark_mbias_data[t][k]
if len(self.bismark_mbias_data[t][k][s_name]) > 0
} |
def wrap_http_for_jwt_access(credentials, http):
"""Prepares an HTTP object's request method for JWT access.
Wraps HTTP requests with logic to catch auth failures (typically
identified via a 401 status code). In the event of failure, tries
to refresh the token used and then retry the original request.
Args:
credentials: _JWTAccessCredentials, the credentials used to identify
a service account that uses JWT access tokens.
http: httplib2.Http, an http object to be used to make
auth requests.
"""
orig_request_method = http.request
wrap_http_for_auth(credentials, http)
# The new value of ``http.request`` set by ``wrap_http_for_auth``.
authenticated_request_method = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
if 'aud' in credentials._kwargs:
# Preemptively refresh token, this is not done for OAuth2
if (credentials.access_token is None or
credentials.access_token_expired):
credentials.refresh(None)
return request(authenticated_request_method, uri,
method, body, headers, redirections,
connection_type)
else:
# If we don't have an 'aud' (audience) claim,
# create a 1-time token with the uri root as the audience
headers = _initialize_headers(headers)
_apply_user_agent(headers, credentials.user_agent)
uri_root = uri.split('?', 1)[0]
token, unused_expiry = credentials._create_token({'aud': uri_root})
headers['Authorization'] = 'Bearer ' + token
return request(orig_request_method, uri, method, body,
clean_headers(headers),
redirections, connection_type)
# Replace the request method with our own closure.
http.request = new_request
# Set credentials as a property of the request method.
http.request.credentials = credentials | Prepares an HTTP object's request method for JWT access.
Wraps HTTP requests with logic to catch auth failures (typically
identified via a 401 status code). In the event of failure, tries
to refresh the token used and then retry the original request.
Args:
credentials: _JWTAccessCredentials, the credentials used to identify
a service account that uses JWT access tokens.
http: httplib2.Http, an http object to be used to make
auth requests. | Below is the instruction that describes the task:
### Input:
Prepares an HTTP object's request method for JWT access.
Wraps HTTP requests with logic to catch auth failures (typically
identified via a 401 status code). In the event of failure, tries
to refresh the token used and then retry the original request.
Args:
credentials: _JWTAccessCredentials, the credentials used to identify
a service account that uses JWT access tokens.
http: httplib2.Http, an http object to be used to make
auth requests.
### Response:
def wrap_http_for_jwt_access(credentials, http):
"""Prepares an HTTP object's request method for JWT access.
Wraps HTTP requests with logic to catch auth failures (typically
identified via a 401 status code). In the event of failure, tries
to refresh the token used and then retry the original request.
Args:
credentials: _JWTAccessCredentials, the credentials used to identify
a service account that uses JWT access tokens.
http: httplib2.Http, an http object to be used to make
auth requests.
"""
orig_request_method = http.request
wrap_http_for_auth(credentials, http)
# The new value of ``http.request`` set by ``wrap_http_for_auth``.
authenticated_request_method = http.request
# The closure that will replace 'httplib2.Http.request'.
def new_request(uri, method='GET', body=None, headers=None,
redirections=httplib2.DEFAULT_MAX_REDIRECTS,
connection_type=None):
if 'aud' in credentials._kwargs:
# Preemptively refresh token, this is not done for OAuth2
if (credentials.access_token is None or
credentials.access_token_expired):
credentials.refresh(None)
return request(authenticated_request_method, uri,
method, body, headers, redirections,
connection_type)
else:
# If we don't have an 'aud' (audience) claim,
# create a 1-time token with the uri root as the audience
headers = _initialize_headers(headers)
_apply_user_agent(headers, credentials.user_agent)
uri_root = uri.split('?', 1)[0]
token, unused_expiry = credentials._create_token({'aud': uri_root})
headers['Authorization'] = 'Bearer ' + token
return request(orig_request_method, uri, method, body,
clean_headers(headers),
redirections, connection_type)
# Replace the request method with our own closure.
http.request = new_request
# Set credentials as a property of the request method.
http.request.credentials = credentials |
def pairwise_distances(x, y=None, **kwargs):
r"""
pairwise_distances(x, y=None, *, exponent=1)
Pairwise distance between points.
Return the pairwise distance between points in two sets, or
in the same set if only one set is passed.
Parameters
----------
x: array_like
An :math:`n \times m` array of :math:`n` observations in
a :math:`m`-dimensional space.
y: array_like
An :math:`l \times m` array of :math:`l` observations in
a :math:`m`-dimensional space. If None, the distances will
be computed between the points in :math:`x`.
exponent: float
Exponent of the Euclidean distance.
Returns
-------
numpy ndarray
A :math:`n \times l` matrix where the :math:`(i, j)`-th entry is the
distance between :math:`x[i]` and :math:`y[j]`.
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[16, 15, 14, 13],
... [12, 11, 10, 9],
... [8, 7, 6, 5],
... [4, 3, 2, 1]])
>>> dcor.distances.pairwise_distances(a)
array([[ 0., 8., 16., 24.],
[ 8., 0., 8., 16.],
[16., 8., 0., 8.],
[24., 16., 8., 0.]])
>>> dcor.distances.pairwise_distances(a, b)
array([[24.41311123, 16.61324773, 9.16515139, 4.47213595],
[16.61324773, 9.16515139, 4.47213595, 9.16515139],
[ 9.16515139, 4.47213595, 9.16515139, 16.61324773],
[ 4.47213595, 9.16515139, 16.61324773, 24.41311123]])
"""
x = _transform_to_2d(x)
if y is None or y is x:
return _pdist(x, **kwargs)
else:
y = _transform_to_2d(y)
return _cdist(x, y, **kwargs) | r"""
pairwise_distances(x, y=None, *, exponent=1)
Pairwise distance between points.
Return the pairwise distance between points in two sets, or
in the same set if only one set is passed.
Parameters
----------
x: array_like
An :math:`n \times m` array of :math:`n` observations in
a :math:`m`-dimensional space.
y: array_like
An :math:`l \times m` array of :math:`l` observations in
a :math:`m`-dimensional space. If None, the distances will
be computed between the points in :math:`x`.
exponent: float
Exponent of the Euclidean distance.
Returns
-------
numpy ndarray
A :math:`n \times l` matrix where the :math:`(i, j)`-th entry is the
distance between :math:`x[i]` and :math:`y[j]`.
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[16, 15, 14, 13],
... [12, 11, 10, 9],
... [8, 7, 6, 5],
... [4, 3, 2, 1]])
>>> dcor.distances.pairwise_distances(a)
array([[ 0., 8., 16., 24.],
[ 8., 0., 8., 16.],
[16., 8., 0., 8.],
[24., 16., 8., 0.]])
>>> dcor.distances.pairwise_distances(a, b)
array([[24.41311123, 16.61324773, 9.16515139, 4.47213595],
[16.61324773, 9.16515139, 4.47213595, 9.16515139],
[ 9.16515139, 4.47213595, 9.16515139, 16.61324773],
[ 4.47213595,  9.16515139, 16.61324773, 24.41311123]]) | Below is the instruction that describes the task:
### Input:
r"""
pairwise_distances(x, y=None, *, exponent=1)
Pairwise distance between points.
Return the pairwise distance between points in two sets, or
in the same set if only one set is passed.
Parameters
----------
x: array_like
An :math:`n \times m` array of :math:`n` observations in
a :math:`m`-dimensional space.
y: array_like
An :math:`l \times m` array of :math:`l` observations in
a :math:`m`-dimensional space. If None, the distances will
be computed between the points in :math:`x`.
exponent: float
Exponent of the Euclidean distance.
Returns
-------
numpy ndarray
A :math:`n \times l` matrix where the :math:`(i, j)`-th entry is the
distance between :math:`x[i]` and :math:`y[j]`.
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[16, 15, 14, 13],
... [12, 11, 10, 9],
... [8, 7, 6, 5],
... [4, 3, 2, 1]])
>>> dcor.distances.pairwise_distances(a)
array([[ 0., 8., 16., 24.],
[ 8., 0., 8., 16.],
[16., 8., 0., 8.],
[24., 16., 8., 0.]])
>>> dcor.distances.pairwise_distances(a, b)
array([[24.41311123, 16.61324773, 9.16515139, 4.47213595],
[16.61324773, 9.16515139, 4.47213595, 9.16515139],
[ 9.16515139, 4.47213595, 9.16515139, 16.61324773],
[ 4.47213595, 9.16515139, 16.61324773, 24.41311123]])
### Response:
def pairwise_distances(x, y=None, **kwargs):
r"""
pairwise_distances(x, y=None, *, exponent=1)
Pairwise distance between points.
Return the pairwise distance between points in two sets, or
in the same set if only one set is passed.
Parameters
----------
x: array_like
An :math:`n \times m` array of :math:`n` observations in
a :math:`m`-dimensional space.
y: array_like
An :math:`l \times m` array of :math:`l` observations in
a :math:`m`-dimensional space. If None, the distances will
be computed between the points in :math:`x`.
exponent: float
Exponent of the Euclidean distance.
Returns
-------
numpy ndarray
A :math:`n \times l` matrix where the :math:`(i, j)`-th entry is the
distance between :math:`x[i]` and :math:`y[j]`.
Examples
--------
>>> import numpy as np
>>> import dcor
>>> a = np.array([[1, 2, 3, 4],
... [5, 6, 7, 8],
... [9, 10, 11, 12],
... [13, 14, 15, 16]])
>>> b = np.array([[16, 15, 14, 13],
... [12, 11, 10, 9],
... [8, 7, 6, 5],
... [4, 3, 2, 1]])
>>> dcor.distances.pairwise_distances(a)
array([[ 0., 8., 16., 24.],
[ 8., 0., 8., 16.],
[16., 8., 0., 8.],
[24., 16., 8., 0.]])
>>> dcor.distances.pairwise_distances(a, b)
array([[24.41311123, 16.61324773, 9.16515139, 4.47213595],
[16.61324773, 9.16515139, 4.47213595, 9.16515139],
[ 9.16515139, 4.47213595, 9.16515139, 16.61324773],
[ 4.47213595, 9.16515139, 16.61324773, 24.41311123]])
"""
x = _transform_to_2d(x)
if y is None or y is x:
return _pdist(x, **kwargs)
else:
y = _transform_to_2d(y)
return _cdist(x, y, **kwargs) |