code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) | text (string, lengths 164-112k) |
---|---|---|
def check_syntax(self):
"""Only logs that this URL is unknown."""
super(ItmsServicesUrl, self).check_syntax()
if u"url=" not in self.urlparts[3]:
self.set_result(_("Missing required url parameter"), valid=False) | Only logs that this URL is unknown. | Below is the the instruction that describes the task:
### Input:
Only logs that this URL is unknown.
### Response:
def check_syntax(self):
"""Only logs that this URL is unknown."""
super(ItmsServicesUrl, self).check_syntax()
if u"url=" not in self.urlparts[3]:
self.set_result(_("Missing required url parameter"), valid=False) |
def get_node_annotation_layers(docgraph):
"""
WARNING: this is highly inefficient!
Fix this via Issue #36.
Returns
-------
all_layers : set or dict
the set of all annotation layers used for annotating nodes in the given
graph
"""
all_layers = set()
for node_id, node_attribs in docgraph.nodes_iter(data=True):
for layer in node_attribs['layers']:
all_layers.add(layer)
return all_layers | WARNING: this is highly inefficient!
Fix this via Issue #36.
Returns
-------
all_layers : set or dict
the set of all annotation layers used for annotating nodes in the given
graph | Below is the instruction that describes the task:
### Input:
WARNING: this is highly inefficient!
Fix this via Issue #36.
Returns
-------
all_layers : set or dict
the set of all annotation layers used for annotating nodes in the given
graph
### Response:
def get_node_annotation_layers(docgraph):
"""
WARNING: this is highly inefficient!
Fix this via Issue #36.
Returns
-------
all_layers : set or dict
the set of all annotation layers used for annotating nodes in the given
graph
"""
all_layers = set()
for node_id, node_attribs in docgraph.nodes_iter(data=True):
for layer in node_attribs['layers']:
all_layers.add(layer)
return all_layers |
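A small usage sketch of the function above; FakeDocGraph is a hypothetical stand-in that only mimics the nodes_iter(data=True) interface (the networkx 1.x API), not a real document graph.
class FakeDocGraph(object):
    # hypothetical stand-in exposing only the interface used by get_node_annotation_layers
    def __init__(self, nodes):
        self._nodes = nodes

    def nodes_iter(self, data=False):
        return iter(self._nodes.items()) if data else iter(self._nodes)

g = FakeDocGraph({'n1': {'layers': {'tiger', 'tiger:token'}},
                  'n2': {'layers': {'rst'}}})
print(get_node_annotation_layers(g))  # {'tiger', 'tiger:token', 'rst'} (set order may vary)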
def iter(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Get table and fallbacks
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
# Open and close transaction
with self.__connection.begin():
# Streaming may not work for some backends:
# http://docs.sqlalchemy.org/en/latest/core/connections.html
select = table.select().execution_options(stream_results=True)
result = select.execute()
for row in result:
row = self.__mapper.restore_row(row, schema=schema)
yield row | https://github.com/frictionlessdata/tableschema-sql-py#storage | Below is the instruction that describes the task:
### Input:
https://github.com/frictionlessdata/tableschema-sql-py#storage
### Response:
def iter(self, bucket):
"""https://github.com/frictionlessdata/tableschema-sql-py#storage
"""
# Get table and fallbacks
table = self.__get_table(bucket)
schema = tableschema.Schema(self.describe(bucket))
# Open and close transaction
with self.__connection.begin():
# Streaming may not work for some backends:
# http://docs.sqlalchemy.org/en/latest/core/connections.html
select = table.select().execution_options(stream_results=True)
result = select.execute()
for row in result:
row = self.__mapper.restore_row(row, schema=schema)
yield row |
def removeUnconnectedSignals(netlist):
"""
If signal is not driving anything remove it
"""
toDelete = set()
toSearch = netlist.signals
while toSearch:
_toSearch = set()
for sig in toSearch:
if not sig.endpoints:
try:
if sig._interface is not None:
# skip interfaces before we want to check them,
# they should not be optimized out from design
continue
except AttributeError:
pass
for e in sig.drivers:
# drivers of this signal are useless rm them
if isinstance(e, Operator):
inputs = e.operands
if e.result is sig:
e.result = None
else:
inputs = e._inputs
netlist.statements.discard(e)
for op in inputs:
if not isinstance(op, Value):
try:
op.endpoints.remove(e)
except KeyError:
# this operator has 2x+ same operand
continue
_toSearch.add(op)
toDelete.add(sig)
if toDelete:
for sig in toDelete:
if sig.ctx == netlist:
netlist.signals.remove(sig)
_toSearch.discard(sig)
toDelete = set()
toSearch = _toSearch | If signal is not driving anything remove it | Below is the instruction that describes the task:
### Input:
If signal is not driving anything remove it
### Response:
def removeUnconnectedSignals(netlist):
"""
If signal is not driving anything remove it
"""
toDelete = set()
toSearch = netlist.signals
while toSearch:
_toSearch = set()
for sig in toSearch:
if not sig.endpoints:
try:
if sig._interface is not None:
# skip interfaces before we want to check them,
# they should not be optimized out from design
continue
except AttributeError:
pass
for e in sig.drivers:
# drivers of this signal are useless rm them
if isinstance(e, Operator):
inputs = e.operands
if e.result is sig:
e.result = None
else:
inputs = e._inputs
netlist.statements.discard(e)
for op in inputs:
if not isinstance(op, Value):
try:
op.endpoints.remove(e)
except KeyError:
# this operator has 2x+ same operand
continue
_toSearch.add(op)
toDelete.add(sig)
if toDelete:
for sig in toDelete:
if sig.ctx == netlist:
netlist.signals.remove(sig)
_toSearch.discard(sig)
toDelete = set()
toSearch = _toSearch |
def send_file(self, filename, status=200):
"""
Reads in the file 'filename' and sends bytes to client
Parameters
----------
filename : str
Filename of the file to read
status : int, optional
The HTTP status code, defaults to 200 (OK)
"""
if isinstance(filename, Path) and sys.version_info >= (3, 5):
self.message = filename.read_bytes()
else:
with io.FileIO(str(filename)) as f:
self.message = f.read()
self.status_code = status
self.send_headers()
self.write()
self.write_eof() | Reads in the file 'filename' and sends bytes to client
Parameters
----------
filename : str
Filename of the file to read
status : int, optional
The HTTP status code, defaults to 200 (OK) | Below is the instruction that describes the task:
### Input:
Reads in the file 'filename' and sends bytes to client
Parameters
----------
filename : str
Filename of the file to read
status : int, optional
The HTTP status code, defaults to 200 (OK)
### Response:
def send_file(self, filename, status=200):
"""
Reads in the file 'filename' and sends bytes to client
Parameters
----------
filename : str
Filename of the file to read
status : int, optional
The HTTP status code, defaults to 200 (OK)
"""
if isinstance(filename, Path) and sys.version_info >= (3, 5):
self.message = filename.read_bytes()
else:
with io.FileIO(str(filename)) as f:
self.message = f.read()
self.status_code = status
self.send_headers()
self.write()
self.write_eof() |
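The two read paths above shown in isolation, as a sketch only: pathlib's read_bytes() on Python 3.5+, and the io.FileIO fallback for plain string paths. The file name is made up for the example.
import io
import sys
from pathlib import Path

path = Path("example.bin")        # hypothetical file, created just for the demo
path.write_bytes(b"hello")
if isinstance(path, Path) and sys.version_info >= (3, 5):
    message = path.read_bytes()
else:
    with io.FileIO(str(path)) as f:
        message = f.read()
print(message)  # b'hello'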
def python_executable(self):
"""The absolute pathname of the Python executable (a string)."""
return self.get(property_name='python_executable',
default=sys.executable or os.path.join(self.install_prefix, 'bin', 'python')) | The absolute pathname of the Python executable (a string). | Below is the instruction that describes the task:
### Input:
The absolute pathname of the Python executable (a string).
### Response:
def python_executable(self):
"""The absolute pathname of the Python executable (a string)."""
return self.get(property_name='python_executable',
default=sys.executable or os.path.join(self.install_prefix, 'bin', 'python')) |
def fetch_and_parse(method, uri, params_prefix=None, **params):
"""Fetch the given uri and return python dictionary with parsed data-types."""
response = fetch(method, uri, params_prefix, **params)
return _parse(json.loads(response.text)) | Fetch the given uri and return python dictionary with parsed data-types. | Below is the instruction that describes the task:
### Input:
Fetch the given uri and return python dictionary with parsed data-types.
### Response:
def fetch_and_parse(method, uri, params_prefix=None, **params):
"""Fetch the given uri and return python dictionary with parsed data-types."""
response = fetch(method, uri, params_prefix, **params)
return _parse(json.loads(response.text)) |
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(CPUCollector, self).get_default_config()
config.update({
'path': 'cpu',
'percore': 'True',
'xenfix': None,
'simple': 'False',
'normalize': 'False',
})
return config | Returns the default collector settings | Below is the instruction that describes the task:
### Input:
Returns the default collector settings
### Response:
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(CPUCollector, self).get_default_config()
config.update({
'path': 'cpu',
'percore': 'True',
'xenfix': None,
'simple': 'False',
'normalize': 'False',
})
return config |
def _get_row_within_width(self, row):
"""Process a row so that it is clamped by column_width.
Parameters
----------
row : array_like
A single row.
Returns
-------
list of list:
List representation of the `row` after it has been processed
according to width exceed policy.
"""
table = self._table
lpw, rpw = table.left_padding_widths, table.right_padding_widths
wep = table.width_exceed_policy
list_of_rows = []
if (wep is WidthExceedPolicy.WEP_STRIP or
wep is WidthExceedPolicy.WEP_ELLIPSIS):
# Let's strip the row
delimiter = '' if wep is WidthExceedPolicy.WEP_STRIP else '...'
row_item_list = []
for index, row_item in enumerate(row):
left_pad = table._column_pad * lpw[index]
right_pad = table._column_pad * rpw[index]
clmp_str = (left_pad
+ self._clamp_string(row_item, index, delimiter)
+ right_pad)
row_item_list.append(clmp_str)
list_of_rows.append(row_item_list)
elif wep is WidthExceedPolicy.WEP_WRAP:
# Let's wrap the row
string_partition = []
for index, row_item in enumerate(row):
width = table.column_widths[index] - lpw[index] - rpw[index]
string_partition.append(textwrap(row_item, width))
for row_items in zip_longest(*string_partition, fillvalue=''):
row_item_list = []
for index, row_item in enumerate(row_items):
left_pad = table._column_pad * lpw[index]
right_pad = table._column_pad * rpw[index]
row_item_list.append(left_pad + row_item + right_pad)
list_of_rows.append(row_item_list)
if len(list_of_rows) == 0:
return [[''] * table.column_count]
else:
return list_of_rows | Process a row so that it is clamped by column_width.
Parameters
----------
row : array_like
A single row.
Returns
-------
list of list:
List representation of the `row` after it has been processed
according to width exceed policy. | Below is the instruction that describes the task:
### Input:
Process a row so that it is clamped by column_width.
Parameters
----------
row : array_like
A single row.
Returns
-------
list of list:
List representation of the `row` after it has been processed
according to width exceed policy.
### Response:
def _get_row_within_width(self, row):
"""Process a row so that it is clamped by column_width.
Parameters
----------
row : array_like
A single row.
Returns
-------
list of list:
List representation of the `row` after it has been processed
according to width exceed policy.
"""
table = self._table
lpw, rpw = table.left_padding_widths, table.right_padding_widths
wep = table.width_exceed_policy
list_of_rows = []
if (wep is WidthExceedPolicy.WEP_STRIP or
wep is WidthExceedPolicy.WEP_ELLIPSIS):
# Let's strip the row
delimiter = '' if wep is WidthExceedPolicy.WEP_STRIP else '...'
row_item_list = []
for index, row_item in enumerate(row):
left_pad = table._column_pad * lpw[index]
right_pad = table._column_pad * rpw[index]
clmp_str = (left_pad
+ self._clamp_string(row_item, index, delimiter)
+ right_pad)
row_item_list.append(clmp_str)
list_of_rows.append(row_item_list)
elif wep is WidthExceedPolicy.WEP_WRAP:
# Let's wrap the row
string_partition = []
for index, row_item in enumerate(row):
width = table.column_widths[index] - lpw[index] - rpw[index]
string_partition.append(textwrap(row_item, width))
for row_items in zip_longest(*string_partition, fillvalue=''):
row_item_list = []
for index, row_item in enumerate(row_items):
left_pad = table._column_pad * lpw[index]
right_pad = table._column_pad * rpw[index]
row_item_list.append(left_pad + row_item + right_pad)
list_of_rows.append(row_item_list)
if len(list_of_rows) == 0:
return [[''] * table.column_count]
else:
return list_of_rows |
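A standalone sketch of the WEP_WRAP branch: each cell is wrapped to its column width and zip_longest pads the shorter columns with empty strings, so every physical output line has one entry per column. The stdlib's textwrap.wrap stands in here for the textwrap helper used above.
import textwrap
from itertools import zip_longest

row = ["a fairly long cell value", "short"]
widths = [10, 10]
partitions = [textwrap.wrap(cell, width) for cell, width in zip(row, widths)]
for line in zip_longest(*partitions, fillvalue=''):
    print(line)
# ('a fairly', 'short')
# ('long cell', '')
# ('value', '')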
def update_subnet(self, subnet, name=None):
'''
Updates a subnet
'''
subnet_id = self._find_subnet_id(subnet)
return self.network_conn.update_subnet(
subnet=subnet_id, body={'subnet': {'name': name}}) | Updates a subnet | Below is the instruction that describes the task:
### Input:
Updates a subnet
### Response:
def update_subnet(self, subnet, name=None):
'''
Updates a subnet
'''
subnet_id = self._find_subnet_id(subnet)
return self.network_conn.update_subnet(
subnet=subnet_id, body={'subnet': {'name': name}}) |
def do_version():
"""Return version details of the running server api"""
v = ApiPool.ping.model.Version(
name=ApiPool().current_server_name,
version=ApiPool().current_server_api.get_version(),
container=get_container_version(),
)
log.info("/version: " + pprint.pformat(v))
return v | Return version details of the running server api | Below is the instruction that describes the task:
### Input:
Return version details of the running server api
### Response:
def do_version():
"""Return version details of the running server api"""
v = ApiPool.ping.model.Version(
name=ApiPool().current_server_name,
version=ApiPool().current_server_api.get_version(),
container=get_container_version(),
)
log.info("/version: " + pprint.pformat(v))
return v |
def parse_html(infile, xpath):
"""Filter HTML using XPath."""
if not isinstance(infile, lh.HtmlElement):
infile = lh.fromstring(infile)
infile = infile.xpath(xpath)
if not infile:
raise ValueError('XPath {0} returned no results.'.format(xpath))
return infile | Filter HTML using XPath. | Below is the instruction that describes the task:
### Input:
Filter HTML using XPath.
### Response:
def parse_html(infile, xpath):
"""Filter HTML using XPath."""
if not isinstance(infile, lh.HtmlElement):
infile = lh.fromstring(infile)
infile = infile.xpath(xpath)
if not infile:
raise ValueError('XPath {0} returned no results.'.format(xpath))
return infile |
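Example usage, assuming lh is lxml.html (which the lh.HtmlElement / lh.fromstring calls above imply):
import lxml.html as lh

html = "<html><body><p>one</p><p>two</p></body></html>"
print(parse_html(html, "//p/text()"))  # ['one', 'two']
# parse_html(html, "//table") would raise ValueError, since that XPath matches nothing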
def create_from_xmlfile(cls, xmlfile, extdir=None):
"""Create a Source object from an XML file.
Parameters
----------
xmlfile : str
Path to XML file.
extdir : str
Path to the extended source archive.
"""
root = ElementTree.ElementTree(file=xmlfile).getroot()
srcs = root.findall('source')
if len(srcs) == 0:
raise Exception('No sources found.')
return cls.create_from_xml(srcs[0], extdir=extdir) | Create a Source object from an XML file.
Parameters
----------
xmlfile : str
Path to XML file.
extdir : str
Path to the extended source archive. | Below is the instruction that describes the task:
### Input:
Create a Source object from an XML file.
Parameters
----------
xmlfile : str
Path to XML file.
extdir : str
Path to the extended source archive.
### Response:
def create_from_xmlfile(cls, xmlfile, extdir=None):
"""Create a Source object from an XML file.
Parameters
----------
xmlfile : str
Path to XML file.
extdir : str
Path to the extended source archive.
"""
root = ElementTree.ElementTree(file=xmlfile).getroot()
srcs = root.findall('source')
if len(srcs) == 0:
raise Exception('No sources found.')
return cls.create_from_xml(srcs[0], extdir=extdir) |
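The XML handling above in isolation: ElementTree parses the file and findall('source') returns the <source> elements that create_from_xml() would then consume. The XML below is an illustrative stub, not a real source library.
import io
from xml.etree import ElementTree

xmlfile = io.StringIO('<source_library><source name="example_src" type="PointSource"/></source_library>')
root = ElementTree.ElementTree(file=xmlfile).getroot()
srcs = root.findall('source')
print(len(srcs), srcs[0].get('name'))  # 1 example_src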
def router_del(self, cluster_id, router_id):
"""remove router from the ShardedCluster"""
cluster = self._storage[cluster_id]
result = cluster.router_remove(router_id)
self._storage[cluster_id] = cluster
return result | remove router from the ShardedCluster | Below is the instruction that describes the task:
### Input:
remove router from the ShardedCluster
### Response:
def router_del(self, cluster_id, router_id):
"""remove router from the ShardedCluster"""
cluster = self._storage[cluster_id]
result = cluster.router_remove(router_id)
self._storage[cluster_id] = cluster
return result |
def _flatten_dict(original_dict):
"""Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting.
"""
flat_dict = {}
for key, value in original_dict.items():
if isinstance(value, dict):
for name, tensor in value.items():
if isinstance(tensor, dict):
raise ValueError("flatten_dict only handles 2 levels of nesting.")
flat_key = "__" + key + "_" + name
flat_dict[flat_key] = tensor
else:
flat_dict[key] = value
return flat_dict | Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting. | Below is the instruction that describes the task:
### Input:
Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting.
### Response:
def _flatten_dict(original_dict):
"""Flatten dict of dicts into a single dict with appropriate prefixes.
Handles only 2 levels of nesting in the original dict.
Args:
original_dict: Dict which may contain one or more dicts.
Returns:
flat_dict: Dict without any nesting. Any dicts in the original dict have
their keys as prefixes in the new dict.
Raises:
ValueError if the original dict has more than two levels of nesting.
"""
flat_dict = {}
for key, value in original_dict.items():
if isinstance(value, dict):
for name, tensor in value.items():
if isinstance(tensor, dict):
raise ValueError("flatten_dict only handles 2 levels of nesting.")
flat_key = "__" + key + "_" + name
flat_dict[flat_key] = tensor
else:
flat_dict[key] = value
return flat_dict |
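A quick example of the flattening rule above (one nested level, nested keys prefixed with "__<outer key>_"):
nested = {"targets": {"inputs": 1, "labels": 2}, "batch_size": 16}
print(_flatten_dict(nested))
# {'__targets_inputs': 1, '__targets_labels': 2, 'batch_size': 16}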
def assign_composition_to_repository(self, composition_id, repository_id):
"""Adds an existing ``Composition`` to a ``Repository``.
arg: composition_id (osid.id.Id): the ``Id`` of the
``Composition``
arg: repository_id (osid.id.Id): the ``Id`` of the
``Repository``
raise: AlreadyExists - ``composition_id`` already assigned to
``repository_id``
raise: NotFound - ``composition_id`` or ``repository_id`` not
found
raise: NullArgument - ``composition_id`` or ``repository_id``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
mgr = self._get_provider_manager('REPOSITORY', local=True)
lookup_session = mgr.get_repository_lookup_session(proxy=self._proxy)
lookup_session.get_repository(repository_id) # to raise NotFound
self._assign_object_to_catalog(composition_id, repository_id) | Adds an existing ``Composition`` to a ``Repository``.
arg: composition_id (osid.id.Id): the ``Id`` of the
``Composition``
arg: repository_id (osid.id.Id): the ``Id`` of the
``Repository``
raise: AlreadyExists - ``composition_id`` already assigned to
``repository_id``
raise: NotFound - ``composition_id`` or ``repository_id`` not
found
raise: NullArgument - ``composition_id`` or ``repository_id``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Adds an existing ``Composition`` to a ``Repository``.
arg: composition_id (osid.id.Id): the ``Id`` of the
``Composition``
arg: repository_id (osid.id.Id): the ``Id`` of the
``Repository``
raise: AlreadyExists - ``composition_id`` already assigned to
``repository_id``
raise: NotFound - ``composition_id`` or ``repository_id`` not
found
raise: NullArgument - ``composition_id`` or ``repository_id``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def assign_composition_to_repository(self, composition_id, repository_id):
"""Adds an existing ``Composition`` to a ``Repository``.
arg: composition_id (osid.id.Id): the ``Id`` of the
``Composition``
arg: repository_id (osid.id.Id): the ``Id`` of the
``Repository``
raise: AlreadyExists - ``composition_id`` already assigned to
``repository_id``
raise: NotFound - ``composition_id`` or ``repository_id`` not
found
raise: NullArgument - ``composition_id`` or ``repository_id``
is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
mgr = self._get_provider_manager('REPOSITORY', local=True)
lookup_session = mgr.get_repository_lookup_session(proxy=self._proxy)
lookup_session.get_repository(repository_id) # to raise NotFound
self._assign_object_to_catalog(composition_id, repository_id) |
def start_transport(self):
"""If a transport object has been defined then connect it now."""
if self.transport:
if self.transport.connect():
self.log.debug("Service successfully connected to transport layer")
else:
raise RuntimeError("Service could not connect to transport layer")
# direct all transport callbacks into the main queue
self._transport_interceptor_counter = itertools.count()
self.transport.subscription_callback_set_intercept(
self._transport_interceptor
)
else:
self.log.debug("No transport layer defined for service. Skipping.") | If a transport object has been defined then connect it now. | Below is the the instruction that describes the task:
### Input:
If a transport object has been defined then connect it now.
### Response:
def start_transport(self):
"""If a transport object has been defined then connect it now."""
if self.transport:
if self.transport.connect():
self.log.debug("Service successfully connected to transport layer")
else:
raise RuntimeError("Service could not connect to transport layer")
# direct all transport callbacks into the main queue
self._transport_interceptor_counter = itertools.count()
self.transport.subscription_callback_set_intercept(
self._transport_interceptor
)
else:
self.log.debug("No transport layer defined for service. Skipping.") |
def flatten(self, lst):
"""Turn a list of lists into a list."""
if lst == []:
return lst
if isinstance(lst[0], list):
return self.flatten(lst[0]) + self.flatten(lst[1:])
return lst[:1] + self.flatten(lst[1:]) | Turn a list of lists into a list. | Below is the instruction that describes the task:
### Input:
Turn a list of lists into a list.
### Response:
def flatten(self, lst):
"""Turn a list of lists into a list."""
if lst == []:
return lst
if isinstance(lst[0], list):
return self.flatten(lst[0]) + self.flatten(lst[1:])
return lst[:1] + self.flatten(lst[1:]) |
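A quick check of the recursion; Flattener is a hypothetical holder class, needed only because flatten() is written as a method (it does not otherwise use self).
class Flattener(object):
    flatten = flatten  # reuse the function above as a method

print(Flattener().flatten([1, [2, [3, 4]], 5]))  # [1, 2, 3, 4, 5]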
def find_threshold_near_density(img, density, low=0, high=255):
"""Find a threshold where the fraction of pixels above the threshold
is closest to density where density is (count of pixels above
threshold / count of pixels).
The highest threshold closest to the desired density will be returned.
Use low and high to exclude undesirable thresholds.
:param img: target image
:type img: 2d :class:`numpy.ndarray`
:param density: target density
:type density: float between 0.0 and 1.0
:param low: min threshold to test
:type low: ubyte
:param high: max threshold to test
:type high: ubyte
:rtype: ubyte
"""
size = numpy.size(img)
densities = []
last_t = None
while True:
t = ((high - low) // 2) + low
if t == last_t:
densities.sort(key=lambda x: (abs(x[0] - density), 256 - x[1]))
return densities[0][1]
else:
last_t = t
d = numpy.count_nonzero(img > t) / size
densities.append((d, t))
if d < density:
high = t
elif d >= density: # search away from low
low = t | Find a threshold where the fraction of pixels above the threshold
is closest to density where density is (count of pixels above
threshold / count of pixels).
The highest threshold closest to the desired density will be returned.
Use low and high to exclude undesirable thresholds.
:param img: target image
:type img: 2d :class:`numpy.ndarray`
:param density: target density
:type density: float between 0.0 and 1.0
:param low: min threshold to test
:type low: ubyte
:param high: max threshold to test
:type high: ubyte
:rtype: ubyte | Below is the instruction that describes the task:
### Input:
Find a threshold where the fraction of pixels above the threshold
is closest to density where density is (count of pixels above
threshold / count of pixels).
The highest threshold closest to the desired density will be returned.
Use low and high to exclude undesirable thresholds.
:param img: target image
:type img: 2d :class:`numpy.ndarray`
:param density: target density
:type density: float between 0.0 and 1.0
:param low: min threshold to test
:type low: ubyte
:param high: max threshold to test
:type high: ubyte
:rtype: ubyte
### Response:
def find_threshold_near_density(img, density, low=0, high=255):
"""Find a threshold where the fraction of pixels above the threshold
is closest to density where density is (count of pixels above
threshold / count of pixels).
The highest threshold closest to the desired density will be returned.
Use low and high to exclude undesirable thresholds.
:param img: target image
:type img: 2d :class:`numpy.ndarray`
:param density: target density
:type density: float between 0.0 and 1.0
:param low: min threshold to test
:type low: ubyte
:param migh: max threshold to test
:type low: ubyte
:rtype: ubyte
"""
size = numpy.size(img)
densities = []
last_t = None
while True:
t = ((high - low) // 2) + low
if t == last_t:
densities.sort(key=lambda x: (abs(x[0] - density), 256 - x[1]))
return densities[0][1]
else:
last_t = t
d = numpy.count_nonzero(img > t) / size
densities.append((d, t))
if d < density:
high = t
elif d >= density: # search away from low
low = t |
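A small sanity check with a synthetic image: half the pixels are 200 and half are 50, so asking for a density of 0.5 should return the highest threshold that still keeps half the pixels above it.
import numpy

img = numpy.array([[200, 200], [50, 50]], dtype=numpy.uint8)
t = find_threshold_near_density(img, 0.5)
print(t, numpy.count_nonzero(img > t) / numpy.size(img))  # 199 0.5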
def _extract_columns(d, tmp_tso, pc):
"""
Extract data from one paleoData column
:param dict d: Column dictionary
:param dict tmp_tso: TSO dictionary with only root items
:return dict: Finished TSO
"""
logger_ts.info("enter extract_columns")
for k, v in d.items():
if isinstance(v, dict):
flat_data = _extract_nested(pc + "_" + k, v, {})
for n,m in flat_data.items():
tmp_tso[n] = m
else:
# Assume if it's not a special nested case, then it's a string value
tmp_tso[pc + '_' + k] = v
return tmp_tso | Extract data from one paleoData column
:param dict d: Column dictionary
:param dict tmp_tso: TSO dictionary with only root items
:return dict: Finished TSO | Below is the instruction that describes the task:
### Input:
Extract data from one paleoData column
:param dict d: Column dictionary
:param dict tmp_tso: TSO dictionary with only root items
:return dict: Finished TSO
### Response:
def _extract_columns(d, tmp_tso, pc):
"""
Extract data from one paleoData column
:param dict d: Column dictionary
:param dict tmp_tso: TSO dictionary with only root items
:return dict: Finished TSO
"""
logger_ts.info("enter extract_columns")
for k, v in d.items():
if isinstance(v, dict):
flat_data = _extract_nested(pc + "_" + k, v, {})
for n,m in flat_data.items():
tmp_tso[n] = m
else:
# Assume if it's not a special nested case, then it's a string value
tmp_tso[pc + '_' + k] = v
return tmp_tso |
def delete_comment(self, msg_data_id, index, user_comment_id):
"""
删除评论 (Delete a comment)
"""
return self._post(
'comment/delete',
data={
'msg_data_id': msg_data_id,
'index': index,
'user_comment_id': user_comment_id,
}) | 删除评论 (Delete a comment) | Below is the instruction that describes the task:
### Input:
删除评论 (Delete a comment)
### Response:
def delete_comment(self, msg_data_id, index, user_comment_id):
"""
删除评论 (Delete a comment)
"""
return self._post(
'comment/delete',
data={
'msg_data_id': msg_data_id,
'index': index,
'user_comment_id': user_comment_id,
}) |
def params(self):
"""
Return a list of parameters in this task's request.
If self.request is already a list, simply return it.
If self.request is a raw XML-RPC string, parse it and return the
params.
"""
if isinstance(self.request, list):
return unmunchify(self.request)
(params, _) = xmlrpc.loads(self.request)
return params | Return a list of parameters in this task's request.
If self.request is already a list, simply return it.
If self.request is a raw XML-RPC string, parse it and return the
params. | Below is the the instruction that describes the task:
### Input:
Return a list of parameters in this task's request.
If self.request is already a list, simply return it.
If self.request is a raw XML-RPC string, parse it and return the
params.
### Response:
def params(self):
"""
Return a list of parameters in this task's request.
If self.request is already a list, simply return it.
If self.request is a raw XML-RPC string, parse it and return the
params.
"""
if isinstance(self.request, list):
return unmunchify(self.request)
(params, _) = xmlrpc.loads(self.request)
return params |
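A round-trip illustration of the XML-RPC branch, assuming xmlrpc above refers to the standard library's xmlrpc.client module; the method name and arguments below are made up for the example.
import xmlrpc.client as xmlrpc

raw = xmlrpc.dumps(('package-1.0-1.src.rpm', 'example-target'), methodname='build')
params, method = xmlrpc.loads(raw)
print(method, params)  # build ('package-1.0-1.src.rpm', 'example-target')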
def convert(self, request, response, data):
"""
Performs the desired formatting.
:param request: The webob Request object describing the
request.
:param response: The webob Response object describing the
response.
:param data: The data dictionary list returned by the
prepare() method.
:returns: A string, the results of which are the desired
conversion.
"""
result = []
for conv, datum in zip(self.conversions, data):
# Only include conversion if it's allowed
if conv.modifier.accept(response.status_code):
result.append(conv.convert(request, response, datum))
else:
result.append('-')
return ''.join(result) | Performs the desired formatting.
:param request: The webob Request object describing the
request.
:param response: The webob Response object describing the
response.
:param data: The data dictionary list returned by the
prepare() method.
:returns: A string, the results of which are the desired
conversion. | Below is the instruction that describes the task:
### Input:
Performs the desired formatting.
:param request: The webob Request object describing the
request.
:param response: The webob Response object describing the
response.
:param data: The data dictionary list returned by the
prepare() method.
:returns: A string, the results of which are the desired
conversion.
### Response:
def convert(self, request, response, data):
"""
Performs the desired formatting.
:param request: The webob Request object describing the
request.
:param response: The webob Response object describing the
response.
:param data: The data dictionary list returned by the
prepare() method.
:returns: A string, the results of which are the desired
conversion.
"""
result = []
for conv, datum in zip(self.conversions, data):
# Only include conversion if it's allowed
if conv.modifier.accept(response.status_code):
result.append(conv.convert(request, response, datum))
else:
result.append('-')
return ''.join(result) |
def read_upload(up_file, data_model=None):
"""
take a file that should be ready for upload
using the data model, check that all required columns are full,
and that all numeric data is in fact numeric.
print out warnings for any validation problems
return True if there were no problems, otherwise return False
"""
print("-I- Running validation for your upload file")
## Read file
f = open(up_file)
lines = f.readlines()
f.close()
data = split_lines(lines)
data_dicts = get_dicts(data)
## initialize
invalid_data = {}
missing_data = {}
non_numeric = {}
bad_vocab = {}
bad_coords = {}
invalid_col_names = {}
missing_file_type = False
## make sure you have the data model
if not data_model:
data_model = get_data_model()
reqd_file_types = ['er_locations']
provided_file_types = set()
if not data_model:
return False, None
## Iterate through data
# each dictionary is one tab delimited line in a csv file
for dictionary in data_dicts:
for k, v in list(dictionary.items()):
if k == "file_type": # meta data
provided_file_types.add(v)
continue
file_type = dictionary['file_type']
# need to deal with pmag_criteria type file, too
item_type = file_type.split('_')[1][:-1]
if item_type == 'criteria':
item_name = dictionary.get('criteria_definition')
elif item_type == 'result':
item_name = dictionary.get('pmag_result_name', None)
elif item_type in ('specimen', 'sample', 'site', 'location'):
item_name = dictionary.get('er_' + item_type + '_name', None)
elif item_type == 'age':
# get the lowest level er_*_name column that is filled in
for dtype in ('specimen', 'sample', 'site', 'location'):
item_name = dictionary.get('er_' + dtype + '_name', None)
if item_name:
break
elif item_type == 'measurement':
exp_name = dictionary.get('magic_experiment_name')
meas_num = dictionary.get('measurement_number')
item_name = exp_name + '_' + str(meas_num)
else:
item_name = None
if file_type not in list(data_model.keys()):
continue
specific_data_model = data_model[file_type]
## Function for building problems list
def add_to_invalid_data(item_name, item_type, invalid_data,
validation, problem_type):
"""
correctly create or add to the dictionary of invalid values
"""
if item_name:
if item_type not in invalid_data:
invalid_data[item_type] = {}
if item_name not in invalid_data[item_type]:
invalid_data[item_type][item_name] = {}
if problem_type not in invalid_data[item_type][item_name]:
invalid_data[item_type][item_name][problem_type] = []
invalid_data[item_type][item_name][problem_type].append(validation)
## Validate for each problem type
# check if column header is in the data model
invalid_col_name = validate_for_recognized_column(k, v, specific_data_model)
if invalid_col_name:
if item_type not in list(invalid_col_names.keys()):
invalid_col_names[item_type] = set()
invalid_col_names[item_type].add(invalid_col_name)
# skip to next item, as additional validations won't work
# (key is not in the data model)
## new style
add_to_invalid_data(item_name, item_type, invalid_data,
invalid_col_name, 'invalid_col')
# skip to next item, as additional validations won't work
# (key is not in the data model)
continue
# make a list of missing, required data
missing_item = validate_for_presence(k, v, specific_data_model)
#print 'k, v', k, v
if missing_item:
if item_type not in list(missing_data.keys()):
missing_data[item_type] = set()
missing_data[item_type].add(missing_item)
if item_name:
# don't double count if a site is missing its parent location
if item_type == 'age' and missing_item == 'er_location_name':
pass
# ignore er_synthetic_name (data model is incorrect here)
if missing_item == 'er_synthetic_name':
pass
else:
add_to_invalid_data(item_name, item_type, invalid_data,
missing_item, 'missing_data')
# vocabulary problems
vocab_problem = validate_for_controlled_vocab(k, v, specific_data_model)
if vocab_problem:
if item_type not in list(bad_vocab.keys()):
bad_vocab[item_type] = set()
bad_vocab[item_type].add(vocab_problem)
add_to_invalid_data(item_name, item_type, invalid_data,
vocab_problem, 'vocab_problem')
# illegal coordinates
coord_problem = validate_for_coordinates(k, v, specific_data_model)
if coord_problem:
if item_type not in list(bad_coords.keys()):
bad_coords[item_type] = set()
bad_coords[item_type].add(coord_problem)
add_to_invalid_data(item_name, item_type, invalid_data,
coord_problem, 'coordinates')
# make a list of data that should be numeric, but aren't
number_fail = validate_for_numericality(k, v, specific_data_model)
if number_fail:
if item_type not in list(non_numeric.keys()):
non_numeric[item_type] = set()
non_numeric[item_type].add(number_fail)
add_to_invalid_data(item_name, item_type, invalid_data,
number_fail, 'number_fail')
## Print out all issues
for file_type, invalid_names in list(invalid_col_names.items()):
print("-W- In your {} file, you are using the following unrecognized columns: {}".format(file_type, ', '.join(invalid_names)))
for file_type, wrong_cols in list(non_numeric.items()):
print("-W- In your {} file, you must provide only valid numbers, in the following columns: {}".format(file_type, ', '.join(wrong_cols)))
for file_type, empty_cols in list(missing_data.items()):
print("-W- In your {} file, you are missing data in the following required columns: {}".format(file_type, ', '.join(empty_cols)))
for file_type in reqd_file_types:
if file_type not in provided_file_types:
print("-W- You have not provided a(n) {} type file, which is required data".format(file_type))
missing_file_type = True
for file_type, vocab_types in list(bad_vocab.items()):
print("-W- In your {} file, you are using an unrecognized value for these controlled vocabularies: {}".format(file_type, ', '.join(vocab_types)))
for file_type, coords in list(bad_coords.items()):
print("-W- In your {} file, you are using an illegal value for these columns: {}. (Latitude must be between -90 and +90)".format(file_type, ', '.join(coords)))
if any((invalid_col_names, non_numeric, missing_data, missing_file_type, bad_vocab, bad_coords)):
return False, invalid_data
else:
print("-I- validation was successful")
return True, None | take a file that should be ready for upload
using the data model, check that all required columns are full,
and that all numeric data is in fact numeric.
print out warnings for any validation problems
return True if there were no problems, otherwise return False | Below is the instruction that describes the task:
### Input:
take a file that should be ready for upload
using the data model, check that all required columns are full,
and that all numeric data is in fact numeric.
print out warnings for any validation problems
return True if there were no problems, otherwise return False
### Response:
def read_upload(up_file, data_model=None):
"""
take a file that should be ready for upload
using the data model, check that all required columns are full,
and that all numeric data is in fact numeric.
print out warnings for any validation problems
return True if there were no problems, otherwise return False
"""
print("-I- Running validation for your upload file")
## Read file
f = open(up_file)
lines = f.readlines()
f.close()
data = split_lines(lines)
data_dicts = get_dicts(data)
## initialize
invalid_data = {}
missing_data = {}
non_numeric = {}
bad_vocab = {}
bad_coords = {}
invalid_col_names = {}
missing_file_type = False
## make sure you have the data model
if not data_model:
data_model = get_data_model()
reqd_file_types = ['er_locations']
provided_file_types = set()
if not data_model:
return False, None
## Iterate through data
# each dictionary is one tab delimited line in a csv file
for dictionary in data_dicts:
for k, v in list(dictionary.items()):
if k == "file_type": # meta data
provided_file_types.add(v)
continue
file_type = dictionary['file_type']
# need to deal with pmag_criteria type file, too
item_type = file_type.split('_')[1][:-1]
if item_type == 'criteria':
item_name = dictionary.get('criteria_definition')
elif item_type == 'result':
item_name = dictionary.get('pmag_result_name', None)
elif item_type in ('specimen', 'sample', 'site', 'location'):
item_name = dictionary.get('er_' + item_type + '_name', None)
elif item_type == 'age':
# get the lowest level er_*_name column that is filled in
for dtype in ('specimen', 'sample', 'site', 'location'):
item_name = dictionary.get('er_' + dtype + '_name', None)
if item_name:
break
elif item_type == 'measurement':
exp_name = dictionary.get('magic_experiment_name')
meas_num = dictionary.get('measurement_number')
item_name = exp_name + '_' + str(meas_num)
else:
item_name = None
if file_type not in list(data_model.keys()):
continue
specific_data_model = data_model[file_type]
## Function for building problems list
def add_to_invalid_data(item_name, item_type, invalid_data,
validation, problem_type):
"""
correctly create or add to the dictionary of invalid values
"""
if item_name:
if item_type not in invalid_data:
invalid_data[item_type] = {}
if item_name not in invalid_data[item_type]:
invalid_data[item_type][item_name] = {}
if problem_type not in invalid_data[item_type][item_name]:
invalid_data[item_type][item_name][problem_type] = []
invalid_data[item_type][item_name][problem_type].append(validation)
## Validate for each problem type
# check if column header is in the data model
invalid_col_name = validate_for_recognized_column(k, v, specific_data_model)
if invalid_col_name:
if item_type not in list(invalid_col_names.keys()):
invalid_col_names[item_type] = set()
invalid_col_names[item_type].add(invalid_col_name)
# skip to next item, as additional validations won't work
# (key is not in the data model)
## new style
add_to_invalid_data(item_name, item_type, invalid_data,
invalid_col_name, 'invalid_col')
# skip to next item, as additional validations won't work
# (key is not in the data model)
continue
# make a list of missing, required data
missing_item = validate_for_presence(k, v, specific_data_model)
#print 'k, v', k, v
if missing_item:
if item_type not in list(missing_data.keys()):
missing_data[item_type] = set()
missing_data[item_type].add(missing_item)
if item_name:
# don't double count if a site is missing its parent location
if item_type == 'age' and missing_item == 'er_location_name':
pass
# ignore er_synthetic_name (data model is incorrect here)
if missing_item == 'er_synthetic_name':
pass
else:
add_to_invalid_data(item_name, item_type, invalid_data,
missing_item, 'missing_data')
# vocabulary problems
vocab_problem = validate_for_controlled_vocab(k, v, specific_data_model)
if vocab_problem:
if item_type not in list(bad_vocab.keys()):
bad_vocab[item_type] = set()
bad_vocab[item_type].add(vocab_problem)
add_to_invalid_data(item_name, item_type, invalid_data,
vocab_problem, 'vocab_problem')
# illegal coordinates
coord_problem = validate_for_coordinates(k, v, specific_data_model)
if coord_problem:
if item_type not in list(bad_coords.keys()):
bad_coords[item_type] = set()
bad_coords[item_type].add(coord_problem)
add_to_invalid_data(item_name, item_type, invalid_data,
coord_problem, 'coordinates')
# make a list of data that should be numeric, but aren't
number_fail = validate_for_numericality(k, v, specific_data_model)
if number_fail:
if item_type not in list(non_numeric.keys()):
non_numeric[item_type] = set()
non_numeric[item_type].add(number_fail)
add_to_invalid_data(item_name, item_type, invalid_data,
number_fail, 'number_fail')
## Print out all issues
for file_type, invalid_names in list(invalid_col_names.items()):
print("-W- In your {} file, you are using the following unrecognized columns: {}".format(file_type, ', '.join(invalid_names)))
for file_type, wrong_cols in list(non_numeric.items()):
print("-W- In your {} file, you must provide only valid numbers, in the following columns: {}".format(file_type, ', '.join(wrong_cols)))
for file_type, empty_cols in list(missing_data.items()):
print("-W- In your {} file, you are missing data in the following required columns: {}".format(file_type, ', '.join(empty_cols)))
for file_type in reqd_file_types:
if file_type not in provided_file_types:
print("-W- You have not provided a(n) {} type file, which is required data".format(file_type))
missing_file_type = True
for file_type, vocab_types in list(bad_vocab.items()):
print("-W- In your {} file, you are using an unrecognized value for these controlled vocabularies: {}".format(file_type, ', '.join(vocab_types)))
for file_type, coords in list(bad_coords.items()):
print("-W- In your {} file, you are using an illegal value for these columns: {}. (Latitude must be between -90 and +90)".format(file_type, ', '.join(coords)))
if any((invalid_col_names, non_numeric, missing_data, missing_file_type, bad_vocab, bad_coords)):
return False, invalid_data
else:
print("-I- validation was successful")
return True, None |
def from_table(cls, table, length, prefix=0, flatten=False):
"""
Extract from the given table a tree for word length, taking only
prefixes of prefix length (if greater than 0) into account to compute
successors.
:param table: the table to extract the tree from;
:param length: the length of words generated by the extracted tree;
greater or equal to 1;
:param prefix: if greater than 0, the length of the prefixes used for
computing successors;
:param flatten: whether to flatten the table or not;
:return: the tree corresponding to words of length from table.
"""
# Build the expanded tree with necessary suffix and length
tree = defaultdict(dict) # The tree
pending = {(">", 0)} # The nodes to expand
while pending:
suffix, size = pending.pop()
if size < length:
choices = table.weighted_choices(suffix, exclude={"<"},
flatten=flatten)
# The word length is not reached yet, expand
for successor, weight in choices.items():
expanded = suffix + successor
if prefix > 0:
expanded = expanded[-prefix:]
new_node = (expanded, size + 1)
tree[(suffix, size)][new_node] = weight
pending.add(new_node)
else:
choices = table.weighted_choices(suffix, flatten=flatten)
# The word length is reached, only add < if present
if "<" in choices:
tree[(suffix, size)][("<", size + 1)] = 1
else:
tree[(suffix, size)] = dict()
return cls(cls.trim_tree(tree)) | Extract from the given table a tree for word length, taking only
prefixes of prefix length (if greater than 0) into account to compute
successors.
:param table: the table to extract the tree from;
:param length: the length of words generated by the extracted tree;
greater or equal to 1;
:param prefix: if greater than 0, the length of the prefixes used for
computing successors;
:param flatten: whether to flatten the table or not;
:return: the tree corresponding to words of length from table. | Below is the instruction that describes the task:
### Input:
Extract from the given table a tree for word length, taking only
prefixes of prefix length (if greater than 0) into account to compute
successors.
:param table: the table to extract the tree from;
:param length: the length of words generated by the extracted tree;
greater or equal to 1;
:param prefix: if greater than 0, the length of the prefixes used for
computing successors;
:param flatten: whether to flatten the table or not;
:return: the tree corresponding to words of length from table.
### Response:
def from_table(cls, table, length, prefix=0, flatten=False):
"""
Extract from the given table a tree for word length, taking only
prefixes of prefix length (if greater than 0) into account to compute
successors.
:param table: the table to extract the tree from;
:param length: the length of words generated by the extracted tree;
greater or equal to 1;
:param prefix: if greater than 0, the length of the prefixes used for
computing successors;
:param flatten: whether to flatten the table or not;
:return: the tree corresponding to words of length from table.
"""
# Build the expanded tree with necessary suffix and length
tree = defaultdict(dict) # The tree
pending = {(">", 0)} # The nodes to expand
while pending:
suffix, size = pending.pop()
if size < length:
choices = table.weighted_choices(suffix, exclude={"<"},
flatten=flatten)
# The word length is not reached yet, expand
for successor, weight in choices.items():
expanded = suffix + successor
if prefix > 0:
expanded = expanded[-prefix:]
new_node = (expanded, size + 1)
tree[(suffix, size)][new_node] = weight
pending.add(new_node)
else:
choices = table.weighted_choices(suffix, flatten=flatten)
# The word length is reached, only add < if present
if "<" in choices:
tree[(suffix, size)][("<", size + 1)] = 1
else:
tree[(suffix, size)] = dict()
return cls(cls.trim_tree(tree)) |
def set_principal_credit_string(self, credit_string=None):
"""Sets the principal credit string.
:param credit_string: the new credit string
:type credit_string: ``string``
:raise: ``InvalidArgument`` -- ``credit_string`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``credit_string`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if credit_string is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA['principal_credit_string'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(credit_string, metadata, array=False):
self._my_map['principalCreditString']['text'] = credit_string
else:
raise InvalidArgument() | Sets the principal credit string.
:param credit_string: the new credit string
:type credit_string: ``string``
:raise: ``InvalidArgument`` -- ``credit_string`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``credit_string`` is ``null``
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Sets the principal credit string.
:param credit_string: the new credit string
:type credit_string: ``string``
:raise: ``InvalidArgument`` -- ``credit_string`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``credit_string`` is ``null``
*compliance: mandatory -- This method must be implemented.*
### Response:
def set_principal_credit_string(self, credit_string=None):
"""Sets the principal credit string.
:param credit_string: the new credit string
:type credit_string: ``string``
:raise: ``InvalidArgument`` -- ``credit_string`` is invalid
:raise: ``NoAccess`` -- ``Metadata.isReadOnly()`` is ``true``
:raise: ``NullArgument`` -- ``credit_string`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if credit_string is None:
raise NullArgument()
metadata = Metadata(**settings.METADATA['principal_credit_string'])
if metadata.is_read_only():
raise NoAccess()
if self._is_valid_input(credit_string, metadata, array=False):
self._my_map['principalCreditString']['text'] = credit_string
else:
raise InvalidArgument() |
def publish_events(
self, topic_hostname, events, custom_headers=None, raw=False, **operation_config):
"""Publishes a batch of events to an Azure Event Grid topic.
:param topic_hostname: The host name of the topic, e.g.
topic1.westus2-1.eventgrid.azure.net
:type topic_hostname: str
:param events: An array of events to be published to Event Grid.
:type events: list[~azure.eventgrid.models.EventGridEvent]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.publish_events.metadata['url']
path_format_arguments = {
'topicHostname': self._serialize.url("topic_hostname", topic_hostname, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(events, '[EventGridEvent]')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response | Publishes a batch of events to an Azure Event Grid topic.
:param topic_hostname: The host name of the topic, e.g.
topic1.westus2-1.eventgrid.azure.net
:type topic_hostname: str
:param events: An array of events to be published to Event Grid.
:type events: list[~azure.eventgrid.models.EventGridEvent]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>` | Below is the instruction that describes the task:
### Input:
Publishes a batch of events to an Azure Event Grid topic.
:param topic_hostname: The host name of the topic, e.g.
topic1.westus2-1.eventgrid.azure.net
:type topic_hostname: str
:param events: An array of events to be published to Event Grid.
:type events: list[~azure.eventgrid.models.EventGridEvent]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
### Response:
def publish_events(
self, topic_hostname, events, custom_headers=None, raw=False, **operation_config):
"""Publishes a batch of events to an Azure Event Grid topic.
:param topic_hostname: The host name of the topic, e.g.
topic1.westus2-1.eventgrid.azure.net
:type topic_hostname: str
:param events: An array of events to be published to Event Grid.
:type events: list[~azure.eventgrid.models.EventGridEvent]
:param dict custom_headers: headers that will be added to the request
:param bool raw: returns the direct response alongside the
deserialized response
:param operation_config: :ref:`Operation configuration
overrides<msrest:optionsforoperations>`.
:return: None or ClientRawResponse if raw=true
:rtype: None or ~msrest.pipeline.ClientRawResponse
:raises:
:class:`HttpOperationError<msrest.exceptions.HttpOperationError>`
"""
# Construct URL
url = self.publish_events.metadata['url']
path_format_arguments = {
'topicHostname': self._serialize.url("topic_hostname", topic_hostname, 'str', skip_quote=True)
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {}
query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')
# Construct headers
header_parameters = {}
header_parameters['Content-Type'] = 'application/json; charset=utf-8'
if custom_headers:
header_parameters.update(custom_headers)
# Construct body
body_content = self._serialize.body(events, '[EventGridEvent]')
# Construct and send request
request = self._client.post(url, query_parameters)
response = self._client.send(
request, header_parameters, body_content, stream=False, **operation_config)
if response.status_code not in [200]:
raise HttpOperationError(self._deserialize, response)
if raw:
client_raw_response = ClientRawResponse(None, response)
return client_raw_response |
def findFlippedSNPs(goldFrqFile1, sourceAlleles, outPrefix):
"""Find flipped SNPs and flip them in the data1."""
goldAlleles = {}
with open(goldFrqFile1, "r") as inputFile:
headerIndex = None
for i, line in enumerate(inputFile):
row = createRowFromPlinkSpacedOutput(line)
if i == 0:
# This is the header
headerIndex = dict([
(row[j], j) for j in xrange(len(row))
])
# Checking the columns
for columnName in ["SNP", "A1", "A2"]:
if columnName not in headerIndex:
msg = "%(fileName)s: no column named " \
"%(columnName)s" % locals()
raise ProgramError(msg)
else:
snpName = row[headerIndex["SNP"]]
allele1 = row[headerIndex["A1"]]
allele2 = row[headerIndex["A2"]]
alleles = set([allele1, allele2])
if "0" in alleles:
alleles.remove("0")
goldAlleles[snpName] = alleles
# Finding the SNPs to flip
toFlipOutputFile = None
try:
toFlipOutputFile = open(outPrefix + ".snp_to_flip_in_reference", "w")
except IOError:
msg = "%(outPrefix)s.snp_to_flip_in_reference: can't write " \
"file" % locals()
raise ProgramError(msg)
toRemoveOutputFile = None
try:
toRemoveOutputFile = open(outPrefix + ".snp_to_remove", "w")
except IOError:
msg = "%(outPrefix)s.snp_to_remove: can't write file" % locals()
raise ProgramError(msg)
toRemoveOutputFileExplanation = None
try:
toRemoveOutputFileExplanation = open(
outPrefix + ".snp_to_remove.explanation",
"w",
)
print >>toRemoveOutputFileExplanation, "\t".join(["Name", "Reason",
"Alleles 1",
"Alleles 2"])
except IOError:
msg = "%(outPrefix)s.snp_to_remove: can't write file" % locals()
raise ProgramError(msg)
for snpName in goldAlleles.iterkeys():
alleles1 = goldAlleles[snpName]
alleles2 = sourceAlleles[snpName]
if (len(alleles1) == 2) and (len(alleles2) == 2):
# Both are heterozygous
if (({"A", "T"} == alleles1 and {"A", "T"} == alleles2) or
({"C", "G"} == alleles1 and {"C", "G"} == alleles2)):
# We can't flip those..., so we remove them
print >>toRemoveOutputFile, snpName
print >>toRemoveOutputFileExplanation, "\t".join([
snpName,
"Undetermined",
"".join(alleles1),
"".join(alleles2),
])
else:
if alleles1 != alleles2:
# Let's try the flip one
if flipGenotype(alleles1) == alleles2:
# We need to flip it
print >>toFlipOutputFile, snpName
else:
# Those SNP are discordant...
print >>toRemoveOutputFile, snpName
print >>toRemoveOutputFileExplanation, "\t".join([
snpName,
"Invalid",
"".join(alleles1),
"".join(alleles2),
])
else:
# We want to remove this SNP, because there is at least one
# homozygous individual
print >>toRemoveOutputFile, snpName
tmp_allele1 = "".join(alleles1)
if len(alleles1) == 1:
tmp_allele1 += tmp_allele1
tmp_allele2 = "".join(alleles1)
if len(alleles1) == 1:
tmp_allele2 += tmp_allele2
print >>toRemoveOutputFileExplanation, "\t".join([snpName,
"Homozygous",
tmp_allele1,
tmp_allele2])
# Closing output files
toFlipOutputFile.close()
toRemoveOutputFile.close()
toRemoveOutputFileExplanation.close() | Find flipped SNPs and flip them in the data1. | Below is the instruction that describes the task:
### Input:
Find flipped SNPs and flip them in the data1.
### Response:
def findFlippedSNPs(goldFrqFile1, sourceAlleles, outPrefix):
"""Find flipped SNPs and flip them in the data1."""
goldAlleles = {}
with open(goldFrqFile1, "r") as inputFile:
headerIndex = None
for i, line in enumerate(inputFile):
row = createRowFromPlinkSpacedOutput(line)
if i == 0:
# This is the header
headerIndex = dict([
(row[j], j) for j in xrange(len(row))
])
# Checking the columns
for columnName in ["SNP", "A1", "A2"]:
if columnName not in headerIndex:
msg = "%(fileName)s: no column named " \
"%(columnName)s" % locals()
raise ProgramError(msg)
else:
snpName = row[headerIndex["SNP"]]
allele1 = row[headerIndex["A1"]]
allele2 = row[headerIndex["A2"]]
alleles = set([allele1, allele2])
if "0" in alleles:
alleles.remove("0")
goldAlleles[snpName] = alleles
# Finding the SNPs to flip
toFlipOutputFile = None
try:
toFlipOutputFile = open(outPrefix + ".snp_to_flip_in_reference", "w")
except IOError:
msg = "%(outPrefix)s.snp_to_flip_in_reference: can't write " \
"file" % locals()
raise ProgramError(msg)
toRemoveOutputFile = None
try:
toRemoveOutputFile = open(outPrefix + ".snp_to_remove", "w")
except IOError:
msg = "%(outPrefix)s.snp_to_remove: can't write file" % locals()
raise ProgramError(msg)
toRemoveOutputFileExplanation = None
try:
toRemoveOutputFileExplanation = open(
outPrefix + ".snp_to_remove.explanation",
"w",
)
print >>toRemoveOutputFileExplanation, "\t".join(["Name", "Reason",
"Alleles 1",
"Alleles 2"])
except IOError:
msg = "%(outPrefix)s.snp_to_remove: can't write file" % locals()
raise ProgramError(msg)
for snpName in goldAlleles.iterkeys():
alleles1 = goldAlleles[snpName]
alleles2 = sourceAlleles[snpName]
if (len(alleles1) == 2) and (len(alleles2) == 2):
# Both are heterozygous
if (({"A", "T"} == alleles1 and {"A", "T"} == alleles2) or
({"C", "G"} == alleles1 and {"C", "G"} == alleles2)):
# We can't flip those..., so we remove them
print >>toRemoveOutputFile, snpName
print >>toRemoveOutputFileExplanation, "\t".join([
snpName,
"Undetermined",
"".join(alleles1),
"".join(alleles2),
])
else:
if alleles1 != alleles2:
# Let's try the flip one
if flipGenotype(alleles1) == alleles2:
# We need to flip it
print >>toFlipOutputFile, snpName
else:
# Those SNP are discordant...
print >>toRemoveOutputFile, snpName
print >>toRemoveOutputFileExplanation, "\t".join([
snpName,
"Invalid",
"".join(alleles1),
"".join(alleles2),
])
else:
# We want to remove this SNP, because there is at least one
# homozygous individual
print >>toRemoveOutputFile, snpName
tmp_allele1 = "".join(alleles1)
if len(alleles1) == 1:
tmp_allele1 += tmp_allele1
tmp_allele2 = "".join(alleles1)
if len(alleles1) == 1:
tmp_allele2 += tmp_allele2
print >>toRemoveOutputFileExplanation, "\t".join([snpName,
"Homozygous",
tmp_allele1,
tmp_allele2])
# Closing output files
toFlipOutputFile.close()
toRemoveOutputFile.close()
toRemoveOutputFileExplanation.close() |
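A minimal standalone sketch of the strand-flip test above: flipGenotype is defined elsewhere in the source, so the complement map and helper below are assumptions for illustration only.
COMPLEMENT = {"A": "T", "T": "A", "C": "G", "G": "C"}

def flip_genotype_sketch(alleles):
    # Complement each allele of the set, e.g. {"A", "G"} -> {"T", "C"}
    return {COMPLEMENT[a] for a in alleles}

gold_alleles = {"A", "G"}
source_alleles = {"T", "C"}
# True means the SNP would be written to the ".snp_to_flip_in_reference" file.
print(flip_genotype_sketch(gold_alleles) == source_alleles)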
def nl2br(self, text):
"""
Replace \'\n\' with \'<br/>\\n\'
"""
if isinstance(text, bytes):
return text.replace(b'\n', b'<br/>\n')
else:
return text.replace('\n', '<br/>\n') | Replace \'\n\' with \'<br/>\\n\' | Below is the instruction that describes the task:
### Input:
Replace \'\n\' with \'<br/>\\n\'
### Response:
def nl2br(self, text):
"""
Replace \'\n\' with \'<br/>\\n\'
"""
if isinstance(text, bytes):
return text.replace(b'\n', b'<br/>\n')
else:
return text.replace('\n', '<br/>\n') |
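A quick check of the str/bytes branches, rewritten as a free function because the method above never touches self (an assumption made only for this sketch):
def nl2br_sketch(text):
    if isinstance(text, bytes):
        return text.replace(b'\n', b'<br/>\n')
    return text.replace('\n', '<br/>\n')

print(nl2br_sketch("line one\nline two"))
# -> line one<br/>
#    line two
print(nl2br_sketch(b"line one\nline two"))
# -> b'line one<br/>\nline two'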
def update_cb(self, context, t, idx, userdata):
"""A sink property changed, calls request_update"""
if t & PA_SUBSCRIPTION_EVENT_FACILITY_MASK == PA_SUBSCRIPTION_EVENT_SERVER:
pa_operation_unref(
pa_context_get_server_info(context, self._server_info_cb, None))
self.request_update(context) | A sink property changed, calls request_update | Below is the instruction that describes the task:
### Input:
A sink property changed, calls request_update
### Response:
def update_cb(self, context, t, idx, userdata):
"""A sink property changed, calls request_update"""
if t & PA_SUBSCRIPTION_EVENT_FACILITY_MASK == PA_SUBSCRIPTION_EVENT_SERVER:
pa_operation_unref(
pa_context_get_server_info(context, self._server_info_cb, None))
self.request_update(context) |
def mcc(tp, tn, fp, fn):
""" Matthew's Correlation Coefficient [-1, 1]
0 = you're just guessing
:param int tp: number of true positives
:param int tn: number of true negatives
:param int fp: number of false positives
:param int fn: number of false negatives
:rtype: float
"""
if tp + fp == 0 or tp + fn == 0 or tn + fp == 0 or tn + fn == 0:
den = 1.0
else:
den = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
return (tp * tn - fp * fn) / den | Matthew's Correlation Coefficient [-1, 1]
0 = you're just guessing
:param int tp: number of true positives
:param int tn: number of true negatives
:param int fp: number of false positives
:param int fn: number of false negatives
:rtype: float | Below is the instruction that describes the task:
### Input:
Matthew's Correlation Coefficient [-1, 1]
0 = you're just guessing
:param int tp: number of true positives
:param int tn: number of true negatives
:param int fp: number of false positives
:param int fn: number of false negatives
:rtype: float
### Response:
def mcc(tp, tn, fp, fn):
""" Matthew's Correlation Coefficient [-1, 1]
0 = you're just guessing
:param int tp: number of true positives
:param int tn: number of true negatives
:param int fp: number of false positives
:param int fn: number of false negatives
:rtype: float
"""
if tp + fp == 0 or tp + fn == 0 or tn + fp == 0 or tn + fn == 0:
den = 1.0
else:
den = math.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
return (tp * tn - fp * fn) / den |
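A small worked example, assuming mcc as defined above (and its math import) is in scope; the confusion-matrix counts are made up:
# 90 true positives, 80 true negatives, 10 false positives, 20 false negatives.
score = mcc(tp=90, tn=80, fp=10, fn=20)
print(round(score, 3))  # ~0.704, comfortably above the 0 expected from guessing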
def set_abort_pending(self, newstate):
"""
Method to set Abort state if something goes wrong during provisioning
Method also used to finish provisioning process when all is completed
Method: POST
"""
self.logger.debug("set_abort_pending(" + "{})".format(newstate))
# NOT TO BE USED
#default_minimal_cluster_config = '{"installationId":null,"mdmIPs":["192.168.102.12","192.168.102.13"],"mdmPassword":"Scaleio123","liaPassword":"Scaleio123","licenseKey":null,"primaryMdm":{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.12"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"managementIPs":null,"mdmIPs":["192.168.102.12"]},"secondaryMdm":{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.13"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"managementIPs":null,"mdmIPs":["192.168.102.13"]},"tb":{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.11"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"tbIPs":["192.168.102.11"]},"sdsList":[{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.11"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"sdsName":"SDS_[192.168.102.11]","protectionDomain":"default","faultSet":null,"allIPs":["192.168.102.11"],"sdsOnlyIPs":null,"sdcOnlyIPs":null,"devices":[{"devicePath":"/home/vagrant/scaleio1","storagePool":null,"deviceName":null}],"optimized":false,"port":7072},{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.12"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"sdsName":"SDS_[192.168.102.12]","protectionDomain":"default","faultSet":null,"allIPs":["192.168.102.12"],"sdsOnlyIPs":null,"sdcOnlyIPs":null,"devices":[{"devicePath":"/home/vagrant/scaleio1","storagePool":null,"deviceName":null}],"optimized":false,"port":7072},{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.13"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"sdsName":"SDS_[192.168.102.13]","protectionDomain":"default","faultSet":null,"allIPs":["192.168.102.13"],"sdsOnlyIPs":null,"sdcOnlyIPs":null,"devices":[{"devicePath":"/home/vagrant/scaleio1","storagePool":null,"deviceName":null}],"optimized":false,"port":7072}],"sdcList":[{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.11"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"splitterRpaIp":null},{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.12"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"splitterRpaIp":null},{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.13"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"splitterRpaIp":null}],"callHomeConfiguration":null,"remoteSyslogConfiguration":null}'
r1 = self._im_session.post(
"{}/{}".format(self._im_api_url,"types/Command/instances/actions/abortPending"),
headers={'Content-type':'application/json','Version':'1.0'},
verify=self._im_verify_ssl,
data = newstate,
stream=True
)
if not r1.ok:
# Something went wrong
self.logger.error("Error set_abort_pending(" +"{})".format(newstate))
#print "Response after set_abort_pending()"
# RESPONSE NEED TO BE WRAPPED IN try/catch. Cannot assume JSON is returned.
#print r1.text
#pprint (json.loads(r1.text))
return r1.text | Method to set Abort state if something goes wrong during provisioning
Method also used to finish provisioning process when all is completed
Method: POST | Below is the instruction that describes the task:
### Input:
Method to set Abort state if something goes wrong during provisioning
Method also used to finish provisioning process when all is completed
Method: POST
### Response:
def set_abort_pending(self, newstate):
"""
Method to set Abort state if something goes wrong during provisioning
Method also used to finish provisioning process when all is completed
Method: POST
"""
self.logger.debug("set_abort_pending(" + "{})".format(newstate))
# NOT TO BE USED
#default_minimal_cluster_config = '{"installationId":null,"mdmIPs":["192.168.102.12","192.168.102.13"],"mdmPassword":"Scaleio123","liaPassword":"Scaleio123","licenseKey":null,"primaryMdm":{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.12"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"managementIPs":null,"mdmIPs":["192.168.102.12"]},"secondaryMdm":{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.13"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"managementIPs":null,"mdmIPs":["192.168.102.13"]},"tb":{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.11"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"tbIPs":["192.168.102.11"]},"sdsList":[{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.11"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"sdsName":"SDS_[192.168.102.11]","protectionDomain":"default","faultSet":null,"allIPs":["192.168.102.11"],"sdsOnlyIPs":null,"sdcOnlyIPs":null,"devices":[{"devicePath":"/home/vagrant/scaleio1","storagePool":null,"deviceName":null}],"optimized":false,"port":7072},{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.12"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"sdsName":"SDS_[192.168.102.12]","protectionDomain":"default","faultSet":null,"allIPs":["192.168.102.12"],"sdsOnlyIPs":null,"sdcOnlyIPs":null,"devices":[{"devicePath":"/home/vagrant/scaleio1","storagePool":null,"deviceName":null}],"optimized":false,"port":7072},{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.13"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"sdsName":"SDS_[192.168.102.13]","protectionDomain":"default","faultSet":null,"allIPs":["192.168.102.13"],"sdsOnlyIPs":null,"sdcOnlyIPs":null,"devices":[{"devicePath":"/home/vagrant/scaleio1","storagePool":null,"deviceName":null}],"optimized":false,"port":7072}],"sdcList":[{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.11"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"splitterRpaIp":null},{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.12"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"splitterRpaIp":null},{"node":{"ostype":"linux","nodeName":null,"nodeIPs":["192.168.102.13"],"domain":null,"userName":"root","password":"vagrant","liaPassword":null},"nodeInfo":null,"splitterRpaIp":null}],"callHomeConfiguration":null,"remoteSyslogConfiguration":null}'
r1 = self._im_session.post(
"{}/{}".format(self._im_api_url,"types/Command/instances/actions/abortPending"),
headers={'Content-type':'application/json','Version':'1.0'},
verify=self._im_verify_ssl,
data = newstate,
stream=True
)
if not r1.ok:
# Something went wrong
self.logger.error("Error set_abort_pending(" +"{})".format(newstate))
#print "Response after set_abort_pending()"
# RESPONSE NEED TO BE WRAPPED IN try/catch. Cannot assume JSON is returned.
#print r1.text
#pprint (json.loads(r1.text))
return r1.text |
def recordings(self):
"""
Access the recordings
:returns: twilio.rest.api.v2010.account.conference.recording.RecordingList
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingList
"""
if self._recordings is None:
self._recordings = RecordingList(
self._version,
account_sid=self._solution['account_sid'],
conference_sid=self._solution['sid'],
)
return self._recordings | Access the recordings
:returns: twilio.rest.api.v2010.account.conference.recording.RecordingList
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingList | Below is the instruction that describes the task:
### Input:
Access the recordings
:returns: twilio.rest.api.v2010.account.conference.recording.RecordingList
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingList
### Response:
def recordings(self):
"""
Access the recordings
:returns: twilio.rest.api.v2010.account.conference.recording.RecordingList
:rtype: twilio.rest.api.v2010.account.conference.recording.RecordingList
"""
if self._recordings is None:
self._recordings = RecordingList(
self._version,
account_sid=self._solution['account_sid'],
conference_sid=self._solution['sid'],
)
return self._recordings |
def field_type(field):
"""
Template filter that returns field class name (in lower case).
E.g. if field is CharField then {{ field|field_type }} will
return 'charfield'.
"""
if hasattr(field, 'field') and field.field:
return field.field.__class__.__name__.lower()
return '' | Template filter that returns field class name (in lower case).
E.g. if field is CharField then {{ field|field_type }} will
return 'charfield'. | Below is the instruction that describes the task:
### Input:
Template filter that returns field class name (in lower case).
E.g. if field is CharField then {{ field|field_type }} will
return 'charfield'.
### Response:
def field_type(field):
"""
Template filter that returns field class name (in lower case).
E.g. if field is CharField then {{ field|field_type }} will
return 'charfield'.
"""
if hasattr(field, 'field') and field.field:
return field.field.__class__.__name__.lower()
return '' |
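A hedged illustration with stand-in classes rather than real Django form fields, assuming field_type as defined above is in scope:
class CharField:
    # Stands in for django.forms.CharField; only the class name matters here.
    pass

class BoundFieldStub:
    # Stands in for a bound form field exposing a .field attribute.
    def __init__(self, field):
        self.field = field

print(field_type(BoundFieldStub(CharField())))  # 'charfield'
print(field_type(object()))                     # '' (no .field attribute)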
def remove(self):
"""
remove this object from Ariane server
:return:
"""
LOGGER.debug("Environment.remove")
if self.id is None:
return None
else:
params = {
'id': self.id
}
args = {'http_operation': 'GET', 'operation_path': 'delete', 'parameters': params}
response = EnvironmentService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'Environment.remove - Problem while deleting environment ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
return self
else:
return None | remove this object from Ariane server
:return: | Below is the instruction that describes the task:
### Input:
remove this object from Ariane server
:return:
### Response:
def remove(self):
"""
remove this object from Ariane server
:return:
"""
LOGGER.debug("Environment.remove")
if self.id is None:
return None
else:
params = {
'id': self.id
}
args = {'http_operation': 'GET', 'operation_path': 'delete', 'parameters': params}
response = EnvironmentService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'Environment.remove - Problem while deleting environment ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
return self
else:
return None |
def create_intent(self,
workspace_id,
intent,
description=None,
examples=None,
**kwargs):
"""
Create intent.
Create a new intent.
This operation is limited to 2000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The name of the intent. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, underscore, hyphen, and dot
characters.
- It cannot begin with the reserved prefix `sys-`.
- It must be no longer than 128 characters.
:param str description: The description of the intent. This string cannot contain
carriage return, newline, or tab characters, and it must be no longer than 128
characters.
:param list[Example] examples: An array of user input examples for the intent.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intent is None:
raise ValueError('intent must be provided')
if examples is not None:
examples = [self._convert_model(x, Example) for x in examples]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('conversation', 'V1', 'create_intent')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'intent': intent,
'description': description,
'examples': examples
}
url = '/v1/workspaces/{0}/intents'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response | Create intent.
Create a new intent.
This operation is limited to 2000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The name of the intent. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, underscore, hyphen, and dot
characters.
- It cannot begin with the reserved prefix `sys-`.
- It must be no longer than 128 characters.
:param str description: The description of the intent. This string cannot contain
carriage return, newline, or tab characters, and it must be no longer than 128
characters.
:param list[Example] examples: An array of user input examples for the intent.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse | Below is the instruction that describes the task:
### Input:
Create intent.
Create a new intent.
This operation is limited to 2000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The name of the intent. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, underscore, hyphen, and dot
characters.
- It cannot begin with the reserved prefix `sys-`.
- It must be no longer than 128 characters.
:param str description: The description of the intent. This string cannot contain
carriage return, newline, or tab characters, and it must be no longer than 128
characters.
:param list[Example] examples: An array of user input examples for the intent.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
### Response:
def create_intent(self,
workspace_id,
intent,
description=None,
examples=None,
**kwargs):
"""
Create intent.
Create a new intent.
This operation is limited to 2000 requests per 30 minutes. For more information,
see **Rate limiting**.
:param str workspace_id: Unique identifier of the workspace.
:param str intent: The name of the intent. This string must conform to the
following restrictions:
- It can contain only Unicode alphanumeric, underscore, hyphen, and dot
characters.
- It cannot begin with the reserved prefix `sys-`.
- It must be no longer than 128 characters.
:param str description: The description of the intent. This string cannot contain
carriage return, newline, or tab characters, and it must be no longer than 128
characters.
:param list[Example] examples: An array of user input examples for the intent.
:param dict headers: A `dict` containing the request headers
:return: A `DetailedResponse` containing the result, headers and HTTP status code.
:rtype: DetailedResponse
"""
if workspace_id is None:
raise ValueError('workspace_id must be provided')
if intent is None:
raise ValueError('intent must be provided')
if examples is not None:
examples = [self._convert_model(x, Example) for x in examples]
headers = {}
if 'headers' in kwargs:
headers.update(kwargs.get('headers'))
sdk_headers = get_sdk_headers('conversation', 'V1', 'create_intent')
headers.update(sdk_headers)
params = {'version': self.version}
data = {
'intent': intent,
'description': description,
'examples': examples
}
url = '/v1/workspaces/{0}/intents'.format(
*self._encode_path_vars(workspace_id))
response = self.request(
method='POST',
url=url,
headers=headers,
params=params,
json=data,
accept_json=True)
return response |
def dms2dd(degrees, minutes, seconds, direction):
"""convert degrees, minutes, seconds to dd
:param string direction: one of N S W E
"""
dd = (degrees + minutes/60.0) + (seconds/3600.0) # 60.0 fraction for python 2+ compatibility
return dd * -1 if direction == 'S' or direction == 'W' else dd | convert degrees, minutes, seconds to dd
:param string direction: one of N S W E | Below is the instruction that describes the task:
### Input:
convert degrees, minutes, seconds to dd
:param string direction: one of N S W E
### Response:
def dms2dd(degrees, minutes, seconds, direction):
"""convert degrees, minutes, seconds to dd
:param string direction: one of N S W E
"""
dd = (degrees + minutes/60.0) + (seconds/3600.0) # 60.0 fraction for python 2+ compatibility
return dd * -1 if direction == 'S' or direction == 'W' else dd |
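A worked conversion, assuming dms2dd as defined above is in scope; the coordinates are arbitrary examples:
lon = dms2dd(degrees=73, minutes=59, seconds=9, direction='W')
print(round(lon, 4))  # -73.9858, negative because the direction is W
lat = dms2dd(degrees=40, minutes=45, seconds=54, direction='N')
print(round(lat, 4))  # 40.765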
def add_team(self, team, sync=True):
"""
add a team to this OS instance.
:param team: the team to add on this OS instance
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the team object on list to be added on next save().
:return:
"""
LOGGER.debug("OSInstance.add_team")
if not sync:
self.team_2_add.append(team)
else:
if team.id is None:
team.save()
if self.id is not None and team.id is not None:
params = {
'id': self.id,
'teamID': team.id
}
args = {'http_operation': 'GET', 'operation_path': 'update/teams/add', 'parameters': params}
response = OSInstanceService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'OSInstance.add_team - Problem while updating OS instance ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
else:
self.team_ids.append(team.id)
team.osi_ids.append(self.id)
else:
LOGGER.warning(
'OSInstance.add_team - Problem while updating OS instance ' + self.name + '. Reason: application ' +
team.name + ' id is None'
) | add a team to this OS instance.
:param team: the team to add on this OS instance
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the team object on list to be added on next save().
:return: | Below is the instruction that describes the task:
### Input:
add a team to this OS instance.
:param team: the team to add on this OS instance
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the team object on list to be added on next save().
:return:
### Response:
def add_team(self, team, sync=True):
"""
add a team to this OS instance.
:param team: the team to add on this OS instance
:param sync: If sync=True(default) synchronize with Ariane server. If sync=False,
add the team object on list to be added on next save().
:return:
"""
LOGGER.debug("OSInstance.add_team")
if not sync:
self.team_2_add.append(team)
else:
if team.id is None:
team.save()
if self.id is not None and team.id is not None:
params = {
'id': self.id,
'teamID': team.id
}
args = {'http_operation': 'GET', 'operation_path': 'update/teams/add', 'parameters': params}
response = OSInstanceService.requester.call(args)
if response.rc != 0:
LOGGER.warning(
'OSInstance.add_team - Problem while updating OS instance ' + self.name +
'. Reason: ' + str(response.response_content) + '-' + str(response.error_message) +
" (" + str(response.rc) + ")"
)
else:
self.team_ids.append(team.id)
team.osi_ids.append(self.id)
else:
LOGGER.warning(
'OSInstance.add_team - Problem while updating OS instance ' + self.name + '. Reason: application ' +
team.name + ' id is None'
) |
def tree(obj):
"""
Convert object to a tree of lists, dicts and simple values.
The result can be serialized to JSON.
"""
from .objects import Object
if isinstance(obj, (bool, int, float, str, bytes)):
return obj
elif isinstance(obj, (datetime.date, datetime.time)):
return obj.isoformat()
elif isinstance(obj, dict):
return {k: tree(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple, set)):
return [tree(i) for i in obj]
elif isinstance(obj, Object):
return {obj.__class__.__qualname__: tree(obj.nonDefaults())}
else:
return str(obj) | Convert object to a tree of lists, dicts and simple values.
The result can be serialized to JSON. | Below is the instruction that describes the task:
### Input:
Convert object to a tree of lists, dicts and simple values.
The result can be serialized to JSON.
### Response:
def tree(obj):
"""
Convert object to a tree of lists, dicts and simple values.
The result can be serialized to JSON.
"""
from .objects import Object
if isinstance(obj, (bool, int, float, str, bytes)):
return obj
elif isinstance(obj, (datetime.date, datetime.time)):
return obj.isoformat()
elif isinstance(obj, dict):
return {k: tree(v) for k, v in obj.items()}
elif isinstance(obj, (list, tuple, set)):
return [tree(i) for i in obj]
elif isinstance(obj, Object):
return {obj.__class__.__qualname__: tree(obj.nonDefaults())}
else:
return str(obj) |
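A quick illustration of the recursion using builtins only (the Object branch needs the package's own base class, so it is left out); assumes tree as defined above is importable:
import datetime
import json

payload = {
    "name": "example",
    "when": datetime.date(2020, 1, 2),
    "tags": ("a", "b"),          # tuples and sets become plain lists
    "nested": [1, 2.5, True],
}
print(json.dumps(tree(payload), sort_keys=True))
# {"name": "example", "nested": [1, 2.5, true], "tags": ["a", "b"], "when": "2020-01-02"}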
def validate_redirect_url(next_url):
"""
Returns the next_url path if next_url matches allowed hosts.
"""
if not next_url:
return None
parts = urlparse(next_url)
if parts.netloc:
domain, _ = split_domain_port(parts.netloc)
allowed_hosts = (['*'] if django_settings.DEBUG
else django_settings.ALLOWED_HOSTS)
if not (domain and validate_host(domain, allowed_hosts)):
return None
return urlunparse(("", "", parts.path,
parts.params, parts.query, parts.fragment)) | Returns the next_url path if next_url matches allowed hosts. | Below is the instruction that describes the task:
### Input:
Returns the next_url path if next_url matches allowed hosts.
### Response:
def validate_redirect_url(next_url):
"""
Returns the next_url path if next_url matches allowed hosts.
"""
if not next_url:
return None
parts = urlparse(next_url)
if parts.netloc:
domain, _ = split_domain_port(parts.netloc)
allowed_hosts = (['*'] if django_settings.DEBUG
else django_settings.ALLOWED_HOSTS)
if not (domain and validate_host(domain, allowed_hosts)):
return None
return urlunparse(("", "", parts.path,
parts.params, parts.query, parts.fragment)) |
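A rough standard-library-only sketch of the same open-redirect guard; Django's split_domain_port and validate_host helpers are replaced here by a naive allow-list check, so this is an approximation under stated assumptions, not the original behaviour:
from urllib.parse import urlparse, urlunparse

ALLOWED_HOSTS = {"example.com"}  # hypothetical allow-list

def validate_redirect_url_sketch(next_url):
    if not next_url:
        return None
    parts = urlparse(next_url)
    if parts.netloc:
        host = parts.netloc.rsplit(":", 1)[0].lower()  # crude port strip
        if host not in ALLOWED_HOSTS:
            return None
    return urlunparse(("", "", parts.path, parts.params, parts.query, parts.fragment))

print(validate_redirect_url_sketch("https://example.com/next?page=2"))  # /next?page=2
print(validate_redirect_url_sketch("https://evil.test/steal"))          # None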
def _wait_for_new_tasks(self, timeout=0, batch_timeout=0):
"""
Check activity channel and wait as necessary.
This method is also used to slow down the main processing loop to reduce
the effects of rapidly sending Redis commands. This method will exit
for any of these conditions:
1. _did_work is True, suggests there could be more work pending
2. Found new queue and after batch timeout. Note batch timeout
can be zero so it will exit immediately.
3. Timeout seconds have passed, this is the maximum time to stay in
this method
"""
new_queue_found = False
start_time = batch_exit = time.time()
while True:
# Check to see if batch_exit has been updated
if batch_exit > start_time:
pubsub_sleep = batch_exit - time.time()
else:
pubsub_sleep = start_time + timeout - time.time()
message = self._pubsub.get_message(timeout=0 if pubsub_sleep < 0 or
self._did_work
else pubsub_sleep)
# Pull remaining messages off of channel
while message:
if message['type'] == 'message':
new_queue_found, batch_exit = self._process_queue_message(
message['data'], new_queue_found, batch_exit,
start_time, timeout, batch_timeout
)
message = self._pubsub.get_message()
if self._did_work:
break # Exit immediately if we did work during the last
# execution loop because there might be more work to do
elif time.time() >= batch_exit and new_queue_found:
break # After finding a new queue we can wait until the
# batch timeout expires
elif time.time() - start_time > timeout:
break | Check activity channel and wait as necessary.
This method is also used to slow down the main processing loop to reduce
the effects of rapidly sending Redis commands. This method will exit
for any of these conditions:
1. _did_work is True, suggests there could be more work pending
2. Found new queue and after batch timeout. Note batch timeout
can be zero so it will exit immediately.
3. Timeout seconds have passed, this is the maximum time to stay in
this method | Below is the instruction that describes the task:
### Input:
Check activity channel and wait as necessary.
This method is also used to slow down the main processing loop to reduce
the effects of rapidly sending Redis commands. This method will exit
for any of these conditions:
1. _did_work is True, suggests there could be more work pending
2. Found new queue and after batch timeout. Note batch timeout
can be zero so it will exit immediately.
3. Timeout seconds have passed, this is the maximum time to stay in
this method
### Response:
def _wait_for_new_tasks(self, timeout=0, batch_timeout=0):
"""
Check activity channel and wait as necessary.
This method is also used to slow down the main processing loop to reduce
the effects of rapidly sending Redis commands. This method will exit
for any of these conditions:
1. _did_work is True, suggests there could be more work pending
2. Found new queue and after batch timeout. Note batch timeout
can be zero so it will exit immediately.
3. Timeout seconds have passed, this is the maximum time to stay in
this method
"""
new_queue_found = False
start_time = batch_exit = time.time()
while True:
# Check to see if batch_exit has been updated
if batch_exit > start_time:
pubsub_sleep = batch_exit - time.time()
else:
pubsub_sleep = start_time + timeout - time.time()
message = self._pubsub.get_message(timeout=0 if pubsub_sleep < 0 or
self._did_work
else pubsub_sleep)
# Pull remaining messages off of channel
while message:
if message['type'] == 'message':
new_queue_found, batch_exit = self._process_queue_message(
message['data'], new_queue_found, batch_exit,
start_time, timeout, batch_timeout
)
message = self._pubsub.get_message()
if self._did_work:
break # Exit immediately if we did work during the last
# execution loop because there might be more work to do
elif time.time() >= batch_exit and new_queue_found:
break # After finding a new queue we can wait until the
# batch timeout expires
elif time.time() - start_time > timeout:
break |
def spawn_isolated_child(self):
"""
Fork or launch a new child off the target context.
:returns:
mitogen.core.Context of the new child.
"""
return self.get_chain(use_fork=True).call(
ansible_mitogen.target.spawn_isolated_child
) | Fork or launch a new child off the target context.
:returns:
mitogen.core.Context of the new child. | Below is the instruction that describes the task:
### Input:
Fork or launch a new child off the target context.
:returns:
mitogen.core.Context of the new child.
### Response:
def spawn_isolated_child(self):
"""
Fork or launch a new child off the target context.
:returns:
mitogen.core.Context of the new child.
"""
return self.get_chain(use_fork=True).call(
ansible_mitogen.target.spawn_isolated_child
) |
def mode_computations(self, f_hat, Ki_f, K, Y, likelihood, kern, Y_metadata):
"""
At the mode, compute the hessian and effective covariance matrix.
returns: logZ : approximation to the marginal likelihood
woodbury_inv : variable required for calculating the approximation to the covariance matrix
dL_dthetaL : array of derivatives (1 x num_kernel_params)
dL_dthetaL : array of derivatives (1 x num_likelihood_params)
"""
#At this point get the hessian matrix (or vector as W is diagonal)
W = -likelihood.d2logpdf_df2(f_hat, Y, Y_metadata=Y_metadata)
if np.any(np.isnan(W)):
raise ValueError('One or more element(s) of W is NaN')
K_Wi_i, logdet_I_KW, I_KW_i, Ki_W_i = self._compute_B_statistics(K, W, likelihood.log_concave)
#compute the log marginal
log_marginal = -0.5*np.sum(np.dot(Ki_f.T, f_hat)) + np.sum(likelihood.logpdf(f_hat, Y, Y_metadata=Y_metadata)) - 0.5*logdet_I_KW
# Compute matrices for derivatives
dW_df = -likelihood.d3logpdf_df3(f_hat, Y, Y_metadata=Y_metadata) # -d3lik_d3fhat
if np.any(np.isnan(dW_df)):
raise ValueError('One or more element(s) of dW_df is NaN')
dL_dfhat = -0.5*(np.diag(Ki_W_i)[:, None]*dW_df) # s2 in R&W p126 line 9.
#BiK, _ = dpotrs(L, K, lower=1)
#dL_dfhat = 0.5*np.diag(BiK)[:, None]*dW_df
I_KW_i = np.eye(Y.shape[0]) - np.dot(K, K_Wi_i)
####################
# compute dL_dK #
####################
if kern.size > 0 and not kern.is_fixed:
#Explicit
explicit_part = 0.5*(np.dot(Ki_f, Ki_f.T) - K_Wi_i)
#Implicit
implicit_part = np.dot(Ki_f, dL_dfhat.T).dot(I_KW_i)
dL_dK = explicit_part + implicit_part
else:
dL_dK = np.zeros(likelihood.size)
####################
#compute dL_dthetaL#
####################
if likelihood.size > 0 and not likelihood.is_fixed:
dlik_dthetaL, dlik_grad_dthetaL, dlik_hess_dthetaL = likelihood._laplace_gradients(f_hat, Y, Y_metadata=Y_metadata)
num_params = likelihood.size
# make space for one derivative for each likelihood parameter
dL_dthetaL = np.zeros(num_params)
for thetaL_i in range(num_params):
#Explicit
dL_dthetaL_exp = ( np.sum(dlik_dthetaL[thetaL_i,:, :])
# The + comes from the fact that dlik_hess_dthetaL == -dW_dthetaL
+ 0.5*np.sum(np.diag(Ki_W_i)*np.squeeze(dlik_hess_dthetaL[thetaL_i, :, :]))
)
#Implicit
dfhat_dthetaL = mdot(I_KW_i, K, dlik_grad_dthetaL[thetaL_i, :, :])
#dfhat_dthetaL = mdot(Ki_W_i, dlik_grad_dthetaL[thetaL_i, :, :])
dL_dthetaL_imp = np.dot(dL_dfhat.T, dfhat_dthetaL)
dL_dthetaL[thetaL_i] = np.sum(dL_dthetaL_exp + dL_dthetaL_imp)
else:
dL_dthetaL = np.zeros(likelihood.size)
#Cache some things for speedy LOO
self.Ki_W_i = Ki_W_i
self.K = K
self.W = W
self.f_hat = f_hat
return log_marginal, K_Wi_i, dL_dK, dL_dthetaL | At the mode, compute the hessian and effective covariance matrix.
returns: logZ : approximation to the marginal likelihood
woodbury_inv : variable required for calculating the approximation to the covariance matrix
dL_dthetaL : array of derivatives (1 x num_kernel_params)
dL_dthetaL : array of derivatives (1 x num_likelihood_params) | Below is the instruction that describes the task:
### Input:
At the mode, compute the hessian and effective covariance matrix.
returns: logZ : approximation to the marginal likelihood
woodbury_inv : variable required for calculating the approximation to the covariance matrix
dL_dthetaL : array of derivatives (1 x num_kernel_params)
dL_dthetaL : array of derivatives (1 x num_likelihood_params)
### Response:
def mode_computations(self, f_hat, Ki_f, K, Y, likelihood, kern, Y_metadata):
"""
At the mode, compute the hessian and effective covariance matrix.
returns: logZ : approximation to the marginal likelihood
woodbury_inv : variable required for calculating the approximation to the covariance matrix
dL_dthetaL : array of derivatives (1 x num_kernel_params)
dL_dthetaL : array of derivatives (1 x num_likelihood_params)
"""
#At this point get the hessian matrix (or vector as W is diagonal)
W = -likelihood.d2logpdf_df2(f_hat, Y, Y_metadata=Y_metadata)
if np.any(np.isnan(W)):
raise ValueError('One or more element(s) of W is NaN')
K_Wi_i, logdet_I_KW, I_KW_i, Ki_W_i = self._compute_B_statistics(K, W, likelihood.log_concave)
#compute the log marginal
log_marginal = -0.5*np.sum(np.dot(Ki_f.T, f_hat)) + np.sum(likelihood.logpdf(f_hat, Y, Y_metadata=Y_metadata)) - 0.5*logdet_I_KW
# Compute matrices for derivatives
dW_df = -likelihood.d3logpdf_df3(f_hat, Y, Y_metadata=Y_metadata) # -d3lik_d3fhat
if np.any(np.isnan(dW_df)):
raise ValueError('One or more element(s) of dW_df is NaN')
dL_dfhat = -0.5*(np.diag(Ki_W_i)[:, None]*dW_df) # s2 in R&W p126 line 9.
#BiK, _ = dpotrs(L, K, lower=1)
#dL_dfhat = 0.5*np.diag(BiK)[:, None]*dW_df
I_KW_i = np.eye(Y.shape[0]) - np.dot(K, K_Wi_i)
####################
# compute dL_dK #
####################
if kern.size > 0 and not kern.is_fixed:
#Explicit
explicit_part = 0.5*(np.dot(Ki_f, Ki_f.T) - K_Wi_i)
#Implicit
implicit_part = np.dot(Ki_f, dL_dfhat.T).dot(I_KW_i)
dL_dK = explicit_part + implicit_part
else:
dL_dK = np.zeros(likelihood.size)
####################
#compute dL_dthetaL#
####################
if likelihood.size > 0 and not likelihood.is_fixed:
dlik_dthetaL, dlik_grad_dthetaL, dlik_hess_dthetaL = likelihood._laplace_gradients(f_hat, Y, Y_metadata=Y_metadata)
num_params = likelihood.size
# make space for one derivative for each likelihood parameter
dL_dthetaL = np.zeros(num_params)
for thetaL_i in range(num_params):
#Explicit
dL_dthetaL_exp = ( np.sum(dlik_dthetaL[thetaL_i,:, :])
# The + comes from the fact that dlik_hess_dthetaL == -dW_dthetaL
+ 0.5*np.sum(np.diag(Ki_W_i)*np.squeeze(dlik_hess_dthetaL[thetaL_i, :, :]))
)
#Implicit
dfhat_dthetaL = mdot(I_KW_i, K, dlik_grad_dthetaL[thetaL_i, :, :])
#dfhat_dthetaL = mdot(Ki_W_i, dlik_grad_dthetaL[thetaL_i, :, :])
dL_dthetaL_imp = np.dot(dL_dfhat.T, dfhat_dthetaL)
dL_dthetaL[thetaL_i] = np.sum(dL_dthetaL_exp + dL_dthetaL_imp)
else:
dL_dthetaL = np.zeros(likelihood.size)
#Cache some things for speedy LOO
self.Ki_W_i = Ki_W_i
self.K = K
self.W = W
self.f_hat = f_hat
return log_marginal, K_Wi_i, dL_dK, dL_dthetaL |
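For reference, the log_marginal computed above is the usual Laplace-approximation objective; a sketch in the code's own notation, where W is the negative Hessian of the log likelihood at the mode:
\log Z \;\approx\; -\tfrac{1}{2}\,\hat{f}^{\top} K^{-1} \hat{f}
  \;+\; \log p\big(y \mid \hat{f}\big)
  \;-\; \tfrac{1}{2}\,\log\bigl|\,I + K W\,\bigr|,
\qquad
W = -\,\nabla\nabla_{f}\,\log p(y \mid f)\Big|_{f=\hat{f}}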
def lisFichierLexique(self, filepath):
""" Lecture des lemmes, et enregistrement de leurs radicaux
:param filepath: Chemin du fichier à charger
:type filepath: str
"""
orig = int(filepath.endswith("ext.la"))
lignes = lignesFichier(filepath)
for ligne in lignes:
self.parse_lemme(ligne, orig) | Reads the lemmas and records their stems
:param filepath: Path of the file to load
:type filepath: str | Below is the instruction that describes the task:
### Input:
Reads the lemmas and records their stems
:param filepath: Path of the file to load
:type filepath: str
### Response:
def lisFichierLexique(self, filepath):
""" Lecture des lemmes, et enregistrement de leurs radicaux
:param filepath: Chemin du fichier à charger
:type filepath: str
"""
orig = int(filepath.endswith("ext.la"))
lignes = lignesFichier(filepath)
for ligne in lignes:
self.parse_lemme(ligne, orig) |
def register_encoder(self, lookup: Lookup, encoder: Encoder, label: str=None) -> None:
"""
Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._encoders, lookup, encoder, label=label) | Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`. | Below is the instruction that describes the task:
### Input:
Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
### Response:
def register_encoder(self, lookup: Lookup, encoder: Encoder, label: str=None) -> None:
"""
Registers the given ``encoder`` under the given ``lookup``. A unique
string label may be optionally provided that can be used to refer to
the registration by name. For more information about arguments, refer
to :any:`register`.
"""
self._register_coder(self._encoders, lookup, encoder, label=label) |
def value_error(self, key, bad_value):
"""Reports a value error using ERROR_MESSAGES dict.
key - key to use for ERROR_MESSAGES.
bad_value - is passed to format which is called on what key maps to
in ERROR_MESSAGES.
"""
msg = ERROR_MESSAGES[key].format(bad_value)
self.logger.log(msg)
self.error = True | Reports a value error using ERROR_MESSAGES dict.
key - key to use for ERROR_MESSAGES.
bad_value - is passed to format which is called on what key maps to
in ERROR_MESSAGES. | Below is the instruction that describes the task:
### Input:
Reports a value error using ERROR_MESSAGES dict.
key - key to use for ERROR_MESSAGES.
bad_value - is passed to format which is called on what key maps to
in ERROR_MESSAGES.
### Response:
def value_error(self, key, bad_value):
"""Reports a value error using ERROR_MESSAGES dict.
key - key to use for ERROR_MESSAGES.
bad_value - is passed to format which is called on what key maps to
in ERROR_MESSAGES.
"""
msg = ERROR_MESSAGES[key].format(bad_value)
self.logger.log(msg)
self.error = True |
def _mass(self,R,z=0.,t=0.):
"""
NAME:
_mass
PURPOSE:
evaluate the mass within R for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
t - time
OUTPUT:
the mass enclosed
HISTORY:
2014-04-01 - Written - Erkal (IoA)
"""
if z is None: r= R
else: r= numpy.sqrt(R**2.+z**2.)
return (r/self.a)**(3.-self.alpha)/(3.-self.alpha)*special.hyp2f1(3.-self.alpha,-self.alpha+self.beta,4.-self.alpha,-r/self.a) | NAME:
_mass
PURPOSE:
evaluate the mass within R for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
t - time
OUTPUT:
the mass enclosed
HISTORY:
2014-04-01 - Written - Erkal (IoA) | Below is the instruction that describes the task:
### Input:
NAME:
_mass
PURPOSE:
evaluate the mass within R for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
t - time
OUTPUT:
the mass enclosed
HISTORY:
2014-04-01 - Written - Erkal (IoA)
### Response:
def _mass(self,R,z=0.,t=0.):
"""
NAME:
_mass
PURPOSE:
evaluate the mass within R for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
t - time
OUTPUT:
the mass enclosed
HISTORY:
2014-04-01 - Written - Erkal (IoA)
"""
if z is None: r= R
else: r= numpy.sqrt(R**2.+z**2.)
return (r/self.a)**(3.-self.alpha)/(3.-self.alpha)*special.hyp2f1(3.-self.alpha,-self.alpha+self.beta,4.-self.alpha,-r/self.a) |
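A numeric sanity check of the enclosed-mass expression with scipy, using hypothetical two-power profile parameters (alpha, beta, scale radius a) chosen only for illustration:
from scipy import special

alpha, beta, a = 1.0, 3.0, 1.0   # hypothetical profile parameters
r = 2.0                          # radius in units of the scale radius a

mass = (r / a) ** (3.0 - alpha) / (3.0 - alpha) \
    * special.hyp2f1(3.0 - alpha, beta - alpha, 4.0 - alpha, -r / a)
print(mass)  # enclosed mass in the potential's internal units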
def remove_user_from_group(self, username, groupname):
"""Remove a user from a group.
:param username: The user to remove from the group.
:param groupname: The group that the user will be removed from.
"""
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': groupname,
'username': username}
self._session.delete(url, params=x)
return True | Remove a user from a group.
:param username: The user to remove from the group.
:param groupname: The group that the user will be removed from. | Below is the instruction that describes the task:
### Input:
Remove a user from a group.
:param username: The user to remove from the group.
:param groupname: The group that the user will be removed from.
### Response:
def remove_user_from_group(self, username, groupname):
"""Remove a user from a group.
:param username: The user to remove from the group.
:param groupname: The group that the user will be removed from.
"""
url = self._options['server'] + '/rest/api/latest/group/user'
x = {'groupname': groupname,
'username': username}
self._session.delete(url, params=x)
return True |
def schedule_servicegroup_host_downtime(self, servicegroup, start_time, end_time,
fixed, trigger_id, duration, author, comment):
"""Schedule a host downtime for each host of services in a servicegroup
Format of the line that triggers function call::
SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;<start_time>;<end_time>;<fixed>;
<trigger_id>;<duration>;<author>;<comment>
:param servicegroup: servicegroup to schedule downtime
:type servicegroup: alignak.object.servicegroup.Servicegroup
:param start_time: downtime start time
:type start_time:
:param end_time: downtime end time
:type end_time:
:param fixed: is downtime fixed
:type fixed: bool
:param trigger_id: downtime id that triggered this one
:type trigger_id: str
:param duration: downtime duration
:type duration: int
:param author: downtime author
:type author: str
:param comment: downtime comment
:type comment: str
:return: None
"""
for host in [s.host for s in servicegroup.get_services()]:
self.schedule_host_downtime(host, start_time, end_time, fixed,
trigger_id, duration, author, comment) | Schedule a host downtime for each host of services in a servicegroup
Format of the line that triggers function call::
SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;<start_time>;<end_time>;<fixed>;
<trigger_id>;<duration>;<author>;<comment>
:param servicegroup: servicegroup to schedule downtime
:type servicegroup: alignak.object.servicegroup.Servicegroup
:param start_time: downtime start time
:type start_time:
:param end_time: downtime end time
:type end_time:
:param fixed: is downtime fixed
:type fixed: bool
:param trigger_id: downtime id that triggered this one
:type trigger_id: str
:param duration: downtime duration
:type duration: int
:param author: downtime author
:type author: str
:param comment: downtime comment
:type comment: str
:return: None | Below is the instruction that describes the task:
### Input:
Schedule a host downtime for each host of services in a servicegroup
Format of the line that triggers function call::
SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;<start_time>;<end_time>;<fixed>;
<trigger_id>;<duration>;<author>;<comment>
:param servicegroup: servicegroup to schedule downtime
:type servicegroup: alignak.object.servicegroup.Servicegroup
:param start_time: downtime start time
:type start_time:
:param end_time: downtime end time
:type end_time:
:param fixed: is downtime fixed
:type fixed: bool
:param trigger_id: downtime id that triggered this one
:type trigger_id: str
:param duration: downtime duration
:type duration: int
:param author: downtime author
:type author: str
:param comment: downtime comment
:type comment: str
:return: None
### Response:
def schedule_servicegroup_host_downtime(self, servicegroup, start_time, end_time,
fixed, trigger_id, duration, author, comment):
"""Schedule a host downtime for each host of services in a servicegroup
Format of the line that triggers function call::
SCHEDULE_SERVICEGROUP_HOST_DOWNTIME;<servicegroup_name>;<start_time>;<end_time>;<fixed>;
<trigger_id>;<duration>;<author>;<comment>
:param servicegroup: servicegroup to schedule downtime
:type servicegroup: alignak.object.servicegroup.Servicegroup
:param start_time: downtime start time
:type start_time:
:param end_time: downtime end time
:type end_time:
:param fixed: is downtime fixed
:type fixed: bool
:param trigger_id: downtime id that triggered this one
:type trigger_id: str
:param duration: downtime duration
:type duration: int
:param author: downtime author
:type author: str
:param comment: downtime comment
:type comment: str
:return: None
"""
for host in [s.host for s in servicegroup.get_services()]:
self.schedule_host_downtime(host, start_time, end_time, fixed,
trigger_id, duration, author, comment) |
def mark_done(task_id):
"""Marks a task as done.
Args:
task_id: The integer id of the task to update.
Raises:
ValueError: if the requested task doesn't exist.
"""
task = Task.get_by_id(task_id)
if task is None:
raise ValueError('Task with id %d does not exist' % task_id)
task.done = True
task.put() | Marks a task as done.
Args:
task_id: The integer id of the task to update.
Raises:
ValueError: if the requested task doesn't exist. | Below is the instruction that describes the task:
### Input:
Marks a task as done.
Args:
task_id: The integer id of the task to update.
Raises:
ValueError: if the requested task doesn't exist.
### Response:
def mark_done(task_id):
"""Marks a task as done.
Args:
task_id: The integer id of the task to update.
Raises:
ValueError: if the requested task doesn't exist.
"""
task = Task.get_by_id(task_id)
if task is None:
raise ValueError('Task with id %d does not exist' % task_id)
task.done = True
task.put() |
def _time_shift(self, periods, freq=None):
"""
Shift each value by `periods`.
Note this is different from ExtensionArray.shift, which
shifts the *position* of each element, padding the end with
missing values.
Parameters
----------
periods : int
Number of periods to shift by.
freq : pandas.DateOffset, pandas.Timedelta, or string
Frequency increment to shift by.
"""
if freq is not None and freq != self.freq:
if isinstance(freq, str):
freq = frequencies.to_offset(freq)
offset = periods * freq
result = self + offset
return result
if periods == 0:
# immutable so OK
return self.copy()
if self.freq is None:
raise NullFrequencyError("Cannot shift with no freq")
start = self[0] + periods * self.freq
end = self[-1] + periods * self.freq
# Note: in the DatetimeTZ case, _generate_range will infer the
# appropriate timezone from `start` and `end`, so tz does not need
# to be passed explicitly.
return self._generate_range(start=start, end=end, periods=None,
freq=self.freq) | Shift each value by `periods`.
Note this is different from ExtensionArray.shift, which
shifts the *position* of each element, padding the end with
missing values.
Parameters
----------
periods : int
Number of periods to shift by.
freq : pandas.DateOffset, pandas.Timedelta, or string
Frequency increment to shift by. | Below is the instruction that describes the task:
### Input:
Shift each value by `periods`.
Note this is different from ExtensionArray.shift, which
shifts the *position* of each element, padding the end with
missing values.
Parameters
----------
periods : int
Number of periods to shift by.
freq : pandas.DateOffset, pandas.Timedelta, or string
Frequency increment to shift by.
### Response:
def _time_shift(self, periods, freq=None):
"""
Shift each value by `periods`.
Note this is different from ExtensionArray.shift, which
shifts the *position* of each element, padding the end with
missing values.
Parameters
----------
periods : int
Number of periods to shift by.
freq : pandas.DateOffset, pandas.Timedelta, or string
Frequency increment to shift by.
"""
if freq is not None and freq != self.freq:
if isinstance(freq, str):
freq = frequencies.to_offset(freq)
offset = periods * freq
result = self + offset
return result
if periods == 0:
# immutable so OK
return self.copy()
if self.freq is None:
raise NullFrequencyError("Cannot shift with no freq")
start = self[0] + periods * self.freq
end = self[-1] + periods * self.freq
# Note: in the DatetimeTZ case, _generate_range will infer the
# appropriate timezone from `start` and `end`, so tz does not need
# to be passed explicitly.
return self._generate_range(start=start, end=end, periods=None,
freq=self.freq) |
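The same behaviour seen through the public API, assuming a recent pandas install; DatetimeIndex.shift moves every timestamp by periods * freq instead of repositioning elements:
import pandas as pd

idx = pd.date_range("2020-01-01", periods=3, freq="D")
print(idx.shift(2))
# DatetimeIndex(['2020-01-03', '2020-01-04', '2020-01-05'], dtype='datetime64[ns]', freq='D')
print(idx.shift(2, freq="H"))  # shift each value by 2 hours instead of 2 days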
def list_role_binding_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind RoleBinding
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_role_binding_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1RoleBindingList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_role_binding_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_role_binding_for_all_namespaces_with_http_info(**kwargs)
return data | list or watch objects of kind RoleBinding
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_role_binding_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1RoleBindingList
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
list or watch objects of kind RoleBinding
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_role_binding_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1RoleBindingList
If the method is called asynchronously,
returns the request thread.
### Response:
def list_role_binding_for_all_namespaces(self, **kwargs):
"""
list or watch objects of kind RoleBinding
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_role_binding_for_all_namespaces(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.
:param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything.
:param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything.
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.
:param str pretty: If 'true', then the output is pretty printed.
:param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv.
:param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.
:return: V1RoleBindingList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_role_binding_for_all_namespaces_with_http_info(**kwargs)
else:
(data) = self.list_role_binding_for_all_namespaces_with_http_info(**kwargs)
return data |
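For orientation, a minimal usage sketch for the method above, assuming the official kubernetes Python client and a kubeconfig that points at a reachable cluster; the limit value and printed fields are illustrative choices, not part of the generated client:
# Hedged sketch; cluster access and RBAC read permission on rolebindings are assumed.
from kubernetes import client, config

config.load_kube_config()                        # or config.load_incluster_config() inside a pod
rbac_api = client.RbacAuthorizationV1Api()

binding_list = rbac_api.list_role_binding_for_all_namespaces(limit=50)   # V1RoleBindingList
for binding in binding_list.items:
    print(binding.metadata.namespace, binding.metadata.name)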
def resolve_incident(self, incident_key,
description=None, details=None):
""" Causes the referenced incident to enter resolved state.
Send a resolve event when the problem that caused the initial
trigger has been fixed.
"""
return self.create_event(description, "resolve",
details, incident_key) | Causes the referenced incident to enter resolved state.
Send a resolve event when the problem that caused the initial
trigger has been fixed. | Below is the instruction that describes the task:
### Input:
Causes the referenced incident to enter resolved state.
Send a resolve event when the problem that caused the initial
trigger has been fixed.
### Response:
def resolve_incident(self, incident_key,
description=None, details=None):
""" Causes the referenced incident to enter resolved state.
Send a resolve event when the problem that caused the initial
trigger has been fixed.
"""
return self.create_event(description, "resolve",
details, incident_key) |
def _assert_type_bounds_are_not_conflicting(current_type_bound, previous_type_bound,
location, match_query):
"""Ensure that the two bounds either are an exact match, or one of them is None."""
if all((current_type_bound is not None,
previous_type_bound is not None,
current_type_bound != previous_type_bound)):
raise AssertionError(
u'Conflicting type bounds calculated at location {}: {} vs {} '
u'for query {}'.format(location, previous_type_bound, current_type_bound, match_query)) | Ensure that the two bounds either are an exact match, or one of them is None. | Below is the instruction that describes the task:
### Input:
Ensure that the two bounds either are an exact match, or one of them is None.
### Response:
def _assert_type_bounds_are_not_conflicting(current_type_bound, previous_type_bound,
location, match_query):
"""Ensure that the two bounds either are an exact match, or one of them is None."""
if all((current_type_bound is not None,
previous_type_bound is not None,
current_type_bound != previous_type_bound)):
raise AssertionError(
u'Conflicting type bounds calculated at location {}: {} vs {} '
u'for query {}'.format(location, previous_type_bound, current_type_bound, match_query)) |
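The helper is a pure function of its arguments, so its contract is easy to exercise directly; the location and query strings below are placeholders that only feed the error message:
# Placeholder arguments; only the two bounds drive the check.
_assert_type_bounds_are_not_conflicting('Animal', 'Animal', 'location', 'query')   # exact match: passes
_assert_type_bounds_are_not_conflicting('Animal', None, 'location', 'query')       # one bound unset: passes
_assert_type_bounds_are_not_conflicting('Animal', 'Species', 'location', 'query')  # conflicting: raises AssertionError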
def is_group(self):
"""True if the message was sent on a group or megagroup."""
if self._broadcast is None and self.chat:
self._broadcast = getattr(self.chat, 'broadcast', None)
return (
isinstance(self._chat_peer, (types.PeerChat, types.PeerChannel))
and not self._broadcast
) | True if the message was sent on a group or megagroup. | Below is the instruction that describes the task:
### Input:
True if the message was sent on a group or megagroup.
### Response:
def is_group(self):
"""True if the message was sent on a group or megagroup."""
if self._broadcast is None and self.chat:
self._broadcast = getattr(self.chat, 'broadcast', None)
return (
isinstance(self._chat_peer, (types.PeerChat, types.PeerChannel))
and not self._broadcast
) |
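In Telethon this property is normally read off an incoming event or a fetched message; a hedged sketch follows, with placeholder API credentials and a handler chosen purely for illustration:
# Illustrative only; api_id and api_hash are placeholders.
from telethon import TelegramClient, events

client = TelegramClient('session', 12345, '0123456789abcdef0123456789abcdef')

@client.on(events.NewMessage)
async def handler(event):
    if event.is_group:                      # chats and non-broadcast megagroups
        await event.reply('seen in a group')

client.start()
client.run_until_disconnected()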
def item_move(self, request, tree_id, item_id, direction):
"""Moves item up or down by swapping 'sort_order' field values of neighboring items."""
current_item = MODEL_TREE_ITEM_CLASS._default_manager.get(pk=item_id)
if direction == 'up':
sort_order = 'sort_order'
else:
sort_order = '-sort_order'
siblings = MODEL_TREE_ITEM_CLASS._default_manager.filter(
parent=current_item.parent,
tree=current_item.tree
).order_by(sort_order)
previous_item = None
for item in siblings:
if item != current_item:
previous_item = item
else:
break
if previous_item is not None:
current_item_sort_order = current_item.sort_order
previous_item_sort_order = previous_item.sort_order
current_item.sort_order = previous_item_sort_order
previous_item.sort_order = current_item_sort_order
current_item.save()
previous_item.save()
return HttpResponseRedirect('../../') | Moves item up or down by swapping 'sort_order' field values of neighboring items. | Below is the instruction that describes the task:
### Input:
Moves item up or down by swapping 'sort_order' field values of neighboring items.
### Response:
def item_move(self, request, tree_id, item_id, direction):
"""Moves item up or down by swapping 'sort_order' field values of neighboring items."""
current_item = MODEL_TREE_ITEM_CLASS._default_manager.get(pk=item_id)
if direction == 'up':
sort_order = 'sort_order'
else:
sort_order = '-sort_order'
siblings = MODEL_TREE_ITEM_CLASS._default_manager.filter(
parent=current_item.parent,
tree=current_item.tree
).order_by(sort_order)
previous_item = None
for item in siblings:
if item != current_item:
previous_item = item
else:
break
if previous_item is not None:
current_item_sort_order = current_item.sort_order
previous_item_sort_order = previous_item.sort_order
current_item.sort_order = previous_item_sort_order
previous_item.sort_order = current_item_sort_order
current_item.save()
previous_item.save()
return HttpResponseRedirect('../../') |
def DiffDoArrays(self, oldObj, newObj, isElementLinks):
"""Diff two DataObject arrays"""
if len(oldObj) != len(newObj):
__Log__.debug('DiffDoArrays: Array lengths do not match %d != %d'
% (len(oldObj), len(newObj)))
return False
for i, j in zip(oldObj, newObj):
if isElementLinks:
if i.GetKey() != j.GetKey():
__Log__.debug('DiffDoArrays: Keys do not match %s != %s'
% (i.GetKey(), j.GetKey()))
return False
else:
if not self.DiffDataObjects(i, j):
__Log__.debug(
'DiffDoArrays: one of the elements do not match')
return False
return True | Diff two DataObject arrays | Below is the instruction that describes the task:
### Input:
Diff two DataObject arrays
### Response:
def DiffDoArrays(self, oldObj, newObj, isElementLinks):
"""Diff two DataObject arrays"""
if len(oldObj) != len(newObj):
__Log__.debug('DiffDoArrays: Array lengths do not match %d != %d'
% (len(oldObj), len(newObj)))
return False
for i, j in zip(oldObj, newObj):
if isElementLinks:
if i.GetKey() != j.GetKey():
__Log__.debug('DiffDoArrays: Keys do not match %s != %s'
% (i.GetKey(), j.GetKey()))
return False
else:
if not self.DiffDataObjects(i, j):
__Log__.debug(
'DiffDoArrays: one of the elements do not match')
return False
return True |
def search_issues(self, query, sort=None, order=None, per_page=None,
text_match=False, number=-1, etag=None):
"""Find issues by state and keyword
The query can contain any combination of the following supported
qualifiers:
- ``type`` With this qualifier you can restrict the search to issues
or pull request only.
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the title, body, comments, or any
combination of these.
- ``author`` Finds issues created by a certain user.
- ``assignee`` Finds issues that are assigned to a certain user.
- ``mentions`` Finds issues that mention a certain user.
- ``commenter`` Finds issues that a certain user commented on.
- ``involves`` Finds issues that were either created by a certain user,
assigned to that user, mention that user, or were commented on by
that user.
- ``state`` Filter issues based on whether they’re open or closed.
- ``labels`` Filters issues based on their labels.
- ``language`` Searches for issues within repositories that match a
certain language.
- ``created`` or ``updated`` Filters issues based on times of creation,
or when they were last updated.
- ``comments`` Filters issues based on the quantity of comments.
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
For more information about these qualifiers, see: http://git.io/d1oELA
:param str query: (required), a valid query as described above, e.g.,
``windows label:bug``
:param str sort: (optional), how the results should be sorted;
options: ``created``, ``comments``, ``updated``;
default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/QLQuSQ for more information
:param int number: (optional), number of issues to return.
Default: -1, returns all available issues
:param str etag: (optional), previous ETag header value
:return: generator of :class:`IssueSearchResult
<github3.search.IssueSearchResult>`
"""
params = {'q': query}
headers = {}
if sort in ('comments', 'created', 'updated'):
params['sort'] = sort
if order in ('asc', 'desc'):
params['order'] = order
if text_match:
headers = {
'Accept': 'application/vnd.github.v3.full.text-match+json'
}
url = self._build_url('search', 'issues')
return SearchIterator(number, url, IssueSearchResult, self, params,
etag, headers) | Find issues by state and keyword
The query can contain any combination of the following supported
qualifiers:
- ``type`` With this qualifier you can restrict the search to issues
or pull request only.
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the title, body, comments, or any
combination of these.
- ``author`` Finds issues created by a certain user.
- ``assignee`` Finds issues that are assigned to a certain user.
- ``mentions`` Finds issues that mention a certain user.
- ``commenter`` Finds issues that a certain user commented on.
- ``involves`` Finds issues that were either created by a certain user,
assigned to that user, mention that user, or were commented on by
that user.
- ``state`` Filter issues based on whether they’re open or closed.
- ``labels`` Filters issues based on their labels.
- ``language`` Searches for issues within repositories that match a
certain language.
- ``created`` or ``updated`` Filters issues based on times of creation,
or when they were last updated.
- ``comments`` Filters issues based on the quantity of comments.
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
For more information about these qualifiers, see: http://git.io/d1oELA
:param str query: (required), a valid query as described above, e.g.,
``windows label:bug``
:param str sort: (optional), how the results should be sorted;
options: ``created``, ``comments``, ``updated``;
default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/QLQuSQ for more information
:param int number: (optional), number of issues to return.
Default: -1, returns all available issues
:param str etag: (optional), previous ETag header value
:return: generator of :class:`IssueSearchResult
<github3.search.IssueSearchResult>` | Below is the instruction that describes the task:
### Input:
Find issues by state and keyword
The query can contain any combination of the following supported
qualifiers:
- ``type`` With this qualifier you can restrict the search to issues
or pull request only.
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the title, body, comments, or any
combination of these.
- ``author`` Finds issues created by a certain user.
- ``assignee`` Finds issues that are assigned to a certain user.
- ``mentions`` Finds issues that mention a certain user.
- ``commenter`` Finds issues that a certain user commented on.
- ``involves`` Finds issues that were either created by a certain user,
assigned to that user, mention that user, or were commented on by
that user.
- ``state`` Filter issues based on whether they’re open or closed.
- ``labels`` Filters issues based on their labels.
- ``language`` Searches for issues within repositories that match a
certain language.
- ``created`` or ``updated`` Filters issues based on times of creation,
or when they were last updated.
- ``comments`` Filters issues based on the quantity of comments.
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
For more information about these qualifiers, see: http://git.io/d1oELA
:param str query: (required), a valid query as described above, e.g.,
``windows label:bug``
:param str sort: (optional), how the results should be sorted;
options: ``created``, ``comments``, ``updated``;
default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/QLQuSQ for more information
:param int number: (optional), number of issues to return.
Default: -1, returns all available issues
:param str etag: (optional), previous ETag header value
:return: generator of :class:`IssueSearchResult
<github3.search.IssueSearchResult>`
### Response:
def search_issues(self, query, sort=None, order=None, per_page=None,
text_match=False, number=-1, etag=None):
"""Find issues by state and keyword
The query can contain any combination of the following supported
qualifiers:
- ``type`` With this qualifier you can restrict the search to issues
or pull request only.
- ``in`` Qualifies which fields are searched. With this qualifier you
can restrict the search to just the title, body, comments, or any
combination of these.
- ``author`` Finds issues created by a certain user.
- ``assignee`` Finds issues that are assigned to a certain user.
- ``mentions`` Finds issues that mention a certain user.
- ``commenter`` Finds issues that a certain user commented on.
- ``involves`` Finds issues that were either created by a certain user,
assigned to that user, mention that user, or were commented on by
that user.
- ``state`` Filter issues based on whether they’re open or closed.
- ``labels`` Filters issues based on their labels.
- ``language`` Searches for issues within repositories that match a
certain language.
- ``created`` or ``updated`` Filters issues based on times of creation,
or when they were last updated.
- ``comments`` Filters issues based on the quantity of comments.
- ``user`` or ``repo`` Limits searches to a specific user or
repository.
For more information about these qualifiers, see: http://git.io/d1oELA
:param str query: (required), a valid query as described above, e.g.,
``windows label:bug``
:param str sort: (optional), how the results should be sorted;
options: ``created``, ``comments``, ``updated``;
default: best match
:param str order: (optional), the direction of the sorted results,
options: ``asc``, ``desc``; default: ``desc``
:param int per_page: (optional)
:param bool text_match: (optional), if True, return matching search
terms. See http://git.io/QLQuSQ for more information
:param int number: (optional), number of issues to return.
Default: -1, returns all available issues
:param str etag: (optional), previous ETag header value
:return: generator of :class:`IssueSearchResult
<github3.search.IssueSearchResult>`
"""
params = {'q': query}
headers = {}
if sort in ('comments', 'created', 'updated'):
params['sort'] = sort
if order in ('asc', 'desc'):
params['order'] = order
if text_match:
headers = {
'Accept': 'application/vnd.github.v3.full.text-match+json'
}
url = self._build_url('search', 'issues')
return SearchIterator(number, url, IssueSearchResult, self, params,
etag, headers) |
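A hedged usage sketch for the search above, using github3.py's token login; the token and query are placeholders, and the result objects are assumed to expose the matched issue via .issue as in github3.py 1.x:
# Illustrative only; the token value is a placeholder.
import github3

gh = github3.login(token='ghp_exampletoken')
for result in gh.search_issues('repo:octocat/Hello-World is:open label:bug',
                               sort='created', order='desc', number=25):
    print(result.issue.number, result.issue.title)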
def get_probes_results(self):
"""Return the results of the RPM probes."""
probes_results = {}
probes_results_table = junos_views.junos_rpm_probes_results_table(self.device)
probes_results_table.get()
probes_results_items = probes_results_table.items()
for probe_result in probes_results_items:
probe_name = py23_compat.text_type(probe_result[0])
test_results = {
p[0]: p[1] for p in probe_result[1]
}
test_results['last_test_loss'] = napalm_base.helpers.convert(
int, test_results.pop('last_test_loss'), 0)
for test_param_name, test_param_value in test_results.items():
if isinstance(test_param_value, float):
test_results[test_param_name] = test_param_value * 1e-3
# convert from useconds to mseconds
test_name = test_results.pop('test_name', '')
source = test_results.get('source', u'')
if source is None:
test_results['source'] = u''
if probe_name not in probes_results.keys():
probes_results[probe_name] = {}
probes_results[probe_name][test_name] = test_results
return probes_results | Return the results of the RPM probes. | Below is the instruction that describes the task:
### Input:
Return the results of the RPM probes.
### Response:
def get_probes_results(self):
"""Return the results of the RPM probes."""
probes_results = {}
probes_results_table = junos_views.junos_rpm_probes_results_table(self.device)
probes_results_table.get()
probes_results_items = probes_results_table.items()
for probe_result in probes_results_items:
probe_name = py23_compat.text_type(probe_result[0])
test_results = {
p[0]: p[1] for p in probe_result[1]
}
test_results['last_test_loss'] = napalm_base.helpers.convert(
int, test_results.pop('last_test_loss'), 0)
for test_param_name, test_param_value in test_results.items():
if isinstance(test_param_value, float):
test_results[test_param_name] = test_param_value * 1e-3
# convert from useconds to mseconds
test_name = test_results.pop('test_name', '')
source = test_results.get('source', u'')
if source is None:
test_results['source'] = u''
if probe_name not in probes_results.keys():
probes_results[probe_name] = {}
probes_results[probe_name][test_name] = test_results
return probes_results |
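For context, a hedged sketch of reaching this driver method through NAPALM's factory function; the snippet uses the post-merge napalm package import, whereas the code above still imports napalm_base, and the device details are placeholders:
# Illustrative only; hostname and credentials are placeholders.
from napalm import get_network_driver

junos_driver = get_network_driver('junos')
device = junos_driver(hostname='192.0.2.1', username='lab', password='secret')
device.open()
try:
    for probe_name, tests in device.get_probes_results().items():
        print(probe_name, sorted(tests))
finally:
    device.close()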
def compute_depth(self):
"""
Recursively computes true depth of the subtree. Should only
be needed for debugging. Unless something is wrong, the
depth field should reflect the correct depth of the subtree.
"""
left_depth = self.left_node.compute_depth() if self.left_node else 0
right_depth = self.right_node.compute_depth() if self.right_node else 0
return 1 + max(left_depth, right_depth) | Recursively computes true depth of the subtree. Should only
be needed for debugging. Unless something is wrong, the
depth field should reflect the correct depth of the subtree. | Below is the instruction that describes the task:
### Input:
Recursively computes true depth of the subtree. Should only
be needed for debugging. Unless something is wrong, the
depth field should reflect the correct depth of the subtree.
### Response:
def compute_depth(self):
"""
Recursively computes true depth of the subtree. Should only
be needed for debugging. Unless something is wrong, the
depth field should reflect the correct depth of the subtree.
"""
left_depth = self.left_node.compute_depth() if self.left_node else 0
right_depth = self.right_node.compute_depth() if self.right_node else 0
return 1 + max(left_depth, right_depth) |
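The recurrence is the usual tree-height calculation, depth(node) = 1 + max(depth(left), depth(right)), with a missing child counting as 0; a self-contained toy version of the same idea:
# Stand-alone illustration with a minimal node class.
class Node:
    def __init__(self, left=None, right=None):
        self.left_node, self.right_node = left, right

    def compute_depth(self):
        left_depth = self.left_node.compute_depth() if self.left_node else 0
        right_depth = self.right_node.compute_depth() if self.right_node else 0
        return 1 + max(left_depth, right_depth)

root = Node(left=Node(left=Node()), right=Node())
print(root.compute_depth())   # 3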
def parallel_beam_geometry(space, num_angles=None, det_shape=None):
r"""Create default parallel beam geometry from ``space``.
This is intended for simple test cases where users do not need the full
flexibility of the geometries, but simply want a geometry that works.
This default geometry gives a fully sampled sinogram according to the
Nyquist criterion, which in general results in a very large number of
samples. In particular, a ``space`` that is not centered at the origin
can result in very large detectors.
Parameters
----------
space : `DiscreteLp`
Reconstruction space, the space of the volumetric data to be projected.
Needs to be 2d or 3d.
num_angles : int, optional
Number of angles.
Default: Enough to fully sample the data, see Notes.
det_shape : int or sequence of int, optional
Number of detector pixels.
Default: Enough to fully sample the data, see Notes.
Returns
-------
geometry : `ParallelBeamGeometry`
If ``space`` is 2d, return a `Parallel2dGeometry`.
If ``space`` is 3d, return a `Parallel3dAxisGeometry`.
Examples
--------
Create a parallel beam geometry from a 2d space:
>>> space = odl.uniform_discr([-1, -1], [1, 1], (20, 20))
>>> geometry = parallel_beam_geometry(space)
>>> geometry.angles.size
45
>>> geometry.detector.size
31
Notes
-----
According to [NW2001]_, pages 72--74, a function
:math:`f : \mathbb{R}^2 \to \mathbb{R}` that has compact support
.. math::
\| x \| > \rho \implies f(x) = 0,
and is essentially bandlimited
.. math::
\| \xi \| > \Omega \implies \hat{f}(\xi) \approx 0,
can be fully reconstructed from a parallel beam ray transform
if (1) the projection angles are sampled with a spacing of
:math:`\Delta \psi` such that
.. math::
\Delta \psi \leq \frac{\pi}{\rho \Omega},
and (2) the detector is sampled with an interval :math:`\Delta s`
that satisfies
.. math::
\Delta s \leq \frac{\pi}{\Omega}.
The geometry returned by this function satisfies these conditions exactly.
If the domain is 3-dimensional, the geometry is "separable", in that each
slice along the z-dimension of the data is treated as independent 2d data.
References
----------
.. [NW2001] Natterer, F and Wuebbeling, F.
*Mathematical Methods in Image Reconstruction*.
SIAM, 2001.
https://dx.doi.org/10.1137/1.9780898718324
"""
# Find maximum distance from rotation axis
corners = space.domain.corners()[:, :2]
rho = np.max(np.linalg.norm(corners, axis=1))
# Find default values according to Nyquist criterion.
# We assume that the function is bandlimited by a wave along the x or y
# axis. The highest frequency we can measure is then a standing wave with
# period of twice the inter-node distance.
min_side = min(space.partition.cell_sides[:2])
omega = np.pi / min_side
num_px_horiz = 2 * int(np.ceil(rho * omega / np.pi)) + 1
if space.ndim == 2:
det_min_pt = -rho
det_max_pt = rho
if det_shape is None:
det_shape = num_px_horiz
elif space.ndim == 3:
num_px_vert = space.shape[2]
min_h = space.domain.min_pt[2]
max_h = space.domain.max_pt[2]
det_min_pt = [-rho, min_h]
det_max_pt = [rho, max_h]
if det_shape is None:
det_shape = [num_px_horiz, num_px_vert]
if num_angles is None:
num_angles = int(np.ceil(omega * rho))
angle_partition = uniform_partition(0, np.pi, num_angles)
det_partition = uniform_partition(det_min_pt, det_max_pt, det_shape)
if space.ndim == 2:
return Parallel2dGeometry(angle_partition, det_partition)
elif space.ndim == 3:
return Parallel3dAxisGeometry(angle_partition, det_partition)
else:
raise ValueError('``space.ndim`` must be 2 or 3.') | r"""Create default parallel beam geometry from ``space``.
This is intended for simple test cases where users do not need the full
flexibility of the geometries, but simply want a geometry that works.
This default geometry gives a fully sampled sinogram according to the
Nyquist criterion, which in general results in a very large number of
samples. In particular, a ``space`` that is not centered at the origin
can result in very large detectors.
Parameters
----------
space : `DiscreteLp`
Reconstruction space, the space of the volumetric data to be projected.
Needs to be 2d or 3d.
num_angles : int, optional
Number of angles.
Default: Enough to fully sample the data, see Notes.
det_shape : int or sequence of int, optional
Number of detector pixels.
Default: Enough to fully sample the data, see Notes.
Returns
-------
geometry : `ParallelBeamGeometry`
If ``space`` is 2d, return a `Parallel2dGeometry`.
If ``space`` is 3d, return a `Parallel3dAxisGeometry`.
Examples
--------
Create a parallel beam geometry from a 2d space:
>>> space = odl.uniform_discr([-1, -1], [1, 1], (20, 20))
>>> geometry = parallel_beam_geometry(space)
>>> geometry.angles.size
45
>>> geometry.detector.size
31
Notes
-----
According to [NW2001]_, pages 72--74, a function
:math:`f : \mathbb{R}^2 \to \mathbb{R}` that has compact support
.. math::
\| x \| > \rho \implies f(x) = 0,
and is essentially bandlimited
.. math::
\| \xi \| > \Omega \implies \hat{f}(\xi) \approx 0,
can be fully reconstructed from a parallel beam ray transform
if (1) the projection angles are sampled with a spacing of
:math:`\Delta \psi` such that
.. math::
\Delta \psi \leq \frac{\pi}{\rho \Omega},
and (2) the detector is sampled with an interval :math:`\Delta s`
that satisfies
.. math::
\Delta s \leq \frac{\pi}{\Omega}.
The geometry returned by this function satisfies these conditions exactly.
If the domain is 3-dimensional, the geometry is "separable", in that each
slice along the z-dimension of the data is treated as independent 2d data.
References
----------
.. [NW2001] Natterer, F and Wuebbeling, F.
*Mathematical Methods in Image Reconstruction*.
SIAM, 2001.
https://dx.doi.org/10.1137/1.9780898718324 | Below is the instruction that describes the task:
### Input:
r"""Create default parallel beam geometry from ``space``.
This is intended for simple test cases where users do not need the full
flexibility of the geometries, but simply want a geometry that works.
This default geometry gives a fully sampled sinogram according to the
Nyquist criterion, which in general results in a very large number of
samples. In particular, a ``space`` that is not centered at the origin
can result in very large detectors.
Parameters
----------
space : `DiscreteLp`
Reconstruction space, the space of the volumetric data to be projected.
Needs to be 2d or 3d.
num_angles : int, optional
Number of angles.
Default: Enough to fully sample the data, see Notes.
det_shape : int or sequence of int, optional
Number of detector pixels.
Default: Enough to fully sample the data, see Notes.
Returns
-------
geometry : `ParallelBeamGeometry`
If ``space`` is 2d, return a `Parallel2dGeometry`.
If ``space`` is 3d, return a `Parallel3dAxisGeometry`.
Examples
--------
Create a parallel beam geometry from a 2d space:
>>> space = odl.uniform_discr([-1, -1], [1, 1], (20, 20))
>>> geometry = parallel_beam_geometry(space)
>>> geometry.angles.size
45
>>> geometry.detector.size
31
Notes
-----
According to [NW2001]_, pages 72--74, a function
:math:`f : \mathbb{R}^2 \to \mathbb{R}` that has compact support
.. math::
\| x \| > \rho \implies f(x) = 0,
and is essentially bandlimited
.. math::
\| \xi \| > \Omega \implies \hat{f}(\xi) \approx 0,
can be fully reconstructed from a parallel beam ray transform
if (1) the projection angles are sampled with a spacing of
:math:`\Delta \psi` such that
.. math::
\Delta \psi \leq \frac{\pi}{\rho \Omega},
and (2) the detector is sampled with an interval :math:`\Delta s`
that satisfies
.. math::
\Delta s \leq \frac{\pi}{\Omega}.
The geometry returned by this function satisfies these conditions exactly.
If the domain is 3-dimensional, the geometry is "separable", in that each
slice along the z-dimension of the data is treated as independent 2d data.
References
----------
.. [NW2001] Natterer, F and Wuebbeling, F.
*Mathematical Methods in Image Reconstruction*.
SIAM, 2001.
https://dx.doi.org/10.1137/1.9780898718324
### Response:
def parallel_beam_geometry(space, num_angles=None, det_shape=None):
r"""Create default parallel beam geometry from ``space``.
This is intended for simple test cases where users do not need the full
flexibility of the geometries, but simply want a geometry that works.
This default geometry gives a fully sampled sinogram according to the
Nyquist criterion, which in general results in a very large number of
samples. In particular, a ``space`` that is not centered at the origin
can result in very large detectors.
Parameters
----------
space : `DiscreteLp`
Reconstruction space, the space of the volumetric data to be projected.
Needs to be 2d or 3d.
num_angles : int, optional
Number of angles.
Default: Enough to fully sample the data, see Notes.
det_shape : int or sequence of int, optional
Number of detector pixels.
Default: Enough to fully sample the data, see Notes.
Returns
-------
geometry : `ParallelBeamGeometry`
If ``space`` is 2d, return a `Parallel2dGeometry`.
If ``space`` is 3d, return a `Parallel3dAxisGeometry`.
Examples
--------
Create a parallel beam geometry from a 2d space:
>>> space = odl.uniform_discr([-1, -1], [1, 1], (20, 20))
>>> geometry = parallel_beam_geometry(space)
>>> geometry.angles.size
45
>>> geometry.detector.size
31
Notes
-----
According to [NW2001]_, pages 72--74, a function
:math:`f : \mathbb{R}^2 \to \mathbb{R}` that has compact support
.. math::
\| x \| > \rho \implies f(x) = 0,
and is essentially bandlimited
.. math::
\| \xi \| > \Omega \implies \hat{f}(\xi) \approx 0,
can be fully reconstructed from a parallel beam ray transform
if (1) the projection angles are sampled with a spacing of
:math:`\Delta \psi` such that
.. math::
\Delta \psi \leq \frac{\pi}{\rho \Omega},
and (2) the detector is sampled with an interval :math:`\Delta s`
that satisfies
.. math::
\Delta s \leq \frac{\pi}{\Omega}.
The geometry returned by this function satisfies these conditions exactly.
If the domain is 3-dimensional, the geometry is "separable", in that each
slice along the z-dimension of the data is treated as independent 2d data.
References
----------
.. [NW2001] Natterer, F and Wuebbeling, F.
*Mathematical Methods in Image Reconstruction*.
SIAM, 2001.
https://dx.doi.org/10.1137/1.9780898718324
"""
# Find maximum distance from rotation axis
corners = space.domain.corners()[:, :2]
rho = np.max(np.linalg.norm(corners, axis=1))
# Find default values according to Nyquist criterion.
# We assume that the function is bandlimited by a wave along the x or y
# axis. The highest frequency we can measure is then a standing wave with
# period of twice the inter-node distance.
min_side = min(space.partition.cell_sides[:2])
omega = np.pi / min_side
num_px_horiz = 2 * int(np.ceil(rho * omega / np.pi)) + 1
if space.ndim == 2:
det_min_pt = -rho
det_max_pt = rho
if det_shape is None:
det_shape = num_px_horiz
elif space.ndim == 3:
num_px_vert = space.shape[2]
min_h = space.domain.min_pt[2]
max_h = space.domain.max_pt[2]
det_min_pt = [-rho, min_h]
det_max_pt = [rho, max_h]
if det_shape is None:
det_shape = [num_px_horiz, num_px_vert]
if num_angles is None:
num_angles = int(np.ceil(omega * rho))
angle_partition = uniform_partition(0, np.pi, num_angles)
det_partition = uniform_partition(det_min_pt, det_max_pt, det_shape)
if space.ndim == 2:
return Parallel2dGeometry(angle_partition, det_partition)
elif space.ndim == 3:
return Parallel3dAxisGeometry(angle_partition, det_partition)
else:
raise ValueError('``space.ndim`` must be 2 or 3.') |
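As a quick sanity check of the doctest values above: for the 20 x 20 grid on [-1, 1]^2 the corner radius is rho = sqrt(2) ≈ 1.414 and the cell side is 2/20 = 0.1, so Omega = pi/0.1 ≈ 31.42. The detector then gets 2*ceil(rho*Omega/pi) + 1 = 2*ceil(14.14) + 1 = 31 pixels and the angle grid gets ceil(Omega*rho) = ceil(44.43) = 45 samples, matching geometry.angles.size == 45 and geometry.detector.size == 31.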
def info(cabdir, header=False):
""" prints out help information about a cab """
# First check if cab exists
pfile = "{}/parameters.json".format(cabdir)
if not os.path.exists(pfile):
raise RuntimeError("Cab could not be found at : {}".format(cabdir))
# Get cab info
cab_definition = cab.CabDefinition(parameter_file=pfile)
cab_definition.display(header) | prints out help information about a cab | Below is the instruction that describes the task:
### Input:
prints out help information about a cab
### Response:
def info(cabdir, header=False):
""" prints out help information about a cab """
# First check if cab exists
pfile = "{}/parameters.json".format(cabdir)
if not os.path.exists(pfile):
raise RuntimeError("Cab could not be found at : {}".format(cabdir))
# Get cab info
cab_definition = cab.CabDefinition(parameter_file=pfile)
cab_definition.display(header) |
def processEnded(self, status):
"""
:api:`twisted.internet.protocol.ProcessProtocol <ProcessProtocol>` API
"""
self.cleanup()
if status.value.exitCode is None:
if self._did_timeout:
err = RuntimeError("Timeout waiting for Tor launch.")
else:
err = RuntimeError(
"Tor was killed (%s)." % status.value.signal)
else:
err = RuntimeError(
"Tor exited with error-code %d" % status.value.exitCode)
# hmmm, this log() should probably go away...not always an
# error (e.g. .quit()
log.err(err)
self._maybe_notify_connected(Failure(err)) | :api:`twisted.internet.protocol.ProcessProtocol <ProcessProtocol>` API | Below is the instruction that describes the task:
### Input:
:api:`twisted.internet.protocol.ProcessProtocol <ProcessProtocol>` API
### Response:
def processEnded(self, status):
"""
:api:`twisted.internet.protocol.ProcessProtocol <ProcessProtocol>` API
"""
self.cleanup()
if status.value.exitCode is None:
if self._did_timeout:
err = RuntimeError("Timeout waiting for Tor launch.")
else:
err = RuntimeError(
"Tor was killed (%s)." % status.value.signal)
else:
err = RuntimeError(
"Tor exited with error-code %d" % status.value.exitCode)
# hmmm, this log() should probably go away...not always an
# error (e.g. .quit()
log.err(err)
self._maybe_notify_connected(Failure(err)) |
def DomainsGet(self, parameters = None, domain_id = -1):
"""
This method returns the domains of the current user.
The list also contains the domains to which the user has not yet been accepted.
@param parameters (dictionary) - Dictionary containing the parameters of the request.
@return (bool) - Boolean indicating whether DomainsGet was successful.
"""
url = ''
if parameters is None and domain_id <> -1:
url = '/domains/{0}.json'.format(domain_id)
else:
url = '/domains.json'
if self.__SenseApiCall__(url, 'GET', parameters = parameters):
return True
else:
self.__error__ = "api call unsuccessful"
return False | This method returns the domains of the current user.
The list also contains the domains to which the user has not yet been accepted.
@param parameters (dictionary) - Dictionary containing the parameters of the request.
@return (bool) - Boolean indicating whether DomainsGet was successful. | Below is the instruction that describes the task:
### Input:
This method returns the domains of the current user.
The list also contains the domains to which the user has not yet been accepted.
@param parameters (dictionary) - Dictionary containing the parameters of the request.
@return (bool) - Boolean indicating whether DomainsGet was successful.
### Response:
def DomainsGet(self, parameters = None, domain_id = -1):
"""
This method returns the domains of the current user.
The list also contains the domains to which the user has not yet been accepted.
@param parameters (dictionary) - Dictionary containing the parameters of the request.
@return (bool) - Boolean indicating whether DomainsGet was successful.
"""
url = ''
if parameters is None and domain_id <> -1:
url = '/domains/{0}.json'.format(domain_id)
else:
url = '/domains.json'
if self.__SenseApiCall__(url, 'GET', parameters = parameters):
return True
else:
self.__error__ = "api call unsuccessful"
return False |
def contains(self, key):
'''Returns whether the object named by `key` exists.
First checks ``cache_datastore``.
'''
return self.cache_datastore.contains(key) \
or self.child_datastore.contains(key) | Returns whether the object named by `key` exists.
First checks ``cache_datastore``. | Below is the instruction that describes the task:
### Input:
Returns whether the object named by `key` exists.
First checks ``cache_datastore``.
### Response:
def contains(self, key):
'''Returns whether the object named by `key` exists.
First checks ``cache_datastore``.
'''
return self.cache_datastore.contains(key) \
or self.child_datastore.contains(key) |
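To make the short-circuit concrete, a self-contained sketch with two dict-backed stand-ins for cache_datastore and child_datastore; the real classes wrap actual datastore backends, so this is illustration only:
# Hypothetical stand-ins for illustration only.
class DictStore:
    def __init__(self):
        self._data = {}
    def put(self, key, value):
        self._data[key] = value
    def contains(self, key):
        return key in self._data

cache, child = DictStore(), DictStore()
child.put('a', 1)
# the cache is consulted first; the child store is only hit on a cache miss
print(cache.contains('a') or child.contains('a'))   # True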
def experiment_list(args):
'''get the information of all experiments'''
experiment_config = Experiments()
experiment_dict = experiment_config.get_all_experiments()
if not experiment_dict:
print('There is no experiment running...')
exit(1)
update_experiment()
experiment_id_list = []
if args.all and args.all == 'all':
for key in experiment_dict.keys():
experiment_id_list.append(key)
else:
for key in experiment_dict.keys():
if experiment_dict[key]['status'] != 'STOPPED':
experiment_id_list.append(key)
if not experiment_id_list:
print_warning('There is no experiment running...\nYou can use \'nnictl experiment list all\' to list all stopped experiments!')
experiment_information = ""
for key in experiment_id_list:
experiment_information += (EXPERIMENT_DETAIL_FORMAT % (key, experiment_dict[key]['status'], experiment_dict[key]['port'],\
experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], experiment_dict[key]['endTime']))
print(EXPERIMENT_INFORMATION_FORMAT % experiment_information) | get the information of all experiments | Below is the instruction that describes the task:
### Input:
get the information of all experiments
### Response:
def experiment_list(args):
'''get the information of all experiments'''
experiment_config = Experiments()
experiment_dict = experiment_config.get_all_experiments()
if not experiment_dict:
print('There is no experiment running...')
exit(1)
update_experiment()
experiment_id_list = []
if args.all and args.all == 'all':
for key in experiment_dict.keys():
experiment_id_list.append(key)
else:
for key in experiment_dict.keys():
if experiment_dict[key]['status'] != 'STOPPED':
experiment_id_list.append(key)
if not experiment_id_list:
print_warning('There is no experiment running...\nYou can use \'nnictl experiment list all\' to list all stopped experiments!')
experiment_information = ""
for key in experiment_id_list:
experiment_information += (EXPERIMENT_DETAIL_FORMAT % (key, experiment_dict[key]['status'], experiment_dict[key]['port'],\
experiment_dict[key].get('platform'), experiment_dict[key]['startTime'], experiment_dict[key]['endTime']))
print(EXPERIMENT_INFORMATION_FORMAT % experiment_information) |
def keywords_special_characters(keywords):
"""
Confirms that the keywords don't contain special characters
Args:
keywords (str)
Raises:
django.forms.ValidationError
"""
invalid_chars = '!\"#$%&\'()*+-./:;<=>?@[\\]^_{|}~\t\n'
if any(char in invalid_chars for char in keywords):
raise ValidationError(MESSAGE_KEYWORD_SPECIAL_CHARS) | Confirms that the keywords don't contain special characters
Args:
keywords (str)
Raises:
django.forms.ValidationError | Below is the instruction that describes the task:
### Input:
Confirms that the keywords don't contain special characters
Args:
keywords (str)
Raises:
django.forms.ValidationError
### Response:
def keywords_special_characters(keywords):
"""
Confirms that the keywords don't contain special characters
Args:
keywords (str)
Raises:
django.forms.ValidationError
"""
invalid_chars = '!\"#$%&\'()*+-./:;<=>?@[\\]^_{|}~\t\n'
if any(char in invalid_chars for char in keywords):
raise ValidationError(MESSAGE_KEYWORD_SPECIAL_CHARS) |
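A hedged usage sketch; it assumes the validator above is importable and exercises only the accept/reject behaviour. ValidationError is imported from django.core.exceptions, which is the same class that django.forms re-exports:
# Illustrative only.
from django.core.exceptions import ValidationError

keywords_special_characters('data science python')        # accepted: letters and spaces only
try:
    keywords_special_characters('c++ / .net')              # '+', '/' and '.' are all rejected
except ValidationError:
    print('keywords rejected')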
def team(self):
"""Team to which the scope is assigned."""
team_dict = self._json_data.get('team')
if team_dict and team_dict.get('id'):
return self._client.team(id=team_dict.get('id'))
else:
return None | Team to which the scope is assigned. | Below is the instruction that describes the task:
### Input:
Team to which the scope is assigned.
### Response:
def team(self):
"""Team to which the scope is assigned."""
team_dict = self._json_data.get('team')
if team_dict and team_dict.get('id'):
return self._client.team(id=team_dict.get('id'))
else:
return None |
def salt_run():
'''
Execute a salt convenience routine.
'''
import salt.cli.run
if '' in sys.path:
sys.path.remove('')
client = salt.cli.run.SaltRun()
_install_signal_handlers(client)
client.run() | Execute a salt convenience routine. | Below is the instruction that describes the task:
### Input:
Execute a salt convenience routine.
### Response:
def salt_run():
'''
Execute a salt convenience routine.
'''
import salt.cli.run
if '' in sys.path:
sys.path.remove('')
client = salt.cli.run.SaltRun()
_install_signal_handlers(client)
client.run() |
def process(filename=None, url=None, key=None):
"""
Yields DOE CODE records based on provided input sources
param:
filename (str): Path to a DOE CODE .json file
url (str): URL for a DOE CODE server json file
key (str): API Key for connecting to DOE CODE server
"""
if filename is not None:
yield from process_json(filename)
elif url and key:
yield from process_url(url, key) | Yields DOE CODE records based on provided input sources
param:
filename (str): Path to a DOE CODE .json file
url (str): URL for a DOE CODE server json file
key (str): API Key for connecting to DOE CODE server | Below is the instruction that describes the task:
### Input:
Yields DOE CODE records based on provided input sources
param:
filename (str): Path to a DOE CODE .json file
url (str): URL for a DOE CODE server json file
key (str): API Key for connecting to DOE CODE server
### Response:
def process(filename=None, url=None, key=None):
"""
Yields DOE CODE records based on provided input sources
param:
filename (str): Path to a DOE CODE .json file
url (str): URL for a DOE CODE server json file
key (str): API Key for connecting to DOE CODE server
"""
if filename is not None:
yield from process_json(filename)
elif url and key:
yield from process_url(url, key) |
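A hedged sketch of driving the generator above; the path, URL and key are placeholders, and process_json/process_url are the sibling helpers the body dispatches to:
# Illustrative only; all values are placeholders.
for record in process(filename='doecode.json'):
    print(record)

# or, pulling from a DOE CODE server instead of a local dump:
for record in process(url='https://example.gov/doecode/records.json', key='API-KEY'):
    print(record)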
def play_NoteContainer(self, notecontainer):
"""Convert a mingus.containers.NoteContainer to the equivalent MIDI
events and add it to the track_data.
Note.channel and Note.velocity can be set as well.
"""
if len(notecontainer) <= 1:
[self.play_Note(x) for x in notecontainer]
else:
self.play_Note(notecontainer[0])
self.set_deltatime(0)
[self.play_Note(x) for x in notecontainer[1:]] | Convert a mingus.containers.NoteContainer to the equivalent MIDI
events and add it to the track_data.
Note.channel and Note.velocity can be set as well. | Below is the instruction that describes the task:
### Input:
Convert a mingus.containers.NoteContainer to the equivalent MIDI
events and add it to the track_data.
Note.channel and Note.velocity can be set as well.
### Response:
def play_NoteContainer(self, notecontainer):
"""Convert a mingus.containers.NoteContainer to the equivalent MIDI
events and add it to the track_data.
Note.channel and Note.velocity can be set as well.
"""
if len(notecontainer) <= 1:
[self.play_Note(x) for x in notecontainer]
else:
self.play_Note(notecontainer[0])
self.set_deltatime(0)
[self.play_Note(x) for x in notecontainer[1:]] |
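A hedged sketch of feeding a chord into this method; it assumes the method lives on mingus's MidiTrack class (consistent with the track_data and delta-time calls above) and that mingus is installed:
# Illustrative only.
from mingus.containers import NoteContainer
from mingus.midi.midi_track import MidiTrack

track = MidiTrack()
chord = NoteContainer(['C-4', 'E-4', 'G-4'])
track.play_NoteContainer(chord)   # first note at the current delta time, the rest at delta time 0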
def query_instance(vm_=None, call=None):
'''
Query an instance upon creation from the EC2 API
'''
if call == 'function':
# Technically this function may be called other ways too, but it
# definitely cannot be called with --function.
raise SaltCloudSystemExit(
'The query_instance action must be called with -a or --action.'
)
instance_id = vm_['instance_id']
location = vm_.get('location', get_location(vm_))
__utils__['cloud.fire_event'](
'event',
'querying instance',
'salt/cloud/{0}/querying'.format(vm_['name']),
args={'instance_id': instance_id},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('The new VM instance_id is %s', instance_id)
params = {'Action': 'DescribeInstances',
'InstanceId.1': instance_id}
provider = get_provider(vm_)
attempts = 0
while attempts < aws.AWS_MAX_RETRIES:
data, requesturl = aws.query(params, # pylint: disable=unbalanced-tuple-unpacking
location=location,
provider=provider,
opts=__opts__,
return_url=True,
sigver='4')
log.debug('The query returned: %s', data)
if isinstance(data, dict) and 'error' in data:
log.warning(
'There was an error in the query. %s attempts '
'remaining: %s', attempts, data['error']
)
elif isinstance(data, list) and not data:
log.warning(
'Query returned an empty list. %s attempts '
'remaining.', attempts
)
else:
break
aws.sleep_exponential_backoff(attempts)
attempts += 1
continue
else:
raise SaltCloudSystemExit(
'An error occurred while creating VM: {0}'.format(data['error'])
)
def __query_ip_address(params, url): # pylint: disable=W0613
data = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
if not data:
log.error(
'There was an error while querying EC2. Empty response'
)
# Trigger a failure in the wait for IP function
return False
if isinstance(data, dict) and 'error' in data:
log.warning('There was an error in the query. %s', data['error'])
# Trigger a failure in the wait for IP function
return False
log.debug('Returned query data: %s', data)
if ssh_interface(vm_) == 'public_ips':
if 'ipAddress' in data[0]['instancesSet']['item']:
return data
else:
log.error(
'Public IP not detected.'
)
if ssh_interface(vm_) == 'private_ips':
if 'privateIpAddress' in data[0]['instancesSet']['item']:
return data
else:
log.error(
'Private IP not detected.'
)
try:
data = salt.utils.cloud.wait_for_ip(
__query_ip_address,
update_args=(params, requesturl),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10),
interval_multiplier=config.get_cloud_config_value(
'wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(six.text_type(exc))
if 'reactor' in vm_ and vm_['reactor'] is True:
__utils__['cloud.fire_event'](
'event',
'instance queried',
'salt/cloud/{0}/query_reactor'.format(vm_['name']),
args={'data': data},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return data | Query an instance upon creation from the EC2 API | Below is the the instruction that describes the task:
### Input:
Query an instance upon creation from the EC2 API
### Response:
def query_instance(vm_=None, call=None):
'''
Query an instance upon creation from the EC2 API
'''
if call == 'function':
# Technically this function may be called other ways too, but it
# definitely cannot be called with --function.
raise SaltCloudSystemExit(
'The query_instance action must be called with -a or --action.'
)
instance_id = vm_['instance_id']
location = vm_.get('location', get_location(vm_))
__utils__['cloud.fire_event'](
'event',
'querying instance',
'salt/cloud/{0}/querying'.format(vm_['name']),
args={'instance_id': instance_id},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
log.debug('The new VM instance_id is %s', instance_id)
params = {'Action': 'DescribeInstances',
'InstanceId.1': instance_id}
provider = get_provider(vm_)
attempts = 0
while attempts < aws.AWS_MAX_RETRIES:
data, requesturl = aws.query(params, # pylint: disable=unbalanced-tuple-unpacking
location=location,
provider=provider,
opts=__opts__,
return_url=True,
sigver='4')
log.debug('The query returned: %s', data)
if isinstance(data, dict) and 'error' in data:
log.warning(
'There was an error in the query. %s attempts '
'remaining: %s', attempts, data['error']
)
elif isinstance(data, list) and not data:
log.warning(
'Query returned an empty list. %s attempts '
'remaining.', attempts
)
else:
break
aws.sleep_exponential_backoff(attempts)
attempts += 1
continue
else:
raise SaltCloudSystemExit(
'An error occurred while creating VM: {0}'.format(data['error'])
)
def __query_ip_address(params, url): # pylint: disable=W0613
data = aws.query(params,
location=location,
provider=provider,
opts=__opts__,
sigver='4')
if not data:
log.error(
'There was an error while querying EC2. Empty response'
)
# Trigger a failure in the wait for IP function
return False
if isinstance(data, dict) and 'error' in data:
log.warning('There was an error in the query. %s', data['error'])
# Trigger a failure in the wait for IP function
return False
log.debug('Returned query data: %s', data)
if ssh_interface(vm_) == 'public_ips':
if 'ipAddress' in data[0]['instancesSet']['item']:
return data
else:
log.error(
'Public IP not detected.'
)
if ssh_interface(vm_) == 'private_ips':
if 'privateIpAddress' in data[0]['instancesSet']['item']:
return data
else:
log.error(
'Private IP not detected.'
)
try:
data = salt.utils.cloud.wait_for_ip(
__query_ip_address,
update_args=(params, requesturl),
timeout=config.get_cloud_config_value(
'wait_for_ip_timeout', vm_, __opts__, default=10 * 60),
interval=config.get_cloud_config_value(
'wait_for_ip_interval', vm_, __opts__, default=10),
interval_multiplier=config.get_cloud_config_value(
'wait_for_ip_interval_multiplier', vm_, __opts__, default=1),
)
except (SaltCloudExecutionTimeout, SaltCloudExecutionFailure) as exc:
try:
# It might be already up, let's destroy it!
destroy(vm_['name'])
except SaltCloudSystemExit:
pass
finally:
raise SaltCloudSystemExit(six.text_type(exc))
if 'reactor' in vm_ and vm_['reactor'] is True:
__utils__['cloud.fire_event'](
'event',
'instance queried',
'salt/cloud/{0}/query_reactor'.format(vm_['name']),
args={'data': data},
sock_dir=__opts__['sock_dir'],
transport=__opts__['transport']
)
return data |
def generate(env):
"""Add Builders and construction variables for compaq visual fortran to an Environment."""
fortran.generate(env)
env['FORTRAN'] = 'f90'
env['FORTRANCOM'] = '$FORTRAN $FORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['FORTRANPPCOM'] = '$FORTRAN $FORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['SHFORTRANCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['SHFORTRANPPCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['OBJSUFFIX'] = '.obj'
env['FORTRANMODDIR'] = '${TARGET.dir}'
env['FORTRANMODDIRPREFIX'] = '/module:'
env['FORTRANMODDIRSUFFIX'] = '' | Add Builders and construction variables for compaq visual fortran to an Environment. | Below is the the instruction that describes the task:
### Input:
Add Builders and construction variables for compaq visual fortran to an Environment.
### Response:
def generate(env):
"""Add Builders and construction variables for compaq visual fortran to an Environment."""
fortran.generate(env)
env['FORTRAN'] = 'f90'
env['FORTRANCOM'] = '$FORTRAN $FORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['FORTRANPPCOM'] = '$FORTRAN $FORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['SHFORTRANCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['SHFORTRANPPCOM'] = '$SHFORTRAN $SHFORTRANFLAGS $CPPFLAGS $_CPPDEFFLAGS $_FORTRANMODFLAG $_FORTRANINCFLAGS /compile_only ${SOURCES.windows} /object:${TARGET.windows}'
env['OBJSUFFIX'] = '.obj'
env['FORTRANMODDIR'] = '${TARGET.dir}'
env['FORTRANMODDIRPREFIX'] = '/module:'
env['FORTRANMODDIRSUFFIX'] = '' |
def _should_rotate_log(self, handler):
''' Determine if a log file rotation is necessary '''
if handler['rotate_log']:
rotate_time_index = handler.get('rotate_log_index', 'day')
try:
rotate_time_index = self._decode_time_rotation_index(rotate_time_index)
except ValueError:
rotate_time_index = 2
rotate_time_delta = handler.get('rotate_log_delta', 1)
cur_t = time.gmtime()
first_different_index = 9
for i in range(9):
if cur_t[i] != handler['log_rot_time'][i]:
first_different_index = i
break
if first_different_index < rotate_time_index:
# If the time deltas differ by a time step greater than what we
# have set for the rotation (I.e., months instead of days) we will
# automatically rotate.
return True
else:
time_delta = cur_t[rotate_time_index] - handler['log_rot_time'][rotate_time_index]
return time_delta >= rotate_time_delta
return False | Determine if a log file rotation is necessary | Below is the the instruction that describes the task:
### Input:
Determine if a log file rotation is necessary
### Response:
def _should_rotate_log(self, handler):
''' Determine if a log file rotation is necessary '''
if handler['rotate_log']:
rotate_time_index = handler.get('rotate_log_index', 'day')
try:
rotate_time_index = self._decode_time_rotation_index(rotate_time_index)
except ValueError:
rotate_time_index = 2
rotate_time_delta = handler.get('rotate_log_delta', 1)
cur_t = time.gmtime()
first_different_index = 9
for i in range(9):
if cur_t[i] != handler['log_rot_time'][i]:
first_different_index = i
break
if first_different_index < rotate_time_index:
# If the time deltas differ by a time step greater than what we
# have set for the rotation (I.e., months instead of days) we will
# automatically rotate.
return True
else:
time_delta = cur_t[rotate_time_index] - handler['log_rot_time'][rotate_time_index]
return time_delta >= rotate_time_delta
return False |
def validate(value):
"""
checks if given value is a valid country codes
@param string value
@return bool
"""
if not helpers.has_len(value):
return False
return COUNTRIES.has_key(str(value).lower()) | checks if given value is a valid country codes
@param string value
@return bool | Below is the the instruction that describes the task:
### Input:
checks if given value is a valid country codes
@param string value
@return bool
### Response:
def validate(value):
"""
checks if given value is a valid country codes
@param string value
@return bool
"""
if not helpers.has_len(value):
return False
return COUNTRIES.has_key(str(value).lower()) |
def pfull_from_ps(bk, pk, ps, pfull_coord):
"""Compute pressure at full levels from surface pressure."""
return to_pfull_from_phalf(phalf_from_ps(bk, pk, ps), pfull_coord) | Compute pressure at full levels from surface pressure. | Below is the the instruction that describes the task:
### Input:
Compute pressure at full levels from surface pressure.
### Response:
def pfull_from_ps(bk, pk, ps, pfull_coord):
"""Compute pressure at full levels from surface pressure."""
return to_pfull_from_phalf(phalf_from_ps(bk, pk, ps), pfull_coord) |
def validate(cls, cpf):
u"""
Válida o CPF.
>>> CPF.validate(58119443659)
True
>>> CPF.validate(58119443650)
False
>>> CPF.validate('58119443659')
True
>>> CPF.validate('581.194.436-59')
True
"""
if cpf is None:
return False
cpf = CPF.clean(cpf)
def mod11(value):
return (value % 11) % 10
dig1 = mod11(sum([(i + 1) * int(cpf[i]) for i in range(0, 9)]))
dig2 = mod11(sum([i * int(cpf[i]) for i in range(1, 10)]))
return cpf[-2:] == '{0}{1}'.format(dig1, dig2) | u"""
Válida o CPF.
>>> CPF.validate(58119443659)
True
>>> CPF.validate(58119443650)
False
>>> CPF.validate('58119443659')
True
>>> CPF.validate('581.194.436-59')
True | Below is the the instruction that describes the task:
### Input:
u"""
Válida o CPF.
>>> CPF.validate(58119443659)
True
>>> CPF.validate(58119443650)
False
>>> CPF.validate('58119443659')
True
>>> CPF.validate('581.194.436-59')
True
### Response:
def validate(cls, cpf):
u"""
Válida o CPF.
>>> CPF.validate(58119443659)
True
>>> CPF.validate(58119443650)
False
>>> CPF.validate('58119443659')
True
>>> CPF.validate('581.194.436-59')
True
"""
if cpf is None:
return False
cpf = CPF.clean(cpf)
def mod11(value):
return (value % 11) % 10
dig1 = mod11(sum([(i + 1) * int(cpf[i]) for i in range(0, 9)]))
dig2 = mod11(sum([i * int(cpf[i]) for i in range(1, 10)]))
return cpf[-2:] == '{0}{1}'.format(dig1, dig2) |
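The two check digits can be recomputed by hand with the same weighting; a standalone sketch that mirrors the arithmetic above for the doctest value 58119443659:

digits = "58119443659"

def mod11(value):
    return (value % 11) % 10

dig1 = mod11(sum((i + 1) * int(digits[i]) for i in range(0, 9)))  # 203 % 11 -> 5
dig2 = mod11(sum(i * int(digits[i]) for i in range(1, 10)))       # 207 % 11 -> 9
print(digits[-2:] == "{0}{1}".format(dig1, dig2))                 # True
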
def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise,
discretize_warmup_steps, mode):
"""Simple discretization through tanh, flip bottleneck_noise many bits."""
x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck")
d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0
if mode == tf.estimator.ModeKeys.TRAIN:
x += tf.truncated_normal(
common_layers.shape_list(x), mean=0.0, stddev=0.2)
x = tf.tanh(x)
d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
if mode == tf.estimator.ModeKeys.TRAIN:
noise = tf.random_uniform(common_layers.shape_list(x))
noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
d *= noise
d = common_layers.mix(d, x, discretize_warmup_steps,
mode == tf.estimator.ModeKeys.TRAIN)
return d, d0 | Simple discretization through tanh, flip bottleneck_noise many bits. | Below is the the instruction that describes the task:
### Input:
Simple discretization through tanh, flip bottleneck_noise many bits.
### Response:
def tanh_discrete_bottleneck(x, bottleneck_bits, bottleneck_noise,
discretize_warmup_steps, mode):
"""Simple discretization through tanh, flip bottleneck_noise many bits."""
x = tf.layers.dense(x, bottleneck_bits, name="tanh_discrete_bottleneck")
d0 = tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x))) - 1.0
if mode == tf.estimator.ModeKeys.TRAIN:
x += tf.truncated_normal(
common_layers.shape_list(x), mean=0.0, stddev=0.2)
x = tf.tanh(x)
d = x + tf.stop_gradient(2.0 * tf.to_float(tf.less(0.0, x)) - 1.0 - x)
if mode == tf.estimator.ModeKeys.TRAIN:
noise = tf.random_uniform(common_layers.shape_list(x))
noise = 2.0 * tf.to_float(tf.less(bottleneck_noise, noise)) - 1.0
d *= noise
d = common_layers.mix(d, x, discretize_warmup_steps,
mode == tf.estimator.ModeKeys.TRAIN)
return d, d0 |
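At evaluation time the bottleneck reduces to a tanh followed by a sign; a small NumPy sketch of that deterministic path (the training-time noise and the straight-through gradient trick are TensorFlow-specific and omitted here):

import numpy as np

x = np.array([[-1.3, 0.2, 0.0, 2.1]])
x = np.tanh(x)
d = 2.0 * (x > 0.0).astype(np.float32) - 1.0  # sign of the activation, in {-1, +1}
print(d)  # [[-1.  1. -1.  1.]]
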
def unregister(self, entry_point):
"""Unregister a provider
:param str entry_point: provider to unregister (entry point syntax).
"""
if entry_point not in self.registered_extensions:
raise ValueError('Extension not registered')
ep = EntryPoint.parse(entry_point)
self.registered_extensions.remove(entry_point)
if self._extensions_by_name is not None:
del self._extensions_by_name[ep.name]
for i, ext in enumerate(self.extensions):
if ext.name == ep.name:
del self.extensions[i]
break | Unregister a provider
:param str entry_point: provider to unregister (entry point syntax). | Below is the the instruction that describes the task:
### Input:
Unregister a provider
:param str entry_point: provider to unregister (entry point syntax).
### Response:
def unregister(self, entry_point):
"""Unregister a provider
:param str entry_point: provider to unregister (entry point syntax).
"""
if entry_point not in self.registered_extensions:
raise ValueError('Extension not registered')
ep = EntryPoint.parse(entry_point)
self.registered_extensions.remove(entry_point)
if self._extensions_by_name is not None:
del self._extensions_by_name[ep.name]
for i, ext in enumerate(self.extensions):
if ext.name == ep.name:
del self.extensions[i]
break |
def match_file(patterns, file):
"""
Matches the file to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*file* (:class:`str`) is the normalized file path to be matched
against *patterns*.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
"""
matched = False
for pattern in patterns:
if pattern.include is not None:
if file in pattern.match((file,)):
matched = pattern.include
return matched | Matches the file to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*file* (:class:`str`) is the normalized file path to be matched
against *patterns*.
Returns :data:`True` if *file* matched; otherwise, :data:`False`. | Below is the the instruction that describes the task:
### Input:
Matches the file to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*file* (:class:`str`) is the normalized file path to be matched
against *patterns*.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
### Response:
def match_file(patterns, file):
"""
Matches the file to the patterns.
*patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`)
contains the patterns to use.
*file* (:class:`str`) is the normalized file path to be matched
against *patterns*.
Returns :data:`True` if *file* matched; otherwise, :data:`False`.
"""
matched = False
for pattern in patterns:
if pattern.include is not None:
if file in pattern.match((file,)):
matched = pattern.include
return matched |
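This helper sits underneath pathspec's public API; in practice the same matching is usually reached through PathSpec, e.g. (assuming the pathspec package is installed):

import pathspec

spec = pathspec.PathSpec.from_lines("gitwildmatch", ["*.py", "!test_*.py"])
print(spec.match_file("pkg/module.py"))       # True
print(spec.match_file("pkg/test_module.py"))  # False (the negated pattern excludes it)
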
def average():
""" generator that holds a rolling average """
count = 0
total = total()
i=0
while 1:
i = yield ((total.send(i)*1.0)/count if count else 0)
count += 1 | generator that holds a rolling average | Below is the the instruction that describes the task:
### Input:
generator that holds a rolling average
### Response:
def average():
""" generator that holds a rolling average """
count = 0
total = total()
i=0
while 1:
i = yield ((total.send(i)*1.0)/count if count else 0)
count += 1 |
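As written, the generator above depends on a separate total generator and rebinds the name total locally, which would raise UnboundLocalError before that outer generator could ever be called; a self-contained rolling-average coroutine in the same spirit might look like:

def running_average():
    total = 0.0
    count = 0
    average = 0.0
    while True:
        value = yield average  # hand back the current average, wait for the next sample
        total += value
        count += 1
        average = total / count

avg = running_average()
next(avg)            # prime the coroutine, yields 0.0
print(avg.send(10))  # 10.0
print(avg.send(20))  # 15.0
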
def visit_keyword(self, node: AST, dfltChaining: bool = True) -> str:
"""Return representation of `node` as keyword arg."""
arg = node.arg
if arg is None:
return f"**{self.visit(node.value)}"
else:
return f"{arg}={self.visit(node.value)}" | Return representation of `node` as keyword arg. | Below is the the instruction that describes the task:
### Input:
Return representation of `node` as keyword arg.
### Response:
def visit_keyword(self, node: AST, dfltChaining: bool = True) -> str:
"""Return representation of `node` as keyword arg."""
arg = node.arg
if arg is None:
return f"**{self.visit(node.value)}"
else:
return f"{arg}={self.visit(node.value)}" |
def response_as_single(self, copy=0):
""" convert the response map to a single data frame with Multi-Index columns """
arr = []
for sid, frame in self.response.iteritems():
if copy:
frame = frame.copy()
'security' not in frame and frame.insert(0, 'security', sid)
arr.append(frame.reset_index().set_index(['date', 'security']))
return concat(arr).unstack() | convert the response map to a single data frame with Multi-Index columns | Below is the the instruction that describes the task:
### Input:
convert the response map to a single data frame with Multi-Index columns
### Response:
def response_as_single(self, copy=0):
""" convert the response map to a single data frame with Multi-Index columns """
arr = []
for sid, frame in self.response.iteritems():
if copy:
frame = frame.copy()
'security' not in frame and frame.insert(0, 'security', sid)
arr.append(frame.reset_index().set_index(['date', 'security']))
return concat(arr).unstack() |
def relabeled_clone(self, relabels):
"""Gets a re-labeled clone of this expression."""
return self.__class__(
relabels.get(self.alias, self.alias),
self.target,
self.hstore_key,
self.output_field
) | Gets a re-labeled clone of this expression. | Below is the the instruction that describes the task:
### Input:
Gets a re-labeled clone of this expression.
### Response:
def relabeled_clone(self, relabels):
"""Gets a re-labeled clone of this expression."""
return self.__class__(
relabels.get(self.alias, self.alias),
self.target,
self.hstore_key,
self.output_field
) |
def add_interrupt_callback(self, gpio_id, callback, edge='both',
pull_up_down=_GPIO.PUD_OFF, threaded_callback=False,
debounce_timeout_ms=None):
"""
Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread.
"""
gpio_id = _GPIO.channel_to_gpio(gpio_id)
debug("Adding callback for GPIO %s" % gpio_id)
if not edge in ["falling", "rising", "both", "none"]:
raise AttributeError("'%s' is not a valid edge." % edge)
if not pull_up_down in [_GPIO.PUD_UP, _GPIO.PUD_DOWN, _GPIO.PUD_OFF]:
raise AttributeError("'%s' is not a valid pull_up_down." % edge)
# Make sure the gpio_id is valid
if not gpio_id in set(chain(RPIO.GPIO_LIST_R1, RPIO.GPIO_LIST_R2, \
RPIO.GPIO_LIST_R3)):
raise AttributeError("GPIO %s is not a valid gpio-id." % gpio_id)
# Require INPUT pin setup; and set the correct PULL_UPDN
if RPIO.gpio_function(int(gpio_id)) == RPIO.IN:
RPIO.set_pullupdn(gpio_id, pull_up_down)
else:
debug("- changing gpio function from %s to INPUT" % \
(GPIO_FUNCTIONS[RPIO.gpio_function(int(gpio_id))]))
RPIO.setup(gpio_id, RPIO.IN, pull_up_down)
# Prepare the callback (wrap in Thread if needed)
cb = callback if not threaded_callback else \
partial(_threaded_callback, callback)
# Prepare the /sys/class path of this gpio
path_gpio = "%sgpio%s/" % (_SYS_GPIO_ROOT, gpio_id)
# If initial callback for this GPIO then set everything up. Else make
# sure the edge detection is the same.
if gpio_id in self._map_gpioid_to_callbacks:
with open(path_gpio + "edge", "r") as f:
e = f.read().strip()
if e != edge:
raise AttributeError(("Cannot add callback for gpio %s:"
" edge detection '%s' not compatible with existing"
" edge detection '%s'.") % (gpio_id, edge, e))
# Check whether edge is the same, else throw Exception
debug("- kernel interface already setup for GPIO %s" % gpio_id)
self._map_gpioid_to_callbacks[gpio_id].append(cb)
else:
# If kernel interface already exists unexport first for clean setup
if os.path.exists(path_gpio):
if self._show_warnings:
warn("Kernel interface for GPIO %s already exists." % \
gpio_id)
debug("- unexporting kernel interface for GPIO %s" % gpio_id)
with open(_SYS_GPIO_ROOT + "unexport", "w") as f:
f.write("%s" % gpio_id)
time.sleep(0.1)
# Export kernel interface /sys/class/gpio/gpioN
with open(_SYS_GPIO_ROOT + "export", "w") as f:
f.write("%s" % gpio_id)
self._gpio_kernel_interfaces_created.append(gpio_id)
debug("- kernel interface exported for GPIO %s" % gpio_id)
# Configure gpio as input
with open(path_gpio + "direction", "w") as f:
f.write("in")
# Configure gpio edge detection
with open(path_gpio + "edge", "w") as f:
f.write(edge)
debug(("- kernel interface configured for GPIO %s "
"(edge='%s', pullupdn=%s)") % (gpio_id, edge, \
_PULL_UPDN[pull_up_down]))
# Open the gpio value stream and read the initial value
f = open(path_gpio + "value", 'r')
val_initial = f.read().strip()
debug("- inital gpio value: %s" % val_initial)
f.seek(0)
# Add callback info to the mapping dictionaries
self._map_fileno_to_file[f.fileno()] = f
self._map_fileno_to_gpioid[f.fileno()] = gpio_id
self._map_fileno_to_options[f.fileno()] = {
"debounce_timeout_s": debounce_timeout_ms / 1000.0 if \
debounce_timeout_ms else 0,
"interrupt_last": 0,
"edge": edge
}
self._map_gpioid_to_fileno[gpio_id] = f.fileno()
self._map_gpioid_to_callbacks[gpio_id] = [cb]
# Add to epoll
self._epoll.register(f.fileno(), select.EPOLLPRI | select.EPOLLERR) | Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread. | Below is the the instruction that describes the task:
### Input:
Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread.
### Response:
def add_interrupt_callback(self, gpio_id, callback, edge='both',
pull_up_down=_GPIO.PUD_OFF, threaded_callback=False,
debounce_timeout_ms=None):
"""
Add a callback to be executed when the value on 'gpio_id' changes to
the edge specified via the 'edge' parameter (default='both').
`pull_up_down` can be set to `RPIO.PUD_UP`, `RPIO.PUD_DOWN`, and
`RPIO.PUD_OFF`.
If `threaded_callback` is True, the callback will be started
inside a Thread.
"""
gpio_id = _GPIO.channel_to_gpio(gpio_id)
debug("Adding callback for GPIO %s" % gpio_id)
if not edge in ["falling", "rising", "both", "none"]:
raise AttributeError("'%s' is not a valid edge." % edge)
if not pull_up_down in [_GPIO.PUD_UP, _GPIO.PUD_DOWN, _GPIO.PUD_OFF]:
raise AttributeError("'%s' is not a valid pull_up_down." % edge)
# Make sure the gpio_id is valid
if not gpio_id in set(chain(RPIO.GPIO_LIST_R1, RPIO.GPIO_LIST_R2, \
RPIO.GPIO_LIST_R3)):
raise AttributeError("GPIO %s is not a valid gpio-id." % gpio_id)
# Require INPUT pin setup; and set the correct PULL_UPDN
if RPIO.gpio_function(int(gpio_id)) == RPIO.IN:
RPIO.set_pullupdn(gpio_id, pull_up_down)
else:
debug("- changing gpio function from %s to INPUT" % \
(GPIO_FUNCTIONS[RPIO.gpio_function(int(gpio_id))]))
RPIO.setup(gpio_id, RPIO.IN, pull_up_down)
# Prepare the callback (wrap in Thread if needed)
cb = callback if not threaded_callback else \
partial(_threaded_callback, callback)
# Prepare the /sys/class path of this gpio
path_gpio = "%sgpio%s/" % (_SYS_GPIO_ROOT, gpio_id)
# If initial callback for this GPIO then set everything up. Else make
# sure the edge detection is the same.
if gpio_id in self._map_gpioid_to_callbacks:
with open(path_gpio + "edge", "r") as f:
e = f.read().strip()
if e != edge:
raise AttributeError(("Cannot add callback for gpio %s:"
" edge detection '%s' not compatible with existing"
" edge detection '%s'.") % (gpio_id, edge, e))
# Check whether edge is the same, else throw Exception
debug("- kernel interface already setup for GPIO %s" % gpio_id)
self._map_gpioid_to_callbacks[gpio_id].append(cb)
else:
# If kernel interface already exists unexport first for clean setup
if os.path.exists(path_gpio):
if self._show_warnings:
warn("Kernel interface for GPIO %s already exists." % \
gpio_id)
debug("- unexporting kernel interface for GPIO %s" % gpio_id)
with open(_SYS_GPIO_ROOT + "unexport", "w") as f:
f.write("%s" % gpio_id)
time.sleep(0.1)
# Export kernel interface /sys/class/gpio/gpioN
with open(_SYS_GPIO_ROOT + "export", "w") as f:
f.write("%s" % gpio_id)
self._gpio_kernel_interfaces_created.append(gpio_id)
debug("- kernel interface exported for GPIO %s" % gpio_id)
# Configure gpio as input
with open(path_gpio + "direction", "w") as f:
f.write("in")
# Configure gpio edge detection
with open(path_gpio + "edge", "w") as f:
f.write(edge)
debug(("- kernel interface configured for GPIO %s "
"(edge='%s', pullupdn=%s)") % (gpio_id, edge, \
_PULL_UPDN[pull_up_down]))
# Open the gpio value stream and read the initial value
f = open(path_gpio + "value", 'r')
val_initial = f.read().strip()
debug("- inital gpio value: %s" % val_initial)
f.seek(0)
# Add callback info to the mapping dictionaries
self._map_fileno_to_file[f.fileno()] = f
self._map_fileno_to_gpioid[f.fileno()] = gpio_id
self._map_fileno_to_options[f.fileno()] = {
"debounce_timeout_s": debounce_timeout_ms / 1000.0 if \
debounce_timeout_ms else 0,
"interrupt_last": 0,
"edge": edge
}
self._map_gpioid_to_fileno[gpio_id] = f.fileno()
self._map_gpioid_to_callbacks[gpio_id] = [cb]
# Add to epoll
self._epoll.register(f.fileno(), select.EPOLLPRI | select.EPOLLERR) |
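A hedged usage sketch through RPIO's module-level wrappers (only meaningful on a Raspberry Pi with RPIO installed; the pin number and options are illustrative):

import RPIO

def on_edge(gpio_id, value):
    print("gpio %s -> %s" % (gpio_id, value))

RPIO.add_interrupt_callback(17, on_edge, edge="rising",
                            pull_up_down=RPIO.PUD_DOWN,
                            debounce_timeout_ms=50)
RPIO.wait_for_interrupts()  # blocks and dispatches the callbacks
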
def parseAndSave(option, urlOrPaths, outDir=None, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType='application/json', metaExtension='_meta.json',
services={'meta': '/meta', 'text': '/tika', 'all': '/rmeta'}):
'''
Parse the objects and write extracted metadata and/or text in JSON format to matching
filename with an extension of '_meta.json'.
:param option:
:param urlOrPaths:
:param outDir:
:param serverEndpoint:
:param verbose:
:param tikaServerJar:
:param responseMimeType:
:param metaExtension:
:param services:
:return:
'''
metaPaths = []
paths = getPaths(urlOrPaths)
for path in paths:
if outDir is None:
metaPath = path + metaExtension
else:
metaPath = os.path.join(outDir, os.path.split(path)[1] + metaExtension)
log.info('Writing %s' % metaPath)
with open(metaPath, 'w', 'utf-8') as f:
f.write(parse1(option, path, serverEndpoint, verbose, tikaServerJar, \
responseMimeType, services)[1] + u"\n")
metaPaths.append(metaPath)
return metaPaths | Parse the objects and write extracted metadata and/or text in JSON format to matching
filename with an extension of '_meta.json'.
:param option:
:param urlOrPaths:
:param outDir:
:param serverEndpoint:
:param verbose:
:param tikaServerJar:
:param responseMimeType:
:param metaExtension:
:param services:
:return: | Below is the the instruction that describes the task:
### Input:
Parse the objects and write extracted metadata and/or text in JSON format to matching
filename with an extension of '_meta.json'.
:param option:
:param urlOrPaths:
:param outDir:
:param serverEndpoint:
:param verbose:
:param tikaServerJar:
:param responseMimeType:
:param metaExtension:
:param services:
:return:
### Response:
def parseAndSave(option, urlOrPaths, outDir=None, serverEndpoint=ServerEndpoint, verbose=Verbose, tikaServerJar=TikaServerJar,
responseMimeType='application/json', metaExtension='_meta.json',
services={'meta': '/meta', 'text': '/tika', 'all': '/rmeta'}):
'''
Parse the objects and write extracted metadata and/or text in JSON format to matching
filename with an extension of '_meta.json'.
:param option:
:param urlOrPaths:
:param outDir:
:param serverEndpoint:
:param verbose:
:param tikaServerJar:
:param responseMimeType:
:param metaExtension:
:param services:
:return:
'''
metaPaths = []
paths = getPaths(urlOrPaths)
for path in paths:
if outDir is None:
metaPath = path + metaExtension
else:
metaPath = os.path.join(outDir, os.path.split(path)[1] + metaExtension)
log.info('Writing %s' % metaPath)
with open(metaPath, 'w', 'utf-8') as f:
f.write(parse1(option, path, serverEndpoint, verbose, tikaServerJar, \
responseMimeType, services)[1] + u"\n")
metaPaths.append(metaPath)
return metaPaths |
def comma_replacement(random, population, parents, offspring, args):
"""Performs "comma" replacement.
This function performs "comma" replacement, which means that
the entire existing population is replaced by the best
population-many elements from the offspring. This function
makes the assumption that the size of the offspring is at
least as large as the original population. Otherwise, the
population size will not be constant.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
offspring.sort(reverse=True)
survivors = offspring[:len(population)]
return survivors | Performs "comma" replacement.
This function performs "comma" replacement, which means that
the entire existing population is replaced by the best
population-many elements from the offspring. This function
makes the assumption that the size of the offspring is at
least as large as the original population. Otherwise, the
population size will not be constant.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments | Below is the the instruction that describes the task:
### Input:
Performs "comma" replacement.
This function performs "comma" replacement, which means that
the entire existing population is replaced by the best
population-many elements from the offspring. This function
makes the assumption that the size of the offspring is at
least as large as the original population. Otherwise, the
population size will not be constant.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
### Response:
def comma_replacement(random, population, parents, offspring, args):
"""Performs "comma" replacement.
This function performs "comma" replacement, which means that
the entire existing population is replaced by the best
population-many elements from the offspring. This function
makes the assumption that the size of the offspring is at
least as large as the original population. Otherwise, the
population size will not be constant.
.. Arguments:
random -- the random number generator object
population -- the population of individuals
parents -- the list of parent individuals
offspring -- the list of offspring individuals
args -- a dictionary of keyword arguments
"""
offspring.sort(reverse=True)
survivors = offspring[:len(population)]
return survivors |
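A quick check with plain integers standing in for individuals (assuming the function above is in scope; the random, parents and args arguments do not affect the comparison):

population = [3, 1, 2]
offspring = [9, 5, 7, 1, 3]
print(comma_replacement(None, population, [], offspring, {}))  # [9, 7, 5]
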
def show(self):
"""
Prints the content of this method to stdout.
This will print the method signature and the decompiled code.
"""
args, ret = self.method.get_descriptor()[1:].split(")")
if self.code:
# We patch the descriptor here and add the registers, if code is available
args = args.split(" ")
reg_len = self.code.get_registers_size()
nb_args = len(args)
start_reg = reg_len - nb_args
args = ["{} v{}".format(a, start_reg + i) for i, a in enumerate(args)]
print("METHOD {} {} {} ({}){}".format(
self.method.get_class_name(),
self.method.get_access_flags_string(),
self.method.get_name(),
", ".join(args), ret))
bytecode.PrettyShow(self, self.basic_blocks.gets(), self.method.notes) | Prints the content of this method to stdout.
This will print the method signature and the decompiled code. | Below is the the instruction that describes the task:
### Input:
Prints the content of this method to stdout.
This will print the method signature and the decompiled code.
### Response:
def show(self):
"""
Prints the content of this method to stdout.
This will print the method signature and the decompiled code.
"""
args, ret = self.method.get_descriptor()[1:].split(")")
if self.code:
# We patch the descriptor here and add the registers, if code is available
args = args.split(" ")
reg_len = self.code.get_registers_size()
nb_args = len(args)
start_reg = reg_len - nb_args
args = ["{} v{}".format(a, start_reg + i) for i, a in enumerate(args)]
print("METHOD {} {} {} ({}){}".format(
self.method.get_class_name(),
self.method.get_access_flags_string(),
self.method.get_name(),
", ".join(args), ret))
bytecode.PrettyShow(self, self.basic_blocks.gets(), self.method.notes) |
def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443,
sslflags=0):
'''
Assign a certificate to an IIS Web Binding.
.. versionadded:: 2016.11.0
.. note::
The web binding that the certificate is being assigned to must already
exist.
Args:
name (str): The thumbprint of the certificate.
site (str): The IIS site name.
hostheader (str): The host header of the binding.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
sslflags (int): Flags representing certificate type and certificate storage of the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_cert_binding name='AAA000' site='site0' hostheader='example.com' ipaddress='*' port='443'
'''
name = six.text_type(name).upper()
binding_info = _get_binding_info(hostheader, ipaddress, port)
if _iisVersion() < 8:
# IIS 7.5 and earlier don't support SNI for HTTPS, therefore cert bindings don't contain the host header
binding_info = binding_info.rpartition(':')[0] + ':'
binding_path = r"IIS:\SslBindings\{0}".format(binding_info.replace(':', '!'))
if sslflags not in _VALID_SSL_FLAGS:
message = ("Invalid sslflags '{0}' specified. Valid sslflags range: "
"{1}..{2}").format(sslflags, _VALID_SSL_FLAGS[0],
_VALID_SSL_FLAGS[-1])
raise SaltInvocationError(message)
# Verify that the target binding exists.
current_bindings = list_bindings(site)
if binding_info not in current_bindings:
log.error('Binding not present: %s', binding_info)
return False
# Check to see if the certificate is already assigned.
current_name = None
for current_binding in current_bindings:
if binding_info == current_binding:
current_name = current_bindings[current_binding]['certificatehash']
log.debug('Current certificate thumbprint: %s', current_name)
log.debug('New certificate thumbprint: %s', name)
if name == current_name:
log.debug('Certificate already present for binding: %s', name)
return True
# Verify that the certificate exists.
certs = _list_certs()
if name not in certs:
log.error('Certificate not present: %s', name)
return False
if _iisVersion() < 8:
# IIS 7.5 and earlier have different syntax for associating a certificate with a site
# Modify IP spec to IIS 7.5 format
iis7path = binding_path.replace(r"\*!", "\\0.0.0.0!")
# win 2008 uses the following format: ip!port and not ip!port!
if iis7path.endswith("!"):
iis7path = iis7path[:-1]
ps_cmd = ['New-Item',
'-Path', "'{0}'".format(iis7path),
'-Thumbprint', "'{0}'".format(name)]
else:
ps_cmd = ['New-Item',
'-Path', "'{0}'".format(binding_path),
'-Thumbprint', "'{0}'".format(name),
'-SSLFlags', '{0}'.format(sslflags)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to create certificate binding: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
new_cert_bindings = list_cert_bindings(site)
if binding_info not in new_cert_bindings:
log.error('Binding not present: %s', binding_info)
return False
if name == new_cert_bindings[binding_info]['certificatehash']:
log.debug('Certificate binding created successfully: %s', name)
return True
log.error('Unable to create certificate binding: %s', name)
return False | Assign a certificate to an IIS Web Binding.
.. versionadded:: 2016.11.0
.. note::
The web binding that the certificate is being assigned to must already
exist.
Args:
name (str): The thumbprint of the certificate.
site (str): The IIS site name.
hostheader (str): The host header of the binding.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
sslflags (int): Flags representing certificate type and certificate storage of the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_cert_binding name='AAA000' site='site0' hostheader='example.com' ipaddress='*' port='443' | Below is the the instruction that describes the task:
### Input:
Assign a certificate to an IIS Web Binding.
.. versionadded:: 2016.11.0
.. note::
The web binding that the certificate is being assigned to must already
exist.
Args:
name (str): The thumbprint of the certificate.
site (str): The IIS site name.
hostheader (str): The host header of the binding.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
sslflags (int): Flags representing certificate type and certificate storage of the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_cert_binding name='AAA000' site='site0' hostheader='example.com' ipaddress='*' port='443'
### Response:
def create_cert_binding(name, site, hostheader='', ipaddress='*', port=443,
sslflags=0):
'''
Assign a certificate to an IIS Web Binding.
.. versionadded:: 2016.11.0
.. note::
The web binding that the certificate is being assigned to must already
exist.
Args:
name (str): The thumbprint of the certificate.
site (str): The IIS site name.
hostheader (str): The host header of the binding.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
sslflags (int): Flags representing certificate type and certificate storage of the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_cert_binding name='AAA000' site='site0' hostheader='example.com' ipaddress='*' port='443'
'''
name = six.text_type(name).upper()
binding_info = _get_binding_info(hostheader, ipaddress, port)
if _iisVersion() < 8:
# IIS 7.5 and earlier don't support SNI for HTTPS, therefore cert bindings don't contain the host header
binding_info = binding_info.rpartition(':')[0] + ':'
binding_path = r"IIS:\SslBindings\{0}".format(binding_info.replace(':', '!'))
if sslflags not in _VALID_SSL_FLAGS:
message = ("Invalid sslflags '{0}' specified. Valid sslflags range: "
"{1}..{2}").format(sslflags, _VALID_SSL_FLAGS[0],
_VALID_SSL_FLAGS[-1])
raise SaltInvocationError(message)
# Verify that the target binding exists.
current_bindings = list_bindings(site)
if binding_info not in current_bindings:
log.error('Binding not present: %s', binding_info)
return False
# Check to see if the certificate is already assigned.
current_name = None
for current_binding in current_bindings:
if binding_info == current_binding:
current_name = current_bindings[current_binding]['certificatehash']
log.debug('Current certificate thumbprint: %s', current_name)
log.debug('New certificate thumbprint: %s', name)
if name == current_name:
log.debug('Certificate already present for binding: %s', name)
return True
# Verify that the certificate exists.
certs = _list_certs()
if name not in certs:
log.error('Certificate not present: %s', name)
return False
if _iisVersion() < 8:
# IIS 7.5 and earlier have different syntax for associating a certificate with a site
# Modify IP spec to IIS 7.5 format
iis7path = binding_path.replace(r"\*!", "\\0.0.0.0!")
# win 2008 uses the following format: ip!port and not ip!port!
if iis7path.endswith("!"):
iis7path = iis7path[:-1]
ps_cmd = ['New-Item',
'-Path', "'{0}'".format(iis7path),
'-Thumbprint', "'{0}'".format(name)]
else:
ps_cmd = ['New-Item',
'-Path', "'{0}'".format(binding_path),
'-Thumbprint', "'{0}'".format(name),
'-SSLFlags', '{0}'.format(sslflags)]
cmd_ret = _srvmgr(ps_cmd)
if cmd_ret['retcode'] != 0:
msg = 'Unable to create certificate binding: {0}\nError: {1}' \
''.format(name, cmd_ret['stderr'])
raise CommandExecutionError(msg)
new_cert_bindings = list_cert_bindings(site)
if binding_info not in new_cert_bindings:
log.error('Binding not present: %s', binding_info)
return False
if name == new_cert_bindings[binding_info]['certificatehash']:
log.debug('Certificate binding created successfully: %s', name)
return True
log.error('Unable to create certificate binding: %s', name)
return False |
def _detect_notebook() -> bool:
"""Detect if code is running in a Jupyter Notebook.
This isn't 100% correct but seems good enough
Returns
-------
bool
True if it detects this is a notebook, otherwise False.
"""
try:
from IPython import get_ipython
from ipykernel import zmqshell
except ImportError:
return False
kernel = get_ipython()
try:
from spyder.utils.ipython.spyder_kernel import SpyderKernel
if isinstance(kernel.kernel, SpyderKernel):
return False
except (ImportError, AttributeError):
pass
return isinstance(kernel, zmqshell.ZMQInteractiveShell) | Detect if code is running in a Jupyter Notebook.
This isn't 100% correct but seems good enough
Returns
-------
bool
True if it detects this is a notebook, otherwise False. | Below is the the instruction that describes the task:
### Input:
Detect if code is running in a Jupyter Notebook.
This isn't 100% correct but seems good enough
Returns
-------
bool
True if it detects this is a notebook, otherwise False.
### Response:
def _detect_notebook() -> bool:
"""Detect if code is running in a Jupyter Notebook.
This isn't 100% correct but seems good enough
Returns
-------
bool
True if it detects this is a notebook, otherwise False.
"""
try:
from IPython import get_ipython
from ipykernel import zmqshell
except ImportError:
return False
kernel = get_ipython()
try:
from spyder.utils.ipython.spyder_kernel import SpyderKernel
if isinstance(kernel.kernel, SpyderKernel):
return False
except (ImportError, AttributeError):
pass
return isinstance(kernel, zmqshell.ZMQInteractiveShell) |
def from_key_bytes(cls, algorithm, key_bytes):
"""Builds a `Signer` from an algorithm suite and a raw signing key.
:param algorithm: Algorithm on which to base signer
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param bytes key_bytes: Raw signing key
:rtype: aws_encryption_sdk.internal.crypto.Signer
"""
key = serialization.load_der_private_key(data=key_bytes, password=None, backend=default_backend())
return cls(algorithm, key) | Builds a `Signer` from an algorithm suite and a raw signing key.
:param algorithm: Algorithm on which to base signer
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param bytes key_bytes: Raw signing key
:rtype: aws_encryption_sdk.internal.crypto.Signer | Below is the the instruction that describes the task:
### Input:
Builds a `Signer` from an algorithm suite and a raw signing key.
:param algorithm: Algorithm on which to base signer
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param bytes key_bytes: Raw signing key
:rtype: aws_encryption_sdk.internal.crypto.Signer
### Response:
def from_key_bytes(cls, algorithm, key_bytes):
"""Builds a `Signer` from an algorithm suite and a raw signing key.
:param algorithm: Algorithm on which to base signer
:type algorithm: aws_encryption_sdk.identifiers.Algorithm
:param bytes key_bytes: Raw signing key
:rtype: aws_encryption_sdk.internal.crypto.Signer
"""
key = serialization.load_der_private_key(data=key_bytes, password=None, backend=default_backend())
return cls(algorithm, key) |
def error(self, text):
"""
Posts an error message adding a timestamp and logging level to it for both file and console handlers.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call 'flush' method.
:param text: The text to log into file and console.
"""
self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.ERROR))) | Posts an error message adding a timestamp and logging level to it for both file and console handlers.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call 'flush' method.
:param text: The text to log into file and console. | Below is the the instruction that describes the task:
### Input:
Posts an error message adding a timestamp and logging level to it for both file and console handlers.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call 'flush' method.
:param text: The text to log into file and console.
### Response:
def error(self, text):
"""
Posts an error message adding a timestamp and logging level to it for both file and console handlers.
Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress
at the very time they are being logged but their timestamp will be captured at the right time. Logger will
redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw
immediately (may produce flickering) then call 'flush' method.
:param text: The text to log into file and console.
"""
self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.ERROR))) |
def save_model(self, model, meta_data=None, index_fields=None):
"""
model (instance): Model instance.
meta (dict): JSON serializable meta data for logging of save operation.
{'lorem': 'ipsum', 'dolar': 5}
index_fields (list): Tuple list for indexing keys in riak (with 'bin' or 'int').
[('lorem','bin'),('dolar','int')]
:return:
"""
# if model:
# self._model = model
if settings.DEBUG:
t1 = time.time()
clean_value = model.clean_value()
model._data = clean_value
if settings.DEBUG:
t2 = time.time()
if not model.exist:
obj = self.bucket.new(data=clean_value).store()
model.key = obj.key
new_obj = True
else:
new_obj = False
obj = self.bucket.get(model.key)
obj.data = clean_value
obj.store()
if settings.ENABLE_VERSIONS:
version_key = self._write_version(clean_value, model)
else:
version_key = ''
if settings.ENABLE_CACHING:
self.set_to_cache((clean_value, model.key))
meta_data = meta_data or model.save_meta_data
if settings.ENABLE_ACTIVITY_LOGGING and meta_data:
self._write_log(version_key, meta_data, index_fields)
if self.COLLECT_SAVES and self.COLLECT_SAVES_FOR_MODEL == model.__class__.__name__:
self.block_saved_keys.append(obj.key)
if settings.DEBUG:
if new_obj:
sys.PYOKO_STAT_COUNTER['save'] += 1
sys.PYOKO_LOGS['new'].append(obj.key)
else:
sys.PYOKO_LOGS[self._model_class.__name__].append(obj.key)
sys.PYOKO_STAT_COUNTER['update'] += 1
# sys._debug_db_queries.append({
# 'TIMESTAMP': t1,
# 'KEY': obj.key,
# 'BUCKET': self.index_name,
# 'SAVE_IS_NEW': new_obj,
# 'SERIALIZATION_TIME': round(t2 - t1, 5),
# 'TIME': round(time.time() - t2, 5)
# })
return model | model (instance): Model instance.
meta (dict): JSON serializable meta data for logging of save operation.
{'lorem': 'ipsum', 'dolar': 5}
index_fields (list): Tuple list for indexing keys in riak (with 'bin' or 'int').
[('lorem','bin'),('dolar','int')]
:return: | Below is the the instruction that describes the task:
### Input:
model (instance): Model instance.
meta (dict): JSON serializable meta data for logging of save operation.
{'lorem': 'ipsum', 'dolar': 5}
index_fields (list): Tuple list for indexing keys in riak (with 'bin' or 'int').
[('lorem','bin'),('dolar','int')]
:return:
### Response:
def save_model(self, model, meta_data=None, index_fields=None):
"""
model (instance): Model instance.
meta (dict): JSON serializable meta data for logging of save operation.
{'lorem': 'ipsum', 'dolar': 5}
index_fields (list): Tuple list for indexing keys in riak (with 'bin' or 'int').
[('lorem','bin'),('dolar','int')]
:return:
"""
# if model:
# self._model = model
if settings.DEBUG:
t1 = time.time()
clean_value = model.clean_value()
model._data = clean_value
if settings.DEBUG:
t2 = time.time()
if not model.exist:
obj = self.bucket.new(data=clean_value).store()
model.key = obj.key
new_obj = True
else:
new_obj = False
obj = self.bucket.get(model.key)
obj.data = clean_value
obj.store()
if settings.ENABLE_VERSIONS:
version_key = self._write_version(clean_value, model)
else:
version_key = ''
if settings.ENABLE_CACHING:
self.set_to_cache((clean_value, model.key))
meta_data = meta_data or model.save_meta_data
if settings.ENABLE_ACTIVITY_LOGGING and meta_data:
self._write_log(version_key, meta_data, index_fields)
if self.COLLECT_SAVES and self.COLLECT_SAVES_FOR_MODEL == model.__class__.__name__:
self.block_saved_keys.append(obj.key)
if settings.DEBUG:
if new_obj:
sys.PYOKO_STAT_COUNTER['save'] += 1
sys.PYOKO_LOGS['new'].append(obj.key)
else:
sys.PYOKO_LOGS[self._model_class.__name__].append(obj.key)
sys.PYOKO_STAT_COUNTER['update'] += 1
# sys._debug_db_queries.append({
# 'TIMESTAMP': t1,
# 'KEY': obj.key,
# 'BUCKET': self.index_name,
# 'SAVE_IS_NEW': new_obj,
# 'SERIALIZATION_TIME': round(t2 - t1, 5),
# 'TIME': round(time.time() - t2, 5)
# })
return model |
def update(self, data=None, timeout=-1, force=''):
"""Updates server profile template.
Args:
data: Data to update the resource.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
force: Force the update operation.
Returns:
A dict with the updated resource data.
"""
uri = self.data['uri']
resource = deepcopy(self.data)
resource.update(data)
# Removes related fields to serverHardware in case of unassign
if resource.get('serverHardwareUri') is None:
resource.pop('enclosureBay', None)
resource.pop('enclosureUri', None)
self.data = self._helper.update(resource, uri, force, timeout)
return self | Updates server profile template.
Args:
data: Data to update the resource.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
force: Force the update operation.
Returns:
A dict with the updated resource data. | Below is the the instruction that describes the task:
### Input:
Updates server profile template.
Args:
data: Data to update the resource.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
force: Force the update operation.
Returns:
A dict with the updated resource data.
### Response:
def update(self, data=None, timeout=-1, force=''):
"""Updates server profile template.
Args:
data: Data to update the resource.
timeout: Timeout in seconds. Wait for task completion by default. The timeout does not abort the operation
in OneView; it just stops waiting for its completion.
force: Force the update operation.
Returns:
A dict with the updated resource data.
"""
uri = self.data['uri']
resource = deepcopy(self.data)
resource.update(data)
# Removes related fields to serverHardware in case of unassign
if resource.get('serverHardwareUri') is None:
resource.pop('enclosureBay', None)
resource.pop('enclosureUri', None)
self.data = self._helper.update(resource, uri, force, timeout)
return self |
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts | Split code at plt.show() | Below is the the instruction that describes the task:
### Input:
Split code at plt.show()
### Response:
def split_code_at_show(text):
"""
Split code at plt.show()
"""
parts = []
is_doctest = contains_doctest(text)
part = []
for line in text.split("\n"):
if (not is_doctest and line.strip() == 'plt.show()') or \
(is_doctest and line.strip() == '>>> plt.show()'):
part.append(line)
parts.append("\n".join(part))
part = []
else:
part.append(line)
if "\n".join(part).strip():
parts.append("\n".join(part))
return parts |
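A minimal check of the splitting, with a crude stand-in for the contains_doctest helper the function relies on (assuming split_code_at_show above is defined in the same namespace):

def contains_doctest(text):
    # stand-in for the real helper used alongside split_code_at_show
    return any(line.lstrip().startswith(">>>") for line in text.splitlines())

script = ("import matplotlib.pyplot as plt\n"
          "plt.plot([1, 2, 3])\n"
          "plt.show()\n"
          "plt.hist([1, 1, 2])\n"
          "plt.show()")
parts = split_code_at_show(script)
print(len(parts))  # 2
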
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_many(filter, update, upsert).raw_result | Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered | Below is the the instruction that describes the task:
### Input:
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
### Response:
def update_many(cls, filter, update, upsert=False):
"""
Updates all documents that pass the filter with the update value
Will upsert a new document if upsert=True and no document is filtered
"""
return cls.collection.update_many(filter, update, upsert).raw_result |
def disk_cache(cls, basename, function, *args, method=True, **kwargs):
"""
Cache the return value in the correct cache directory. Set 'method' to
false for static methods.
"""
@utility.disk_cache(basename, cls.directory(), method=method)
def wrapper(*args, **kwargs):
return function(*args, **kwargs)
return wrapper(*args, **kwargs) | Cache the return value in the correct cache directory. Set 'method' to
false for static methods. | Below is the the instruction that describes the task:
### Input:
Cache the return value in the correct cache directory. Set 'method' to
false for static methods.
### Response:
def disk_cache(cls, basename, function, *args, method=True, **kwargs):
"""
Cache the return value in the correct cache directory. Set 'method' to
false for static methods.
"""
@utility.disk_cache(basename, cls.directory(), method=method)
def wrapper(*args, **kwargs):
return function(*args, **kwargs)
return wrapper(*args, **kwargs) |
def get_schema_dir(self, path):
"""Retrieve the directory containing the given schema.
:param path: Schema path, relative to the directory where it was
registered.
:raises invenio_jsonschemas.errors.JSONSchemaNotFound: If no schema
was found in the specified path.
:returns: The schema directory.
"""
if path not in self.schemas:
raise JSONSchemaNotFound(path)
return self.schemas[path] | Retrieve the directory containing the given schema.
:param path: Schema path, relative to the directory where it was
registered.
:raises invenio_jsonschemas.errors.JSONSchemaNotFound: If no schema
was found in the specified path.
:returns: The schema directory. | Below is the the instruction that describes the task:
### Input:
Retrieve the directory containing the given schema.
:param path: Schema path, relative to the directory where it was
registered.
:raises invenio_jsonschemas.errors.JSONSchemaNotFound: If no schema
was found in the specified path.
:returns: The schema directory.
### Response:
def get_schema_dir(self, path):
"""Retrieve the directory containing the given schema.
:param path: Schema path, relative to the directory where it was
registered.
:raises invenio_jsonschemas.errors.JSONSchemaNotFound: If no schema
was found in the specified path.
:returns: The schema directory.
"""
if path not in self.schemas:
raise JSONSchemaNotFound(path)
return self.schemas[path] |
def center_of_mass(self):
"""
Calculates the center of mass of the slab
"""
weights = [s.species.weight for s in self]
center_of_mass = np.average(self.frac_coords,
weights=weights, axis=0)
return center_of_mass | Calculates the center of mass of the slab | Below is the the instruction that describes the task:
### Input:
Calculates the center of mass of the slab
### Response:
def center_of_mass(self):
"""
Calculates the center of mass of the slab
"""
weights = [s.species.weight for s in self]
center_of_mass = np.average(self.frac_coords,
weights=weights, axis=0)
return center_of_mass |
def do_speed(self, speed):
"""
rewind
"""
if speed:
try:
self.bot._speed = float(speed)
except Exception as e:
self.print_response('%s is not a valid framerate' % speed)
return
self.print_response('Speed: %s FPS' % self.bot._speed) | rewind | Below is the the instruction that describes the task:
### Input:
rewind
### Response:
def do_speed(self, speed):
"""
rewind
"""
if speed:
try:
self.bot._speed = float(speed)
except Exception as e:
self.print_response('%s is not a valid framerate' % speed)
return
self.print_response('Speed: %s FPS' % self.bot._speed) |