code (string, lengths 75-104k) | docstring (string, lengths 1-46.9k) | text (string, lengths 164-112k) |
---|---|---|
def add_walk_distances_to_db_python(gtfs, osm_path, cutoff_distance_m=1000):
"""
Computes the walk paths between stops, and updates these to the gtfs database.
Parameters
----------
gtfs: gtfspy.GTFS or str
A GTFS object or a string representation.
osm_path: str
path to the OpenStreetMap file
cutoff_distance_m: number
maximum allowed distance in meters
Returns
-------
None
See Also
--------
gtfspy.calc_transfers
compute_walk_paths_java
"""
if isinstance(gtfs, str):
gtfs = GTFS(gtfs)
assert (isinstance(gtfs, GTFS))
print("Reading in walk network")
walk_network = create_walk_network_from_osm(osm_path)
print("Matching stops to the OSM network")
stop_I_to_nearest_osm_node, stop_I_to_nearest_osm_node_distance = match_stops_to_nodes(gtfs, walk_network)
transfers = gtfs.get_straight_line_transfer_distances()
from_I_to_to_stop_Is = {stop_I: set() for stop_I in stop_I_to_nearest_osm_node}
for transfer_tuple in transfers.itertuples():
from_I = transfer_tuple.from_stop_I
to_I = transfer_tuple.to_stop_I
from_I_to_to_stop_Is[from_I].add(to_I)
print("Computing walking distances")
for from_I, to_stop_Is in from_I_to_to_stop_Is.items():
from_node = stop_I_to_nearest_osm_node[from_I]
from_dist = stop_I_to_nearest_osm_node_distance[from_I]
shortest_paths = networkx.single_source_dijkstra_path_length(walk_network,
from_node,
cutoff=cutoff_distance_m - from_dist,
weight="distance")
for to_I in to_stop_Is:
to_distance = stop_I_to_nearest_osm_node_distance[to_I]
to_node = stop_I_to_nearest_osm_node[to_I]
osm_distance = shortest_paths.get(to_node, float('inf'))
total_distance = from_dist + osm_distance + to_distance
from_stop_I_transfers = transfers[transfers['from_stop_I'] == from_I]
straight_distance = from_stop_I_transfers[from_stop_I_transfers["to_stop_I"] == to_I]["d"].values[0]
assert (straight_distance < total_distance + 2) # allow for a maximum of 2 meters in calculations
if total_distance <= cutoff_distance_m:
gtfs.conn.execute("UPDATE stop_distances "
"SET d_walk = " + str(int(total_distance)) +
" WHERE from_stop_I=" + str(from_I) + " AND to_stop_I=" + str(to_I))
gtfs.conn.commit() | Computes the walk paths between stops, and updates these to the gtfs database.
Parameters
----------
gtfs: gtfspy.GTFS or str
A GTFS object or a string representation.
osm_path: str
path to the OpenStreetMap file
cutoff_distance_m: number
maximum allowed distance in meters
Returns
-------
None
See Also
--------
gtfspy.calc_transfers
compute_walk_paths_java | Below is the instruction that describes the task:
### Input:
Computes the walk paths between stops, and updates these to the gtfs database.
Parameters
----------
gtfs: gtfspy.GTFS or str
A GTFS object or a string representation.
osm_path: str
path to the OpenStreetMap file
cutoff_distance_m: number
maximum allowed distance in meters
Returns
-------
None
See Also
--------
gtfspy.calc_transfers
compute_walk_paths_java
### Response:
def add_walk_distances_to_db_python(gtfs, osm_path, cutoff_distance_m=1000):
"""
Computes the walk paths between stops, and updates these to the gtfs database.
Parameters
----------
gtfs: gtfspy.GTFS or str
A GTFS object or a string representation.
osm_path: str
path to the OpenStreetMap file
cutoff_distance_m: number
maximum allowed distance in meters
Returns
-------
None
See Also
--------
gtfspy.calc_transfers
compute_walk_paths_java
"""
if isinstance(gtfs, str):
gtfs = GTFS(gtfs)
assert (isinstance(gtfs, GTFS))
print("Reading in walk network")
walk_network = create_walk_network_from_osm(osm_path)
print("Matching stops to the OSM network")
stop_I_to_nearest_osm_node, stop_I_to_nearest_osm_node_distance = match_stops_to_nodes(gtfs, walk_network)
transfers = gtfs.get_straight_line_transfer_distances()
from_I_to_to_stop_Is = {stop_I: set() for stop_I in stop_I_to_nearest_osm_node}
for transfer_tuple in transfers.itertuples():
from_I = transfer_tuple.from_stop_I
to_I = transfer_tuple.to_stop_I
from_I_to_to_stop_Is[from_I].add(to_I)
print("Computing walking distances")
for from_I, to_stop_Is in from_I_to_to_stop_Is.items():
from_node = stop_I_to_nearest_osm_node[from_I]
from_dist = stop_I_to_nearest_osm_node_distance[from_I]
shortest_paths = networkx.single_source_dijkstra_path_length(walk_network,
from_node,
cutoff=cutoff_distance_m - from_dist,
weight="distance")
for to_I in to_stop_Is:
to_distance = stop_I_to_nearest_osm_node_distance[to_I]
to_node = stop_I_to_nearest_osm_node[to_I]
osm_distance = shortest_paths.get(to_node, float('inf'))
total_distance = from_dist + osm_distance + to_distance
from_stop_I_transfers = transfers[transfers['from_stop_I'] == from_I]
straight_distance = from_stop_I_transfers[from_stop_I_transfers["to_stop_I"] == to_I]["d"].values[0]
assert (straight_distance < total_distance + 2) # allow for a maximum of 2 meters in calculations
if total_distance <= cutoff_distance_m:
gtfs.conn.execute("UPDATE stop_distances "
"SET d_walk = " + str(int(total_distance)) +
" WHERE from_stop_I=" + str(from_I) + " AND to_stop_I=" + str(to_I))
gtfs.conn.commit() |
def _get_regular_expression_of_symbols(self):
"""
Returns the regular expression to search all symbols.
:return: The regular expression to search all symbols.
:rtype: str
"""
regular_expression = None
for symbol in self.symbols:
formated_symbol = self._get_formated_symbol(symbol['symbol'])
if regular_expression is None:
regular_expression = '(' + formated_symbol + ')'
else:
regular_expression = (
regular_expression +
'|(' +
formated_symbol +
')'
)
return regular_expression | Returns the regular expression to search all symbols.
:return: The regular expression to search all symbols.
:rtype: str | Below is the instruction that describes the task:
### Input:
Returns the regular expression to search all symbols.
:return: The regular expression to search all symbols.
:rtype: str
### Response:
def _get_regular_expression_of_symbols(self):
"""
Returns the regular expression to search all symbols.
:return: The regular expression to search all symbols.
:rtype: str
"""
regular_expression = None
for symbol in self.symbols:
formated_symbol = self._get_formated_symbol(symbol['symbol'])
if regular_expression is None:
regular_expression = '(' + formated_symbol + ')'
else:
regular_expression = (
regular_expression +
'|(' +
formated_symbol +
')'
)
return regular_expression |
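For a sense of the output shape, the same assembly can be done standalone (the symbol list below is hypothetical and no escaping is applied, unlike the real `_get_formated_symbol` helper):
symbols = [{'symbol': 'USD'}, {'symbol': 'EUR'}, {'symbol': 'GBP'}]
regular_expression = '|'.join('(' + s['symbol'] + ')' for s in symbols)
print(regular_expression)  # (USD)|(EUR)|(GBP)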
def clear_file(self):
"""stub"""
if (self.get_file_metadata().is_read_only() or
self.get_file_metadata().is_required()):
raise NoAccess()
if 'assetId' in self.my_osid_object_form._my_map['fileId']:
rm = self.my_osid_object_form._get_provider_manager('REPOSITORY')
catalog_id_str = ''
if 'assignedBankIds' in self.my_osid_object_form._my_map:
catalog_id_str = self.my_osid_object_form._my_map['assignedBankIds'][0]
elif 'assignedRepositoryIds' in self.my_osid_object_form._my_map:
catalog_id_str = self.my_osid_object_form._my_map['assignedRepositoryIds'][0]
try:
try:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str),
self.my_osid_object_form._proxy)
except NullArgument:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str))
except AttributeError:
# for update forms
try:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str),
self.my_osid_object_form._proxy)
except NullArgument:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str))
aas.delete_asset(Id(self.my_osid_object_form._my_map['fileId']['assetId']))
self.my_osid_object_form._my_map['fileId'] = \
dict(self.get_file_metadata().get_default_object_values()[0]) | stub | Below is the instruction that describes the task:
### Input:
stub
### Response:
def clear_file(self):
"""stub"""
if (self.get_file_metadata().is_read_only() or
self.get_file_metadata().is_required()):
raise NoAccess()
if 'assetId' in self.my_osid_object_form._my_map['fileId']:
rm = self.my_osid_object_form._get_provider_manager('REPOSITORY')
catalog_id_str = ''
if 'assignedBankIds' in self.my_osid_object_form._my_map:
catalog_id_str = self.my_osid_object_form._my_map['assignedBankIds'][0]
elif 'assignedRepositoryIds' in self.my_osid_object_form._my_map:
catalog_id_str = self.my_osid_object_form._my_map['assignedRepositoryIds'][0]
try:
try:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str),
self.my_osid_object_form._proxy)
except NullArgument:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str))
except AttributeError:
# for update forms
try:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str),
self.my_osid_object_form._proxy)
except NullArgument:
aas = rm.get_asset_admin_session_for_repository(
Id(catalog_id_str))
aas.delete_asset(Id(self.my_osid_object_form._my_map['fileId']['assetId']))
self.my_osid_object_form._my_map['fileId'] = \
dict(self.get_file_metadata().get_default_object_values()[0]) |
def is_unitary(
matrix: np.ndarray,
*,
rtol: float = 1e-5,
atol: float = 1e-8) -> bool:
"""Determines if a matrix is approximately unitary.
A matrix is unitary if it's square and its adjoint is its inverse.
Args:
matrix: The matrix to check.
rtol: The per-matrix-entry relative tolerance on equality.
atol: The per-matrix-entry absolute tolerance on equality.
Returns:
Whether the matrix is unitary within the given tolerance.
"""
return (matrix.shape[0] == matrix.shape[1] and
np.allclose(matrix.dot(np.conj(matrix.T)), np.eye(matrix.shape[0]),
rtol=rtol,
atol=atol)) | Determines if a matrix is approximately unitary.
A matrix is unitary if it's square and its adjoint is its inverse.
Args:
matrix: The matrix to check.
rtol: The per-matrix-entry relative tolerance on equality.
atol: The per-matrix-entry absolute tolerance on equality.
Returns:
Whether the matrix is unitary within the given tolerance. | Below is the instruction that describes the task:
### Input:
Determines if a matrix is approximately unitary.
A matrix is unitary if it's square and its adjoint is its inverse.
Args:
matrix: The matrix to check.
rtol: The per-matrix-entry relative tolerance on equality.
atol: The per-matrix-entry absolute tolerance on equality.
Returns:
Whether the matrix is unitary within the given tolerance.
### Response:
def is_unitary(
matrix: np.ndarray,
*,
rtol: float = 1e-5,
atol: float = 1e-8) -> bool:
"""Determines if a matrix is approximately unitary.
A matrix is unitary if it's square and its adjoint is its inverse.
Args:
matrix: The matrix to check.
rtol: The per-matrix-entry relative tolerance on equality.
atol: The per-matrix-entry absolute tolerance on equality.
Returns:
Whether the matrix is unitary within the given tolerance.
"""
return (matrix.shape[0] == matrix.shape[1] and
np.allclose(matrix.dot(np.conj(matrix.T)), np.eye(matrix.shape[0]),
rtol=rtol,
atol=atol)) |
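A minimal usage sketch, assuming the `is_unitary` function above is in scope; the example matrices are illustrative:
import numpy as np
hadamard = np.array([[1.0, 1.0], [1.0, -1.0]]) / np.sqrt(2)  # a 2x2 unitary matrix
print(is_unitary(hadamard))          # True
print(is_unitary(np.ones((2, 2))))   # False: rows are not orthonormal
print(is_unitary(np.ones((2, 3))))   # False: not square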
def _parse_vars(self, tokens):
"""
Given an iterable of tokens, returns variables and their values as a
dictionary.
For example:
['dtap=prod', 'comment=some comment']
Returns:
{'dtap': 'prod', 'comment': 'some comment'}
"""
key_values = {}
for token in tokens:
if token.startswith('#'):
# End parsing if we encounter a comment, which lasts
# until the end of the line.
break
else:
k, v = token.split('=', 1)
key = k.strip()
key_values[key] = v.strip()
return key_values | Given an iterable of tokens, returns variables and their values as a
dictionary.
For example:
['dtap=prod', 'comment=some comment']
Returns:
{'dtap': 'prod', 'comment': 'some comment'} | Below is the instruction that describes the task:
### Input:
Given an iterable of tokens, returns variables and their values as a
dictionary.
For example:
['dtap=prod', 'comment=some comment']
Returns:
{'dtap': 'prod', 'comment': 'some comment'}
### Response:
def _parse_vars(self, tokens):
"""
Given an iterable of tokens, returns variables and their values as a
dictionary.
For example:
['dtap=prod', 'comment=some comment']
Returns:
{'dtap': 'prod', 'comment': 'some comment'}
"""
key_values = {}
for token in tokens:
if token.startswith('#'):
# End parsing if we encounter a comment, which lasts
# until the end of the line.
break
else:
k, v = token.split('=', 1)
key = k.strip()
key_values[key] = v.strip()
return key_values |
def _transform_incoming(self, son, collection, skip=0):
"""Recursively replace all keys that need transforming."""
skip = 0 if skip < 0 else skip
if isinstance(son, dict):
for (key, value) in son.items():
if key.startswith('$'):
if isinstance(value, dict):
skip = 2
else:
pass # allow mongo to complain
if self.replace in key:
k = key if skip else self.transform_key(key)
son[k] = self._transform_incoming(
son.pop(key), collection, skip=skip - 1)
elif isinstance(value, dict): # recurse into sub-docs
son[key] = self._transform_incoming(value, collection,
skip=skip - 1)
elif isinstance(value, list):
son[key] = [
self._transform_incoming(k, collection, skip=skip - 1)
for k in value
]
return son
elif isinstance(son, list):
return [self._transform_incoming(item, collection, skip=skip - 1)
for item in son]
else:
return son | Recursively replace all keys that need transforming. | Below is the instruction that describes the task:
### Input:
Recursively replace all keys that need transforming.
### Response:
def _transform_incoming(self, son, collection, skip=0):
"""Recursively replace all keys that need transforming."""
skip = 0 if skip < 0 else skip
if isinstance(son, dict):
for (key, value) in son.items():
if key.startswith('$'):
if isinstance(value, dict):
skip = 2
else:
pass # allow mongo to complain
if self.replace in key:
k = key if skip else self.transform_key(key)
son[k] = self._transform_incoming(
son.pop(key), collection, skip=skip - 1)
elif isinstance(value, dict): # recurse into sub-docs
son[key] = self._transform_incoming(value, collection,
skip=skip - 1)
elif isinstance(value, list):
son[key] = [
self._transform_incoming(k, collection, skip=skip - 1)
for k in value
]
return son
elif isinstance(son, list):
return [self._transform_incoming(item, collection, skip=skip - 1)
for item in son]
else:
return son |
def set_limit_override(self, limit_name, value, override_ta=True):
"""
Set a new limit ``value`` for the specified limit, overriding
the default. If ``override_ta`` is True, also use this value
instead of any found by Trusted Advisor. This method simply
passes the data through to the
:py:meth:`~awslimitchecker.limit.AwsLimit.set_limit_override`
method of the underlying :py:class:`~.AwsLimit` instance.
:param limit_name: the name of the limit to override the value for
:type limit_name: str
:param value: the new value to set for the limit
:type value: int
:param override_ta: whether or not to also override Trusted
Advisor information
:type override_ta: bool
:raises: ValueError if limit_name is not known to this service
"""
try:
self.limits[limit_name].set_limit_override(
value,
override_ta=override_ta
)
logger.debug(
"Overriding %s limit %s; default=%d override=%d",
self.service_name,
limit_name,
value,
self.limits[limit_name].default_limit,
)
except KeyError:
raise ValueError("{s} service has no '{l}' limit".format(
s=self.service_name,
l=limit_name)) | Set a new limit ``value`` for the specified limit, overriding
the default. If ``override_ta`` is True, also use this value
instead of any found by Trusted Advisor. This method simply
passes the data through to the
:py:meth:`~awslimitchecker.limit.AwsLimit.set_limit_override`
method of the underlying :py:class:`~.AwsLimit` instance.
:param limit_name: the name of the limit to override the value for
:type limit_name: str
:param value: the new value to set for the limit
:type value: int
:param override_ta: whether or not to also override Trusted
Advisor information
:type override_ta: bool
:raises: ValueError if limit_name is not known to this service | Below is the instruction that describes the task:
### Input:
Set a new limit ``value`` for the specified limit, overriding
the default. If ``override_ta`` is True, also use this value
instead of any found by Trusted Advisor. This method simply
passes the data through to the
:py:meth:`~awslimitchecker.limit.AwsLimit.set_limit_override`
method of the underlying :py:class:`~.AwsLimit` instance.
:param limit_name: the name of the limit to override the value for
:type limit_name: str
:param value: the new value to set for the limit
:type value: int
:param override_ta: whether or not to also override Trusted
Advisor information
:type override_ta: bool
:raises: ValueError if limit_name is not known to this service
### Response:
def set_limit_override(self, limit_name, value, override_ta=True):
"""
Set a new limit ``value`` for the specified limit, overriding
the default. If ``override_ta`` is True, also use this value
instead of any found by Trusted Advisor. This method simply
passes the data through to the
:py:meth:`~awslimitchecker.limit.AwsLimit.set_limit_override`
method of the underlying :py:class:`~.AwsLimit` instance.
:param limit_name: the name of the limit to override the value for
:type limit_name: str
:param value: the new value to set for the limit
:type value: int
:param override_ta: whether or not to also override Trusted
Advisor information
:type override_ta: bool
:raises: ValueError if limit_name is not known to this service
"""
try:
self.limits[limit_name].set_limit_override(
value,
override_ta=override_ta
)
logger.debug(
"Overriding %s limit %s; default=%d override=%d",
self.service_name,
limit_name,
value,
self.limits[limit_name].default_limit,
)
except KeyError:
raise ValueError("{s} service has no '{l}' limit".format(
s=self.service_name,
l=limit_name)) |
def debug(frame=None):
"""Set breakpoint at current location, or a specified frame"""
# ???
if frame is None:
frame = _frame().f_back
dbg = RemoteCeleryTrepan()
dbg.say(BANNER.format(self=dbg))
# dbg.say(SESSION_STARTED.format(self=dbg))
trepan.api.debug(dbg_opts=dbg.dbg_opts) | Set breakpoint at current location, or a specified frame | Below is the instruction that describes the task:
### Input:
Set breakpoint at current location, or a specified frame
### Response:
def debug(frame=None):
"""Set breakpoint at current location, or a specified frame"""
# ???
if frame is None:
frame = _frame().f_back
dbg = RemoteCeleryTrepan()
dbg.say(BANNER.format(self=dbg))
# dbg.say(SESSION_STARTED.format(self=dbg))
trepan.api.debug(dbg_opts=dbg.dbg_opts) |
def removeChild(self, child):
"""
Remove a child from this element. The child element is
returned, and its parentNode element is reset.
"""
super(Table, self).removeChild(child)
if child.tagName == ligolw.Column.tagName:
self._update_column_info()
return child | Remove a child from this element. The child element is
returned, and its parentNode element is reset. | Below is the instruction that describes the task:
### Input:
Remove a child from this element. The child element is
returned, and its parentNode element is reset.
### Response:
def removeChild(self, child):
"""
Remove a child from this element. The child element is
returned, and its parentNode element is reset.
"""
super(Table, self).removeChild(child)
if child.tagName == ligolw.Column.tagName:
self._update_column_info()
return child |
def set_or_edit_conditional_breakpoint(self):
"""Set/Edit conditional breakpoint"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
self.switch_to_plugin()
editorstack.set_or_edit_conditional_breakpoint() | Set/Edit conditional breakpoint | Below is the instruction that describes the task:
### Input:
Set/Edit conditional breakpoint
### Response:
def set_or_edit_conditional_breakpoint(self):
"""Set/Edit conditional breakpoint"""
editorstack = self.get_current_editorstack()
if editorstack is not None:
self.switch_to_plugin()
editorstack.set_or_edit_conditional_breakpoint() |
def OnShiftVideo(self, event):
"""Shifts through the video"""
length = self.player.get_length()
time = self.player.get_time()
if event.GetWheelRotation() < 0:
target_time = max(0, time-length/100.0)
elif event.GetWheelRotation() > 0:
target_time = min(length, time+length/100.0)
self.player.set_time(int(target_time)) | Shifts through the video | Below is the instruction that describes the task:
### Input:
Shifts through the video
### Response:
def OnShiftVideo(self, event):
"""Shifts through the video"""
length = self.player.get_length()
time = self.player.get_time()
if event.GetWheelRotation() < 0:
target_time = max(0, time-length/100.0)
elif event.GetWheelRotation() > 0:
target_time = min(length, time+length/100.0)
self.player.set_time(int(target_time)) |
def set_basic_params(self, no_expire=None, expire_scan_interval=None, report_freed=None):
"""
:param bool no_expire: Disable auto sweep of expired items.
Since uWSGI 1.2, cache item expiration is managed by a thread in the master process,
to reduce the risk of deadlock. This thread can be disabled
(making item expiry a no-op) with this option.
:param int expire_scan_interval: Set the frequency (in seconds) of cache sweeper scans. Default: 3.
:param bool report_freed: Constantly report the cache item freed by the sweeper.
.. warning:: Use only for debug.
"""
self._set('cache-no-expire', no_expire, cast=bool)
self._set('cache-report-freed-items', report_freed, cast=bool)
self._set('cache-expire-freq', expire_scan_interval)
return self._section | :param bool no_expire: Disable auto sweep of expired items.
Since uWSGI 1.2, cache item expiration is managed by a thread in the master process,
to reduce the risk of deadlock. This thread can be disabled
(making item expiry a no-op) with this option.
:param int expire_scan_interval: Set the frequency (in seconds) of cache sweeper scans. Default: 3.
:param bool report_freed: Constantly report the cache item freed by the sweeper.
.. warning:: Use only for debug. | Below is the instruction that describes the task:
### Input:
:param bool no_expire: Disable auto sweep of expired items.
Since uWSGI 1.2, cache item expiration is managed by a thread in the master process,
to reduce the risk of deadlock. This thread can be disabled
(making item expiry a no-op) with this option.
:param int expire_scan_interval: Set the frequency (in seconds) of cache sweeper scans. Default: 3.
:param bool report_freed: Constantly report the cache item freed by the sweeper.
.. warning:: Use only for debug.
### Response:
def set_basic_params(self, no_expire=None, expire_scan_interval=None, report_freed=None):
"""
:param bool no_expire: Disable auto sweep of expired items.
Since uWSGI 1.2, cache item expiration is managed by a thread in the master process,
to reduce the risk of deadlock. This thread can be disabled
(making item expiry a no-op) with this option.
:param int expire_scan_interval: Set the frequency (in seconds) of cache sweeper scans. Default: 3.
:param bool report_freed: Constantly report the cache item freed by the sweeper.
.. warning:: Use only for debug.
"""
self._set('cache-no-expire', no_expire, cast=bool)
self._set('cache-report-freed-items', report_freed, cast=bool)
self._set('cache-expire-freq', expire_scan_interval)
return self._section |
def get_short_uid(self, uid):
"""Get the shortend UID for the given UID.
:param uid: the full UID to shorten
:type uid: str
:returns: the shortened uid or the empty string
:rtype: str
"""
if uid:
short_uids = self.get_short_uid_dict()
for length_of_uid in range(len(uid), 0, -1):
if short_uids.get(uid[:length_of_uid]) is not None:
return uid[:length_of_uid]
return "" | Get the shortend UID for the given UID.
:param uid: the full UID to shorten
:type uid: str
:returns: the shortened uid or the empty string
:rtype: str | Below is the instruction that describes the task:
### Input:
Get the shortened UID for the given UID.
:param uid: the full UID to shorten
:type uid: str
:returns: the shortened uid or the empty string
:rtype: str
### Response:
def get_short_uid(self, uid):
"""Get the shortend UID for the given UID.
:param uid: the full UID to shorten
:type uid: str
:returns: the shortened uid or the empty string
:rtype: str
"""
if uid:
short_uids = self.get_short_uid_dict()
for length_of_uid in range(len(uid), 0, -1):
if short_uids.get(uid[:length_of_uid]) is not None:
return uid[:length_of_uid]
return "" |
def append(self, point):
"""
appends a copy of the given point to this sequence
"""
point = Point(point)
self._elements.append(point) | appends a copy of the given point to this sequence | Below is the instruction that describes the task:
### Input:
appends a copy of the given point to this sequence
### Response:
def append(self, point):
"""
appends a copy of the given point to this sequence
"""
point = Point(point)
self._elements.append(point) |
def get_subgraph_by_node_search(graph: BELGraph, query: Strings) -> BELGraph:
"""Get a sub-graph induced over all nodes matching the query string.
:param graph: A BEL Graph
:param query: A query string or iterable of query strings for node names
Thinly wraps :func:`search_node_names` and :func:`get_subgraph_by_induction`.
"""
nodes = search_node_names(graph, query)
return get_subgraph_by_induction(graph, nodes) | Get a sub-graph induced over all nodes matching the query string.
:param graph: A BEL Graph
:param query: A query string or iterable of query strings for node names
Thinly wraps :func:`search_node_names` and :func:`get_subgraph_by_induction`. | Below is the instruction that describes the task:
### Input:
Get a sub-graph induced over all nodes matching the query string.
:param graph: A BEL Graph
:param query: A query string or iterable of query strings for node names
Thinly wraps :func:`search_node_names` and :func:`get_subgraph_by_induction`.
### Response:
def get_subgraph_by_node_search(graph: BELGraph, query: Strings) -> BELGraph:
"""Get a sub-graph induced over all nodes matching the query string.
:param graph: A BEL Graph
:param query: A query string or iterable of query strings for node names
Thinly wraps :func:`search_node_names` and :func:`get_subgraph_by_induction`.
"""
nodes = search_node_names(graph, query)
return get_subgraph_by_induction(graph, nodes) |
def gravity(latitude, H):
r'''Calculates local acceleration due to gravity `g` according to [1]_.
Uses latitude and height to calculate `g`.
.. math::
g = 9.780356(1 + 0.0052885\sin^2\phi - 0.0000059\sin^2 2\phi)
- 3.086\times 10^{-6} H
Parameters
----------
latitude : float
Degrees, [degrees]
H : float
Height above earth's surface [m]
Returns
-------
g : float
Acceleration due to gravity, [m/s^2]
Notes
-----
Better models, such as EGM2008 exist.
Examples
--------
>>> gravity(55, 1E4)
9.784151976863571
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
'''
lat = latitude*pi/180
g = 9.780356*(1+0.0052885*sin(lat)**2 -0.0000059*sin(2*lat)**2)-3.086E-6*H
return g | r'''Calculates local acceleration due to gravity `g` according to [1]_.
Uses latitude and height to calculate `g`.
.. math::
g = 9.780356(1 + 0.0052885\sin^2\phi - 0.0000059\sin^2 2\phi)
- 3.086\times 10^{-6} H
Parameters
----------
latitude : float
Degrees, [degrees]
H : float
Height above earth's surface [m]
Returns
-------
g : float
Acceleration due to gravity, [m/s^2]
Notes
-----
Better models, such as EGM2008 exist.
Examples
--------
>>> gravity(55, 1E4)
9.784151976863571
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014. | Below is the instruction that describes the task:
### Input:
r'''Calculates local acceleration due to gravity `g` according to [1]_.
Uses latitude and height to calculate `g`.
.. math::
g = 9.780356(1 + 0.0052885\sin^2\phi - 0.0000059\sin^2 2\phi)
- 3.086\times 10^{-6} H
Parameters
----------
latitude : float
Degrees, [degrees]
H : float
Height above earth's surface [m]
Returns
-------
g : float
Acceleration due to gravity, [m/s^2]
Notes
-----
Better models, such as EGM2008 exist.
Examples
--------
>>> gravity(55, 1E4)
9.784151976863571
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
### Response:
def gravity(latitude, H):
r'''Calculates local acceleration due to gravity `g` according to [1]_.
Uses latitude and height to calculate `g`.
.. math::
g = 9.780356(1 + 0.0052885\sin^2\phi - 0.0000059\sin^2 2\phi)
- 3.086\times 10^{-6} H
Parameters
----------
latitude : float
Degrees, [degrees]
H : float
Height above earth's surface [m]
Returns
-------
g : float
Acceleration due to gravity, [m/s^2]
Notes
-----
Better models, such as EGM2008 exist.
Examples
--------
>>> gravity(55, 1E4)
9.784151976863571
References
----------
.. [1] Haynes, W.M., Thomas J. Bruno, and David R. Lide. CRC Handbook of
Chemistry and Physics. [Boca Raton, FL]: CRC press, 2014.
'''
lat = latitude*pi/180
g = 9.780356*(1+0.0052885*sin(lat)**2 -0.0000059*sin(2*lat)**2)-3.086E-6*H
return g |
def getPreprocessorDefinitions(self, engineRoot, delimiter=' '):
"""
Returns the list of preprocessor definitions for this library, joined using the specified delimiter
"""
return delimiter.join(self.resolveRoot(self.definitions, engineRoot)) | Returns the list of preprocessor definitions for this library, joined using the specified delimiter | Below is the instruction that describes the task:
### Input:
Returns the list of preprocessor definitions for this library, joined using the specified delimiter
### Response:
def getPreprocessorDefinitions(self, engineRoot, delimiter=' '):
"""
Returns the list of preprocessor definitions for this library, joined using the specified delimiter
"""
return delimiter.join(self.resolveRoot(self.definitions, engineRoot)) |
def process_xml(xml_str):
"""Return processor with Statements extracted from a Sparser XML.
Parameters
----------
xml_str : str
The XML string obtained by reading content with Sparser, using the
'xml' output mode.
Returns
-------
sp : SparserXMLProcessor
A SparserXMLProcessor which has extracted Statements as its
statements attribute.
"""
try:
tree = ET.XML(xml_str, parser=UTB())
except ET.ParseError as e:
logger.error('Could not parse XML string')
logger.error(e)
return None
sp = _process_elementtree(tree)
return sp | Return processor with Statements extracted from a Sparser XML.
Parameters
----------
xml_str : str
The XML string obtained by reading content with Sparser, using the
'xml' output mode.
Returns
-------
sp : SparserXMLProcessor
A SparserXMLProcessor which has extracted Statements as its
statements attribute. | Below is the instruction that describes the task:
### Input:
Return processor with Statements extracted from a Sparser XML.
Parameters
----------
xml_str : str
The XML string obtained by reading content with Sparser, using the
'xml' output mode.
Returns
-------
sp : SparserXMLProcessor
A SparserXMLProcessor which has extracted Statements as its
statements attribute.
### Response:
def process_xml(xml_str):
"""Return processor with Statements extracted from a Sparser XML.
Parameters
----------
xml_str : str
The XML string obtained by reading content with Sparser, using the
'xml' output mode.
Returns
-------
sp : SparserXMLProcessor
A SparserXMLProcessor which has extracted Statements as its
statements attribute.
"""
try:
tree = ET.XML(xml_str, parser=UTB())
except ET.ParseError as e:
logger.error('Could not parse XML string')
logger.error(e)
return None
sp = _process_elementtree(tree)
return sp |
def get_owner_names_value(self, obj):
"""Extract owners' names."""
return [
self._get_user(user)
for user in get_users_with_permission(obj, get_full_perm('owner', obj))
] | Extract owners' names. | Below is the instruction that describes the task:
### Input:
Extract owners' names.
### Response:
def get_owner_names_value(self, obj):
"""Extract owners' names."""
return [
self._get_user(user)
for user in get_users_with_permission(obj, get_full_perm('owner', obj))
] |
def get_work_item_template(self, project, type, fields=None, as_of=None, expand=None):
"""GetWorkItemTemplate.
[Preview API] Returns a single work item from a template.
:param str project: Project ID or project name
:param str type: The work item type name
:param str fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:rtype: :class:`<WorkItem> <azure.devops.v5_1.work-item-tracking.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
query_parameters = {}
if fields is not None:
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if as_of is not None:
query_parameters['asOf'] = self._serialize.query('as_of', as_of, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='62d3d110-0047-428c-ad3c-4fe872c91c74',
version='5.1-preview.3',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItem', response) | GetWorkItemTemplate.
[Preview API] Returns a single work item from a template.
:param str project: Project ID or project name
:param str type: The work item type name
:param str fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:rtype: :class:`<WorkItem> <azure.devops.v5_1.work-item-tracking.models.WorkItem>` | Below is the instruction that describes the task:
### Input:
GetWorkItemTemplate.
[Preview API] Returns a single work item from a template.
:param str project: Project ID or project name
:param str type: The work item type name
:param str fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:rtype: :class:`<WorkItem> <azure.devops.v5_1.work-item-tracking.models.WorkItem>`
### Response:
def get_work_item_template(self, project, type, fields=None, as_of=None, expand=None):
"""GetWorkItemTemplate.
[Preview API] Returns a single work item from a template.
:param str project: Project ID or project name
:param str type: The work item type name
:param str fields: Comma-separated list of requested fields
:param datetime as_of: AsOf UTC date time string
:param str expand: The expand parameters for work item attributes. Possible options are { None, Relations, Fields, Links, All }.
:rtype: :class:`<WorkItem> <azure.devops.v5_1.work-item-tracking.models.WorkItem>`
"""
route_values = {}
if project is not None:
route_values['project'] = self._serialize.url('project', project, 'str')
if type is not None:
route_values['type'] = self._serialize.url('type', type, 'str')
query_parameters = {}
if fields is not None:
query_parameters['fields'] = self._serialize.query('fields', fields, 'str')
if as_of is not None:
query_parameters['asOf'] = self._serialize.query('as_of', as_of, 'iso-8601')
if expand is not None:
query_parameters['$expand'] = self._serialize.query('expand', expand, 'str')
response = self._send(http_method='GET',
location_id='62d3d110-0047-428c-ad3c-4fe872c91c74',
version='5.1-preview.3',
route_values=route_values,
query_parameters=query_parameters)
return self._deserialize('WorkItem', response) |
def _find_single(self, match_class, **keywds):
"""implementation details"""
self._logger.debug('find single query execution - started')
start_time = timeit.default_timer()
norm_keywds = self.__normalize_args(**keywds)
decl_matcher = self.__create_matcher(match_class, **norm_keywds)
dtype = self.__findout_decl_type(match_class, **norm_keywds)
recursive_ = self.__findout_recursive(**norm_keywds)
decls = self.__findout_range(norm_keywds['name'], dtype, recursive_)
found = matcher.get_single(decl_matcher, decls, False)
self._logger.debug(
'find single query execution - done( %f seconds )',
(timeit.default_timer() - start_time))
return found | implementation details | Below is the instruction that describes the task:
### Input:
implementation details
### Response:
def _find_single(self, match_class, **keywds):
"""implementation details"""
self._logger.debug('find single query execution - started')
start_time = timeit.default_timer()
norm_keywds = self.__normalize_args(**keywds)
decl_matcher = self.__create_matcher(match_class, **norm_keywds)
dtype = self.__findout_decl_type(match_class, **norm_keywds)
recursive_ = self.__findout_recursive(**norm_keywds)
decls = self.__findout_range(norm_keywds['name'], dtype, recursive_)
found = matcher.get_single(decl_matcher, decls, False)
self._logger.debug(
'find single query execution - done( %f seconds )',
(timeit.default_timer() - start_time))
return found |
def spans(self, layer):
"""Retrieve (start, end) tuples denoting the spans of given layer elements.
Returns
-------
list of (int, int)
List of (start, end) tuples.
"""
spans = []
for data in self[layer]:
spans.append((data[START], data[END]))
return spans | Retrieve (start, end) tuples denoting the spans of given layer elements.
Returns
-------
list of (int, int)
List of (start, end) tuples. | Below is the instruction that describes the task:
### Input:
Retrieve (start, end) tuples denoting the spans of given layer elements.
Returns
-------
list of (int, int)
List of (start, end) tuples.
### Response:
def spans(self, layer):
"""Retrieve (start, end) tuples denoting the spans of given layer elements.
Returns
-------
list of (int, int)
List of (start, end) tuples.
"""
spans = []
for data in self[layer]:
spans.append((data[START], data[END]))
return spans |
def _element_to_dict(data, position, obj_end, opts):
"""Decode a single key, value pair."""
element_type = data[position:position + 1]
position += 1
element_name, position = _get_c_string(data, position, opts)
try:
value, position = _ELEMENT_GETTER[element_type](data, position,
obj_end, opts,
element_name)
except KeyError:
_raise_unknown_type(element_type, element_name)
if opts.type_registry._decoder_map:
custom_decoder = opts.type_registry._decoder_map.get(type(value))
if custom_decoder is not None:
value = custom_decoder(value)
return element_name, value, position | Decode a single key, value pair. | Below is the instruction that describes the task:
### Input:
Decode a single key, value pair.
### Response:
def _element_to_dict(data, position, obj_end, opts):
"""Decode a single key, value pair."""
element_type = data[position:position + 1]
position += 1
element_name, position = _get_c_string(data, position, opts)
try:
value, position = _ELEMENT_GETTER[element_type](data, position,
obj_end, opts,
element_name)
except KeyError:
_raise_unknown_type(element_type, element_name)
if opts.type_registry._decoder_map:
custom_decoder = opts.type_registry._decoder_map.get(type(value))
if custom_decoder is not None:
value = custom_decoder(value)
return element_name, value, position |
def _filter_tables_by_time(self, tables, start_time, end_time):
"""Filter a table dictionary and return table names based on the range
of start and end times in unix seconds.
Parameters
----------
tables : dict
Dates referenced by table names
start_time : int
The unix time after which records will be fetched
end_time : int
The unix time up to which records will be fetched
Returns
-------
list
Table names that are inside the time range
"""
return [table_name for (table_name, unix_seconds) in tables.items()
if self._in_range(start_time, end_time, unix_seconds)] | Filter a table dictionary and return table names based on the range
of start and end times in unix seconds.
Parameters
----------
tables : dict
Dates referenced by table names
start_time : int
The unix time after which records will be fetched
end_time : int
The unix time up to which records will be fetched
Returns
-------
list
Table names that are inside the time range | Below is the instruction that describes the task:
### Input:
Filter a table dictionary and return table names based on the range
of start and end times in unix seconds.
Parameters
----------
tables : dict
Dates referenced by table names
start_time : int
The unix time after which records will be fetched
end_time : int
The unix time up to which records will be fetched
Returns
-------
list
Table names that are inside the time range
### Response:
def _filter_tables_by_time(self, tables, start_time, end_time):
"""Filter a table dictionary and return table names based on the range
of start and end times in unix seconds.
Parameters
----------
tables : dict
Dates referenced by table names
start_time : int
The unix time after which records will be fetched
end_time : int
The unix time up to which records will be fetched
Returns
-------
list
Table names that are inside the time range
"""
return [table_name for (table_name, unix_seconds) in tables.items()
if self._in_range(start_time, end_time, unix_seconds)] |
def dump(self, force=False):
"""
Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value
"""
if force:
self._set_contents(force=force)
return Asn1Value.dump(self) | Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value | Below is the instruction that describes the task:
### Input:
Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value
### Response:
def dump(self, force=False):
"""
Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value
"""
if force:
self._set_contents(force=force)
return Asn1Value.dump(self) |
def generate(self):
"""
Generates and returns a numeric captcha image in base64 format.
Saves the correct answer in `session['captcha_answer']`
Use later as:
src = captcha.generate()
<img src="{{src}}">
"""
answer = self.rand.randrange(self.max)
answer = str(answer).zfill(self.digits)
image_data = self.image_generator.generate(answer)
base64_captcha = base64.b64encode(image_data.getvalue()).decode("ascii")
logging.debug('Generated captcha with answer: ' + answer)
session['captcha_answer'] = answer
return base64_captcha | Generates and returns a numeric captcha image in base64 format.
Saves the correct answer in `session['captcha_answer']`
Use later as:
src = captcha.generate()
<img src="{{src}}"> | Below is the the instruction that describes the task:
### Input:
Generates and returns a numeric captcha image in base64 format.
Saves the correct answer in `session['captcha_answer']`
Use later as:
src = captcha.generate()
<img src="{{src}}">
### Response:
def generate(self):
"""
Generates and returns a numeric captcha image in base64 format.
Saves the correct answer in `session['captcha_answer']`
Use later as:
src = captcha.generate()
<img src="{{src}}">
"""
answer = self.rand.randrange(self.max)
answer = str(answer).zfill(self.digits)
image_data = self.image_generator.generate(answer)
base64_captcha = base64.b64encode(image_data.getvalue()).decode("ascii")
logging.debug('Generated captcha with answer: ' + answer)
session['captcha_answer'] = answer
return base64_captcha |
def md_to_text(content):
""" Converts markdown content to text """
text = None
html = markdown.markdown(content)
if html:
text = html_to_text(html)
return text | Converts markdown content to text | Below is the instruction that describes the task:
### Input:
Converts markdown content to text
### Response:
def md_to_text(content):
""" Converts markdown content to text """
text = None
html = markdown.markdown(content)
if html:
text = html_to_text(html)
return text |
def sequence(arcs):
"""sequence: make a list of cities to visit, from set of arcs"""
succ = {}
for (i,j) in arcs:
succ[i] = j
curr = 1 # first node being visited
sol = [curr]
for i in range(len(arcs)-2):
curr = succ[curr]
sol.append(curr)
return sol | sequence: make a list of cities to visit, from set of arcs | Below is the instruction that describes the task:
### Input:
sequence: make a list of cities to visit, from set of arcs
### Response:
def sequence(arcs):
"""sequence: make a list of cities to visit, from set of arcs"""
succ = {}
for (i,j) in arcs:
succ[i] = j
curr = 1 # first node being visited
sol = [curr]
for i in range(len(arcs)-2):
curr = succ[curr]
sol.append(curr)
return sol |
def mk_request_non(self, method, path):
"""
Create a request.
:param method: the CoAP method
:param path: the path of the request
:return: the request
"""
request = Request()
request.destination = self.server
request.code = method.number
request.uri_path = path
request.type = defines.Types["NON"]
return request | Create a request.
:param method: the CoAP method
:param path: the path of the request
:return: the request | Below is the instruction that describes the task:
### Input:
Create a request.
:param method: the CoAP method
:param path: the path of the request
:return: the request
### Response:
def mk_request_non(self, method, path):
"""
Create a request.
:param method: the CoAP method
:param path: the path of the request
:return: the request
"""
request = Request()
request.destination = self.server
request.code = method.number
request.uri_path = path
request.type = defines.Types["NON"]
return request |
def use_storage_service(self, service_name, custom_path):
"""
Sets the current storage service to service_name and runs the connect method on the service.
:param str service_name: Name of the storage service
:param str custom_path: Custom path where to download tracks for local storage (optional, and must already exist, use absolute paths only)
"""
try:
self.current_storage = self.storage_services[service_name]
except KeyError:
if service_name == 'google drive':
self.storage_services['google drive'] = GoogleDrive()
self.current_storage = self.storage_services['google drive']
self.current_storage.connect()
elif service_name == 'dropbox':
log.error('Dropbox is not supported yet.')
elif service_name == 'local':
self.storage_services['local'] = LocalStorage(custom_path=custom_path)
self.current_storage = self.storage_services['local']
self.current_storage.connect()
else:
log.error('Storage service name is not recognized.') | Sets the current storage service to service_name and runs the connect method on the service.
:param str service_name: Name of the storage service
:param str custom_path: Custom path where to download tracks for local storage (optional, and must already exist, use absolute paths only) | Below is the instruction that describes the task:
### Input:
Sets the current storage service to service_name and runs the connect method on the service.
:param str service_name: Name of the storage service
:param str custom_path: Custom path where to download tracks for local storage (optional, and must already exist, use absolute paths only)
### Response:
def use_storage_service(self, service_name, custom_path):
"""
Sets the current storage service to service_name and runs the connect method on the service.
:param str service_name: Name of the storage service
:param str custom_path: Custom path where to download tracks for local storage (optional, and must already exist, use absolute paths only)
"""
try:
self.current_storage = self.storage_services[service_name]
except KeyError:
if service_name == 'google drive':
self.storage_services['google drive'] = GoogleDrive()
self.current_storage = self.storage_services['google drive']
self.current_storage.connect()
elif service_name == 'dropbox':
log.error('Dropbox is not supported yet.')
elif service_name == 'local':
self.storage_services['local'] = LocalStorage(custom_path=custom_path)
self.current_storage = self.storage_services['local']
self.current_storage.connect()
else:
log.error('Storage service name is not recognized.') |
def check_cgroup_availability(wait=1):
"""
Basic utility to check the availability and permissions of cgroups.
This will log some warnings for the user if necessary.
On some systems, daemons such as cgrulesengd might interfere with the cgroups
of a process soon after it was started. Thus this function starts a process,
waits a configurable amount of time, and checks whether the cgroups have been changed.
@param wait: a non-negative int that is interpreted as seconds to wait during the check
@raise SystemExit: if cgroups are not usable
"""
logging.basicConfig(format="%(levelname)s: %(message)s")
runexecutor = RunExecutor()
my_cgroups = runexecutor.cgroups
if not (CPUACCT in my_cgroups and
CPUSET in my_cgroups and
# FREEZER in my_cgroups and # For now, we do not require freezer
MEMORY in my_cgroups):
sys.exit(1)
with tempfile.NamedTemporaryFile(mode='rt') as tmp:
runexecutor.execute_run(['sh', '-c', 'sleep {0}; cat /proc/self/cgroup'.format(wait)], tmp.name,
memlimit=1024*1024, # set memlimit to force check for swapaccount
# set cores and memory_nodes to force usage of CPUSET
cores=util.parse_int_list(my_cgroups.get_value(CPUSET, 'cpus')),
memory_nodes=my_cgroups.read_allowed_memory_banks())
lines = []
for line in tmp:
line = line.strip()
if line and not line == "sh -c 'sleep {0}; cat /proc/self/cgroup'".format(wait) \
and not all(c == '-' for c in line):
lines.append(line)
task_cgroups = find_my_cgroups(lines)
fail = False
for subsystem in CPUACCT, CPUSET, MEMORY, FREEZER:
if subsystem in my_cgroups:
if not task_cgroups[subsystem].startswith(os.path.join(my_cgroups[subsystem], 'benchmark_')):
logging.warning('Task was in cgroup %s for subsystem %s, '
'which is not the expected sub-cgroup of %s. '
'Maybe some other program is interfering with cgroup management?',
task_cgroups[subsystem], subsystem, my_cgroups[subsystem])
fail = True
if fail:
sys.exit(1) | Basic utility to check the availability and permissions of cgroups.
This will log some warnings for the user if necessary.
On some systems, daemons such as cgrulesengd might interfere with the cgroups
of a process soon after it was started. Thus this function starts a process,
waits a configurable amount of time, and checks whether the cgroups have been changed.
@param wait: a non-negative int that is interpreted as seconds to wait during the check
@raise SystemExit: if cgroups are not usable | Below is the instruction that describes the task:
### Input:
Basic utility to check the availability and permissions of cgroups.
This will log some warnings for the user if necessary.
On some systems, daemons such as cgrulesengd might interfere with the cgroups
of a process soon after it was started. Thus this function starts a process,
waits a configurable amount of time, and checks whether the cgroups have been changed.
@param wait: a non-negative int that is interpreted as seconds to wait during the check
@raise SystemExit: if cgroups are not usable
### Response:
def check_cgroup_availability(wait=1):
"""
Basic utility to check the availability and permissions of cgroups.
This will log some warnings for the user if necessary.
On some systems, daemons such as cgrulesengd might interfere with the cgroups
of a process soon after it was started. Thus this function starts a process,
waits a configurable amount of time, and checks whether the cgroups have been changed.
@param wait: a non-negative int that is interpreted as seconds to wait during the check
@raise SystemExit: if cgroups are not usable
"""
logging.basicConfig(format="%(levelname)s: %(message)s")
runexecutor = RunExecutor()
my_cgroups = runexecutor.cgroups
if not (CPUACCT in my_cgroups and
CPUSET in my_cgroups and
# FREEZER in my_cgroups and # For now, we do not require freezer
MEMORY in my_cgroups):
sys.exit(1)
with tempfile.NamedTemporaryFile(mode='rt') as tmp:
runexecutor.execute_run(['sh', '-c', 'sleep {0}; cat /proc/self/cgroup'.format(wait)], tmp.name,
memlimit=1024*1024, # set memlimit to force check for swapaccount
# set cores and memory_nodes to force usage of CPUSET
cores=util.parse_int_list(my_cgroups.get_value(CPUSET, 'cpus')),
memory_nodes=my_cgroups.read_allowed_memory_banks())
lines = []
for line in tmp:
line = line.strip()
if line and not line == "sh -c 'sleep {0}; cat /proc/self/cgroup'".format(wait) \
and not all(c == '-' for c in line):
lines.append(line)
task_cgroups = find_my_cgroups(lines)
fail = False
for subsystem in CPUACCT, CPUSET, MEMORY, FREEZER:
if subsystem in my_cgroups:
if not task_cgroups[subsystem].startswith(os.path.join(my_cgroups[subsystem], 'benchmark_')):
logging.warning('Task was in cgroup %s for subsystem %s, '
'which is not the expected sub-cgroup of %s. '
'Maybe some other program is interfering with cgroup management?',
task_cgroups[subsystem], subsystem, my_cgroups[subsystem])
fail = True
if fail:
sys.exit(1) |
def participation_coef(W, ci, degree='undirected'):
'''
Participation coefficient is a measure of diversity of intermodular
connections of individual nodes.
Parameters
----------
W : NxN np.ndarray
binary/weighted directed/undirected connection matrix
ci : Nx1 np.ndarray
community affiliation vector
degree : str
Flag to describe nature of graph 'undirected': For undirected graphs
'in': Uses the in-degree
'out': Uses the out-degree
Returns
-------
P : Nx1 np.ndarray
participation coefficient
'''
if degree == 'in':
W = W.T
_, ci = np.unique(ci, return_inverse=True)
ci += 1
n = len(W) # number of vertices
Ko = np.sum(W, axis=1) # (out) degree
Gc = np.dot((W != 0), np.diag(ci)) # neighbor community affiliation
Kc2 = np.zeros((n,)) # community-specific neighbors
for i in range(1, int(np.max(ci)) + 1):
Kc2 += np.square(np.sum(W * (Gc == i), axis=1))
P = np.ones((n,)) - Kc2 / np.square(Ko)
# P = 0 for nodes with no (out) neighbors
P[np.where(np.logical_not(Ko))] = 0
return P | Participation coefficient is a measure of diversity of intermodular
connections of individual nodes.
Parameters
----------
W : NxN np.ndarray
binary/weighted directed/undirected connection matrix
ci : Nx1 np.ndarray
community affiliation vector
degree : str
Flag to describe nature of graph 'undirected': For undirected graphs
'in': Uses the in-degree
'out': Uses the out-degree
Returns
-------
P : Nx1 np.ndarray
participation coefficient | Below is the instruction that describes the task:
### Input:
Participation coefficient is a measure of diversity of intermodular
connections of individual nodes.
Parameters
----------
W : NxN np.ndarray
binary/weighted directed/undirected connection matrix
ci : Nx1 np.ndarray
community affiliation vector
degree : str
Flag to describe nature of graph 'undirected': For undirected graphs
'in': Uses the in-degree
'out': Uses the out-degree
Returns
-------
P : Nx1 np.ndarray
participation coefficient
### Response:
def participation_coef(W, ci, degree='undirected'):
'''
Participation coefficient is a measure of diversity of intermodular
connections of individual nodes.
Parameters
----------
W : NxN np.ndarray
binary/weighted directed/undirected connection matrix
ci : Nx1 np.ndarray
community affiliation vector
degree : str
Flag to describe nature of graph 'undirected': For undirected graphs
'in': Uses the in-degree
'out': Uses the out-degree
Returns
-------
P : Nx1 np.ndarray
participation coefficient
'''
if degree == 'in':
W = W.T
_, ci = np.unique(ci, return_inverse=True)
ci += 1
n = len(W) # number of vertices
Ko = np.sum(W, axis=1) # (out) degree
Gc = np.dot((W != 0), np.diag(ci)) # neighbor community affiliation
Kc2 = np.zeros((n,)) # community-specific neighbors
for i in range(1, int(np.max(ci)) + 1):
Kc2 += np.square(np.sum(W * (Gc == i), axis=1))
P = np.ones((n,)) - Kc2 / np.square(Ko)
# P = 0 for nodes with no (out) neighbors
P[np.where(np.logical_not(Ko))] = 0
return P |
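A minimal usage sketch, assuming `participation_coef` above is in scope; the toy network and module labels are made up for illustration:
import numpy as np
W = np.array([[0., 1., 1., 0.],
              [1., 0., 0., 1.],
              [1., 0., 0., 1.],
              [0., 1., 1., 0.]])  # small undirected binary network
ci = np.array([1, 1, 2, 2])  # two modules of two nodes each
print(participation_coef(W, ci))  # [0.5 0.5 0.5 0.5]: each node splits its links evenly across the two modules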
def uptodate():
'''
Call the REST endpoint to see if the packages on the "server" are up to date.
'''
DETAILS = _load_state()
for p in DETAILS['packages']:
version_float = float(DETAILS['packages'][p])
version_float = version_float + 1.0
DETAILS['packages'][p] = six.text_type(version_float)
return DETAILS['packages'] | Call the REST endpoint to see if the packages on the "server" are up to date. | Below is the instruction that describes the task:
### Input:
Call the REST endpoint to see if the packages on the "server" are up to date.
### Response:
def uptodate():
'''
Call the REST endpoint to see if the packages on the "server" are up to date.
'''
DETAILS = _load_state()
for p in DETAILS['packages']:
version_float = float(DETAILS['packages'][p])
version_float = version_float + 1.0
DETAILS['packages'][p] = six.text_type(version_float)
return DETAILS['packages'] |
def get_credentials_from_env():
"""Get credentials from environment variables.
Preference of credentials is:
- No credentials if DATASTORE_EMULATOR_HOST is set.
- Google APIs Signed JWT credentials based on
DATASTORE_SERVICE_ACCOUNT and DATASTORE_PRIVATE_KEY_FILE
  environment variables
- Google Application Default
https://developers.google.com/identity/protocols/application-default-credentials
Returns:
credentials or None.
"""
if os.getenv(_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV):
logging.info('connecting without credentials because %s is set.',
_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV)
return None
if os.getenv(_DATASTORE_EMULATOR_HOST_ENV):
logging.info('connecting without credentials because %s is set.',
_DATASTORE_EMULATOR_HOST_ENV)
return None
if (os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV)
and os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV)):
with open(os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV), 'rb') as f:
key = f.read()
credentials = client.SignedJwtAssertionCredentials(
os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV), key, SCOPE)
logging.info('connecting using private key file.')
return credentials
try:
credentials = client.GoogleCredentials.get_application_default()
credentials = credentials.create_scoped(SCOPE)
logging.info('connecting using Google Application Default Credentials.')
return credentials
except client.ApplicationDefaultCredentialsError, e:
logging.error('Unable to find any credentials to use. '
'If you are running locally, make sure to set the '
'%s environment variable.', _DATASTORE_EMULATOR_HOST_ENV)
raise e | Get credentials from environment variables.
Preference of credentials is:
- No credentials if DATASTORE_EMULATOR_HOST is set.
- Google APIs Signed JWT credentials based on
DATASTORE_SERVICE_ACCOUNT and DATASTORE_PRIVATE_KEY_FILE
  environment variables
- Google Application Default
https://developers.google.com/identity/protocols/application-default-credentials
Returns:
credentials or None. | Below is the the instruction that describes the task:
### Input:
Get credentials from environment variables.
Preference of credentials is:
- No credentials if DATASTORE_EMULATOR_HOST is set.
- Google APIs Signed JWT credentials based on
DATASTORE_SERVICE_ACCOUNT and DATASTORE_PRIVATE_KEY_FILE
  environment variables
- Google Application Default
https://developers.google.com/identity/protocols/application-default-credentials
Returns:
credentials or None.
### Response:
def get_credentials_from_env():
"""Get credentials from environment variables.
Preference of credentials is:
- No credentials if DATASTORE_EMULATOR_HOST is set.
- Google APIs Signed JWT credentials based on
DATASTORE_SERVICE_ACCOUNT and DATASTORE_PRIVATE_KEY_FILE
  environment variables
- Google Application Default
https://developers.google.com/identity/protocols/application-default-credentials
Returns:
credentials or None.
"""
if os.getenv(_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV):
logging.info('connecting without credentials because %s is set.',
_DATASTORE_USE_STUB_CREDENTIAL_FOR_TEST_ENV)
return None
if os.getenv(_DATASTORE_EMULATOR_HOST_ENV):
logging.info('connecting without credentials because %s is set.',
_DATASTORE_EMULATOR_HOST_ENV)
return None
if (os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV)
and os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV)):
with open(os.getenv(_DATASTORE_PRIVATE_KEY_FILE_ENV), 'rb') as f:
key = f.read()
credentials = client.SignedJwtAssertionCredentials(
os.getenv(_DATASTORE_SERVICE_ACCOUNT_ENV), key, SCOPE)
logging.info('connecting using private key file.')
return credentials
try:
credentials = client.GoogleCredentials.get_application_default()
credentials = credentials.create_scoped(SCOPE)
logging.info('connecting using Google Application Default Credentials.')
return credentials
except client.ApplicationDefaultCredentialsError, e:
logging.error('Unable to find any credentials to use. '
'If you are running locally, make sure to set the '
'%s environment variable.', _DATASTORE_EMULATOR_HOST_ENV)
raise e |
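A short usage sketch for get_credentials_from_env above. It assumes the module constant _DATASTORE_EMULATOR_HOST_ENV names the 'DATASTORE_EMULATOR_HOST' environment variable; that mapping is an assumption made for illustration.

import os

# Point the client at a local emulator; with this variable set the helper
# deliberately returns None, because the emulator needs no credentials.
os.environ['DATASTORE_EMULATOR_HOST'] = 'localhost:8081'
credentials = get_credentials_from_env()  # assumes the function above is importable
assert credentials is None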
def _resource(methode, zone, resource_type, resource_selector, **kwargs):
'''
    internal resource handler
methode : string
add or update
zone : string
name of zone
resource_type : string
type of resource
resource_selector : string
unique resource identifier
**kwargs : string|int|...
resource properties
'''
ret = {'status': True}
# parse kwargs
kwargs = salt.utils.args.clean_kwargs(**kwargs)
for k in kwargs:
if isinstance(kwargs[k], dict) or isinstance(kwargs[k], list):
kwargs[k] = _sanitize_value(kwargs[k])
if methode not in ['add', 'update']:
ret['status'] = False
ret['message'] = 'unknown methode {0}'.format(methode)
return ret
if methode in ['update'] and resource_selector and resource_selector not in kwargs:
ret['status'] = False
ret['message'] = 'resource selector {0} not found in parameters'.format(resource_selector)
return ret
# generate update script
cfg_file = salt.utils.files.mkstemp()
with salt.utils.files.fpopen(cfg_file, 'w+', mode=0o600) as fp_:
if methode in ['add']:
fp_.write("add {0}\n".format(resource_type))
elif methode in ['update']:
if resource_selector:
value = kwargs[resource_selector]
if isinstance(value, dict) or isinstance(value, list):
value = _sanitize_value(value)
value = six.text_type(value).lower() if isinstance(value, bool) else six.text_type(value)
fp_.write("select {0} {1}={2}\n".format(resource_type, resource_selector, _sanitize_value(value)))
else:
fp_.write("select {0}\n".format(resource_type))
for k, v in six.iteritems(kwargs):
if methode in ['update'] and k == resource_selector:
continue
if isinstance(v, dict) or isinstance(v, list):
                value = _sanitize_value(v)
value = six.text_type(v).lower() if isinstance(v, bool) else six.text_type(v)
if k in _zonecfg_resource_setters[resource_type]:
fp_.write("set {0}={1}\n".format(k, _sanitize_value(value)))
else:
fp_.write("add {0} {1}\n".format(k, _sanitize_value(value)))
fp_.write("end\n")
# update property
if cfg_file:
_dump_cfg(cfg_file)
res = __salt__['cmd.run_all']('zonecfg -z {zone} -f {path}'.format(
zone=zone,
path=cfg_file,
))
ret['status'] = res['retcode'] == 0
ret['message'] = res['stdout'] if ret['status'] else res['stderr']
if ret['message'] == '':
del ret['message']
else:
ret['message'] = _clean_message(ret['message'])
# cleanup config file
if __salt__['file.file_exists'](cfg_file):
__salt__['file.remove'](cfg_file)
    return ret | internal resource handler
methode : string
add or update
zone : string
name of zone
resource_type : string
type of resource
resource_selector : string
unique resource identifier
**kwargs : string|int|...
resource properties | Below is the the instruction that describes the task:
### Input:
    internal resource handler
methode : string
add or update
zone : string
name of zone
resource_type : string
type of resource
resource_selector : string
unique resource identifier
**kwargs : string|int|...
resource properties
### Response:
def _resource(methode, zone, resource_type, resource_selector, **kwargs):
'''
    internal resource handler
methode : string
add or update
zone : string
name of zone
resource_type : string
type of resource
resource_selector : string
unique resource identifier
**kwargs : string|int|...
resource properties
'''
ret = {'status': True}
# parse kwargs
kwargs = salt.utils.args.clean_kwargs(**kwargs)
for k in kwargs:
if isinstance(kwargs[k], dict) or isinstance(kwargs[k], list):
kwargs[k] = _sanitize_value(kwargs[k])
if methode not in ['add', 'update']:
ret['status'] = False
ret['message'] = 'unknown methode {0}'.format(methode)
return ret
if methode in ['update'] and resource_selector and resource_selector not in kwargs:
ret['status'] = False
ret['message'] = 'resource selector {0} not found in parameters'.format(resource_selector)
return ret
# generate update script
cfg_file = salt.utils.files.mkstemp()
with salt.utils.files.fpopen(cfg_file, 'w+', mode=0o600) as fp_:
if methode in ['add']:
fp_.write("add {0}\n".format(resource_type))
elif methode in ['update']:
if resource_selector:
value = kwargs[resource_selector]
if isinstance(value, dict) or isinstance(value, list):
value = _sanitize_value(value)
value = six.text_type(value).lower() if isinstance(value, bool) else six.text_type(value)
fp_.write("select {0} {1}={2}\n".format(resource_type, resource_selector, _sanitize_value(value)))
else:
fp_.write("select {0}\n".format(resource_type))
for k, v in six.iteritems(kwargs):
if methode in ['update'] and k == resource_selector:
continue
if isinstance(v, dict) or isinstance(v, list):
                value = _sanitize_value(v)
value = six.text_type(v).lower() if isinstance(v, bool) else six.text_type(v)
if k in _zonecfg_resource_setters[resource_type]:
fp_.write("set {0}={1}\n".format(k, _sanitize_value(value)))
else:
fp_.write("add {0} {1}\n".format(k, _sanitize_value(value)))
fp_.write("end\n")
# update property
if cfg_file:
_dump_cfg(cfg_file)
res = __salt__['cmd.run_all']('zonecfg -z {zone} -f {path}'.format(
zone=zone,
path=cfg_file,
))
ret['status'] = res['retcode'] == 0
ret['message'] = res['stdout'] if ret['status'] else res['stderr']
if ret['message'] == '':
del ret['message']
else:
ret['message'] = _clean_message(ret['message'])
# cleanup config file
if __salt__['file.file_exists'](cfg_file):
__salt__['file.remove'](cfg_file)
return ret |
def read_configuration(self):
"""
Read the PyPI access configuration as supported by distutils, getting
PyPI to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
"""
# get distutils to do the work
c = self._get_pypirc_command()
c.repository = self.url
cfg = c._read_pypirc()
self.username = cfg.get('username')
self.password = cfg.get('password')
self.realm = cfg.get('realm', 'pypi')
self.url = cfg.get('repository', self.url) | Read the PyPI access configuration as supported by distutils, getting
PyPI to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration. | Below is the the instruction that describes the task:
### Input:
Read the PyPI access configuration as supported by distutils, getting
PyPI to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
### Response:
def read_configuration(self):
"""
Read the PyPI access configuration as supported by distutils, getting
PyPI to do the actual work. This populates ``username``, ``password``,
``realm`` and ``url`` attributes from the configuration.
"""
# get distutils to do the work
c = self._get_pypirc_command()
c.repository = self.url
cfg = c._read_pypirc()
self.username = cfg.get('username')
self.password = cfg.get('password')
self.realm = cfg.get('realm', 'pypi')
self.url = cfg.get('repository', self.url) |
def cwd_filt(depth):
"""Return the last depth elements of the current working directory.
$HOME is always replaced with '~'.
If depth==0, the full path is returned."""
cwd = os.getcwdu().replace(HOME,"~")
out = os.sep.join(cwd.split(os.sep)[-depth:])
return out or os.sep | Return the last depth elements of the current working directory.
$HOME is always replaced with '~'.
If depth==0, the full path is returned. | Below is the the instruction that describes the task:
### Input:
Return the last depth elements of the current working directory.
$HOME is always replaced with '~'.
If depth==0, the full path is returned.
### Response:
def cwd_filt(depth):
"""Return the last depth elements of the current working directory.
$HOME is always replaced with '~'.
If depth==0, the full path is returned."""
cwd = os.getcwdu().replace(HOME,"~")
out = os.sep.join(cwd.split(os.sep)[-depth:])
return out or os.sep |
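The cwd_filt helper above relies on Python 2's os.getcwdu and a module-level HOME constant; the sketch below restates the same truncation logic for Python 3 purely as an illustration, with HOME taken from os.path.expanduser.

import os

def cwd_filt_py3(depth):
    # Same truncation logic as the helper above, restated for Python 3.
    cwd = os.getcwd().replace(os.path.expanduser('~'), '~')
    out = os.sep.join(cwd.split(os.sep)[-depth:])
    return out or os.sep

# With the working directory at ~/projects/demo:
#   cwd_filt_py3(1) -> 'demo'
#   cwd_filt_py3(2) -> 'projects/demo'
#   cwd_filt_py3(0) -> '~/projects/demo'
print(cwd_filt_py3(2))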
def inspect_to_metadata(metadata_object, inspect_data):
"""
process data from `docker inspect` and update provided metadata object
:param metadata_object: instance of Metadata
    :param inspect_data: dict, metadata from `docker inspect` or `docker_client.images()`
:return: instance of Metadata
"""
identifier = graceful_get(inspect_data, 'Id')
if identifier:
if ":" in identifier:
# format of image name from docker inspect:
# sha256:8f0e66c924c0c169352de487a3c2463d82da24e9442fc097dddaa5f800df7129
metadata_object.identifier = identifier.split(':')[1]
else:
# container
metadata_object.identifier = identifier
# format of Environment Variables from docker inspect:
# ['DISTTAG=f26container', 'FGC=f26']
raw_env_vars = graceful_get(inspect_data, "Config", "Env") or []
if raw_env_vars:
metadata_object.env_variables = {}
for env_variable in raw_env_vars:
splits = env_variable.split("=", 1)
name = splits[0]
value = splits[1] if len(splits) > 1 else None
if value is not None:
metadata_object.env_variables.update({name: value})
raw_exposed_ports = graceful_get(inspect_data, "Config", "ExposedPorts")
if raw_exposed_ports:
metadata_object.exposed_ports = list(raw_exposed_ports.keys())
# specific to images
raw_repo_tags = graceful_get(inspect_data, 'RepoTags')
if raw_repo_tags:
metadata_object.name = raw_repo_tags[0]
metadata_object.labels = graceful_get(inspect_data, 'Config', 'Labels')
metadata_object.command = graceful_get(inspect_data, 'Config', 'Cmd')
metadata_object.creation_timestamp = inspect_data.get('Created', None)
# specific to images
metadata_object.image_names = inspect_data.get('RepoTags', None)
# specific to images
digests = inspect_data.get("RepoDigests", None)
if digests:
metadata_object.repo_digests = digests
metadata_object.digest = digests[0]
return metadata_object | process data from `docker inspect` and update provided metadata object
:param metadata_object: instance of Metadata
    :param inspect_data: dict, metadata from `docker inspect` or `docker_client.images()`
:return: instance of Metadata | Below is the the instruction that describes the task:
### Input:
process data from `docker inspect` and update provided metadata object
:param metadata_object: instance of Metadata
    :param inspect_data: dict, metadata from `docker inspect` or `docker_client.images()`
:return: instance of Metadata
### Response:
def inspect_to_metadata(metadata_object, inspect_data):
"""
process data from `docker inspect` and update provided metadata object
:param metadata_object: instance of Metadata
    :param inspect_data: dict, metadata from `docker inspect` or `docker_client.images()`
:return: instance of Metadata
"""
identifier = graceful_get(inspect_data, 'Id')
if identifier:
if ":" in identifier:
# format of image name from docker inspect:
# sha256:8f0e66c924c0c169352de487a3c2463d82da24e9442fc097dddaa5f800df7129
metadata_object.identifier = identifier.split(':')[1]
else:
# container
metadata_object.identifier = identifier
# format of Environment Variables from docker inspect:
# ['DISTTAG=f26container', 'FGC=f26']
raw_env_vars = graceful_get(inspect_data, "Config", "Env") or []
if raw_env_vars:
metadata_object.env_variables = {}
for env_variable in raw_env_vars:
splits = env_variable.split("=", 1)
name = splits[0]
value = splits[1] if len(splits) > 1 else None
if value is not None:
metadata_object.env_variables.update({name: value})
raw_exposed_ports = graceful_get(inspect_data, "Config", "ExposedPorts")
if raw_exposed_ports:
metadata_object.exposed_ports = list(raw_exposed_ports.keys())
# specific to images
raw_repo_tags = graceful_get(inspect_data, 'RepoTags')
if raw_repo_tags:
metadata_object.name = raw_repo_tags[0]
metadata_object.labels = graceful_get(inspect_data, 'Config', 'Labels')
metadata_object.command = graceful_get(inspect_data, 'Config', 'Cmd')
metadata_object.creation_timestamp = inspect_data.get('Created', None)
# specific to images
metadata_object.image_names = inspect_data.get('RepoTags', None)
# specific to images
digests = inspect_data.get("RepoDigests", None)
if digests:
metadata_object.repo_digests = digests
metadata_object.digest = digests[0]
return metadata_object |
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
"""Get console screen buffer info object."""
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = windll.kernel32.GetConsoleScreenBufferInfo(
handle, byref(csbi))
if not success:
raise WinError()
return csbi | Get console screen buffer info object. | Below is the the instruction that describes the task:
### Input:
Get console screen buffer info object.
### Response:
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
"""Get console screen buffer info object."""
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = windll.kernel32.GetConsoleScreenBufferInfo(
handle, byref(csbi))
if not success:
raise WinError()
return csbi |
def lock_file(path, maxdelay=.1, lock_cls=LockFile, timeout=10.0):
"""Cooperative file lock. Uses `lockfile.LockFile` polling under the hood.
`maxdelay` defines the interval between individual polls.
"""
lock = lock_cls(path)
max_t = time.time() + timeout
while True:
if time.time() >= max_t:
raise LockTimeout("Timeout waiting to acquire lock for %s" % (path,)) # same exception messages as in lockfile
try:
lock.acquire(timeout=0)
except AlreadyLocked:
sleep(maxdelay)
else:
try:
yield lock
break
finally:
lock.release() | Cooperative file lock. Uses `lockfile.LockFile` polling under the hood.
`maxdelay` defines the interval between individual polls. | Below is the the instruction that describes the task:
### Input:
Cooperative file lock. Uses `lockfile.LockFile` polling under the hood.
`maxdelay` defines the interval between individual polls.
### Response:
def lock_file(path, maxdelay=.1, lock_cls=LockFile, timeout=10.0):
"""Cooperative file lock. Uses `lockfile.LockFile` polling under the hood.
`maxdelay` defines the interval between individual polls.
"""
lock = lock_cls(path)
max_t = time.time() + timeout
while True:
if time.time() >= max_t:
raise LockTimeout("Timeout waiting to acquire lock for %s" % (path,)) # same exception messages as in lockfile
try:
lock.acquire(timeout=0)
except AlreadyLocked:
sleep(maxdelay)
else:
try:
yield lock
break
finally:
lock.release() |
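lock_file above follows the acquire/yield/release shape that contextlib.contextmanager expects; since no decorator is visible in this row, the sketch below wraps it explicitly. The file path and timings are illustrative.

from contextlib import contextmanager

locked = contextmanager(lock_file)  # assumes no decorator was already applied upstream

with locked('/tmp/demo-data.json', maxdelay=0.05, timeout=5.0):
    # The lock is held for the body of the with-block and released on exit,
    # even if an exception is raised inside it.
    pass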
def configure(self, inputs, outputs):
"""Configure activity input and output.
    You need to provide a list of input and output :class:`Property`. Does not work with lists of property ids.
:param inputs: iterable of input property models
:type inputs: list(:class:`Property`)
:param outputs: iterable of output property models
:type outputs: list(:class:`Property`)
:raises APIError: when unable to configure the activity
"""
url = self._client._build_url('activity', activity_id=self.id)
r = self._client._request('PUT', url, params={'select_action': 'update_associations'}, json={
'inputs': [p.id for p in inputs],
'outputs': [p.id for p in outputs]
})
if r.status_code != requests.codes.ok: # pragma: no cover
raise APIError("Could not configure activity") | Configure activity input and output.
    You need to provide a list of input and output :class:`Property`. Does not work with lists of property ids.
:param inputs: iterable of input property models
:type inputs: list(:class:`Property`)
:param outputs: iterable of output property models
:type outputs: list(:class:`Property`)
:raises APIError: when unable to configure the activity | Below is the the instruction that describes the task:
### Input:
Configure activity input and output.
    You need to provide a list of input and output :class:`Property`. Does not work with lists of property ids.
:param inputs: iterable of input property models
:type inputs: list(:class:`Property`)
:param outputs: iterable of output property models
:type outputs: list(:class:`Property`)
:raises APIError: when unable to configure the activity
### Response:
def configure(self, inputs, outputs):
"""Configure activity input and output.
    You need to provide a list of input and output :class:`Property`. Does not work with lists of property ids.
:param inputs: iterable of input property models
:type inputs: list(:class:`Property`)
:param outputs: iterable of output property models
:type outputs: list(:class:`Property`)
:raises APIError: when unable to configure the activity
"""
url = self._client._build_url('activity', activity_id=self.id)
r = self._client._request('PUT', url, params={'select_action': 'update_associations'}, json={
'inputs': [p.id for p in inputs],
'outputs': [p.id for p in outputs]
})
if r.status_code != requests.codes.ok: # pragma: no cover
raise APIError("Could not configure activity") |
def Size(self):
"""
Get the total size in bytes of the object.
Returns:
int: size.
"""
return s.uint8 + GetVarSize(self.Key) + GetVarSize(self.Field) + GetVarSize(self.Value) | Get the total size in bytes of the object.
Returns:
int: size. | Below is the the instruction that describes the task:
### Input:
Get the total size in bytes of the object.
Returns:
int: size.
### Response:
def Size(self):
"""
Get the total size in bytes of the object.
Returns:
int: size.
"""
return s.uint8 + GetVarSize(self.Key) + GetVarSize(self.Field) + GetVarSize(self.Value) |
def _dfs_postorder(node, visited):
"""Iterate through nodes in DFS post-order."""
if node.lo is not None:
yield from _dfs_postorder(node.lo, visited)
if node.hi is not None:
yield from _dfs_postorder(node.hi, visited)
if node not in visited:
visited.add(node)
yield node | Iterate through nodes in DFS post-order. | Below is the the instruction that describes the task:
### Input:
Iterate through nodes in DFS post-order.
### Response:
def _dfs_postorder(node, visited):
"""Iterate through nodes in DFS post-order."""
if node.lo is not None:
yield from _dfs_postorder(node.lo, visited)
if node.hi is not None:
yield from _dfs_postorder(node.hi, visited)
if node not in visited:
visited.add(node)
yield node |
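A small illustration of _dfs_postorder above; the Node class is a stand-in for whatever BDD-style node type the surrounding code uses and is assumed only to expose lo and hi attributes.

class Node:
    def __init__(self, name, lo=None, hi=None):
        self.name, self.lo, self.hi = name, lo, hi

leaf0, leaf1 = Node('0'), Node('1')
root = Node('x', lo=leaf0, hi=leaf1)

# Children are visited before their parent, and the shared `visited` set
# guarantees each node is yielded at most once.
print([n.name for n in _dfs_postorder(root, set())])  # ['0', '1', 'x']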
def subn(pattern, repl, string, count=0, flags=0):
"""Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in the source
string by the replacement repl. number is the number of
substitutions that were made. repl can be either a string or a
callable; if a string, backslash escapes in it are processed.
If it is a callable, it's passed the match object and must
return a replacement string to be used."""
return _compile(pattern, flags).subn(repl, string, count) | Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in the source
string by the replacement repl. number is the number of
substitutions that were made. repl can be either a string or a
callable; if a string, backslash escapes in it are processed.
If it is a callable, it's passed the match object and must
return a replacement string to be used. | Below is the the instruction that describes the task:
### Input:
Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in the source
string by the replacement repl. number is the number of
substitutions that were made. repl can be either a string or a
callable; if a string, backslash escapes in it are processed.
If it is a callable, it's passed the match object and must
return a replacement string to be used.
### Response:
def subn(pattern, repl, string, count=0, flags=0):
"""Return a 2-tuple containing (new_string, number).
new_string is the string obtained by replacing the leftmost
non-overlapping occurrences of the pattern in the source
string by the replacement repl. number is the number of
substitutions that were made. repl can be either a string or a
callable; if a string, backslash escapes in it are processed.
If it is a callable, it's passed the match object and must
return a replacement string to be used."""
return _compile(pattern, flags).subn(repl, string, count) |
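This row wraps the standard-library re.subn behaviour; the example below shows the 2-tuple it returns.

import re

new_string, count = re.subn(r'\d+', '#', 'room 101, floor 3')
# new_string == 'room #, floor #' and count == 2, i.e. two substitutions were made.
print(new_string, count)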
def _sync_dag_view_permissions(self, dag_id, access_control):
"""Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: string
:param access_control: a dict where each key is a rolename and
each value is a set() of permission names (e.g.,
        {'can_dag_read'})
:type access_control: dict
"""
def _get_or_create_dag_permission(perm_name):
dag_perm = self.find_permission_view_menu(perm_name, dag_id)
if not dag_perm:
self.log.info(
"Creating new permission '%s' on view '%s'",
perm_name, dag_id
)
dag_perm = self.add_permission_view_menu(perm_name, dag_id)
return dag_perm
def _revoke_stale_permissions(dag_view):
existing_dag_perms = self.find_permissions_view_menu(dag_view)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role
if role.name != 'Admin']
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, {})
if perm.permission.name not in target_perms_for_role:
self.log.info(
"Revoking '%s' on DAG '%s' for role '%s'",
perm.permission, dag_id, role.name
)
self.del_permission_role(role, perm)
dag_view = self.find_view_menu(dag_id)
if dag_view:
_revoke_stale_permissions(dag_view)
for rolename, perms in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
"The access_control mapping for DAG '{}' includes a role "
"named '{}', but that role does not exist".format(
dag_id,
rolename))
perms = set(perms)
invalid_perms = perms - self.DAG_PERMS
if invalid_perms:
raise AirflowException(
"The access_control map for DAG '{}' includes the following "
"invalid permissions: {}; The set of valid permissions "
"is: {}".format(dag_id,
(perms - self.DAG_PERMS),
self.DAG_PERMS))
for perm_name in perms:
dag_perm = _get_or_create_dag_permission(perm_name)
self.add_permission_role(role, dag_perm) | Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: string
:param access_control: a dict where each key is a rolename and
each value is a set() of permission names (e.g.,
        {'can_dag_read'})
:type access_control: dict | Below is the the instruction that describes the task:
### Input:
Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: string
:param access_control: a dict where each key is a rolename and
each value is a set() of permission names (e.g.,
        {'can_dag_read'})
:type access_control: dict
### Response:
def _sync_dag_view_permissions(self, dag_id, access_control):
"""Set the access policy on the given DAG's ViewModel.
:param dag_id: the ID of the DAG whose permissions should be updated
:type dag_id: string
:param access_control: a dict where each key is a rolename and
each value is a set() of permission names (e.g.,
        {'can_dag_read'})
:type access_control: dict
"""
def _get_or_create_dag_permission(perm_name):
dag_perm = self.find_permission_view_menu(perm_name, dag_id)
if not dag_perm:
self.log.info(
"Creating new permission '%s' on view '%s'",
perm_name, dag_id
)
dag_perm = self.add_permission_view_menu(perm_name, dag_id)
return dag_perm
def _revoke_stale_permissions(dag_view):
existing_dag_perms = self.find_permissions_view_menu(dag_view)
for perm in existing_dag_perms:
non_admin_roles = [role for role in perm.role
if role.name != 'Admin']
for role in non_admin_roles:
target_perms_for_role = access_control.get(role.name, {})
if perm.permission.name not in target_perms_for_role:
self.log.info(
"Revoking '%s' on DAG '%s' for role '%s'",
perm.permission, dag_id, role.name
)
self.del_permission_role(role, perm)
dag_view = self.find_view_menu(dag_id)
if dag_view:
_revoke_stale_permissions(dag_view)
for rolename, perms in access_control.items():
role = self.find_role(rolename)
if not role:
raise AirflowException(
"The access_control mapping for DAG '{}' includes a role "
"named '{}', but that role does not exist".format(
dag_id,
rolename))
perms = set(perms)
invalid_perms = perms - self.DAG_PERMS
if invalid_perms:
raise AirflowException(
"The access_control map for DAG '{}' includes the following "
"invalid permissions: {}; The set of valid permissions "
"is: {}".format(dag_id,
(perms - self.DAG_PERMS),
self.DAG_PERMS))
for perm_name in perms:
dag_perm = _get_or_create_dag_permission(perm_name)
self.add_permission_role(role, dag_perm) |
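The access_control mapping consumed above is normally declared alongside the DAG; the sketch below only shows its expected shape. The role names are illustrative, and passing access_control to the DAG constructor is an assumption about the surrounding Airflow version.

# Shape of the access_control argument: role name -> set of DAG-level permission names.
access_control = {
    'analyst': {'can_dag_read'},                      # read-only role
    'pipeline-ops': {'can_dag_read', 'can_dag_edit'},
}
# e.g. DAG('example_dag', ..., access_control=access_control)  # hypothetical DAG definition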
def to_mesh(obj):
'''
to_mesh(obj) yields a Mesh object that is equivalent to obj or identical to obj if obj is itself
a mesh object.
The following objects can be converted into meshes:
* a mesh object
* a tuple (coords, faces) where coords is a coordinate matrix and faces is a matrix of
coordinate indices that make-up the triangles
* a tuple (faces, coords) where faces is a triangle matrix and coords is a coordinate matrix;
note that if neither matrix is of integer type, then the latter ordering (which is the same
as that accepted by the mesh() function) is assumed.
* a tuple (topo, regname) specifying the registration name to use (note that regname may
optionally start with 'reg:' which is ignored).
* a tuple (cortex, surfname) specifying the surface name to use. Note that surfname may
optionally start with 'surf:' or 'reg:', both of which are used only to determine whether
to lookup a registration or a surface. If no 'surf:' or 'reg:' is given as a prefix, then
a surface is tried first followed by a registration. The surface name 'sphere' is
automatically translated to 'reg:native' and any surface name of the form '<name>_sphere' is
automatically translated to 'reg:<name>'.
* a tuple (topo/cortex, mesh) results in the mesh being returned.
* a tuple (mesh, string) or (mesh, None) results in mesh with the second argument ignored.
* a tuple (mesh1, mesh2) results in mesh2 with mesh1 ignored.
Note that some of the behavior described above is desirable because of a common use case of the
to_mesh function. When another function f accepts as arguments both a hemi/topology object as
well as an optional surface argument, the purpose is often to obtain a specific mesh from the
topology but to allow the user to specify which or to pass their own mesh.
'''
if is_mesh(obj): return obj
elif pimms.is_vector(obj) and len(obj) == 2:
(a,b) = obj
if pimms.is_matrix(a, 'int') and pimms.is_matrix(b, 'real'): return mesh(a, b)
elif pimms.is_matrix(b, 'int') and pimms.is_matrix(a, 'real'): return mesh(b, a)
elif is_mesh(a) and (b is None or pimms.is_str(b)): return a
elif is_mesh(a) and is_mesh(b): return b
elif is_topo(a):
from neuropythy import is_cortex
if is_mesh(b): return b
elif not pimms.is_str(b): raise ValueError('to_mesh: non-str surf/reg name: %s' % (b,))
(b0, lb) = (b, b.lower())
# check for translations of the name first:
s = b[4:] if lb.startswith('reg:') else b[5:] if lb.startswith('surf:') else b
ls = s.lower()
if ls.endswith('_sphere'): b = ('reg:' + s[:-7])
elif ls == 'sphere': b = 'reg:native'
lb = b.lower()
# we try surfaces first (if a is a cortex and has surfaces)
if is_cortex(a) and not lb.startswith('reg:'):
(s,ls) = (b[5:],lb[5:]) if lb.startswith('surf:') else (b,lb)
if s in a.surfaces: return a.surfaces[s]
elif ls in a.surfaces: return a.surfaces[ls]
# then check registrations
if not lb.startswith('surf:'):
(s,ls) = (b[4:],lb[4:]) if lb.startswith('reg:') else (b,lb)
if s in a.registrations: return a.registrations[s]
elif ls in a.registrations: return a.registrations[ls]
# nothing found
raise ValueError('to_mesh: mesh named "%s" not found in topology %s' % (b0, a))
else: raise ValueError('to_mesh: could not deduce meaning of row: %s' % (obj,))
    else: raise ValueError('Could not deduce how object can be converted into a mesh')
a mesh object.
The following objects can be converted into meshes:
* a mesh object
* a tuple (coords, faces) where coords is a coordinate matrix and faces is a matrix of
coordinate indices that make-up the triangles
* a tuple (faces, coords) where faces is a triangle matrix and coords is a coordinate matrix;
note that if neither matrix is of integer type, then the latter ordering (which is the same
as that accepted by the mesh() function) is assumed.
* a tuple (topo, regname) specifying the registration name to use (note that regname may
optionally start with 'reg:' which is ignored).
* a tuple (cortex, surfname) specifying the surface name to use. Note that surfname may
optionally start with 'surf:' or 'reg:', both of which are used only to determine whether
to lookup a registration or a surface. If no 'surf:' or 'reg:' is given as a prefix, then
a surface is tried first followed by a registration. The surface name 'sphere' is
automatically translated to 'reg:native' and any surface name of the form '<name>_sphere' is
automatically translated to 'reg:<name>'.
* a tuple (topo/cortex, mesh) results in the mesh being returned.
* a tuple (mesh, string) or (mesh, None) results in mesh with the second argument ignored.
* a tuple (mesh1, mesh2) results in mesh2 with mesh1 ignored.
Note that some of the behavior described above is desirable because of a common use case of the
to_mesh function. When another function f accepts as arguments both a hemi/topology object as
well as an optional surface argument, the purpose is often to obtain a specific mesh from the
topology but to allow the user to specify which or to pass their own mesh. | Below is the the instruction that describes the task:
### Input:
to_mesh(obj) yields a Mesh object that is equivalent to obj or identical to obj if obj is itself
a mesh object.
The following objects can be converted into meshes:
* a mesh object
* a tuple (coords, faces) where coords is a coordinate matrix and faces is a matrix of
coordinate indices that make-up the triangles
* a tuple (faces, coords) where faces is a triangle matrix and coords is a coordinate matrix;
note that if neither matrix is of integer type, then the latter ordering (which is the same
as that accepted by the mesh() function) is assumed.
* a tuple (topo, regname) specifying the registration name to use (note that regname may
optionally start with 'reg:' which is ignored).
* a tuple (cortex, surfname) specifying the surface name to use. Note that surfname may
optionally start with 'surf:' or 'reg:', both of which are used only to determine whether
to lookup a registration or a surface. If no 'surf:' or 'reg:' is given as a prefix, then
a surface is tried first followed by a registration. The surface name 'sphere' is
automatically translated to 'reg:native' and any surface name of the form '<name>_sphere' is
automatically translated to 'reg:<name>'.
* a tuple (topo/cortex, mesh) results in the mesh being returned.
* a tuple (mesh, string) or (mesh, None) results in mesh with the second argument ignored.
* a tuple (mesh1, mesh2) results in mesh2 with mesh1 ignored.
Note that some of the behavior described above is desirable because of a common use case of the
to_mesh function. When another function f accepts as arguments both a hemi/topology object as
well as an optional surface argument, the purpose is often to obtain a specific mesh from the
topology but to allow the user to specify which or to pass their own mesh.
### Response:
def to_mesh(obj):
'''
to_mesh(obj) yields a Mesh object that is equivalent to obj or identical to obj if obj is itself
a mesh object.
The following objects can be converted into meshes:
* a mesh object
* a tuple (coords, faces) where coords is a coordinate matrix and faces is a matrix of
coordinate indices that make-up the triangles
* a tuple (faces, coords) where faces is a triangle matrix and coords is a coordinate matrix;
note that if neither matrix is of integer type, then the latter ordering (which is the same
as that accepted by the mesh() function) is assumed.
* a tuple (topo, regname) specifying the registration name to use (note that regname may
optionally start with 'reg:' which is ignored).
* a tuple (cortex, surfname) specifying the surface name to use. Note that surfname may
optionally start with 'surf:' or 'reg:', both of which are used only to determine whether
to lookup a registration or a surface. If no 'surf:' or 'reg:' is given as a prefix, then
a surface is tried first followed by a registration. The surface name 'sphere' is
automatically translated to 'reg:native' and any surface name of the form '<name>_sphere' is
automatically translated to 'reg:<name>'.
* a tuple (topo/cortex, mesh) results in the mesh being returned.
* a tuple (mesh, string) or (mesh, None) results in mesh with the second argument ignored.
* a tuple (mesh1, mesh2) results in mesh2 with mesh1 ignored.
Note that some of the behavior described above is desirable because of a common use case of the
to_mesh function. When another function f accepts as arguments both a hemi/topology object as
well as an optional surface argument, the purpose is often to obtain a specific mesh from the
topology but to allow the user to specify which or to pass their own mesh.
'''
if is_mesh(obj): return obj
elif pimms.is_vector(obj) and len(obj) == 2:
(a,b) = obj
if pimms.is_matrix(a, 'int') and pimms.is_matrix(b, 'real'): return mesh(a, b)
elif pimms.is_matrix(b, 'int') and pimms.is_matrix(a, 'real'): return mesh(b, a)
elif is_mesh(a) and (b is None or pimms.is_str(b)): return a
elif is_mesh(a) and is_mesh(b): return b
elif is_topo(a):
from neuropythy import is_cortex
if is_mesh(b): return b
elif not pimms.is_str(b): raise ValueError('to_mesh: non-str surf/reg name: %s' % (b,))
(b0, lb) = (b, b.lower())
# check for translations of the name first:
s = b[4:] if lb.startswith('reg:') else b[5:] if lb.startswith('surf:') else b
ls = s.lower()
if ls.endswith('_sphere'): b = ('reg:' + s[:-7])
elif ls == 'sphere': b = 'reg:native'
lb = b.lower()
# we try surfaces first (if a is a cortex and has surfaces)
if is_cortex(a) and not lb.startswith('reg:'):
(s,ls) = (b[5:],lb[5:]) if lb.startswith('surf:') else (b,lb)
if s in a.surfaces: return a.surfaces[s]
elif ls in a.surfaces: return a.surfaces[ls]
# then check registrations
if not lb.startswith('surf:'):
(s,ls) = (b[4:],lb[4:]) if lb.startswith('reg:') else (b,lb)
if s in a.registrations: return a.registrations[s]
elif ls in a.registrations: return a.registrations[ls]
# nothing found
raise ValueError('to_mesh: mesh named "%s" not found in topology %s' % (b0, a))
else: raise ValueError('to_mesh: could not deduce meaning of row: %s' % (obj,))
    else: raise ValueError('Could not deduce how object can be converted into a mesh')
def _pload32(ins):
""" Loads from stack pointer (SP) + X, being
    X 2nd parameter.
1st operand must be a SIGNED integer.
2nd operand cannot be an immediate nor an address.
"""
output = _pload(ins.quad[2], 4)
output.append('push de')
output.append('push hl')
return output | Loads from stack pointer (SP) + X, being
    X 2nd parameter.
1st operand must be a SIGNED integer.
2nd operand cannot be an immediate nor an address. | Below is the the instruction that describes the task:
### Input:
Loads from stack pointer (SP) + X, being
X 2nd parameter.
1st operand must be a SIGNED integer.
2nd operand cannot be an immediate nor an address.
### Response:
def _pload32(ins):
""" Loads from stack pointer (SP) + X, being
    X 2nd parameter.
1st operand must be a SIGNED integer.
2nd operand cannot be an immediate nor an address.
"""
output = _pload(ins.quad[2], 4)
output.append('push de')
output.append('push hl')
return output |
def open(self, output_only = False, shared = True):
"""Open HID device and obtain 'Collection Information'.
It effectively prepares the HidDevice object for reading and writing
"""
if self.is_opened():
raise HIDError("Device already opened")
sharing_flags = 0
if shared:
sharing_flags = winapi.FILE_SHARE_READ | winapi.FILE_SHARE_WRITE
hid_handle = winapi.CreateFile(
self.device_path,
winapi.GENERIC_READ | winapi.GENERIC_WRITE,
sharing_flags,
None, # no security
winapi.OPEN_EXISTING,
winapi.FILE_ATTRIBUTE_NORMAL | winapi.FILE_FLAG_OVERLAPPED,
0 )
if not hid_handle or hid_handle == INVALID_HANDLE_VALUE:
raise HIDError("Error opening HID device: %s\n"%self.product_name)
#get pre parsed data
ptr_preparsed_data = ctypes.c_void_p()
if not hid_dll.HidD_GetPreparsedData(int(hid_handle),
byref(ptr_preparsed_data)):
winapi.CloseHandle(int(hid_handle))
raise HIDError("Failure to get HID pre parsed data")
self.ptr_preparsed_data = ptr_preparsed_data
self.hid_handle = hid_handle
#get top level capabilities
self.hid_caps = winapi.HIDP_CAPS()
HidStatus( hid_dll.HidP_GetCaps(ptr_preparsed_data,
byref(self.hid_caps)) )
#proceed with button capabilities
caps_length = c_ulong()
all_items = [\
(HidP_Input, winapi.HIDP_BUTTON_CAPS,
self.hid_caps.number_input_button_caps,
hid_dll.HidP_GetButtonCaps
),
(HidP_Input, winapi.HIDP_VALUE_CAPS,
self.hid_caps.number_input_value_caps,
hid_dll.HidP_GetValueCaps
),
(HidP_Output, winapi.HIDP_BUTTON_CAPS,
self.hid_caps.number_output_button_caps,
hid_dll.HidP_GetButtonCaps
),
(HidP_Output, winapi.HIDP_VALUE_CAPS,
self.hid_caps.number_output_value_caps,
hid_dll.HidP_GetValueCaps
),
(HidP_Feature, winapi.HIDP_BUTTON_CAPS,
self.hid_caps.number_feature_button_caps,
hid_dll.HidP_GetButtonCaps
),
(HidP_Feature, winapi.HIDP_VALUE_CAPS,
self.hid_caps.number_feature_value_caps,
hid_dll.HidP_GetValueCaps
),
]
for report_kind, struct_kind, max_items, get_control_caps in all_items:
if not int(max_items):
continue #nothing here
#create storage for control/data
ctrl_array_type = struct_kind * max_items
ctrl_array_struct = ctrl_array_type()
#target max size for API function
caps_length.value = max_items
HidStatus( get_control_caps(\
report_kind,
byref(ctrl_array_struct),
byref(caps_length),
ptr_preparsed_data) )
#keep reference of usages
for idx in range(caps_length.value):
usage_item = HidPUsageCaps( ctrl_array_struct[idx] )
#by report type
if report_kind not in self.usages_storage:
self.usages_storage[report_kind] = list()
self.usages_storage[report_kind].append( usage_item )
#also add report_id to known reports set
if report_kind not in self.report_set:
self.report_set[report_kind] = set()
self.report_set[report_kind].add( usage_item.report_id )
del ctrl_array_struct
del ctrl_array_type
# now is the time to consider the device opened, as report
# handling threads enforce it
self.__open_status = True
#now prepare the input report handler
self.__input_report_templates = dict()
if not output_only and self.hid_caps.input_report_byte_length and \
HidP_Input in self.report_set:
#first make templates for easy parsing input reports
for report_id in self.report_set[HidP_Input]:
self.__input_report_templates[report_id] = \
HidReport( self, HidP_Input, report_id )
#prepare input reports handlers
self._input_report_queue = HidDevice.InputReportQueue( \
self.max_input_queue_size,
self.hid_caps.input_report_byte_length)
self.__input_processing_thread = \
HidDevice.InputReportProcessingThread(self)
self.__reading_thread = HidDevice.InputReportReaderThread( \
self, self.hid_caps.input_report_byte_length) | Open HID device and obtain 'Collection Information'.
It effectively prepares the HidDevice object for reading and writing | Below is the the instruction that describes the task:
### Input:
Open HID device and obtain 'Collection Information'.
It effectively prepares the HidDevice object for reading and writing
### Response:
def open(self, output_only = False, shared = True):
"""Open HID device and obtain 'Collection Information'.
It effectively prepares the HidDevice object for reading and writing
"""
if self.is_opened():
raise HIDError("Device already opened")
sharing_flags = 0
if shared:
sharing_flags = winapi.FILE_SHARE_READ | winapi.FILE_SHARE_WRITE
hid_handle = winapi.CreateFile(
self.device_path,
winapi.GENERIC_READ | winapi.GENERIC_WRITE,
sharing_flags,
None, # no security
winapi.OPEN_EXISTING,
winapi.FILE_ATTRIBUTE_NORMAL | winapi.FILE_FLAG_OVERLAPPED,
0 )
if not hid_handle or hid_handle == INVALID_HANDLE_VALUE:
raise HIDError("Error opening HID device: %s\n"%self.product_name)
#get pre parsed data
ptr_preparsed_data = ctypes.c_void_p()
if not hid_dll.HidD_GetPreparsedData(int(hid_handle),
byref(ptr_preparsed_data)):
winapi.CloseHandle(int(hid_handle))
raise HIDError("Failure to get HID pre parsed data")
self.ptr_preparsed_data = ptr_preparsed_data
self.hid_handle = hid_handle
#get top level capabilities
self.hid_caps = winapi.HIDP_CAPS()
HidStatus( hid_dll.HidP_GetCaps(ptr_preparsed_data,
byref(self.hid_caps)) )
#proceed with button capabilities
caps_length = c_ulong()
all_items = [\
(HidP_Input, winapi.HIDP_BUTTON_CAPS,
self.hid_caps.number_input_button_caps,
hid_dll.HidP_GetButtonCaps
),
(HidP_Input, winapi.HIDP_VALUE_CAPS,
self.hid_caps.number_input_value_caps,
hid_dll.HidP_GetValueCaps
),
(HidP_Output, winapi.HIDP_BUTTON_CAPS,
self.hid_caps.number_output_button_caps,
hid_dll.HidP_GetButtonCaps
),
(HidP_Output, winapi.HIDP_VALUE_CAPS,
self.hid_caps.number_output_value_caps,
hid_dll.HidP_GetValueCaps
),
(HidP_Feature, winapi.HIDP_BUTTON_CAPS,
self.hid_caps.number_feature_button_caps,
hid_dll.HidP_GetButtonCaps
),
(HidP_Feature, winapi.HIDP_VALUE_CAPS,
self.hid_caps.number_feature_value_caps,
hid_dll.HidP_GetValueCaps
),
]
for report_kind, struct_kind, max_items, get_control_caps in all_items:
if not int(max_items):
continue #nothing here
#create storage for control/data
ctrl_array_type = struct_kind * max_items
ctrl_array_struct = ctrl_array_type()
#target max size for API function
caps_length.value = max_items
HidStatus( get_control_caps(\
report_kind,
byref(ctrl_array_struct),
byref(caps_length),
ptr_preparsed_data) )
#keep reference of usages
for idx in range(caps_length.value):
usage_item = HidPUsageCaps( ctrl_array_struct[idx] )
#by report type
if report_kind not in self.usages_storage:
self.usages_storage[report_kind] = list()
self.usages_storage[report_kind].append( usage_item )
#also add report_id to known reports set
if report_kind not in self.report_set:
self.report_set[report_kind] = set()
self.report_set[report_kind].add( usage_item.report_id )
del ctrl_array_struct
del ctrl_array_type
# now is the time to consider the device opened, as report
# handling threads enforce it
self.__open_status = True
#now prepare the input report handler
self.__input_report_templates = dict()
if not output_only and self.hid_caps.input_report_byte_length and \
HidP_Input in self.report_set:
#first make templates for easy parsing input reports
for report_id in self.report_set[HidP_Input]:
self.__input_report_templates[report_id] = \
HidReport( self, HidP_Input, report_id )
#prepare input reports handlers
self._input_report_queue = HidDevice.InputReportQueue( \
self.max_input_queue_size,
self.hid_caps.input_report_byte_length)
self.__input_processing_thread = \
HidDevice.InputReportProcessingThread(self)
self.__reading_thread = HidDevice.InputReportReaderThread( \
self, self.hid_caps.input_report_byte_length) |
def block(bdaddr):
'''
Block a specific bluetooth device by BD Address
CLI Example:
.. code-block:: bash
salt '*' bluetooth.block DE:AD:BE:EF:CA:FE
'''
if not salt.utils.validate.net.mac(bdaddr):
raise CommandExecutionError(
'Invalid BD address passed to bluetooth.block'
)
cmd = 'hciconfig {0} block'.format(bdaddr)
__salt__['cmd.run'](cmd).splitlines() | Block a specific bluetooth device by BD Address
CLI Example:
.. code-block:: bash
salt '*' bluetooth.block DE:AD:BE:EF:CA:FE | Below is the the instruction that describes the task:
### Input:
Block a specific bluetooth device by BD Address
CLI Example:
.. code-block:: bash
salt '*' bluetooth.block DE:AD:BE:EF:CA:FE
### Response:
def block(bdaddr):
'''
Block a specific bluetooth device by BD Address
CLI Example:
.. code-block:: bash
salt '*' bluetooth.block DE:AD:BE:EF:CA:FE
'''
if not salt.utils.validate.net.mac(bdaddr):
raise CommandExecutionError(
'Invalid BD address passed to bluetooth.block'
)
cmd = 'hciconfig {0} block'.format(bdaddr)
__salt__['cmd.run'](cmd).splitlines() |
def get_content(self, obj):
"""All content for a state's page on an election day."""
election_day = ElectionDay.objects.get(
date=self.context['election_date'])
division = obj
# In case of house special election,
# use parent division.
if obj.level.name == DivisionLevel.DISTRICT:
division = obj.parent
special = True if self.context.get('special') else False
return PageContent.objects.division_content(
election_day,
division,
special
) | All content for a state's page on an election day. | Below is the the instruction that describes the task:
### Input:
All content for a state's page on an election day.
### Response:
def get_content(self, obj):
"""All content for a state's page on an election day."""
election_day = ElectionDay.objects.get(
date=self.context['election_date'])
division = obj
# In case of house special election,
# use parent division.
if obj.level.name == DivisionLevel.DISTRICT:
division = obj.parent
special = True if self.context.get('special') else False
return PageContent.objects.division_content(
election_day,
division,
special
) |
def pub_dates(soup):
"""
return a list of all the pub dates
"""
pub_dates = []
tags = raw_parser.pub_date(soup)
for tag in tags:
pub_date = OrderedDict()
copy_attribute(tag.attrs, 'publication-format', pub_date)
copy_attribute(tag.attrs, 'date-type', pub_date)
copy_attribute(tag.attrs, 'pub-type', pub_date)
for tag_attr in ["date-type", "pub-type"]:
if tag_attr in tag.attrs:
(day, month, year) = ymd(tag)
pub_date['day'] = day
pub_date['month'] = month
pub_date['year'] = year
pub_date['date'] = date_struct_nn(year, month, day)
pub_dates.append(pub_date)
return pub_dates | return a list of all the pub dates | Below is the the instruction that describes the task:
### Input:
return a list of all the pub dates
### Response:
def pub_dates(soup):
"""
return a list of all the pub dates
"""
pub_dates = []
tags = raw_parser.pub_date(soup)
for tag in tags:
pub_date = OrderedDict()
copy_attribute(tag.attrs, 'publication-format', pub_date)
copy_attribute(tag.attrs, 'date-type', pub_date)
copy_attribute(tag.attrs, 'pub-type', pub_date)
for tag_attr in ["date-type", "pub-type"]:
if tag_attr in tag.attrs:
(day, month, year) = ymd(tag)
pub_date['day'] = day
pub_date['month'] = month
pub_date['year'] = year
pub_date['date'] = date_struct_nn(year, month, day)
pub_dates.append(pub_date)
return pub_dates |
def status(cls):
"""Retrieve global status from status.gandi.net."""
return cls.json_get('%s/status' % cls.api_url, empty_key=True,
send_key=False) | Retrieve global status from status.gandi.net. | Below is the the instruction that describes the task:
### Input:
Retrieve global status from status.gandi.net.
### Response:
def status(cls):
"""Retrieve global status from status.gandi.net."""
return cls.json_get('%s/status' % cls.api_url, empty_key=True,
send_key=False) |
def text_editor(file='', background=False, return_cmd=False):
'''Starts the default graphical text editor.
Start the user's preferred graphical text editor, optionally with a file.
Args:
file (str) : The file to be opened with the editor. Defaults to an empty string (i.e. no file).
background (bool): Runs the editor in the background, instead of waiting for completion. Defaults to ``False``.
return_cmd (bool): Returns the command (str) to run the editor instead of running it. Defaults to ``False``.
Returns:
str: Only if ``return_cmd``, the command to run the editor is returned. Else returns nothing.
'''
desktop_env = system.get_name()
if desktop_env == 'windows':
editor_cmd_str = system.get_cmd_out(
['ftype', 'textfile']).split('=', 1)[1]
elif desktop_env == 'mac':
editor_cmd_str = 'open -a' + system.get_cmd_out(
['def',
'read',
'com.apple.LaunchServices',
'LSHandlers'
'-array'
'{LSHandlerContentType=public.plain-text;}']
)
else:
        # Use default handler for MIME-type text/plain
editor_cmd_str = system.get_cmd_out(
['xdg-mime', 'query', 'default', 'text/plain'])
if '\n' in editor_cmd_str:
# Sometimes locate returns multiple results
# use first one
editor_cmd_str = editor_cmd_str.split('\n')[0]
if editor_cmd_str.endswith('.desktop'):
# We don't use desktopfile.execute() in order to have working
# return_cmd and background
editor_cmd_str = desktopfile.parse(
desktopfile.locate(editor_cmd_str)[0])['Exec']
for i in editor_cmd_str.split():
if i.startswith('%'):
# %-style formatters
editor_cmd_str = editor_cmd_str.replace(i, '')
if i == '--new-document':
# Gedit
editor_cmd_str = editor_cmd_str.replace(i, '')
if file:
editor_cmd_str += ' {}'.format(shlex.quote(file))
if return_cmd:
return editor_cmd_str
text_editor_proc = sp.Popen([editor_cmd_str], shell=True)
if not background:
text_editor_proc.wait() | Starts the default graphical text editor.
Start the user's preferred graphical text editor, optionally with a file.
Args:
file (str) : The file to be opened with the editor. Defaults to an empty string (i.e. no file).
background (bool): Runs the editor in the background, instead of waiting for completion. Defaults to ``False``.
return_cmd (bool): Returns the command (str) to run the editor instead of running it. Defaults to ``False``.
Returns:
str: Only if ``return_cmd``, the command to run the editor is returned. Else returns nothing. | Below is the the instruction that describes the task:
### Input:
Starts the default graphical text editor.
Start the user's preferred graphical text editor, optionally with a file.
Args:
file (str) : The file to be opened with the editor. Defaults to an empty string (i.e. no file).
background (bool): Runs the editor in the background, instead of waiting for completion. Defaults to ``False``.
return_cmd (bool): Returns the command (str) to run the editor instead of running it. Defaults to ``False``.
Returns:
str: Only if ``return_cmd``, the command to run the editor is returned. Else returns nothing.
### Response:
def text_editor(file='', background=False, return_cmd=False):
'''Starts the default graphical text editor.
Start the user's preferred graphical text editor, optionally with a file.
Args:
file (str) : The file to be opened with the editor. Defaults to an empty string (i.e. no file).
background (bool): Runs the editor in the background, instead of waiting for completion. Defaults to ``False``.
return_cmd (bool): Returns the command (str) to run the editor instead of running it. Defaults to ``False``.
Returns:
str: Only if ``return_cmd``, the command to run the editor is returned. Else returns nothing.
'''
desktop_env = system.get_name()
if desktop_env == 'windows':
editor_cmd_str = system.get_cmd_out(
['ftype', 'textfile']).split('=', 1)[1]
elif desktop_env == 'mac':
editor_cmd_str = 'open -a' + system.get_cmd_out(
['def',
'read',
'com.apple.LaunchServices',
'LSHandlers'
'-array'
'{LSHandlerContentType=public.plain-text;}']
)
else:
        # Use default handler for MIME-type text/plain
editor_cmd_str = system.get_cmd_out(
['xdg-mime', 'query', 'default', 'text/plain'])
if '\n' in editor_cmd_str:
# Sometimes locate returns multiple results
# use first one
editor_cmd_str = editor_cmd_str.split('\n')[0]
if editor_cmd_str.endswith('.desktop'):
# We don't use desktopfile.execute() in order to have working
# return_cmd and background
editor_cmd_str = desktopfile.parse(
desktopfile.locate(editor_cmd_str)[0])['Exec']
for i in editor_cmd_str.split():
if i.startswith('%'):
# %-style formatters
editor_cmd_str = editor_cmd_str.replace(i, '')
if i == '--new-document':
# Gedit
editor_cmd_str = editor_cmd_str.replace(i, '')
if file:
editor_cmd_str += ' {}'.format(shlex.quote(file))
if return_cmd:
return editor_cmd_str
text_editor_proc = sp.Popen([editor_cmd_str], shell=True)
if not background:
text_editor_proc.wait() |
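A usage sketch for text_editor above; the file name is illustrative and the calls assume the function is importable from its package.

# Open a file in the user's preferred graphical editor without blocking,
# or just inspect the command that would be run.
text_editor('notes.txt', background=True)
cmd = text_editor('notes.txt', return_cmd=True)
print(cmd)  # e.g. 'gedit notes.txt' on a desktop whose text/plain handler is gedit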
def _process_using_meta_feature_generator(self, X, meta_feature_generator):
"""Process using secondary learner meta-feature generator
Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba,
this internal method gives the ability to use any string. Just make sure secondary learner
has the method.
Args:
X (array-like): Features array
meta_feature_generator (str, unicode): Method for use by secondary learner
"""
all_learner_meta_features = []
for idx, base_learner in enumerate(self.base_learners):
single_learner_meta_features = getattr(base_learner,
self.meta_feature_generators[idx])(X)
if len(single_learner_meta_features.shape) == 1:
single_learner_meta_features = single_learner_meta_features.reshape(-1, 1)
all_learner_meta_features.append(single_learner_meta_features)
all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1)
out = getattr(self.secondary_learner, meta_feature_generator)(all_learner_meta_features)
return out | Process using secondary learner meta-feature generator
Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba,
this internal method gives the ability to use any string. Just make sure secondary learner
has the method.
Args:
X (array-like): Features array
meta_feature_generator (str, unicode): Method for use by secondary learner | Below is the the instruction that describes the task:
### Input:
Process using secondary learner meta-feature generator
Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba,
this internal method gives the ability to use any string. Just make sure secondary learner
has the method.
Args:
X (array-like): Features array
meta_feature_generator (str, unicode): Method for use by secondary learner
### Response:
def _process_using_meta_feature_generator(self, X, meta_feature_generator):
"""Process using secondary learner meta-feature generator
Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba,
this internal method gives the ability to use any string. Just make sure secondary learner
has the method.
Args:
X (array-like): Features array
meta_feature_generator (str, unicode): Method for use by secondary learner
"""
all_learner_meta_features = []
for idx, base_learner in enumerate(self.base_learners):
single_learner_meta_features = getattr(base_learner,
self.meta_feature_generators[idx])(X)
if len(single_learner_meta_features.shape) == 1:
single_learner_meta_features = single_learner_meta_features.reshape(-1, 1)
all_learner_meta_features.append(single_learner_meta_features)
all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1)
out = getattr(self.secondary_learner, meta_feature_generator)(all_learner_meta_features)
return out |
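A minimal standalone sketch of how such stacked meta-features can be assembled, using scikit-learn estimators as stand-in base learners (the data, estimators, and names below are illustrative and not taken from the original class):
import numpy as np
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=200, random_state=0)
base_learners = [LogisticRegression(max_iter=1000).fit(X, y), DecisionTreeClassifier().fit(X, y)]
meta_feature_generators = ["predict_proba", "predict"]

blocks = []
for learner, generator in zip(base_learners, meta_feature_generators):
    feats = getattr(learner, generator)(X)
    if feats.ndim == 1:                      # 1-D outputs such as predict() become a single column
        feats = feats.reshape(-1, 1)
    blocks.append(feats)
meta_X = np.concatenate(blocks, axis=1)      # one row per sample, columns from every base learner
secondary_learner = LogisticRegression(max_iter=1000).fit(meta_X, y)
print(secondary_learner.predict(meta_X)[:5])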
def run_in_parallel(programs, nsamples, cxn, shuffle=True):
"""
Take sequences of Protoquil programs on disjoint qubits and execute a single sequence of
programs that executes the input programs in parallel. Optionally randomize within each
qubit-specific sequence.
The programs are passed as a 2d array of Quil programs, where the (first) outer axis iterates
over disjoint sets of qubits that the programs involve and the inner axis iterates over a
sequence of related programs, e.g., tomography sequences, on the same set of qubits.
:param Union[np.ndarray,List[List[Program]]] programs: A rectangular list of lists, or a 2d
array of Quil Programs. The outer list iterates over disjoint qubit groups as targets, the
inner list over programs to run on those qubits, e.g., tomographic sequences.
:param int nsamples: Number of repetitions for executing each Program.
:param QPUConnection|QVMConnection cxn: The quantum machine connection.
:param bool shuffle: If True, the order of each qubit specific sequence (2nd axis) is randomized
Default is True.
:return: An array of 2d arrays that provide bitstring histograms for each input program.
The axis of the outer array iterates over the disjoint qubit groups, the outer axis of the
inner 2d array iterates over the programs for that group and the inner most axis iterates
over all possible bitstrings for the qubit group under consideration.
:rtype np.array
"""
if shuffle:
n_groups = len(programs)
n_progs_per_group = len(programs[0])
permutations = np.outer(np.ones(n_groups, dtype=int),
np.arange(n_progs_per_group, dtype=int))
inverse_permutations = np.zeros_like(permutations)
for jj in range(n_groups):
# in-place operation
np.random.shuffle(permutations[jj])
# store inverse permutation
inverse_permutations[jj] = np.argsort(permutations[jj])
# apply to programs
shuffled_programs = np.empty((n_groups, n_progs_per_group), dtype=object)
for jdx, (progsj, pj) in enumerate(zip(programs, permutations)):
shuffled_programs[jdx] = [progsj[pjk] for pjk in pj]
shuffled_results = _run_in_parallel(shuffled_programs, nsamples, cxn)
# reverse shuffling of results
results = np.array([resultsj[pj]
for resultsj, pj in zip(shuffled_results, inverse_permutations)])
return results
else:
return _run_in_parallel(programs, nsamples, cxn) | Take sequences of Protoquil programs on disjoint qubits and execute a single sequence of
programs that executes the input programs in parallel. Optionally randomize within each
qubit-specific sequence.
The programs are passed as a 2d array of Quil programs, where the (first) outer axis iterates
over disjoint sets of qubits that the programs involve and the inner axis iterates over a
sequence of related programs, e.g., tomography sequences, on the same set of qubits.
:param Union[np.ndarray,List[List[Program]]] programs: A rectangular list of lists, or a 2d
array of Quil Programs. The outer list iterates over disjoint qubit groups as targets, the
inner list over programs to run on those qubits, e.g., tomographic sequences.
:param int nsamples: Number of repetitions for executing each Program.
:param QPUConnection|QVMConnection cxn: The quantum machine connection.
:param bool shuffle: If True, the order of each qubit specific sequence (2nd axis) is randomized
Default is True.
:return: An array of 2d arrays that provide bitstring histograms for each input program.
The axis of the outer array iterates over the disjoint qubit groups, the outer axis of the
inner 2d array iterates over the programs for that group and the inner most axis iterates
over all possible bitstrings for the qubit group under consideration.
:rtype np.array | Below is the the instruction that describes the task:
### Input:
Take sequences of Protoquil programs on disjoint qubits and execute a single sequence of
programs that executes the input programs in parallel. Optionally randomize within each
qubit-specific sequence.
The programs are passed as a 2d array of Quil programs, where the (first) outer axis iterates
over disjoint sets of qubits that the programs involve and the inner axis iterates over a
sequence of related programs, e.g., tomography sequences, on the same set of qubits.
:param Union[np.ndarray,List[List[Program]]] programs: A rectangular list of lists, or a 2d
array of Quil Programs. The outer list iterates over disjoint qubit groups as targets, the
inner list over programs to run on those qubits, e.g., tomographic sequences.
:param int nsamples: Number of repetitions for executing each Program.
:param QPUConnection|QVMConnection cxn: The quantum machine connection.
:param bool shuffle: If True, the order of each qubit specific sequence (2nd axis) is randomized
Default is True.
:return: An array of 2d arrays that provide bitstring histograms for each input program.
The axis of the outer array iterates over the disjoint qubit groups, the outer axis of the
inner 2d array iterates over the programs for that group and the inner most axis iterates
over all possible bitstrings for the qubit group under consideration.
:rtype np.array
### Response:
def run_in_parallel(programs, nsamples, cxn, shuffle=True):
"""
Take sequences of Protoquil programs on disjoint qubits and execute a single sequence of
programs that executes the input programs in parallel. Optionally randomize within each
qubit-specific sequence.
The programs are passed as a 2d array of Quil programs, where the (first) outer axis iterates
over disjoint sets of qubits that the programs involve and the inner axis iterates over a
sequence of related programs, e.g., tomography sequences, on the same set of qubits.
:param Union[np.ndarray,List[List[Program]]] programs: A rectangular list of lists, or a 2d
array of Quil Programs. The outer list iterates over disjoint qubit groups as targets, the
inner list over programs to run on those qubits, e.g., tomographic sequences.
:param int nsamples: Number of repetitions for executing each Program.
:param QPUConnection|QVMConnection cxn: The quantum machine connection.
:param bool shuffle: If True, the order of each qubit specific sequence (2nd axis) is randomized
Default is True.
:return: An array of 2d arrays that provide bitstring histograms for each input program.
The axis of the outer array iterates over the disjoint qubit groups, the outer axis of the
inner 2d array iterates over the programs for that group and the inner most axis iterates
over all possible bitstrings for the qubit group under consideration.
:rtype np.array
"""
if shuffle:
n_groups = len(programs)
n_progs_per_group = len(programs[0])
permutations = np.outer(np.ones(n_groups, dtype=int),
np.arange(n_progs_per_group, dtype=int))
inverse_permutations = np.zeros_like(permutations)
for jj in range(n_groups):
# in-place operation
np.random.shuffle(permutations[jj])
# store inverse permutation
inverse_permutations[jj] = np.argsort(permutations[jj])
# apply to programs
shuffled_programs = np.empty((n_groups, n_progs_per_group), dtype=object)
for jdx, (progsj, pj) in enumerate(zip(programs, permutations)):
shuffled_programs[jdx] = [progsj[pjk] for pjk in pj]
shuffled_results = _run_in_parallel(shuffled_programs, nsamples, cxn)
# reverse shuffling of results
results = np.array([resultsj[pj]
for resultsj, pj in zip(shuffled_results, inverse_permutations)])
return results
else:
return _run_in_parallel(programs, nsamples, cxn) |
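The shuffle/un-shuffle bookkeeping used above can be seen in isolation with plain integers standing in for programs and results (illustrative sketch only):
import numpy as np

perm = np.arange(5)
np.random.shuffle(perm)                  # order in which the "programs" would be run
inverse = np.argsort(perm)               # permutation that undoes the shuffle

programs = np.array([10, 11, 12, 13, 14])
shuffled_results = programs[perm] * 2    # pretend execution in shuffled order
results = shuffled_results[inverse]      # restored to the original program order
assert (results == programs * 2).all()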
def vq_discrete_unbottleneck(x, hparams):
"""Simple undiscretization from vector quantized representation."""
x_shape = common_layers.shape_list(x)
bottleneck_size = 2**hparams.bottleneck_bits
means = hparams.means
x_flat = tf.reshape(x, [-1, bottleneck_size])
result = tf.matmul(x_flat, means)
result = tf.reshape(result, x_shape[:-1] + [hparams.hidden_size])
return result | Simple undiscretization from vector quantized representation. | Below is the the instruction that describes the task:
### Input:
Simple undiscretization from vector quantized representation.
### Response:
def vq_discrete_unbottleneck(x, hparams):
"""Simple undiscretization from vector quantized representation."""
x_shape = common_layers.shape_list(x)
bottleneck_size = 2**hparams.bottleneck_bits
means = hparams.means
x_flat = tf.reshape(x, [-1, bottleneck_size])
result = tf.matmul(x_flat, means)
result = tf.reshape(result, x_shape[:-1] + [hparams.hidden_size])
return result |
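As a rough numpy analogue (shapes and names are assumptions, not part of the original TensorFlow code), the un-bottleneck is a matrix product of one-hot codes with the codebook of means:
import numpy as np

bottleneck_bits = 2
hidden_size = 3
means = np.random.randn(2 ** bottleneck_bits, hidden_size)   # codebook: 4 entries of size 3

codes = np.eye(2 ** bottleneck_bits)[[0, 3, 1]]              # three one-hot codes
dense = codes @ means                                        # recovered hidden vectors, shape (3, 3)
print(dense.shape)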
def sort_download_list(self):
"""
Method for sorting the list of download requests. Band images have priority before metadata files. If bands
images or metadata files are specified with a list they will be sorted in the same order as in the list.
Otherwise they will be sorted alphabetically (band B8A will be between B08 and B09).
"""
def aws_sort_function(download_request):
data_name = download_request.properties['data_name']
if 'product_name' in download_request.properties:
product_name = download_request.properties['product_name']
else:
product_name = self._url_to_props(download_request.url)[0]
if data_name in self.bands:
return 0, product_name, self.bands.index(data_name)
return 1, product_name, self.metafiles.index(data_name)
self.download_list.sort(key=aws_sort_function) | Method for sorting the list of download requests. Band images have priority before metadata files. If bands
images or metadata files are specified with a list they will be sorted in the same order as in the list.
Otherwise they will be sorted alphabetically (band B8A will be between B08 and B09). | Below is the the instruction that describes the task:
### Input:
Method for sorting the list of download requests. Band images have priority before metadata files. If bands
images or metadata files are specified with a list they will be sorted in the same order as in the list.
Otherwise they will be sorted alphabetically (band B8A will be between B08 and B09).
### Response:
def sort_download_list(self):
"""
Method for sorting the list of download requests. Band images have priority before metadata files. If bands
images or metadata files are specified with a list they will be sorted in the same order as in the list.
Otherwise they will be sorted alphabetically (band B8A will be between B08 and B09).
"""
def aws_sort_function(download_request):
data_name = download_request.properties['data_name']
if 'product_name' in download_request.properties:
product_name = download_request.properties['product_name']
else:
product_name = self._url_to_props(download_request.url)[0]
if data_name in self.bands:
return 0, product_name, self.bands.index(data_name)
return 1, product_name, self.metafiles.index(data_name)
self.download_list.sort(key=aws_sort_function) |
def status(self):
"""Reads a command response status.
If there is no response message then the returned status message will
be an empty string.
Raises:
NNTPError: If data is required to be read from the socket and fails.
NNTPProtocolError: If the status line can't be parsed.
NNTPTemporaryError: For status code 400-499
NNTPPermanentError: For status code 500-599
Returns:
A tuple of status code (as an integer) and status message.
"""
line = next(self.__line_gen()).rstrip()
parts = line.split(None, 1)
try:
code, message = int(parts[0]), ""
except ValueError:
raise NNTPProtocolError(line)
if code < 100 or code >= 600:
raise NNTPProtocolError(line)
if len(parts) > 1:
message = parts[1]
if 400 <= code <= 499:
raise NNTPTemporaryError(code, message)
if 500 <= code <= 599:
raise NNTPPermanentError(code, message)
return code, message | Reads a command response status.
If there is no response message then the returned status message will
be an empty string.
Raises:
NNTPError: If data is required to be read from the socket and fails.
NNTPProtocolError: If the status line can't be parsed.
NNTPTemporaryError: For status code 400-499
NNTPPermanentError: For status code 500-599
Returns:
A tuple of status code (as an integer) and status message. | Below is the the instruction that describes the task:
### Input:
Reads a command response status.
If there is no response message then the returned status message will
be an empty string.
Raises:
NNTPError: If data is required to be read from the socket and fails.
NNTPProtocolError: If the status line can't be parsed.
NNTPTemporaryError: For status code 400-499
NNTPPermanentError: For status code 500-599
Returns:
A tuple of status code (as an integer) and status message.
### Response:
def status(self):
"""Reads a command response status.
If there is no response message then the returned status message will
be an empty string.
Raises:
NNTPError: If data is required to be read from the socket and fails.
NNTPProtocolError: If the status line can't be parsed.
NNTPTemporaryError: For status code 400-499
NNTPPermanentError: For status code 500-599
Returns:
A tuple of status code (as an integer) and status message.
"""
line = next(self.__line_gen()).rstrip()
parts = line.split(None, 1)
try:
code, message = int(parts[0]), ""
except ValueError:
raise NNTPProtocolError(line)
if code < 100 or code >= 600:
raise NNTPProtocolError(line)
if len(parts) > 1:
message = parts[1]
if 400 <= code <= 499:
raise NNTPTemporaryError(code, message)
if 500 <= code <= 599:
raise NNTPPermanentError(code, message)
return code, message |
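The status-line handling can be exercised without the socket layer; this sketch mirrors the split/int logic above on literal lines (hypothetical inputs):
def parse_status(line):
    parts = line.rstrip().split(None, 1)
    code = int(parts[0])                     # a ValueError here means the line is malformed
    message = parts[1] if len(parts) > 1 else ""
    return code, message

print(parse_status("200 news.example.com ready"))   # (200, 'news.example.com ready')
print(parse_status("411\r\n"))                      # (411, '')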
def make_entity_name(name):
"""Creates a valid PlantUML entity name from the given value."""
invalid_chars = "-=!#$%^&*[](){}/~'`<>:;"
for char in invalid_chars:
name = name.replace(char, "_")
return name | Creates a valid PlantUML entity name from the given value. | Below is the the instruction that describes the task:
### Input:
Creates a valid PlantUML entity name from the given value.
### Response:
def make_entity_name(name):
"""Creates a valid PlantUML entity name from the given value."""
invalid_chars = "-=!#$%^&*[](){}/~'`<>:;"
for char in invalid_chars:
name = name.replace(char, "_")
return name |
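Example usage, assuming the function as defined above:
print(make_entity_name("my-class#1"))   # my_class_1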
async def delete(self, *, reason=None):
"""|coro|
Deletes the role.
You must have the :attr:`~Permissions.manage_roles` permission to
use this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this role. Shows up on the audit log.
Raises
--------
Forbidden
You do not have permissions to delete the role.
HTTPException
Deleting the role failed.
"""
await self._state.http.delete_role(self.guild.id, self.id, reason=reason) | |coro|
Deletes the role.
You must have the :attr:`~Permissions.manage_roles` permission to
use this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this role. Shows up on the audit log.
Raises
--------
Forbidden
You do not have permissions to delete the role.
HTTPException
Deleting the role failed. | Below is the the instruction that describes the task:
### Input:
|coro|
Deletes the role.
You must have the :attr:`~Permissions.manage_roles` permission to
use this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this role. Shows up on the audit log.
Raises
--------
Forbidden
You do not have permissions to delete the role.
HTTPException
Deleting the role failed.
### Response:
async def delete(self, *, reason=None):
"""|coro|
Deletes the role.
You must have the :attr:`~Permissions.manage_roles` permission to
use this.
Parameters
-----------
reason: Optional[:class:`str`]
The reason for deleting this role. Shows up on the audit log.
Raises
--------
Forbidden
You do not have permissions to delete the role.
HTTPException
Deleting the role failed.
"""
await self._state.http.delete_role(self.guild.id, self.id, reason=reason) |
def get_hg_revision(repopath):
"""Return Mercurial revision for the repository located at repopath
Result is a tuple (global, local, branch), with None values on error
For example:
>>> get_hg_revision(".")
('eba7273c69df+', '2015+', 'default')
"""
try:
assert osp.isdir(osp.join(repopath, '.hg'))
proc = programs.run_program('hg', ['id', '-nib', repopath])
output, _err = proc.communicate()
# output is now: ('eba7273c69df+ 2015+ default\n', None)
# Split 2 times max to allow spaces in branch names.
return tuple(output.decode().strip().split(None, 2))
except (subprocess.CalledProcessError, AssertionError, AttributeError,
OSError):
return (None, None, None) | Return Mercurial revision for the repository located at repopath
Result is a tuple (global, local, branch), with None values on error
For example:
>>> get_hg_revision(".")
('eba7273c69df+', '2015+', 'default') | Below is the the instruction that describes the task:
### Input:
Return Mercurial revision for the repository located at repopath
Result is a tuple (global, local, branch), with None values on error
For example:
>>> get_hg_revision(".")
('eba7273c69df+', '2015+', 'default')
### Response:
def get_hg_revision(repopath):
"""Return Mercurial revision for the repository located at repopath
Result is a tuple (global, local, branch), with None values on error
For example:
>>> get_hg_revision(".")
('eba7273c69df+', '2015+', 'default')
"""
try:
assert osp.isdir(osp.join(repopath, '.hg'))
proc = programs.run_program('hg', ['id', '-nib', repopath])
output, _err = proc.communicate()
# output is now: ('eba7273c69df+ 2015+ default\n', None)
# Split 2 times max to allow spaces in branch names.
return tuple(output.decode().strip().split(None, 2))
except (subprocess.CalledProcessError, AssertionError, AttributeError,
OSError):
return (None, None, None) |
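The parsing step in isolation: splitting at most twice keeps branch names that contain spaces intact (the output string below is made up):
output = b"eba7273c69df+ 2015+ feature branch\n"
print(tuple(output.decode().strip().split(None, 2)))
# ('eba7273c69df+', '2015+', 'feature branch')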
def close(self):
"""Closes this response."""
if self._connection:
self._connection.close()
self._response.close() | Closes this response. | Below is the the instruction that describes the task:
### Input:
Closes this response.
### Response:
def close(self):
"""Closes this response."""
if self._connection:
self._connection.close()
self._response.close() |
def train_epoch(model:nn.Module, dl:DataLoader, opt:optim.Optimizer, loss_func:LossFunction)->None:
"Simple training of `model` for 1 epoch of `dl` using optim `opt` and loss function `loss_func`."
model.train()
for xb,yb in dl:
loss = loss_func(model(xb), yb)
loss.backward()
opt.step()
opt.zero_grad() | Simple training of `model` for 1 epoch of `dl` using optim `opt` and loss function `loss_func`. | Below is the the instruction that describes the task:
### Input:
Simple training of `model` for 1 epoch of `dl` using optim `opt` and loss function `loss_func`.
### Response:
def train_epoch(model:nn.Module, dl:DataLoader, opt:optim.Optimizer, loss_func:LossFunction)->None:
"Simple training of `model` for 1 epoch of `dl` using optim `opt` and loss function `loss_func`."
model.train()
for xb,yb in dl:
loss = loss_func(model(xb), yb)
loss.backward()
opt.step()
opt.zero_grad() |
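A hypothetical call with a tiny model and synthetic data; every name below is a stand-in, only train_epoch itself comes from the record above:
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset

xs, ys = torch.randn(64, 10), torch.randint(0, 2, (64,))
dl = DataLoader(TensorDataset(xs, ys), batch_size=16)
model = nn.Linear(10, 2)
opt = optim.SGD(model.parameters(), lr=0.1)
train_epoch(model, dl, opt, nn.CrossEntropyLoss())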
def getOneMessage ( self ):
"""
I pull one complete message off the buffer and return it decoded
as a dict. If there is no complete message in the buffer, I
return None.
        Note that the buffer can contain more than one message. You
should therefore call me in a loop until I return None.
"""
( mbytes, hbytes ) = self._findMessageBytes ( self.buffer )
if not mbytes:
return None
msgdata = self.buffer[:mbytes]
self.buffer = self.buffer[mbytes:]
hdata = msgdata[:hbytes]
elems = hdata.split ( '\n' )
cmd = elems.pop ( 0 )
headers = {}
# We can't use a simple split because the value can legally contain
# colon characters (for example, the session returned by ActiveMQ).
for e in elems:
try:
                i = e.index ( ':' )
except ValueError:
continue
k = e[:i].strip()
v = e[i+1:].strip()
headers [ k ] = v
# hbytes points to the start of the '\n\n' at the end of the header,
# so 2 bytes beyond this is the start of the body. The body EXCLUDES
# the final two bytes, which are '\x00\n'. Note that these 2 bytes
# are UNRELATED to the 2-byte '\n\n' that Frame.pack() used to insert
# into the data stream.
body = msgdata[hbytes+2:-2]
msg = { 'cmd' : cmd,
'headers' : headers,
'body' : body,
}
return msg | I pull one complete message off the buffer and return it decoded
as a dict. If there is no complete message in the buffer, I
return None.
Note that the buffer can contain more than one message. You
should therefore call me in a loop until I return None. | Below is the the instruction that describes the task:
### Input:
I pull one complete message off the buffer and return it decoded
as a dict. If there is no complete message in the buffer, I
return None.
Note that the buffer can contain more than one message. You
should therefore call me in a loop until I return None.
### Response:
def getOneMessage ( self ):
"""
I pull one complete message off the buffer and return it decoded
as a dict. If there is no complete message in the buffer, I
return None.
        Note that the buffer can contain more than one message. You
should therefore call me in a loop until I return None.
"""
( mbytes, hbytes ) = self._findMessageBytes ( self.buffer )
if not mbytes:
return None
msgdata = self.buffer[:mbytes]
self.buffer = self.buffer[mbytes:]
hdata = msgdata[:hbytes]
elems = hdata.split ( '\n' )
cmd = elems.pop ( 0 )
headers = {}
# We can't use a simple split because the value can legally contain
# colon characters (for example, the session returned by ActiveMQ).
for e in elems:
try:
                i = e.index ( ':' )
except ValueError:
continue
k = e[:i].strip()
v = e[i+1:].strip()
headers [ k ] = v
# hbytes points to the start of the '\n\n' at the end of the header,
# so 2 bytes beyond this is the start of the body. The body EXCLUDES
# the final two bytes, which are '\x00\n'. Note that these 2 bytes
# are UNRELATED to the 2-byte '\n\n' that Frame.pack() used to insert
# into the data stream.
body = msgdata[hbytes+2:-2]
msg = { 'cmd' : cmd,
'headers' : headers,
'body' : body,
}
return msg |
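The header parsing can be tried on a small STOMP-like frame by itself (hypothetical frame text; the byte-offset handling around hbytes is omitted):
hdata = "MESSAGE\ndestination:/queue/a\nsession:ID:host-1234"
elems = hdata.split("\n")
cmd = elems.pop(0)
headers = {}
for e in elems:
    i = e.index(":")                         # the real code skips colon-less lines via try/except
    headers[e[:i].strip()] = e[i + 1:].strip()
print(cmd, headers)
# MESSAGE {'destination': '/queue/a', 'session': 'ID:host-1234'}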
def do_struct(self, subcmd, opts, message):
"""${cmd_name}: get the structure of the specified message
${cmd_usage}
${cmd_option_list}
"""
client = MdClient(self.maildir, filesystem=self.filesystem)
as_json = getattr(opts, "json", False)
client.getstruct(message, as_json=as_json, stream=self.stdout) | ${cmd_name}: get the structure of the specified message
${cmd_usage}
${cmd_option_list} | Below is the the instruction that describes the task:
### Input:
${cmd_name}: get the structure of the specified message
${cmd_usage}
${cmd_option_list}
### Response:
def do_struct(self, subcmd, opts, message):
"""${cmd_name}: get the structure of the specified message
${cmd_usage}
${cmd_option_list}
"""
client = MdClient(self.maildir, filesystem=self.filesystem)
as_json = getattr(opts, "json", False)
client.getstruct(message, as_json=as_json, stream=self.stdout) |
def format_records(records):
"""Serialise multiple records"""
formatted = list()
for record_ in records:
formatted.append(format_record(record_))
return formatted | Serialise multiple records | Below is the the instruction that describes the task:
### Input:
Serialise multiple records
### Response:
def format_records(records):
"""Serialise multiple records"""
formatted = list()
for record_ in records:
formatted.append(format_record(record_))
return formatted |
def get_degree_cols(df):
"""
Take in a pandas DataFrame, and return a list of columns
that are in that DataFrame AND should be between 0 - 360 degrees.
"""
vals = ['lon_w', 'lon_e', 'lat_lon_precision', 'pole_lon',
'paleolon', 'paleolon_sigma',
'lon', 'lon_sigma', 'vgp_lon', 'paleo_lon', 'paleo_lon_sigma',
'azimuth', 'azimuth_dec_correction', 'dir_dec',
'geographic_precision', 'bed_dip_direction']
relevant_cols = list(set(vals).intersection(df.columns))
return relevant_cols | Take in a pandas DataFrame, and return a list of columns
that are in that DataFrame AND should be between 0 - 360 degrees. | Below is the the instruction that describes the task:
### Input:
Take in a pandas DataFrame, and return a list of columns
that are in that DataFrame AND should be between 0 - 360 degrees.
### Response:
def get_degree_cols(df):
"""
Take in a pandas DataFrame, and return a list of columns
that are in that DataFrame AND should be between 0 - 360 degrees.
"""
vals = ['lon_w', 'lon_e', 'lat_lon_precision', 'pole_lon',
'paleolon', 'paleolon_sigma',
'lon', 'lon_sigma', 'vgp_lon', 'paleo_lon', 'paleo_lon_sigma',
'azimuth', 'azimuth_dec_correction', 'dir_dec',
'geographic_precision', 'bed_dip_direction']
relevant_cols = list(set(vals).intersection(df.columns))
return relevant_cols |
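Example usage with a small frame (hypothetical columns; note that the set intersection does not preserve column order):
import pandas as pd

df = pd.DataFrame(columns=["site", "dir_dec", "lon", "age"])
print(get_degree_cols(df))   # e.g. ['dir_dec', 'lon'], order not guaranteed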
def write_eval_records(bt_table, game_data, last_game):
"""Write all eval_records to eval_table
In addition to writing new rows table_state must be updated in
row `table_state` columns `metadata:eval_game_counter`
Args:
bt_table: bigtable table to add rows to.
game_data: metadata pairs (column name, value) for each eval record.
last_game: last_game in metadata:table_state
"""
eval_num = last_game
# Each column counts as a mutation so max rows is ~10000
GAMES_PER_COMMIT = 2000
for games in grouper(tqdm(game_data), GAMES_PER_COMMIT):
assert bt_table.read_row(EVAL_PREFIX.format(eval_num)), "Prev row doesn't exists"
assert bt_table.read_row(EVAL_PREFIX.format(eval_num+1)) is None, "Row already exists"
rows = []
for i, metadata in enumerate(games):
eval_num += 1
row_name = EVAL_PREFIX.format(eval_num)
row = bt_table.row(row_name)
for column, value in metadata:
row.set_cell(METADATA, column, value)
rows.append(row)
# For each batch of games print a couple of the rows being added.
if i < 5 or i + 5 > len(games):
print("\t", i, row_name, metadata[6][1])
if eval_num == last_game + len(games):
test = input("Commit ('y'/'yes' required): ")
if test.lower() not in ('y', 'yes'):
break
# TODO(derek): Figure out how to condition on atomic counter update.
# Condition all updates on the current value of last_game
game_num_update = bt_table.row(TABLE_STATE)
game_num_update.set_cell(METADATA, EVAL_GAME_COUNTER, eval_num)
print(TABLE_STATE, eval_num)
response = bt_table.mutate_rows(rows)
# validate that all rows written successfully
any_bad = False
for i, status in enumerate(response):
      if status.code != 0:
print("Row number {} failed to write {}".format(i, status))
any_bad = True
if any_bad:
break
game_num_update.commit() | Write all eval_records to eval_table
In addition to writing new rows table_state must be updated in
row `table_state` columns `metadata:eval_game_counter`
Args:
bt_table: bigtable table to add rows to.
game_data: metadata pairs (column name, value) for each eval record.
last_game: last_game in metadata:table_state | Below is the the instruction that describes the task:
### Input:
Write all eval_records to eval_table
In addition to writing new rows table_state must be updated in
row `table_state` columns `metadata:eval_game_counter`
Args:
bt_table: bigtable table to add rows to.
game_data: metadata pairs (column name, value) for each eval record.
last_game: last_game in metadata:table_state
### Response:
def write_eval_records(bt_table, game_data, last_game):
"""Write all eval_records to eval_table
In addition to writing new rows table_state must be updated in
row `table_state` columns `metadata:eval_game_counter`
Args:
bt_table: bigtable table to add rows to.
game_data: metadata pairs (column name, value) for each eval record.
last_game: last_game in metadata:table_state
"""
eval_num = last_game
# Each column counts as a mutation so max rows is ~10000
GAMES_PER_COMMIT = 2000
for games in grouper(tqdm(game_data), GAMES_PER_COMMIT):
assert bt_table.read_row(EVAL_PREFIX.format(eval_num)), "Prev row doesn't exists"
assert bt_table.read_row(EVAL_PREFIX.format(eval_num+1)) is None, "Row already exists"
rows = []
for i, metadata in enumerate(games):
eval_num += 1
row_name = EVAL_PREFIX.format(eval_num)
row = bt_table.row(row_name)
for column, value in metadata:
row.set_cell(METADATA, column, value)
rows.append(row)
# For each batch of games print a couple of the rows being added.
if i < 5 or i + 5 > len(games):
print("\t", i, row_name, metadata[6][1])
if eval_num == last_game + len(games):
test = input("Commit ('y'/'yes' required): ")
if test.lower() not in ('y', 'yes'):
break
# TODO(derek): Figure out how to condition on atomic counter update.
# Condition all updates on the current value of last_game
game_num_update = bt_table.row(TABLE_STATE)
game_num_update.set_cell(METADATA, EVAL_GAME_COUNTER, eval_num)
print(TABLE_STATE, eval_num)
response = bt_table.mutate_rows(rows)
# validate that all rows written successfully
any_bad = False
for i, status in enumerate(response):
      if status.code != 0:
print("Row number {} failed to write {}".format(i, status))
any_bad = True
if any_bad:
break
game_num_update.commit() |
def add_file(self, name, required=False, error=None, extensions=None):
""" Add a file field to parse on request (uploads) """
if name is None:
return
self.file_arguments.append(dict(
name=name,
required=required,
error=error,
extensions=extensions)) | Add a file field to parse on request (uploads) | Below is the the instruction that describes the task:
### Input:
Add a file field to parse on request (uploads)
### Response:
def add_file(self, name, required=False, error=None, extensions=None):
""" Add a file field to parse on request (uploads) """
if name is None:
return
self.file_arguments.append(dict(
name=name,
required=required,
error=error,
extensions=extensions)) |
def uniform_binning_correction(x, n_bits=8):
"""Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).
Args:
x: 4-D Tensor of shape (NHWC)
n_bits: optional.
Returns:
x: x ~ U(x, x + 1.0 / 256)
objective: Equivalent to -q(x)*log(q(x)).
"""
n_bins = 2**n_bits
batch_size, height, width, n_channels = common_layers.shape_list(x)
hwc = float(height * width * n_channels)
x = x + tf.random_uniform(
shape=(batch_size, height, width, n_channels),
minval=0.0, maxval=1.0/n_bins)
objective = -np.log(n_bins) * hwc * tf.ones(batch_size)
return x, objective | Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).
Args:
x: 4-D Tensor of shape (NHWC)
n_bits: optional.
Returns:
x: x ~ U(x, x + 1.0 / 256)
objective: Equivalent to -q(x)*log(q(x)). | Below is the the instruction that describes the task:
### Input:
Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).
Args:
x: 4-D Tensor of shape (NHWC)
n_bits: optional.
Returns:
x: x ~ U(x, x + 1.0 / 256)
objective: Equivalent to -q(x)*log(q(x)).
### Response:
def uniform_binning_correction(x, n_bits=8):
"""Replaces x^i with q^i(x) = U(x, x + 1.0 / 256.0).
Args:
x: 4-D Tensor of shape (NHWC)
n_bits: optional.
Returns:
x: x ~ U(x, x + 1.0 / 256)
objective: Equivalent to -q(x)*log(q(x)).
"""
n_bins = 2**n_bits
batch_size, height, width, n_channels = common_layers.shape_list(x)
hwc = float(height * width * n_channels)
x = x + tf.random_uniform(
shape=(batch_size, height, width, n_channels),
minval=0.0, maxval=1.0/n_bins)
objective = -np.log(n_bins) * hwc * tf.ones(batch_size)
return x, objective |
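A numpy sketch of the same uniform dequantization for a single image, ignoring the TensorFlow graph machinery (shapes and value scaling here are assumptions):
import numpy as np

n_bits = 8
n_bins = 2 ** n_bits
x = np.random.randint(0, n_bins, size=(32, 32, 3)) / n_bins        # quantized values in [0, 1)
x_dequant = x + np.random.uniform(0.0, 1.0 / n_bins, size=x.shape)
objective = -np.log(n_bins) * x.size                               # the -q(x) * log q(x) correction
print(x_dequant.min(), x_dequant.max(), objective)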
def get_updated(self, from_time, to_time=None):
"""
        Retrieves a list of series that have changed on TheTVDB since a provided from time parameter and optionally to a
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
        :param to_time: An optional epoch representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
        raw_response = requests_util.run_request('get', self.API_BASE_URL + '/updated/query?%s' % query_string,
headers=self.__get_header_with_auth())
        return self.parse_raw_response(raw_response) | Retrieves a list of series that have changed on TheTVDB since a provided from time parameter and optionally to a
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epoch representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB. | Below is the the instruction that describes the task:
### Input:
Retrieves a list of series that have changed on TheTVDB since a provided from time parameter and optionally to a
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
:param to_time: An optional epoch representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
### Response:
def get_updated(self, from_time, to_time=None):
"""
        Retrieves a list of series that have changed on TheTVDB since a provided from time parameter and optionally to a
specified to time.
:param from_time: An epoch representation of the date from which to restrict the query to.
        :param to_time: An optional epoch representation of the date to which to restrict the query to.
:return: a python dictionary with either the result of the search or an error from TheTVDB.
"""
arguments = locals()
optional_parameters = {'to_time': 'toTime'}
query_string = 'fromTime=%s&%s' % (from_time,
utils.query_param_string_from_option_args(optional_parameters, arguments))
        raw_response = requests_util.run_request('get', self.API_BASE_URL + '/updated/query?%s' % query_string,
headers=self.__get_header_with_auth())
return self.parse_raw_response(raw_response) |
def cli(env, sortby, cpu, domain, datacenter, hostname, memory, network,
hourly, monthly, tag, columns, limit):
"""List virtual servers."""
vsi = SoftLayer.VSManager(env.client)
guests = vsi.list_instances(hourly=hourly,
monthly=monthly,
hostname=hostname,
domain=domain,
cpus=cpu,
memory=memory,
datacenter=datacenter,
nic_speed=network,
tags=tag,
mask=columns.mask(),
limit=limit)
table = formatting.Table(columns.columns)
table.sortby = sortby
for guest in guests:
table.add_row([value or formatting.blank()
for value in columns.row(guest)])
env.fout(table) | List virtual servers. | Below is the the instruction that describes the task:
### Input:
List virtual servers.
### Response:
def cli(env, sortby, cpu, domain, datacenter, hostname, memory, network,
hourly, monthly, tag, columns, limit):
"""List virtual servers."""
vsi = SoftLayer.VSManager(env.client)
guests = vsi.list_instances(hourly=hourly,
monthly=monthly,
hostname=hostname,
domain=domain,
cpus=cpu,
memory=memory,
datacenter=datacenter,
nic_speed=network,
tags=tag,
mask=columns.mask(),
limit=limit)
table = formatting.Table(columns.columns)
table.sortby = sortby
for guest in guests:
table.add_row([value or formatting.blank()
for value in columns.row(guest)])
env.fout(table) |
def GetRendererForValueOrClass(cls, value, limit_lists=-1):
"""Returns renderer corresponding to a given value and rendering args."""
if inspect.isclass(value):
value_cls = value
else:
value_cls = value.__class__
cache_key = "%s_%d" % (value_cls.__name__, limit_lists)
try:
renderer_cls = cls._renderers_cache[cache_key]
except KeyError:
candidates = []
for candidate in itervalues(ApiValueRenderer.classes):
if candidate.value_class:
candidate_class = candidate.value_class
else:
continue
if inspect.isclass(value):
if issubclass(value_cls, candidate_class):
candidates.append((candidate, candidate_class))
else:
if isinstance(value, candidate_class):
candidates.append((candidate, candidate_class))
if not candidates:
raise RuntimeError(
"No renderer found for value %s." % value.__class__.__name__)
candidates = sorted(
candidates, key=lambda candidate: len(candidate[1].mro()))
renderer_cls = candidates[-1][0]
cls._renderers_cache[cache_key] = renderer_cls
return renderer_cls(limit_lists=limit_lists) | Returns renderer corresponding to a given value and rendering args. | Below is the the instruction that describes the task:
### Input:
Returns renderer corresponding to a given value and rendering args.
### Response:
def GetRendererForValueOrClass(cls, value, limit_lists=-1):
"""Returns renderer corresponding to a given value and rendering args."""
if inspect.isclass(value):
value_cls = value
else:
value_cls = value.__class__
cache_key = "%s_%d" % (value_cls.__name__, limit_lists)
try:
renderer_cls = cls._renderers_cache[cache_key]
except KeyError:
candidates = []
for candidate in itervalues(ApiValueRenderer.classes):
if candidate.value_class:
candidate_class = candidate.value_class
else:
continue
if inspect.isclass(value):
if issubclass(value_cls, candidate_class):
candidates.append((candidate, candidate_class))
else:
if isinstance(value, candidate_class):
candidates.append((candidate, candidate_class))
if not candidates:
raise RuntimeError(
"No renderer found for value %s." % value.__class__.__name__)
candidates = sorted(
candidates, key=lambda candidate: len(candidate[1].mro()))
renderer_cls = candidates[-1][0]
cls._renderers_cache[cache_key] = renderer_cls
return renderer_cls(limit_lists=limit_lists) |
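The "most specific class wins" selection can be reproduced with plain classes: matching candidates are sorted by MRO length, so the deepest subclass is chosen (illustrative sketch):
class Base: pass
class Mid(Base): pass
class Leaf(Mid): pass

candidates = [("renders Base", Base), ("renders Mid", Mid)]
value = Leaf()
matching = [c for c in candidates if isinstance(value, c[1])]
best = sorted(matching, key=lambda c: len(c[1].mro()))[-1]
print(best[0])   # renders Mid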
def substitute_timestep(self, regex, timestep):
"""
Substitute a new timestep value using regex.
"""
# Make one change at a time, each change affects subsequent matches.
timestep_changed = False
while True:
matches = re.finditer(regex, self.str, re.MULTILINE | re.DOTALL)
none_updated = True
for m in matches:
if m.group(1) == timestep:
continue
else:
self.str = (self.str[:m.start(1)] + timestep +
self.str[m.end(1):])
none_updated = False
timestep_changed = True
break
if none_updated:
break
if not timestep_changed:
sys.stderr.write('WARNING: no update with {0}.\n'.format(regex)) | Substitute a new timestep value using regex. | Below is the the instruction that describes the task:
### Input:
Substitute a new timestep value using regex.
### Response:
def substitute_timestep(self, regex, timestep):
"""
Substitute a new timestep value using regex.
"""
# Make one change at a time, each change affects subsequent matches.
timestep_changed = False
while True:
matches = re.finditer(regex, self.str, re.MULTILINE | re.DOTALL)
none_updated = True
for m in matches:
if m.group(1) == timestep:
continue
else:
self.str = (self.str[:m.start(1)] + timestep +
self.str[m.end(1):])
none_updated = False
timestep_changed = True
break
if none_updated:
break
if not timestep_changed:
sys.stderr.write('WARNING: no update with {0}.\n'.format(regex)) |
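A hypothetical regex of the shape this method expects: the first capture group holds the current timestep value, and the substitution splices the new value in at that group's span:
import re

config = "&time_manager_nml\n  dt = 300\n/"
regex = r"dt\s*=\s*(\d+)"
m = re.search(regex, config)
updated = config[:m.start(1)] + "600" + config[m.end(1):]
print(updated)   # the dt line now reads: dt = 600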
def crossvalidate_model(self, classifier, data, num_folds, rnd, output=None):
"""
Crossvalidates the model using the specified data, number of folds and random number generator wrapper.
:param classifier: the classifier to cross-validate
:type classifier: Classifier
:param data: the data to evaluate on
:type data: Instances
:param num_folds: the number of folds
:type num_folds: int
:param rnd: the random number generator to use
:type rnd: Random
:param output: the output generator to use
:type output: PredictionOutput
"""
if output is None:
generator = []
else:
generator = [output.jobject]
javabridge.call(
self.jobject, "crossValidateModel",
"(Lweka/classifiers/Classifier;Lweka/core/Instances;ILjava/util/Random;[Ljava/lang/Object;)V",
classifier.jobject, data.jobject, num_folds, rnd.jobject, generator) | Crossvalidates the model using the specified data, number of folds and random number generator wrapper.
:param classifier: the classifier to cross-validate
:type classifier: Classifier
:param data: the data to evaluate on
:type data: Instances
:param num_folds: the number of folds
:type num_folds: int
:param rnd: the random number generator to use
:type rnd: Random
:param output: the output generator to use
:type output: PredictionOutput | Below is the the instruction that describes the task:
### Input:
Crossvalidates the model using the specified data, number of folds and random number generator wrapper.
:param classifier: the classifier to cross-validate
:type classifier: Classifier
:param data: the data to evaluate on
:type data: Instances
:param num_folds: the number of folds
:type num_folds: int
:param rnd: the random number generator to use
:type rnd: Random
:param output: the output generator to use
:type output: PredictionOutput
### Response:
def crossvalidate_model(self, classifier, data, num_folds, rnd, output=None):
"""
Crossvalidates the model using the specified data, number of folds and random number generator wrapper.
:param classifier: the classifier to cross-validate
:type classifier: Classifier
:param data: the data to evaluate on
:type data: Instances
:param num_folds: the number of folds
:type num_folds: int
:param rnd: the random number generator to use
:type rnd: Random
:param output: the output generator to use
:type output: PredictionOutput
"""
if output is None:
generator = []
else:
generator = [output.jobject]
javabridge.call(
self.jobject, "crossValidateModel",
"(Lweka/classifiers/Classifier;Lweka/core/Instances;ILjava/util/Random;[Ljava/lang/Object;)V",
classifier.jobject, data.jobject, num_folds, rnd.jobject, generator) |
def split_no_wd_params(layer_groups:Collection[nn.Module])->List[List[nn.Parameter]]:
"Separate the parameters in `layer_groups` between `no_wd_types` and bias (`bias_types`) from the rest."
split_params = []
for l in layer_groups:
l1,l2 = [],[]
for c in l.children():
if isinstance(c, no_wd_types): l2 += list(trainable_params(c))
elif isinstance(c, bias_types):
bias = c.bias if hasattr(c, 'bias') else None
l1 += [p for p in trainable_params(c) if not (p is bias)]
if bias is not None: l2.append(bias)
else: l1 += list(trainable_params(c))
#Since we scan the children separately, we might get duplicates (tied weights). We need to preserve the order
#for the optimizer load of state_dict
l1,l2 = uniqueify(l1),uniqueify(l2)
split_params += [l1, l2]
return split_params | Separate the parameters in `layer_groups` between `no_wd_types` and bias (`bias_types`) from the rest. | Below is the the instruction that describes the task:
### Input:
Separate the parameters in `layer_groups` between `no_wd_types` and bias (`bias_types`) from the rest.
### Response:
def split_no_wd_params(layer_groups:Collection[nn.Module])->List[List[nn.Parameter]]:
"Separate the parameters in `layer_groups` between `no_wd_types` and bias (`bias_types`) from the rest."
split_params = []
for l in layer_groups:
l1,l2 = [],[]
for c in l.children():
if isinstance(c, no_wd_types): l2 += list(trainable_params(c))
elif isinstance(c, bias_types):
bias = c.bias if hasattr(c, 'bias') else None
l1 += [p for p in trainable_params(c) if not (p is bias)]
if bias is not None: l2.append(bias)
else: l1 += list(trainable_params(c))
#Since we scan the children separately, we might get duplicates (tied weights). We need to preserve the order
#for the optimizer load of state_dict
l1,l2 = uniqueify(l1),uniqueify(l2)
split_params += [l1, l2]
return split_params |
def getMetastable(rates, ver: np.ndarray, lamb, br, reactfn: Path):
with h5py.File(reactfn, 'r') as f:
A = f['/metastable/A'][:]
lambnew = f['/metastable/lambda'].value.ravel(order='F') # some are not 1-D!
"""
concatenate along the reaction dimension, axis=-1
"""
vnew = np.concatenate((A[:2] * rates.loc[..., 'no1s'].values[:, None],
A[2:4] * rates.loc[..., 'no1d'].values[:, None],
A[4:] * rates.loc[..., 'noii2p'].values[:, None]), axis=-1)
assert vnew.shape == (rates.shape[0], A.size)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br) | concatenate along the reaction dimension, axis=-1 | Below is the the instruction that describes the task:
### Input:
concatenate along the reaction dimension, axis=-1
### Response:
def getMetastable(rates, ver: np.ndarray, lamb, br, reactfn: Path):
with h5py.File(reactfn, 'r') as f:
A = f['/metastable/A'][:]
lambnew = f['/metastable/lambda'].value.ravel(order='F') # some are not 1-D!
"""
concatenate along the reaction dimension, axis=-1
"""
vnew = np.concatenate((A[:2] * rates.loc[..., 'no1s'].values[:, None],
A[2:4] * rates.loc[..., 'no1d'].values[:, None],
A[4:] * rates.loc[..., 'noii2p'].values[:, None]), axis=-1)
assert vnew.shape == (rates.shape[0], A.size)
return catvl(rates.alt_km, ver, vnew, lamb, lambnew, br) |
def split_focus(self):
"""Divide the focus edit widget at the cursor location."""
focus = self.lines[self.focus]
pos = focus.edit_pos
edit = urwid.Edit("", focus.edit_text[pos:], allow_tab=True)
edit.original_text = ""
focus.set_edit_text(focus.edit_text[:pos])
edit.set_edit_pos(0)
self.lines.insert(self.focus + 1, edit) | Divide the focus edit widget at the cursor location. | Below is the the instruction that describes the task:
### Input:
Divide the focus edit widget at the cursor location.
### Response:
def split_focus(self):
"""Divide the focus edit widget at the cursor location."""
focus = self.lines[self.focus]
pos = focus.edit_pos
edit = urwid.Edit("", focus.edit_text[pos:], allow_tab=True)
edit.original_text = ""
focus.set_edit_text(focus.edit_text[:pos])
edit.set_edit_pos(0)
self.lines.insert(self.focus + 1, edit) |
def update_session(fname=None):
"""Update current Scapy session from the file specified in the fname arg.
params:
- fname: file to load the scapy session from"""
if fname is None:
fname = conf.session
try:
s = six.moves.cPickle.load(gzip.open(fname, "rb"))
except IOError:
s = six.moves.cPickle.load(open(fname, "rb"))
scapy_session = six.moves.builtins.__dict__["scapy_session"]
scapy_session.update(s)
update_ipython_session(scapy_session) | Update current Scapy session from the file specified in the fname arg.
params:
- fname: file to load the scapy session from | Below is the the instruction that describes the task:
### Input:
Update current Scapy session from the file specified in the fname arg.
params:
- fname: file to load the scapy session from
### Response:
def update_session(fname=None):
"""Update current Scapy session from the file specified in the fname arg.
params:
- fname: file to load the scapy session from"""
if fname is None:
fname = conf.session
try:
s = six.moves.cPickle.load(gzip.open(fname, "rb"))
except IOError:
s = six.moves.cPickle.load(open(fname, "rb"))
scapy_session = six.moves.builtins.__dict__["scapy_session"]
scapy_session.update(s)
update_ipython_session(scapy_session) |
def formatted_str_to_val(data, format, enum_set=None):
""" Return an unsigned integer representation of the data given format specified.
:param data: a string holding the value to convert
:param format: a string holding a format which will be used to convert the data string
    :param enum_set: an iterable of enums which are used as part of the conversion process
    Given a string (not a wirevector!) convert that to an unsigned integer ready for input
    to the simulation environment.  This helps deal with signed/unsigned numbers (simulation
assumes the values have been converted via two's complement already), but it also takes
hex, binary, and enum types as inputs. It is easiest to see how it works with some
examples. ::
formatted_str_to_val('2', 's3') == 2 # 0b010
formatted_str_to_val('-1', 's3') == 7 # 0b111
formatted_str_to_val('101', 'b3') == 5
formatted_str_to_val('5', 'u3') == 5
formatted_str_to_val('-3', 's3') == 5
formatted_str_to_val('a', 'x3') == 10
class Ctl(Enum):
ADD = 5
SUB = 12
formatted_str_to_val('ADD', 'e3/Ctl', [Ctl]) == 5
formatted_str_to_val('SUB', 'e3/Ctl', [Ctl]) == 12
"""
type = format[0]
bitwidth = int(format[1:].split('/')[0])
bitmask = (1 << bitwidth)-1
if type == 's':
rval = int(data) & bitmask
elif type == 'x':
rval = int(data, 16)
elif type == 'b':
rval = int(data, 2)
elif type == 'u':
rval = int(data)
if rval < 0:
raise PyrtlError('unsigned format requested, but negative value provided')
elif type == 'e':
enumname = format.split('/')[1]
enum_inst_list = [e for e in enum_set if e.__name__ == enumname]
if len(enum_inst_list) == 0:
raise PyrtlError('enum "{}" not found in passed enum_set "{}"'
.format(enumname, enum_set))
rval = getattr(enum_inst_list[0], data).value
else:
raise PyrtlError('unknown format type {}'.format(format))
return rval | Return an unsigned integer representation of the data given format specified.
:param data: a string holding the value to convert
:param format: a string holding a format which will be used to convert the data string
    :param enum_set: an iterable of enums which are used as part of the conversion process
    Given a string (not a wirevector!) convert that to an unsigned integer ready for input
    to the simulation environment.  This helps deal with signed/unsigned numbers (simulation
assumes the values have been converted via two's complement already), but it also takes
hex, binary, and enum types as inputs. It is easiest to see how it works with some
examples. ::
formatted_str_to_val('2', 's3') == 2 # 0b010
formatted_str_to_val('-1', 's3') == 7 # 0b111
formatted_str_to_val('101', 'b3') == 5
formatted_str_to_val('5', 'u3') == 5
formatted_str_to_val('-3', 's3') == 5
formatted_str_to_val('a', 'x3') == 10
class Ctl(Enum):
ADD = 5
SUB = 12
formatted_str_to_val('ADD', 'e3/Ctl', [Ctl]) == 5
formatted_str_to_val('SUB', 'e3/Ctl', [Ctl]) == 12 | Below is the the instruction that describes the task:
### Input:
Return an unsigned integer representation of the data given format specified.
:param data: a string holding the value to convert
:param format: a string holding a format which will be used to convert the data string
    :param enum_set: an iterable of enums which are used as part of the conversion process
    Given a string (not a wirevector!) convert that to an unsigned integer ready for input
    to the simulation environment.  This helps deal with signed/unsigned numbers (simulation
assumes the values have been converted via two's complement already), but it also takes
hex, binary, and enum types as inputs. It is easiest to see how it works with some
examples. ::
formatted_str_to_val('2', 's3') == 2 # 0b010
formatted_str_to_val('-1', 's3') == 7 # 0b111
formatted_str_to_val('101', 'b3') == 5
formatted_str_to_val('5', 'u3') == 5
formatted_str_to_val('-3', 's3') == 5
formatted_str_to_val('a', 'x3') == 10
class Ctl(Enum):
ADD = 5
SUB = 12
formatted_str_to_val('ADD', 'e3/Ctl', [Ctl]) == 5
formatted_str_to_val('SUB', 'e3/Ctl', [Ctl]) == 12
### Response:
def formatted_str_to_val(data, format, enum_set=None):
""" Return an unsigned integer representation of the data given format specified.
:param data: a string holding the value to convert
:param format: a string holding a format which will be used to convert the data string
    :param enum_set: an iterable of enums which are used as part of the conversion process
    Given a string (not a wirevector!) convert that to an unsigned integer ready for input
    to the simulation environment.  This helps deal with signed/unsigned numbers (simulation
assumes the values have been converted via two's complement already), but it also takes
hex, binary, and enum types as inputs. It is easiest to see how it works with some
examples. ::
formatted_str_to_val('2', 's3') == 2 # 0b010
formatted_str_to_val('-1', 's3') == 7 # 0b111
formatted_str_to_val('101', 'b3') == 5
formatted_str_to_val('5', 'u3') == 5
formatted_str_to_val('-3', 's3') == 5
formatted_str_to_val('a', 'x3') == 10
class Ctl(Enum):
ADD = 5
SUB = 12
formatted_str_to_val('ADD', 'e3/Ctl', [Ctl]) == 5
formatted_str_to_val('SUB', 'e3/Ctl', [Ctl]) == 12
"""
type = format[0]
bitwidth = int(format[1:].split('/')[0])
bitmask = (1 << bitwidth)-1
if type == 's':
rval = int(data) & bitmask
elif type == 'x':
rval = int(data, 16)
elif type == 'b':
rval = int(data, 2)
elif type == 'u':
rval = int(data)
if rval < 0:
raise PyrtlError('unsigned format requested, but negative value provided')
elif type == 'e':
enumname = format.split('/')[1]
enum_inst_list = [e for e in enum_set if e.__name__ == enumname]
if len(enum_inst_list) == 0:
raise PyrtlError('enum "{}" not found in passed enum_set "{}"'
.format(enumname, enum_set))
rval = getattr(enum_inst_list[0], data).value
else:
raise PyrtlError('unknown format type {}'.format(format))
return rval |
def fgp_dual(p, data, alpha, niter, grad, proj_C, proj_P, tol=None, **kwargs):
"""Computes a solution to the ROF problem with the fast gradient
projection algorithm.
Parameters
----------
p : np.array
dual initial variable
data : np.array
noisy data / proximal point
alpha : float
regularization parameter
niter : int
number of iterations
grad : instance of gradient class
class that supports grad(x), grad.adjoint(x), grad.norm
proj_C : function
projection onto the constraint set of the primal variable,
e.g. non-negativity
proj_P : function
projection onto the constraint set of the dual variable,
e.g. norm <= 1
tol : float (optional)
nonnegative parameter that gives the tolerance for convergence. If set
None, then the algorithm will run for a fixed number of iterations
Other Parameters
----------------
callback : callable, optional
Function called with the current iterate after each iteration.
"""
# Callback object
callback = kwargs.pop('callback', None)
if callback is not None and not callable(callback):
raise TypeError('`callback` {} is not callable'.format(callback))
factr = 1 / (grad.norm**2 * alpha)
q = p.copy()
x = data.space.zero()
t = 1.
if tol is None:
def convergence_eval(p1, p2):
return False
else:
def convergence_eval(p1, p2):
return (p1 - p2).norm() / p1.norm() < tol
pnew = p.copy()
if callback is not None:
callback(p)
for k in range(niter):
t0 = t
grad.adjoint(q, out=x)
proj_C(data - alpha * x, out=x)
grad(x, out=pnew)
pnew *= factr
pnew += q
proj_P(pnew, out=pnew)
converged = convergence_eval(p, pnew)
if not converged:
# update step size
t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2.
# calculate next iterate
q[:] = pnew + (t0 - 1) / t * (pnew - p)
p[:] = pnew
if converged:
t = None
break
if callback is not None:
callback(p)
# get current image estimate
x = proj_C(data - alpha * grad.adjoint(p))
return x | Computes a solution to the ROF problem with the fast gradient
projection algorithm.
Parameters
----------
p : np.array
dual initial variable
data : np.array
noisy data / proximal point
alpha : float
regularization parameter
niter : int
number of iterations
grad : instance of gradient class
class that supports grad(x), grad.adjoint(x), grad.norm
proj_C : function
projection onto the constraint set of the primal variable,
e.g. non-negativity
proj_P : function
projection onto the constraint set of the dual variable,
e.g. norm <= 1
tol : float (optional)
nonnegative parameter that gives the tolerance for convergence. If set
None, then the algorithm will run for a fixed number of iterations
Other Parameters
----------------
callback : callable, optional
Function called with the current iterate after each iteration. | Below is the the instruction that describes the task:
### Input:
Computes a solution to the ROF problem with the fast gradient
projection algorithm.
Parameters
----------
p : np.array
dual initial variable
data : np.array
noisy data / proximal point
alpha : float
regularization parameter
niter : int
number of iterations
grad : instance of gradient class
class that supports grad(x), grad.adjoint(x), grad.norm
proj_C : function
projection onto the constraint set of the primal variable,
e.g. non-negativity
proj_P : function
projection onto the constraint set of the dual variable,
e.g. norm <= 1
tol : float (optional)
nonnegative parameter that gives the tolerance for convergence. If set
None, then the algorithm will run for a fixed number of iterations
Other Parameters
----------------
callback : callable, optional
Function called with the current iterate after each iteration.
### Response:
def fgp_dual(p, data, alpha, niter, grad, proj_C, proj_P, tol=None, **kwargs):
"""Computes a solution to the ROF problem with the fast gradient
projection algorithm.
Parameters
----------
p : np.array
dual initial variable
data : np.array
noisy data / proximal point
alpha : float
regularization parameter
niter : int
number of iterations
grad : instance of gradient class
class that supports grad(x), grad.adjoint(x), grad.norm
proj_C : function
projection onto the constraint set of the primal variable,
e.g. non-negativity
proj_P : function
projection onto the constraint set of the dual variable,
e.g. norm <= 1
tol : float (optional)
nonnegative parameter that gives the tolerance for convergence. If set
None, then the algorithm will run for a fixed number of iterations
Other Parameters
----------------
callback : callable, optional
Function called with the current iterate after each iteration.
"""
# Callback object
callback = kwargs.pop('callback', None)
if callback is not None and not callable(callback):
raise TypeError('`callback` {} is not callable'.format(callback))
factr = 1 / (grad.norm**2 * alpha)
q = p.copy()
x = data.space.zero()
t = 1.
if tol is None:
def convergence_eval(p1, p2):
return False
else:
def convergence_eval(p1, p2):
return (p1 - p2).norm() / p1.norm() < tol
pnew = p.copy()
if callback is not None:
callback(p)
for k in range(niter):
t0 = t
grad.adjoint(q, out=x)
proj_C(data - alpha * x, out=x)
grad(x, out=pnew)
pnew *= factr
pnew += q
proj_P(pnew, out=pnew)
converged = convergence_eval(p, pnew)
if not converged:
# update step size
t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2.
# calculate next iterate
q[:] = pnew + (t0 - 1) / t * (pnew - p)
p[:] = pnew
if converged:
t = None
break
if callback is not None:
callback(p)
# get current image estimate
x = proj_C(data - alpha * grad.adjoint(p))
return x |
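The function above runs Beck and Teboulle's fast gradient projection (FGP) on the dual of the ROF problem. As a rough, self-contained illustration of the same iteration, here is a pure-NumPy sketch for 1-D TV denoising; the finite-difference operator, the Lipschitz bound L = 4 and the box projection are simplifying assumptions and do not reproduce the odl-style grad/proj_C/proj_P objects used above.

import numpy as np

def tv_denoise_fgp_1d(b, alpha, niter=200):
    # Forward differences and their adjoint (negative divergence) in 1-D.
    D = lambda x: np.diff(x)
    Dt = lambda p: np.concatenate(([-p[0]], -np.diff(p), [p[-1]]))
    L = 4.0                                   # upper bound on ||D||^2 for 1-D differences
    p = np.zeros(b.size - 1)                  # dual variable
    q = p.copy()
    t = 1.0
    for _ in range(niter):
        x = b - alpha * Dt(q)                 # primal estimate from the dual iterate
        p_new = np.clip(q + D(x) / (L * alpha), -1.0, 1.0)   # gradient step + projection
        t_new = (1 + np.sqrt(1 + 4 * t * t)) / 2              # FISTA momentum
        q = p_new + (t - 1) / t_new * (p_new - p)
        p, t = p_new, t_new
    return b - alpha * Dt(p)

noisy = np.sin(np.linspace(0, 3, 200)) + 0.2 * np.random.randn(200)
denoised = tv_denoise_fgp_1d(noisy, alpha=0.5)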
def replace_by_key(pif, key, subs, new_key=None, remove=False):
"""Replace values that match a key
Deeply traverses the pif object, looking for `key` and
replacing values in accordance with `subs`. If `new_key`
is set, the replaced values are assigned to that key. If
`remove` is `True`, the old `key` pairs are removed.
"""
if not new_key:
new_key = key
remove = False
orig = pif.as_dictionary()
new = _recurse_replace(orig, to_camel_case(key), to_camel_case(new_key), subs, remove)
return pypif.pif.loads(json.dumps(new)) | Replace values that match a key
Deeply traverses the pif object, looking for `key` and
replacing values in accordance with `subs`. If `new_key`
is set, the replaced values are assigned to that key. If
`remove` is `True`, the old `key` pairs are removed. | Below is the the instruction that describes the task:
### Input:
Replace values that match a key
Deeply traverses the pif object, looking for `key` and
replacing values in accordance with `subs`. If `new_key`
is set, the replaced values are assigned to that key. If
`remove` is `True`, the old `key` pairs are removed.
### Response:
def replace_by_key(pif, key, subs, new_key=None, remove=False):
"""Replace values that match a key
Deeply traverses the pif object, looking for `key` and
replacing values in accordance with `subs`. If `new_key`
is set, the replaced values are assigned to that key. If
`remove` is `True`, the old `key` pairs are removed.
"""
if not new_key:
new_key = key
remove = False
orig = pif.as_dictionary()
new = _recurse_replace(orig, to_camel_case(key), to_camel_case(new_key), subs, remove)
return pypif.pif.loads(json.dumps(new)) |
def get_downloads(self):
"""
:calls: `GET /repos/:owner/:repo/downloads <http://developer.github.com/v3/repos/downloads>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Download.Download`
"""
return github.PaginatedList.PaginatedList(
github.Download.Download,
self._requester,
self.url + "/downloads",
None
) | :calls: `GET /repos/:owner/:repo/downloads <http://developer.github.com/v3/repos/downloads>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Download.Download` | Below is the the instruction that describes the task:
### Input:
:calls: `GET /repos/:owner/:repo/downloads <http://developer.github.com/v3/repos/downloads>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Download.Download`
### Response:
def get_downloads(self):
"""
:calls: `GET /repos/:owner/:repo/downloads <http://developer.github.com/v3/repos/downloads>`_
:rtype: :class:`github.PaginatedList.PaginatedList` of :class:`github.Download.Download`
"""
return github.PaginatedList.PaginatedList(
github.Download.Download,
self._requester,
self.url + "/downloads",
None
) |
def increment_day_start_ut(self, day_start_ut, n_days=1):
"""Increment the GTFS-definition of "day start".
Parameters
----------
day_start_ut : int
            unixtime of the previous start of day. If this time is
            12:00 or later, there *will* be bugs. To solve this, run the
input through day_start_ut first.
n_days: int
number of days to increment
"""
old_tz = self.set_current_process_time_zone()
day0 = time.localtime(day_start_ut + 43200) # time of noon
dayN = time.mktime(day0[:2] + # YYYY, MM
(day0[2] + n_days,) + # DD
(12, 00, 0, 0, 0, -1)) - 43200 # HHMM, etc. Minus 12 hours.
set_process_timezone(old_tz)
return dayN | Increment the GTFS-definition of "day start".
Parameters
----------
day_start_ut : int
        unixtime of the previous start of day. If this time is
        12:00 or later, there *will* be bugs. To solve this, run the
input through day_start_ut first.
n_days: int
number of days to increment | Below is the the instruction that describes the task:
### Input:
Increment the GTFS-definition of "day start".
Parameters
----------
day_start_ut : int
    unixtime of the previous start of day. If this time is
    12:00 or later, there *will* be bugs. To solve this, run the
input through day_start_ut first.
n_days: int
number of days to increment
### Response:
def increment_day_start_ut(self, day_start_ut, n_days=1):
"""Increment the GTFS-definition of "day start".
Parameters
----------
day_start_ut : int
            unixtime of the previous start of day. If this time is
            12:00 or later, there *will* be bugs. To solve this, run the
input through day_start_ut first.
n_days: int
number of days to increment
"""
old_tz = self.set_current_process_time_zone()
day0 = time.localtime(day_start_ut + 43200) # time of noon
dayN = time.mktime(day0[:2] + # YYYY, MM
(day0[2] + n_days,) + # DD
(12, 00, 0, 0, 0, -1)) - 43200 # HHMM, etc. Minus 12 hours.
set_process_timezone(old_tz)
return dayN |
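The noon trick above (jump 12 hours forward, rebuild the date, step 12 hours back) is what keeps the day boundary correct across DST changes. A standalone sketch of the same arithmetic, assuming the process is already running in the feed's timezone (the set_current_process_time_zone helper is not reproduced here):

import time

def next_day_start(day_start_ut, n_days=1):
    noon = time.localtime(day_start_ut + 43200)          # local noon of the current day
    # Rebuild "noon, n_days later"; mktime normalizes day-of-month overflow
    # and resolves DST via the -1 flag, then step back 12 hours.
    shifted_noon = time.mktime((noon.tm_year, noon.tm_mon, noon.tm_mday + n_days,
                                12, 0, 0, 0, 0, -1))
    return shifted_noon - 43200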
def serialize(self, content):
""" Serialize to JSON.
        :return string: serialized JSON
"""
worker = JSONSerializer(
scheme=self.resource,
options=self.resource._meta.emit_options,
format=self.resource._meta.emit_format,
**self.resource._meta.emit_models
)
return worker.serialize(content) | Serialize to JSON.
    :return string: serialized JSON | Below is the the instruction that describes the task:
### Input:
Serialize to JSON.
:return string: serialized JSON
### Response:
def serialize(self, content):
""" Serialize to JSON.
        :return string: serialized JSON
"""
worker = JSONSerializer(
scheme=self.resource,
options=self.resource._meta.emit_options,
format=self.resource._meta.emit_format,
**self.resource._meta.emit_models
)
return worker.serialize(content) |
def process_analyses(analysis_system_instance, analysis_method, sleep_time):
"""Process all analyses which are scheduled for the analysis system instance.
This function does not terminate on its own, give it a SIGINT or Ctrl+C to stop.
:param analysis_system_instance: The analysis system instance for which the analyses are scheduled.
:param analysis_method: A function or method which analyses a scheduled analysis. The function must not take further arguments.
:param sleep_time: Time to wait between polls to the MASS server
"""
try:
while True:
for analysis_request in analysis_system_instance.get_scheduled_analyses():
analysis_method(analysis_request)
time.sleep(sleep_time)
except KeyboardInterrupt:
logging.debug('Shutting down.')
return | Process all analyses which are scheduled for the analysis system instance.
This function does not terminate on its own, give it a SIGINT or Ctrl+C to stop.
:param analysis_system_instance: The analysis system instance for which the analyses are scheduled.
:param analysis_method: A function or method which analyses a scheduled analysis. The function must not take further arguments.
:param sleep_time: Time to wait between polls to the MASS server | Below is the the instruction that describes the task:
### Input:
Process all analyses which are scheduled for the analysis system instance.
This function does not terminate on its own, give it a SIGINT or Ctrl+C to stop.
:param analysis_system_instance: The analysis system instance for which the analyses are scheduled.
:param analysis_method: A function or method which analyses a scheduled analysis. The function must not take further arguments.
:param sleep_time: Time to wait between polls to the MASS server
### Response:
def process_analyses(analysis_system_instance, analysis_method, sleep_time):
"""Process all analyses which are scheduled for the analysis system instance.
This function does not terminate on its own, give it a SIGINT or Ctrl+C to stop.
:param analysis_system_instance: The analysis system instance for which the analyses are scheduled.
:param analysis_method: A function or method which analyses a scheduled analysis. The function must not take further arguments.
:param sleep_time: Time to wait between polls to the MASS server
"""
try:
while True:
for analysis_request in analysis_system_instance.get_scheduled_analyses():
analysis_method(analysis_request)
time.sleep(sleep_time)
except KeyboardInterrupt:
logging.debug('Shutting down.')
return |
def SignMessage(self, message, script_hash):
"""
Sign a message with a specified script_hash.
Args:
message (str): a hex encoded message to sign
script_hash (UInt160): a bytearray (len 20).
Returns:
str: the signed message
"""
keypair = self.GetKeyByScriptHash(script_hash)
prikey = bytes(keypair.PrivateKey)
res = Crypto.Default().Sign(message, prikey)
return res, keypair.PublicKey | Sign a message with a specified script_hash.
Args:
message (str): a hex encoded message to sign
script_hash (UInt160): a bytearray (len 20).
Returns:
str: the signed message | Below is the the instruction that describes the task:
### Input:
Sign a message with a specified script_hash.
Args:
message (str): a hex encoded message to sign
script_hash (UInt160): a bytearray (len 20).
Returns:
str: the signed message
### Response:
def SignMessage(self, message, script_hash):
"""
Sign a message with a specified script_hash.
Args:
message (str): a hex encoded message to sign
script_hash (UInt160): a bytearray (len 20).
Returns:
str: the signed message
"""
keypair = self.GetKeyByScriptHash(script_hash)
prikey = bytes(keypair.PrivateKey)
res = Crypto.Default().Sign(message, prikey)
return res, keypair.PublicKey |
def find(self, path, all=False):
"""
Looks for files in PIPELINE.STYLESHEETS and PIPELINE.JAVASCRIPT
"""
matches = []
for elem in chain(settings.STYLESHEETS.values(), settings.JAVASCRIPT.values()):
if normpath(elem['output_filename']) == normpath(path):
match = safe_join(settings.PIPELINE_ROOT, path)
if not all:
return match
matches.append(match)
return matches | Looks for files in PIPELINE.STYLESHEETS and PIPELINE.JAVASCRIPT | Below is the the instruction that describes the task:
### Input:
Looks for files in PIPELINE.STYLESHEETS and PIPELINE.JAVASCRIPT
### Response:
def find(self, path, all=False):
"""
Looks for files in PIPELINE.STYLESHEETS and PIPELINE.JAVASCRIPT
"""
matches = []
for elem in chain(settings.STYLESHEETS.values(), settings.JAVASCRIPT.values()):
if normpath(elem['output_filename']) == normpath(path):
match = safe_join(settings.PIPELINE_ROOT, path)
if not all:
return match
matches.append(match)
return matches |
def open(self, mode):
"""
Open the underlying .hdf5 file and the parent, if any
"""
if self.hdf5 == (): # not already open
kw = dict(mode=mode, libver='latest')
if mode == 'r':
kw['swmr'] = True
try:
self.hdf5 = hdf5.File(self.filename, **kw)
except OSError as exc:
raise OSError('%s in %s' % (exc, self.filename)) | Open the underlying .hdf5 file and the parent, if any | Below is the the instruction that describes the task:
### Input:
Open the underlying .hdf5 file and the parent, if any
### Response:
def open(self, mode):
"""
Open the underlying .hdf5 file and the parent, if any
"""
if self.hdf5 == (): # not already open
kw = dict(mode=mode, libver='latest')
if mode == 'r':
kw['swmr'] = True
try:
self.hdf5 = hdf5.File(self.filename, **kw)
except OSError as exc:
raise OSError('%s in %s' % (exc, self.filename)) |
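For reference, the mode/libver/swmr keywords assembled above correspond to h5py's single-writer-multiple-reader read mode; a minimal direct sketch, assuming h5py and an existing file name (the name is a placeholder):

import h5py

# Readers can attach while another process holds the file open for writing.
with h5py.File('calc_1.hdf5', mode='r', libver='latest', swmr=True) as f:
    print(list(f))   # top-level groups/datasets visible to the reader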
def _run_sbgenomics(args):
"""Run CWL on SevenBridges platform and Cancer Genomics Cloud.
"""
assert not args.no_container, "Seven Bridges runs require containers"
main_file, json_file, project_name = _get_main_and_json(args.directory)
flags = []
cmd = ["sbg-cwl-runner"] + flags + args.toolargs + [main_file, json_file]
_run_tool(cmd) | Run CWL on SevenBridges platform and Cancer Genomics Cloud. | Below is the the instruction that describes the task:
### Input:
Run CWL on SevenBridges platform and Cancer Genomics Cloud.
### Response:
def _run_sbgenomics(args):
"""Run CWL on SevenBridges platform and Cancer Genomics Cloud.
"""
assert not args.no_container, "Seven Bridges runs require containers"
main_file, json_file, project_name = _get_main_and_json(args.directory)
flags = []
cmd = ["sbg-cwl-runner"] + flags + args.toolargs + [main_file, json_file]
_run_tool(cmd) |
def floyd_warshall_get_path(self, distance, nextn, i, j):
'''
API:
floyd_warshall_get_path(self, distance, nextn, i, j):
Description:
Finds shortest path between i and j using distance and nextn
dictionaries.
Pre:
(1) distance and nextn are outputs of floyd_warshall method.
        (2) The graph does not have a negative cycle, ie.
        distance[(i,i)] >=0 for all nodes i.
Return:
Returns the list of nodes on the path from i to j, ie. [i,...,j]
'''
if distance[(i,j)]=='infinity':
return None
k = nextn[(i,j)]
path = self.floyd_warshall_get_path
if i==k:
return [i, j]
else:
return path(distance, nextn, i,k) + [k] + path(distance, nextn, k,j) | API:
floyd_warshall_get_path(self, distance, nextn, i, j):
Description:
Finds shortest path between i and j using distance and nextn
dictionaries.
Pre:
(1) distance and nextn are outputs of floyd_warshall method.
    (2) The graph does not have a negative cycle, ie.
    distance[(i,i)] >=0 for all nodes i.
Return:
Returns the list of nodes on the path from i to j, ie. [i,...,j] | Below is the the instruction that describes the task:
### Input:
API:
floyd_warshall_get_path(self, distance, nextn, i, j):
Description:
Finds shortest path between i and j using distance and nextn
dictionaries.
Pre:
(1) distance and nextn are outputs of floyd_warshall method.
(2) The graph does not have a negative cycle, ie.
distance[(i,i)] >=0 for all nodes i.
Return:
Returns the list of nodes on the path from i to j, ie. [i,...,j]
### Response:
def floyd_warshall_get_path(self, distance, nextn, i, j):
'''
API:
floyd_warshall_get_path(self, distance, nextn, i, j):
Description:
Finds shortest path between i and j using distance and nextn
dictionaries.
Pre:
(1) distance and nextn are outputs of floyd_warshall method.
        (2) The graph does not have a negative cycle, ie.
        distance[(i,i)] >=0 for all nodes i.
Return:
Returns the list of nodes on the path from i to j, ie. [i,...,j]
'''
if distance[(i,j)]=='infinity':
return None
k = nextn[(i,j)]
path = self.floyd_warshall_get_path
if i==k:
return [i, j]
else:
return path(distance, nextn, i,k) + [k] + path(distance, nextn, k,j) |
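For comparison, here is a self-contained sketch of Floyd-Warshall with path reconstruction using a next-hop (successor) table. Note that GiMPy's own floyd_warshall stores an intermediate node and the string 'infinity' instead, so this is an independent illustration of the idea rather than a drop-in companion to the method above.

def floyd_warshall(nodes, weight):
    # weight maps (u, v) -> arc length for arcs that exist
    dist = {(u, v): (0 if u == v else weight.get((u, v), float('inf')))
            for u in nodes for v in nodes}
    succ = dict(((u, v), v) for (u, v) in weight)   # next hop on the best known path
    for k in nodes:
        for u in nodes:
            for v in nodes:
                if dist[(u, k)] + dist[(k, v)] < dist[(u, v)]:
                    dist[(u, v)] = dist[(u, k)] + dist[(k, v)]
                    succ[(u, v)] = succ[(u, k)]
    return dist, succ

def get_path(dist, succ, u, v):
    if dist[(u, v)] == float('inf'):
        return None
    path = [u]
    while u != v:
        u = succ[(u, v)]
        path.append(u)
    return path

nodes = ['a', 'b', 'c', 'd']
weight = {('a', 'b'): 1, ('b', 'c'): 2, ('a', 'c'): 10, ('c', 'd'): 1}
dist, succ = floyd_warshall(nodes, weight)
print(get_path(dist, succ, 'a', 'd'))   # ['a', 'b', 'c', 'd']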
def axisfn(reverse=False, principal_node_type=xml.dom.Node.ELEMENT_NODE):
"""Axis function decorator.
An axis function will take a node as an argument and return a sequence
over the nodes along an XPath axis. Axis functions have two extra
attributes indicating the axis direction and principal node type.
"""
def decorate(f):
f.__name__ = f.__name__.replace('_', '-')
f.reverse = reverse
f.principal_node_type = principal_node_type
return f
return decorate | Axis function decorator.
An axis function will take a node as an argument and return a sequence
over the nodes along an XPath axis. Axis functions have two extra
attributes indicating the axis direction and principal node type. | Below is the the instruction that describes the task:
### Input:
Axis function decorator.
An axis function will take a node as an argument and return a sequence
over the nodes along an XPath axis. Axis functions have two extra
attributes indicating the axis direction and principal node type.
### Response:
def axisfn(reverse=False, principal_node_type=xml.dom.Node.ELEMENT_NODE):
"""Axis function decorator.
An axis function will take a node as an argument and return a sequence
over the nodes along an XPath axis. Axis functions have two extra
attributes indicating the axis direction and principal node type.
"""
def decorate(f):
f.__name__ = f.__name__.replace('_', '-')
f.reverse = reverse
f.principal_node_type = principal_node_type
return f
return decorate |
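A small usage sketch of the decorator above; the ancestor-walking generator is a hypothetical axis function, and only the attributes set by axisfn are exercised here.

import xml.dom

@axisfn(reverse=True)
def ancestor_or_self(node):
    # Walk from the node up to the document root (a reverse-document-order axis).
    while node is not None:
        yield node
        node = node.parentNode

print(ancestor_or_self.__name__)                                          # 'ancestor-or-self'
print(ancestor_or_self.reverse)                                           # True
print(ancestor_or_self.principal_node_type == xml.dom.Node.ELEMENT_NODE)  # True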
def show_info(ulog, verbose):
"""Show general information from an ULog"""
m1, s1 = divmod(int(ulog.start_timestamp/1e6), 60)
h1, m1 = divmod(m1, 60)
m2, s2 = divmod(int((ulog.last_timestamp - ulog.start_timestamp)/1e6), 60)
h2, m2 = divmod(m2, 60)
print("Logging start time: {:d}:{:02d}:{:02d}, duration: {:d}:{:02d}:{:02d}".format(
h1, m1, s1, h2, m2, s2))
dropout_durations = [dropout.duration for dropout in ulog.dropouts]
if len(dropout_durations) == 0:
print("No Dropouts")
else:
print("Dropouts: count: {:}, total duration: {:.1f} s, max: {:} ms, mean: {:} ms"
.format(len(dropout_durations), sum(dropout_durations)/1000.,
max(dropout_durations),
int(sum(dropout_durations)/len(dropout_durations))))
version = ulog.get_version_info_str()
if not version is None:
print('SW Version: {}'.format(version))
print("Info Messages:")
for k in sorted(ulog.msg_info_dict):
if not k.startswith('perf_') or verbose:
print(" {0}: {1}".format(k, ulog.msg_info_dict[k]))
if len(ulog.msg_info_multiple_dict) > 0:
if verbose:
print("Info Multiple Messages:")
for k in sorted(ulog.msg_info_multiple_dict):
print(" {0}: {1}".format(k, ulog.msg_info_multiple_dict[k]))
else:
print("Info Multiple Messages: {}".format(
", ".join(["[{}: {}]".format(k, len(ulog.msg_info_multiple_dict[k])) for k in
sorted(ulog.msg_info_multiple_dict)])))
print("")
print("{:<41} {:7}, {:10}".format("Name (multi id, message size in bytes)",
"number of data points", "total bytes"))
data_list_sorted = sorted(ulog.data_list, key=lambda d: d.name + str(d.multi_id))
for d in data_list_sorted:
message_size = sum([ULog.get_field_size(f.type_str) for f in d.field_data])
num_data_points = len(d.data['timestamp'])
name_id = "{:} ({:}, {:})".format(d.name, d.multi_id, message_size)
print(" {:<40} {:7d} {:10d}".format(name_id, num_data_points,
message_size * num_data_points)) | Show general information from an ULog | Below is the the instruction that describes the task:
### Input:
Show general information from an ULog
### Response:
def show_info(ulog, verbose):
"""Show general information from an ULog"""
m1, s1 = divmod(int(ulog.start_timestamp/1e6), 60)
h1, m1 = divmod(m1, 60)
m2, s2 = divmod(int((ulog.last_timestamp - ulog.start_timestamp)/1e6), 60)
h2, m2 = divmod(m2, 60)
print("Logging start time: {:d}:{:02d}:{:02d}, duration: {:d}:{:02d}:{:02d}".format(
h1, m1, s1, h2, m2, s2))
dropout_durations = [dropout.duration for dropout in ulog.dropouts]
if len(dropout_durations) == 0:
print("No Dropouts")
else:
print("Dropouts: count: {:}, total duration: {:.1f} s, max: {:} ms, mean: {:} ms"
.format(len(dropout_durations), sum(dropout_durations)/1000.,
max(dropout_durations),
int(sum(dropout_durations)/len(dropout_durations))))
version = ulog.get_version_info_str()
if not version is None:
print('SW Version: {}'.format(version))
print("Info Messages:")
for k in sorted(ulog.msg_info_dict):
if not k.startswith('perf_') or verbose:
print(" {0}: {1}".format(k, ulog.msg_info_dict[k]))
if len(ulog.msg_info_multiple_dict) > 0:
if verbose:
print("Info Multiple Messages:")
for k in sorted(ulog.msg_info_multiple_dict):
print(" {0}: {1}".format(k, ulog.msg_info_multiple_dict[k]))
else:
print("Info Multiple Messages: {}".format(
", ".join(["[{}: {}]".format(k, len(ulog.msg_info_multiple_dict[k])) for k in
sorted(ulog.msg_info_multiple_dict)])))
print("")
print("{:<41} {:7}, {:10}".format("Name (multi id, message size in bytes)",
"number of data points", "total bytes"))
data_list_sorted = sorted(ulog.data_list, key=lambda d: d.name + str(d.multi_id))
for d in data_list_sorted:
message_size = sum([ULog.get_field_size(f.type_str) for f in d.field_data])
num_data_points = len(d.data['timestamp'])
name_id = "{:} ({:}, {:})".format(d.name, d.multi_id, message_size)
print(" {:<40} {:7d} {:10d}".format(name_id, num_data_points,
message_size * num_data_points)) |
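A short usage sketch, assuming the pyulog package and an existing log file (the file name is a placeholder); ULog parses the log and show_info above prints the summary.

from pyulog import ULog

ulog = ULog('flight_log.ulg')      # parse the log file
show_info(ulog, verbose=False)     # duration, dropouts, info messages, per-topic sizes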
def get_logger(name=None, filename=None, filemode=None, level=WARNING):
"""Gets a customized logger.
Parameters
----------
name: str, optional
Name of the logger.
filename: str, optional
The filename to which the logger's output will be sent.
filemode: str, optional
The file mode to open the file (corresponding to `filename`),
default is 'a' if `filename` is not ``None``.
level: int, optional
The `logging` level for the logger.
See: https://docs.python.org/2/library/logging.html#logging-levels
Returns
-------
Logger
A customized `Logger` object.
Example
-------
## get_logger call with default parameters.
>>> from mxnet.log import get_logger
>>> logger = get_logger("Test")
>>> logger.warn("Hello World")
W0505 00:29:47 3525 <stdin>:<module>:1] Hello World
## get_logger call with WARNING level.
>>> import logging
>>> logger = get_logger("Test2", level=logging.WARNING)
>>> logger.warn("Hello World")
W0505 00:30:50 3525 <stdin>:<module>:1] Hello World
>>> logger.debug("Hello World") # This doesn't return anything as the level is logging.WARNING.
## get_logger call with DEBUG level.
>>> logger = get_logger("Test3", level=logging.DEBUG)
>>> logger.debug("Hello World") # Logs the debug output as the level is logging.DEBUG.
D0505 00:31:30 3525 <stdin>:<module>:1] Hello World
"""
logger = logging.getLogger(name)
if name is not None and not getattr(logger, '_init_done', None):
logger._init_done = True
if filename:
mode = filemode if filemode else 'a'
hdlr = logging.FileHandler(filename, mode)
else:
hdlr = logging.StreamHandler() # pylint: disable=redefined-variable-type
# the `_Formatter` contain some escape character to
# represent color, which is not suitable for FileHandler,
# (TODO) maybe we can add another Formatter for FileHandler.
hdlr.setFormatter(_Formatter())
logger.addHandler(hdlr)
logger.setLevel(level)
return logger | Gets a customized logger.
Parameters
----------
name: str, optional
Name of the logger.
filename: str, optional
The filename to which the logger's output will be sent.
filemode: str, optional
The file mode to open the file (corresponding to `filename`),
default is 'a' if `filename` is not ``None``.
level: int, optional
The `logging` level for the logger.
See: https://docs.python.org/2/library/logging.html#logging-levels
Returns
-------
Logger
A customized `Logger` object.
Example
-------
## get_logger call with default parameters.
>>> from mxnet.log import get_logger
>>> logger = get_logger("Test")
>>> logger.warn("Hello World")
W0505 00:29:47 3525 <stdin>:<module>:1] Hello World
## get_logger call with WARNING level.
>>> import logging
>>> logger = get_logger("Test2", level=logging.WARNING)
>>> logger.warn("Hello World")
W0505 00:30:50 3525 <stdin>:<module>:1] Hello World
>>> logger.debug("Hello World") # This doesn't return anything as the level is logging.WARNING.
## get_logger call with DEBUG level.
>>> logger = get_logger("Test3", level=logging.DEBUG)
>>> logger.debug("Hello World") # Logs the debug output as the level is logging.DEBUG.
D0505 00:31:30 3525 <stdin>:<module>:1] Hello World | Below is the the instruction that describes the task:
### Input:
Gets a customized logger.
Parameters
----------
name: str, optional
Name of the logger.
filename: str, optional
The filename to which the logger's output will be sent.
filemode: str, optional
The file mode to open the file (corresponding to `filename`),
default is 'a' if `filename` is not ``None``.
level: int, optional
The `logging` level for the logger.
See: https://docs.python.org/2/library/logging.html#logging-levels
Returns
-------
Logger
A customized `Logger` object.
Example
-------
## get_logger call with default parameters.
>>> from mxnet.log import get_logger
>>> logger = get_logger("Test")
>>> logger.warn("Hello World")
W0505 00:29:47 3525 <stdin>:<module>:1] Hello World
## get_logger call with WARNING level.
>>> import logging
>>> logger = get_logger("Test2", level=logging.WARNING)
>>> logger.warn("Hello World")
W0505 00:30:50 3525 <stdin>:<module>:1] Hello World
>>> logger.debug("Hello World") # This doesn't return anything as the level is logging.WARNING.
## get_logger call with DEBUG level.
>>> logger = get_logger("Test3", level=logging.DEBUG)
>>> logger.debug("Hello World") # Logs the debug output as the level is logging.DEBUG.
D0505 00:31:30 3525 <stdin>:<module>:1] Hello World
### Response:
def get_logger(name=None, filename=None, filemode=None, level=WARNING):
"""Gets a customized logger.
Parameters
----------
name: str, optional
Name of the logger.
filename: str, optional
The filename to which the logger's output will be sent.
filemode: str, optional
The file mode to open the file (corresponding to `filename`),
default is 'a' if `filename` is not ``None``.
level: int, optional
The `logging` level for the logger.
See: https://docs.python.org/2/library/logging.html#logging-levels
Returns
-------
Logger
A customized `Logger` object.
Example
-------
## get_logger call with default parameters.
>>> from mxnet.log import get_logger
>>> logger = get_logger("Test")
>>> logger.warn("Hello World")
W0505 00:29:47 3525 <stdin>:<module>:1] Hello World
## get_logger call with WARNING level.
>>> import logging
>>> logger = get_logger("Test2", level=logging.WARNING)
>>> logger.warn("Hello World")
W0505 00:30:50 3525 <stdin>:<module>:1] Hello World
>>> logger.debug("Hello World") # This doesn't return anything as the level is logging.WARNING.
## get_logger call with DEBUG level.
>>> logger = get_logger("Test3", level=logging.DEBUG)
>>> logger.debug("Hello World") # Logs the debug output as the level is logging.DEBUG.
D0505 00:31:30 3525 <stdin>:<module>:1] Hello World
"""
logger = logging.getLogger(name)
if name is not None and not getattr(logger, '_init_done', None):
logger._init_done = True
if filename:
mode = filemode if filemode else 'a'
hdlr = logging.FileHandler(filename, mode)
else:
hdlr = logging.StreamHandler() # pylint: disable=redefined-variable-type
# the `_Formatter` contain some escape character to
# represent color, which is not suitable for FileHandler,
# (TODO) maybe we can add another Formatter for FileHandler.
hdlr.setFormatter(_Formatter())
logger.addHandler(hdlr)
logger.setLevel(level)
return logger |
def fn_abs(self, value):
"""
Return the absolute value of a number.
:param value: The number.
:return: The absolute value of the number.
"""
if is_ndarray(value):
return numpy.absolute(value)
else:
return abs(value) | Return the absolute value of a number.
:param value: The number.
:return: The absolute value of the number. | Below is the the instruction that describes the task:
### Input:
Return the absolute value of a number.
:param value: The number.
:return: The absolute value of the number.
### Response:
def fn_abs(self, value):
"""
Return the absolute value of a number.
:param value: The number.
:return: The absolute value of the number.
"""
if is_ndarray(value):
return numpy.absolute(value)
else:
return abs(value) |
def _convert_sky_coords(self):
"""
Convert to sky coordinates
"""
parsed_angles = [(x, y)
for x, y in zip(self.coord[:-1:2], self.coord[1::2])
if (isinstance(x, coordinates.Angle) and isinstance(y, coordinates.Angle))
]
frame = coordinates.frame_transform_graph.lookup_name(self.coordsys)
lon, lat = zip(*parsed_angles)
if hasattr(lon, '__len__') and hasattr(lat, '__len__') and len(lon) == 1 and len(lat) == 1:
# force entries to be scalar if they are length-1
lon, lat = u.Quantity(lon[0]), u.Quantity(lat[0])
else:
# otherwise, they are vector quantities
lon, lat = u.Quantity(lon), u.Quantity(lat)
sphcoords = coordinates.UnitSphericalRepresentation(lon, lat)
coords = [SkyCoord(frame(sphcoords))]
if self.region_type != 'polygon':
coords += self.coord[len(coords * 2):]
return coords | Convert to sky coordinates | Below is the the instruction that describes the task:
### Input:
Convert to sky coordinates
### Response:
def _convert_sky_coords(self):
"""
Convert to sky coordinates
"""
parsed_angles = [(x, y)
for x, y in zip(self.coord[:-1:2], self.coord[1::2])
if (isinstance(x, coordinates.Angle) and isinstance(y, coordinates.Angle))
]
frame = coordinates.frame_transform_graph.lookup_name(self.coordsys)
lon, lat = zip(*parsed_angles)
if hasattr(lon, '__len__') and hasattr(lat, '__len__') and len(lon) == 1 and len(lat) == 1:
# force entries to be scalar if they are length-1
lon, lat = u.Quantity(lon[0]), u.Quantity(lat[0])
else:
# otherwise, they are vector quantities
lon, lat = u.Quantity(lon), u.Quantity(lat)
sphcoords = coordinates.UnitSphericalRepresentation(lon, lat)
coords = [SkyCoord(frame(sphcoords))]
if self.region_type != 'polygon':
coords += self.coord[len(coords * 2):]
return coords |
def set_start_date(self, date):
"""Sets the start date.
arg: date (osid.calendaring.DateTime): the new date
raise: InvalidArgument - ``date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if self.get_start_date_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_date_time(date, self.get_start_date_metadata()):
raise errors.InvalidArgument()
# self._my_map['startDate'] = self._get_date_map(date)
self._my_map['startDate'] = date | Sets the start date.
arg: date (osid.calendaring.DateTime): the new date
raise: InvalidArgument - ``date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``date`` is ``null``
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Sets the start date.
arg: date (osid.calendaring.DateTime): the new date
raise: InvalidArgument - ``date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
### Response:
def set_start_date(self, date):
"""Sets the start date.
arg: date (osid.calendaring.DateTime): the new date
raise: InvalidArgument - ``date`` is invalid
raise: NoAccess - ``Metadata.isReadOnly()`` is ``true``
raise: NullArgument - ``date`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
if self.get_start_date_metadata().is_read_only():
raise errors.NoAccess()
if not self._is_valid_date_time(date, self.get_start_date_metadata()):
raise errors.InvalidArgument()
# self._my_map['startDate'] = self._get_date_map(date)
self._my_map['startDate'] = date |
def find_entity_view(self, view_type, begin_entity=None, filter={},
properties=None):
"""Find a ManagedEntity of the requested type.
Traverses the MOB looking for an entity matching the filter.
:param view_type: The type of ManagedEntity to find.
:type view_type: str
:param begin_entity: The MOR to start searching for the entity. \
The default is to start the search at the root folder.
:type begin_entity: ManagedObjectReference or None
:param filter: Key/value pairs to filter the results. The key is \
a valid parameter of the ManagedEntity type. The value is what \
that parameter should match.
:type filter: dict
:returns: If an entity is found, a ManagedEntity matching the search.
:rtype: ManagedEntity
"""
if properties is None:
properties = []
kls = classmapper(view_type)
# Start the search at the root folder if no begin_entity was given
if not begin_entity:
begin_entity = self.sc.rootFolder._mo_ref
logger.debug("Using %s", self.sc.rootFolder._mo_ref)
property_spec = self.create('PropertySpec')
property_spec.type = view_type
property_spec.all = False
property_spec.pathSet = list(filter.keys())
pfs = self.get_search_filter_spec(begin_entity, property_spec)
# Retrieve properties from server and update entity
#obj_contents = self.propertyCollector.RetrieveProperties(specSet=pfs)
obj_contents = self.sc.propertyCollector.RetrieveProperties(specSet=pfs)
# TODO: Implement filtering
if not filter:
logger.warning('No filter specified, returning first match.')
# If no filter is specified we just return the first item
# in the list of returned objects
logger.debug("Creating class in find_entity_view (filter)")
view = kls(obj_contents[0].obj._mo_ref, self)
logger.debug("Completed creating class in find_entity_view (filter)")
#view.update_view_data(properties)
return view
matched = False
# Iterate through obj_contents retrieved
for obj_content in obj_contents:
            # If there is no propSet, skip this one
if not obj_content.propSet:
continue
matches = 0
# Iterate through each property in the set
for prop in obj_content.propSet:
for key in filter.keys():
# If the property name is in the defined filter
if prop.name == key:
# ...and it matches the value specified
# TODO: Regex this?
if prop.val == filter[prop.name]:
# We've found a match
matches += 1
else:
break
else:
continue
if matches == len(filter):
filtered_obj_content = obj_content
matched = True
break
else:
continue
if matched is not True:
# There were no matches
raise ObjectNotFoundError("No matching objects for filter")
logger.debug("Creating class in find_entity_view")
view = kls(filtered_obj_content.obj._mo_ref, self)
logger.debug("Completed creating class in find_entity_view")
#view.update_view_data(properties=properties)
return view | Find a ManagedEntity of the requested type.
Traverses the MOB looking for an entity matching the filter.
:param view_type: The type of ManagedEntity to find.
:type view_type: str
:param begin_entity: The MOR to start searching for the entity. \
The default is to start the search at the root folder.
:type begin_entity: ManagedObjectReference or None
:param filter: Key/value pairs to filter the results. The key is \
a valid parameter of the ManagedEntity type. The value is what \
that parameter should match.
:type filter: dict
:returns: If an entity is found, a ManagedEntity matching the search.
:rtype: ManagedEntity | Below is the the instruction that describes the task:
### Input:
Find a ManagedEntity of the requested type.
Traverses the MOB looking for an entity matching the filter.
:param view_type: The type of ManagedEntity to find.
:type view_type: str
:param begin_entity: The MOR to start searching for the entity. \
The default is to start the search at the root folder.
:type begin_entity: ManagedObjectReference or None
:param filter: Key/value pairs to filter the results. The key is \
a valid parameter of the ManagedEntity type. The value is what \
that parameter should match.
:type filter: dict
:returns: If an entity is found, a ManagedEntity matching the search.
:rtype: ManagedEntity
### Response:
def find_entity_view(self, view_type, begin_entity=None, filter={},
properties=None):
"""Find a ManagedEntity of the requested type.
Traverses the MOB looking for an entity matching the filter.
:param view_type: The type of ManagedEntity to find.
:type view_type: str
:param begin_entity: The MOR to start searching for the entity. \
The default is to start the search at the root folder.
:type begin_entity: ManagedObjectReference or None
:param filter: Key/value pairs to filter the results. The key is \
a valid parameter of the ManagedEntity type. The value is what \
that parameter should match.
:type filter: dict
:returns: If an entity is found, a ManagedEntity matching the search.
:rtype: ManagedEntity
"""
if properties is None:
properties = []
kls = classmapper(view_type)
# Start the search at the root folder if no begin_entity was given
if not begin_entity:
begin_entity = self.sc.rootFolder._mo_ref
logger.debug("Using %s", self.sc.rootFolder._mo_ref)
property_spec = self.create('PropertySpec')
property_spec.type = view_type
property_spec.all = False
property_spec.pathSet = list(filter.keys())
pfs = self.get_search_filter_spec(begin_entity, property_spec)
# Retrieve properties from server and update entity
#obj_contents = self.propertyCollector.RetrieveProperties(specSet=pfs)
obj_contents = self.sc.propertyCollector.RetrieveProperties(specSet=pfs)
# TODO: Implement filtering
if not filter:
logger.warning('No filter specified, returning first match.')
# If no filter is specified we just return the first item
# in the list of returned objects
logger.debug("Creating class in find_entity_view (filter)")
view = kls(obj_contents[0].obj._mo_ref, self)
logger.debug("Completed creating class in find_entity_view (filter)")
#view.update_view_data(properties)
return view
matched = False
# Iterate through obj_contents retrieved
for obj_content in obj_contents:
            # If there is no propSet, skip this one
if not obj_content.propSet:
continue
matches = 0
# Iterate through each property in the set
for prop in obj_content.propSet:
for key in filter.keys():
# If the property name is in the defined filter
if prop.name == key:
# ...and it matches the value specified
# TODO: Regex this?
if prop.val == filter[prop.name]:
# We've found a match
matches += 1
else:
break
else:
continue
if matches == len(filter):
filtered_obj_content = obj_content
matched = True
break
else:
continue
if matched is not True:
# There were no matches
raise ObjectNotFoundError("No matching objects for filter")
logger.debug("Creating class in find_entity_view")
view = kls(filtered_obj_content.obj._mo_ref, self)
logger.debug("Completed creating class in find_entity_view")
#view.update_view_data(properties=properties)
return view |
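A usage sketch, assuming this method lives on a psphere-style Client; the host, credentials and filter value are placeholders.

from psphere.client import Client

client = Client("vcenter.example.com", "administrator", "secret")
# Filter keys must be valid properties of the requested ManagedEntity type.
vm = client.find_entity_view("VirtualMachine", filter={"name": "db01"})
print(vm._mo_ref)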
def record_process(self, process, prg=''):
"""
log a process or program - log a physical program (.py, .bat, .exe)
"""
self._log(self.logFileProcess, force_to_string(process), prg) | log a process or program - log a physical program (.py, .bat, .exe) | Below is the the instruction that describes the task:
### Input:
log a process or program - log a physical program (.py, .bat, .exe)
### Response:
def record_process(self, process, prg=''):
"""
log a process or program - log a physical program (.py, .bat, .exe)
"""
self._log(self.logFileProcess, force_to_string(process), prg) |
def blacklist_bulk(self, blacklist):
"""
Add blacklist entries to the engine node in bulk. For blacklist to work,
you must also create a rule with action "Apply Blacklist".
First create your blacklist entries using :class:`smc.elements.other.Blacklist`
then provide the blacklist to this method.
:param blacklist Blacklist: pre-configured blacklist entries
.. note:: This method requires SMC version >= 6.4
"""
self.make_request(
EngineCommandFailed,
method='create',
resource='blacklist',
json=blacklist.entries) | Add blacklist entries to the engine node in bulk. For blacklist to work,
you must also create a rule with action "Apply Blacklist".
First create your blacklist entries using :class:`smc.elements.other.Blacklist`
then provide the blacklist to this method.
:param blacklist Blacklist: pre-configured blacklist entries
.. note:: This method requires SMC version >= 6.4 | Below is the the instruction that describes the task:
### Input:
Add blacklist entries to the engine node in bulk. For blacklist to work,
you must also create a rule with action "Apply Blacklist".
First create your blacklist entries using :class:`smc.elements.other.Blacklist`
then provide the blacklist to this method.
:param blacklist Blacklist: pre-configured blacklist entries
.. note:: This method requires SMC version >= 6.4
### Response:
def blacklist_bulk(self, blacklist):
"""
Add blacklist entries to the engine node in bulk. For blacklist to work,
you must also create a rule with action "Apply Blacklist".
First create your blacklist entries using :class:`smc.elements.other.Blacklist`
then provide the blacklist to this method.
:param blacklist Blacklist: pre-configured blacklist entries
.. note:: This method requires SMC version >= 6.4
"""
self.make_request(
EngineCommandFailed,
method='create',
resource='blacklist',
json=blacklist.entries) |