code stringlengths 75-104k | docstring stringlengths 1-46.9k | text stringlengths 164-112k |
---|---|---|
def compare_files(path1, path2):
# type: (str, str) -> List[str]
"""Returns the delta between two files using -, ?, + format excluding
lines that are the same
Args:
path1 (str): Path to first file
path2 (str): Path to second file
Returns:
List[str]: Delta between the two files
"""
diff = difflib.ndiff(open(path1).readlines(), open(path2).readlines())
return [x for x in diff if x[0] in ['-', '+', '?']] | Returns the delta between two files using -, ?, + format excluding
lines that are the same
Args:
path1 (str): Path to first file
path2 (str): Path to second file
Returns:
List[str]: Delta between the two files | Below is the instruction that describes the task:
### Input:
Returns the delta between two files using -, ?, + format excluding
lines that are the same
Args:
path1 (str): Path to first file
path2 (str): Path to second file
Returns:
List[str]: Delta between the two files
### Response:
def compare_files(path1, path2):
# type: (str, str) -> List[str]
"""Returns the delta between two files using -, ?, + format excluding
lines that are the same
Args:
path1 (str): Path to first file
path2 (str): Path to second file
Returns:
List[str]: Delta between the two files
"""
diff = difflib.ndiff(open(path1).readlines(), open(path2).readlines())
return [x for x in diff if x[0] in ['-', '+', '?']] |
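As an aside, the filtering idiom above can be checked with a tiny self-contained sketch that feeds `difflib.ndiff` two hypothetical in-memory line lists instead of real files:
# Sketch: ndiff prefixes every line with '  ', '- ', '+ ' or '? ';
# keeping only '-', '+', '?' drops the lines common to both inputs.
import difflib

lines1 = ['alpha\n', 'beta\n']
lines2 = ['alpha\n', 'gamma\n']
delta = [x for x in difflib.ndiff(lines1, lines2) if x[0] in ['-', '+', '?']]
print(delta)  # ['- beta\n', '+ gamma\n']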
def _docf(self, tag, val):
"""
Callback used as the handler argument to process_docs(). This converts
Stone doc references to Sphinx-friendly annotations.
"""
if tag == 'type':
return ':class:`{}`'.format(val)
elif tag == 'route':
if self.args.route_method:
return ':meth:`%s`' % self.args.route_method.format(
ns=self.cur_namespace.name, route=fmt_func(val))
else:
return val
elif tag == 'link':
anchor, link = val.rsplit(' ', 1)
return '`{} <{}>`_'.format(anchor, link)
elif tag == 'val':
if val == 'null':
return 'None'
elif val == 'true' or val == 'false':
return '``{}``'.format(val.capitalize())
else:
return val
elif tag == 'field':
return '``{}``'.format(val)
else:
raise RuntimeError('Unknown doc ref tag %r' % tag) | Callback used as the handler argument to process_docs(). This converts
Stone doc references to Sphinx-friendly annotations. | Below is the instruction that describes the task:
### Input:
Callback used as the handler argument to process_docs(). This converts
Stone doc references to Sphinx-friendly annotations.
### Response:
def _docf(self, tag, val):
"""
Callback used as the handler argument to process_docs(). This converts
Stone doc references to Sphinx-friendly annotations.
"""
if tag == 'type':
return ':class:`{}`'.format(val)
elif tag == 'route':
if self.args.route_method:
return ':meth:`%s`' % self.args.route_method.format(
ns=self.cur_namespace.name, route=fmt_func(val))
else:
return val
elif tag == 'link':
anchor, link = val.rsplit(' ', 1)
return '`{} <{}>`_'.format(anchor, link)
elif tag == 'val':
if val == 'null':
return 'None'
elif val == 'true' or val == 'false':
return '``{}``'.format(val.capitalize())
else:
return val
elif tag == 'field':
return '``{}``'.format(val)
else:
raise RuntimeError('Unknown doc ref tag %r' % tag) |
def map_exception_codes():
'''Helper function to initialise CODES_TO_EXCEPTIONS.'''
werkex = inspect.getmembers(exceptions, lambda x: getattr(x, 'code', None))
return {e.code: e for _, e in werkex} | Helper function to initialise CODES_TO_EXCEPTIONS. | Below is the instruction that describes the task:
### Input:
Helper function to initialise CODES_TO_EXCEPTIONS.
### Response:
def map_exception_codes():
'''Helper function to initialise CODES_TO_EXCEPTIONS.'''
werkex = inspect.getmembers(exceptions, lambda x: getattr(x, 'code', None))
return {e.code: e for _, e in werkex} |
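The `werkex` name suggests the `exceptions` module here is `werkzeug.exceptions`, although the snippet does not show its import; under that assumption, a standalone sketch of the same mapping looks like this:
# Sketch (assumes `exceptions` is werkzeug.exceptions): keep members exposing a
# truthy `code` attribute and key them by HTTP status code.
import inspect
from werkzeug import exceptions

werkex = inspect.getmembers(exceptions, lambda x: getattr(x, 'code', None))
codes_to_exceptions = {e.code: e for _, e in werkex}
print(codes_to_exceptions[404])  # <class 'werkzeug.exceptions.NotFound'>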
def session(self, sid, namespace=None):
"""Return the user session for a client with context manager syntax.
:param sid: The session id of the client.
This is a context manager that returns the user session dictionary for
the client. Any changes that are made to this dictionary inside the
context manager block are saved back to the session. Example usage::
@sio.on('connect')
def on_connect(sid, environ):
username = authenticate_user(environ)
if not username:
return False
with sio.session(sid) as session:
session['username'] = username
@sio.on('message')
def on_message(sid, msg):
with sio.session(sid) as session:
print('received message from ', session['username'])
"""
class _session_context_manager(object):
def __init__(self, server, sid, namespace):
self.server = server
self.sid = sid
self.namespace = namespace
self.session = None
def __enter__(self):
self.session = self.server.get_session(sid,
namespace=namespace)
return self.session
def __exit__(self, *args):
self.server.save_session(sid, self.session,
namespace=namespace)
return _session_context_manager(self, sid, namespace) | Return the user session for a client with context manager syntax.
:param sid: The session id of the client.
This is a context manager that returns the user session dictionary for
the client. Any changes that are made to this dictionary inside the
context manager block are saved back to the session. Example usage::
@sio.on('connect')
def on_connect(sid, environ):
username = authenticate_user(environ)
if not username:
return False
with sio.session(sid) as session:
session['username'] = username
@sio.on('message')
def on_message(sid, msg):
with sio.session(sid) as session:
print('received message from ', session['username']) | Below is the instruction that describes the task:
### Input:
Return the user session for a client with context manager syntax.
:param sid: The session id of the client.
This is a context manager that returns the user session dictionary for
the client. Any changes that are made to this dictionary inside the
context manager block are saved back to the session. Example usage::
@sio.on('connect')
def on_connect(sid, environ):
username = authenticate_user(environ)
if not username:
return False
with sio.session(sid) as session:
session['username'] = username
@sio.on('message')
def on_message(sid, msg):
with sio.session(sid) as session:
print('received message from ', session['username'])
### Response:
def session(self, sid, namespace=None):
"""Return the user session for a client with context manager syntax.
:param sid: The session id of the client.
This is a context manager that returns the user session dictionary for
the client. Any changes that are made to this dictionary inside the
context manager block are saved back to the session. Example usage::
@sio.on('connect')
def on_connect(sid, environ):
username = authenticate_user(environ)
if not username:
return False
with sio.session(sid) as session:
session['username'] = username
@sio.on('message')
def on_message(sid, msg):
with sio.session(sid) as session:
print('received message from ', session['username'])
"""
class _session_context_manager(object):
def __init__(self, server, sid, namespace):
self.server = server
self.sid = sid
self.namespace = namespace
self.session = None
def __enter__(self):
self.session = self.server.get_session(sid,
namespace=namespace)
return self.session
def __exit__(self, *args):
self.server.save_session(sid, self.session,
namespace=namespace)
return _session_context_manager(self, sid, namespace) |
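The inner class above is a save-on-exit context manager: whatever the caller mutates inside the `with` block is written back when the block ends. A stripped-down sketch of that pattern, with a plain dict standing in for the server's session store (all names here are illustrative):
# Sketch: copy the stored session on __enter__, persist it on __exit__.
class _saved_session(object):
    def __init__(self, store, sid):
        self.store = store
        self.sid = sid
    def __enter__(self):
        self.session = dict(self.store.get(self.sid, {}))
        return self.session
    def __exit__(self, *args):
        self.store[self.sid] = self.session

store = {}
with _saved_session(store, 'abc123') as session:
    session['username'] = 'alice'
print(store)  # {'abc123': {'username': 'alice'}}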
def reset_index(self):
"""
Resets the index of the Series to a simple integer list and the index name to 'index'.
:return: nothing
"""
self.index = list(range(self.__len__()))
self.index_name = 'index' | Resets the index of the Series to a simple integer list and the index name to 'index'.
:return: nothing | Below is the instruction that describes the task:
### Input:
Resets the index of the Series to a simple integer list and the index name to 'index'.
:return: nothing
### Response:
def reset_index(self):
"""
Resets the index of the Series to a simple integer list and the index name to 'index'.
:return: nothing
"""
self.index = list(range(self.__len__()))
self.index_name = 'index' |
def get_output_nodes(G: nx.DiGraph) -> List[str]:
""" Get all output nodes from a network. """
return [n for n, d in G.out_degree() if d == 0] | Get all output nodes from a network. | Below is the instruction that describes the task:
### Input:
Get all output nodes from a network.
### Response:
def get_output_nodes(G: nx.DiGraph) -> List[str]:
""" Get all output nodes from a network. """
return [n for n, d in G.out_degree() if d == 0] |
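A quick runnable sketch of the same out-degree filter on a toy graph (node names arbitrary):
# Sketch: output nodes are exactly the nodes with out-degree zero.
import networkx as nx

G = nx.DiGraph()
G.add_edges_from([('a', 'b'), ('b', 'c'), ('b', 'd')])
print([n for n, d in G.out_degree() if d == 0])  # ['c', 'd']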
def get_most_recent_versions(self, group, artifact, limit, remote=False, integration=False):
"""Get a list of the version numbers of the most recent artifacts (integration
or non-integration), ordered by the version number, for a particular group and
artifact combination.
:param str group: Group of the artifact to get versions of
:param str artifact: Name of the artifact to get versions of
:param int limit: Fetch only this many of the most recent releases
:param bool remote: Should remote repositories be searched to find the latest
versions? Note this can make the request much slower. Default is false.
:param bool integration: If true, fetch only "integration versions", otherwise
fetch only non-integration versions.
:return: Version numbers of the most recent artifacts
:rtype: list
:raises requests.exceptions.HTTPError: For any non-success HTTP responses
from the Artifactory API.
:raises ValueError: If limit is 0 or negative.
"""
if limit < 1:
raise ValueError("Releases limit must be positive")
url = self._base_url + '/api/search/versions'
params = {'g': group, 'a': artifact, 'repos': self._repo, 'remote': int(remote)}
self._logger.debug("Using all version API at %s - params %s", url, params)
response = self._session.get(url, params=params)
response.raise_for_status()
json = response.json()
versions = [
item['version'] for item in json['results'] if item['integration'] is integration]
# pylint: disable=no-member
versions.sort(key=distutils.version.LooseVersion, reverse=True)
return versions[:limit] | Get a list of the version numbers of the most recent artifacts (integration
or non-integration), ordered by the version number, for a particular group and
artifact combination.
:param str group: Group of the artifact to get versions of
:param str artifact: Name of the artifact to get versions of
:param int limit: Fetch only this many of the most recent releases
:param bool remote: Should remote repositories be searched to find the latest
versions? Note this can make the request much slower. Default is false.
:param bool integration: If true, fetch only "integration versions", otherwise
fetch only non-integration versions.
:return: Version numbers of the most recent artifacts
:rtype: list
:raises requests.exceptions.HTTPError: For any non-success HTTP responses
from the Artifactory API.
:raises ValueError: If limit is 0 or negative. | Below is the instruction that describes the task:
### Input:
Get a list of the version numbers of the most recent artifacts (integration
or non-integration), ordered by the version number, for a particular group and
artifact combination.
:param str group: Group of the artifact to get versions of
:param str artifact: Name of the artifact to get versions of
:param int limit: Fetch only this many of the most recent releases
:param bool remote: Should remote repositories be searched to find the latest
versions? Note this can make the request much slower. Default is false.
:param bool integration: If true, fetch only "integration versions", otherwise
fetch only non-integration versions.
:return: Version numbers of the most recent artifacts
:rtype: list
:raises requests.exceptions.HTTPError: For any non-success HTTP responses
from the Artifactory API.
:raises ValueError: If limit is 0 or negative.
### Response:
def get_most_recent_versions(self, group, artifact, limit, remote=False, integration=False):
"""Get a list of the version numbers of the most recent artifacts (integration
or non-integration), ordered by the version number, for a particular group and
artifact combination.
:param str group: Group of the artifact to get versions of
:param str artifact: Name of the artifact to get versions of
:param int limit: Fetch only this many of the most recent releases
:param bool remote: Should remote repositories be searched to find the latest
versions? Note this can make the request much slower. Default is false.
:param bool integration: If true, fetch only "integration versions", otherwise
fetch only non-integration versions.
:return: Version numbers of the most recent artifacts
:rtype: list
:raises requests.exceptions.HTTPError: For any non-success HTTP responses
from the Artifactory API.
:raises ValueError: If limit is 0 or negative.
"""
if limit < 1:
raise ValueError("Releases limit must be positive")
url = self._base_url + '/api/search/versions'
params = {'g': group, 'a': artifact, 'repos': self._repo, 'remote': int(remote)}
self._logger.debug("Using all version API at %s - params %s", url, params)
response = self._session.get(url, params=params)
response.raise_for_status()
json = response.json()
versions = [
item['version'] for item in json['results'] if item['integration'] is integration]
# pylint: disable=no-member
versions.sort(key=distutils.version.LooseVersion, reverse=True)
return versions[:limit] |
def logodds(args):
"""
%prog logodds cnt1 cnt2
Compute log likelihood between two db.
"""
from math import log
from jcvi.formats.base import DictFile
p = OptionParser(logodds.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
cnt1, cnt2 = args
d = DictFile(cnt2)
fp = open(cnt1)
for row in fp:
scf, c1 = row.split()
c2 = d[scf]
c1, c2 = float(c1), float(c2)
c1 += 1
c2 += 1
score = int(100 * (log(c1) - log(c2)))
print("{0}\t{1}".format(scf, score)) | %prog logodds cnt1 cnt2
Compute log likelihood between two db. | Below is the instruction that describes the task:
### Input:
%prog logodds cnt1 cnt2
Compute log likelihood between two db.
### Response:
def logodds(args):
"""
%prog logodds cnt1 cnt2
Compute log likelihood between two db.
"""
from math import log
from jcvi.formats.base import DictFile
p = OptionParser(logodds.__doc__)
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
cnt1, cnt2 = args
d = DictFile(cnt2)
fp = open(cnt1)
for row in fp:
scf, c1 = row.split()
c2 = d[scf]
c1, c2 = float(c1), float(c2)
c1 += 1
c2 += 1
score = int(100 * (log(c1) - log(c2)))
print("{0}\t{1}".format(scf, score)) |
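Per scaffold, the score reduces to the add-one-smoothed difference of logs scaled by 100; a small sketch with made-up counts:
# Sketch: the per-scaffold score printed above, for one hypothetical count pair.
from math import log

c1, c2 = 10.0, 40.0      # counts read from cnt1 and looked up in cnt2
c1, c2 = c1 + 1, c2 + 1  # add-one smoothing, as in the loop
score = int(100 * (log(c1) - log(c2)))
print(score)  # -131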
def create_rflink_connection(port=None, host=None, baud=57600, protocol=RflinkProtocol,
packet_callback=None, event_callback=None,
disconnect_callback=None, ignore=None, loop=None):
"""Create Rflink manager class, returns transport coroutine."""
# use default protocol if not specified
protocol = partial(
protocol,
loop=loop if loop else asyncio.get_event_loop(),
packet_callback=packet_callback,
event_callback=event_callback,
disconnect_callback=disconnect_callback,
ignore=ignore if ignore else [],
)
# setup serial connection if no transport specified
if host:
conn = loop.create_connection(protocol, host, port)
else:
baud = baud
conn = create_serial_connection(loop, protocol, port, baud)
return conn | Create Rflink manager class, returns transport coroutine. | Below is the instruction that describes the task:
### Input:
Create Rflink manager class, returns transport coroutine.
### Response:
def create_rflink_connection(port=None, host=None, baud=57600, protocol=RflinkProtocol,
packet_callback=None, event_callback=None,
disconnect_callback=None, ignore=None, loop=None):
"""Create Rflink manager class, returns transport coroutine."""
# use default protocol if not specified
protocol = partial(
protocol,
loop=loop if loop else asyncio.get_event_loop(),
packet_callback=packet_callback,
event_callback=event_callback,
disconnect_callback=disconnect_callback,
ignore=ignore if ignore else [],
)
# setup serial connection if no transport specified
if host:
conn = loop.create_connection(protocol, host, port)
else:
baud = baud
conn = create_serial_connection(loop, protocol, port, baud)
return conn |
def model_builds(self):
"""
Access the model_builds
:returns: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
:rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
"""
if self._model_builds is None:
self._model_builds = ModelBuildList(self._version, assistant_sid=self._solution['sid'], )
return self._model_builds | Access the model_builds
:returns: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
:rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList | Below is the instruction that describes the task:
### Input:
Access the model_builds
:returns: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
:rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
### Response:
def model_builds(self):
"""
Access the model_builds
:returns: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
:rtype: twilio.rest.autopilot.v1.assistant.model_build.ModelBuildList
"""
if self._model_builds is None:
self._model_builds = ModelBuildList(self._version, assistant_sid=self._solution['sid'], )
return self._model_builds |
def _get_dep_to_dot_name_mapping(dependencies):
""" Creates mapping between Dependency classes and names used in DOT graph
"""
dot_name_to_deps = {}
for dep in dependencies:
dot_name = dep.name
if dot_name not in dot_name_to_deps:
dot_name_to_deps[dot_name] = [dep]
else:
dot_name_to_deps[dot_name].append(dep)
dep_to_dot_name = {}
for dot_name, deps in dot_name_to_deps.items():
if len(deps) == 1:
dep_to_dot_name[deps[0]] = dot_name
continue
for idx, dep in enumerate(deps):
dep_to_dot_name[dep] = dot_name + str(idx)
return dep_to_dot_name | Creates mapping between Dependency classes and names used in DOT graph | Below is the instruction that describes the task:
### Input:
Creates mapping between Dependency classes and names used in DOT graph
### Response:
def _get_dep_to_dot_name_mapping(dependencies):
""" Creates mapping between Dependency classes and names used in DOT graph
"""
dot_name_to_deps = {}
for dep in dependencies:
dot_name = dep.name
if dot_name not in dot_name_to_deps:
dot_name_to_deps[dot_name] = [dep]
else:
dot_name_to_deps[dot_name].append(dep)
dep_to_dot_name = {}
for dot_name, deps in dot_name_to_deps.items():
if len(deps) == 1:
dep_to_dot_name[deps[0]] = dot_name
continue
for idx, dep in enumerate(deps):
dep_to_dot_name[dep] = dot_name + str(idx)
return dep_to_dot_name |
def nth(iterable, n, default=None):
"""Returns the nth item or a default value.
Example::
>>> nth([0, 1, 2], 1)
1
>>> nth([0, 1, 2], 100)
None
**中文文档**
取出一个可循环对象中的第n个元素。等效于list(iterable)[n], 但占用极小的内存。
因为list(iterable)要将所有元素放在内存中并生成一个新列表。该方法常用语对于
那些取index操作被改写了的可循环对象。
"""
return next(itertools.islice(iterable, n, None), default) | Returns the nth item or a default value.
Example::
>>> nth([0, 1, 2], 1)
1
>>> nth([0, 1, 2], 100)
None
**中文文档**
取出一个可循环对象中的第n个元素。等效于list(iterable)[n], 但占用极小的内存。
因为list(iterable)要将所有元素放在内存中并生成一个新列表。该方法常用语对于
那些取index操作被改写了的可循环对象。 | Below is the instruction that describes the task:
### Input:
Returns the nth item or a default value.
Example::
>>> nth([0, 1, 2], 1)
1
>>> nth([0, 1, 2], 100)
None
**中文文档**
取出一个可循环对象中的第n个元素。等效于list(iterable)[n], 但占用极小的内存。
因为list(iterable)要将所有元素放在内存中并生成一个新列表。该方法常用语对于
那些取index操作被改写了的可循环对象。
### Response:
def nth(iterable, n, default=None):
"""Returns the nth item or a default value.
Example::
>>> nth([0, 1, 2], 1)
1
>>> nth([0, 1, 2], 100)
None
**中文文档**
取出一个可循环对象中的第n个元素。等效于list(iterable)[n], 但占用极小的内存。
因为list(iterable)要将所有元素放在内存中并生成一个新列表。该方法常用语对于
那些取index操作被改写了的可循环对象。
"""
return next(itertools.islice(iterable, n, None), default) |
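The doctest-style examples in the docstring can be verified directly with a self-contained copy of the one-liner:
# Sketch: islice skips the first n items lazily, so only one element is realised.
import itertools

def nth(iterable, n, default=None):
    return next(itertools.islice(iterable, n, None), default)

assert nth([0, 1, 2], 1) == 1
assert nth([0, 1, 2], 100) is None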
def suggest_pairs(top_n=10, per_n=3, ignore_before=300):
""" Find the maximally interesting pairs of players to match up
First, sort the ratings by uncertainty.
Then, take the ten highest players with the highest uncertainty
For each of them, call them `p1`
Sort all the models by their distance from p1's rating and take the 20
nearest rated models. ('candidate_p2s')
Choose pairings, (p1, p2), randomly from this list.
`top_n` will pair the top n models by uncertainty.
`per_n` will give each of the top_n models this many opponents
`ignore_before` is the model number to `filter` off, i.e., the early models.
Returns a list of *model numbers*, not model ids.
"""
db = sqlite3.connect("ratings.db")
data = db.execute("select model_winner, model_loser from wins").fetchall()
bucket_ids = [id[0] for id in db.execute(
"select id from models where bucket = ?", (fsdb.models_dir(),)).fetchall()]
bucket_ids.sort()
data = [d for d in data if d[0] in bucket_ids and d[1] in bucket_ids]
ratings = [(model_num_for(k), v[0], v[1]) for k, v in compute_ratings(data).items()]
ratings.sort()
ratings = ratings[ignore_before:] # Filter off the first ignore_before models, which improve too fast.
ratings.sort(key=lambda r: r[2], reverse=True)
res = []
for p1 in ratings[:top_n]:
candidate_p2s = sorted(ratings, key=lambda p2_tup: abs(p1[1] - p2_tup[1]))[1:20]
choices = random.sample(candidate_p2s, per_n)
print("Pairing {}, sigma {:.2f} (Rating {:.2f})".format(p1[0], p1[2], p1[1]))
for p2 in choices:
res.append([p1[0], p2[0]])
print(" {}, ratings delta {:.2f}".format(p2[0], abs(p1[1] - p2[1])))
return res | Find the maximally interesting pairs of players to match up
First, sort the ratings by uncertainty.
Then, take the ten highest players with the highest uncertainty
For each of them, call them `p1`
Sort all the models by their distance from p1's rating and take the 20
nearest rated models. ('candidate_p2s')
Choose pairings, (p1, p2), randomly from this list.
`top_n` will pair the top n models by uncertainty.
`per_n` will give each of the top_n models this many opponents
`ignore_before` is the model number to `filter` off, i.e., the early models.
Returns a list of *model numbers*, not model ids. | Below is the instruction that describes the task:
### Input:
Find the maximally interesting pairs of players to match up
First, sort the ratings by uncertainty.
Then, take the ten highest players with the highest uncertainty
For each of them, call them `p1`
Sort all the models by their distance from p1's rating and take the 20
nearest rated models. ('candidate_p2s')
Choose pairings, (p1, p2), randomly from this list.
`top_n` will pair the top n models by uncertainty.
`per_n` will give each of the top_n models this many opponents
`ignore_before` is the model number to `filter` off, i.e., the early models.
Returns a list of *model numbers*, not model ids.
### Response:
def suggest_pairs(top_n=10, per_n=3, ignore_before=300):
""" Find the maximally interesting pairs of players to match up
First, sort the ratings by uncertainty.
Then, take the ten highest players with the highest uncertainty
For each of them, call them `p1`
Sort all the models by their distance from p1's rating and take the 20
nearest rated models. ('candidate_p2s')
Choose pairings, (p1, p2), randomly from this list.
`top_n` will pair the top n models by uncertainty.
`per_n` will give each of the top_n models this many opponents
`ignore_before` is the model number to `filter` off, i.e., the early models.
Returns a list of *model numbers*, not model ids.
"""
db = sqlite3.connect("ratings.db")
data = db.execute("select model_winner, model_loser from wins").fetchall()
bucket_ids = [id[0] for id in db.execute(
"select id from models where bucket = ?", (fsdb.models_dir(),)).fetchall()]
bucket_ids.sort()
data = [d for d in data if d[0] in bucket_ids and d[1] in bucket_ids]
ratings = [(model_num_for(k), v[0], v[1]) for k, v in compute_ratings(data).items()]
ratings.sort()
ratings = ratings[ignore_before:] # Filter off the first ignore_before models, which improve too fast.
ratings.sort(key=lambda r: r[2], reverse=True)
res = []
for p1 in ratings[:top_n]:
candidate_p2s = sorted(ratings, key=lambda p2_tup: abs(p1[1] - p2_tup[1]))[1:20]
choices = random.sample(candidate_p2s, per_n)
print("Pairing {}, sigma {:.2f} (Rating {:.2f})".format(p1[0], p1[2], p1[1]))
for p2 in choices:
res.append([p1[0], p2[0]])
print(" {}, ratings delta {:.2f}".format(p2[0], abs(p1[1] - p2[1])))
return res |
def load(self, yr=None, doy=None, date=None, fname=None, fid=None,
verifyPad=False):
"""Load instrument data into Instrument object .data.
Parameters
----------
yr : integer
year for desired data
doy : integer
day of year
date : datetime object
date to load
fname : 'string'
filename to be loaded
verifyPad : boolean
if True, padding data not removed (debug purposes)
Returns
--------
Void. Data is added to self.data
Note
----
Loads data for a chosen instrument into .data. Any functions chosen
by the user and added to the custom processing queue (.custom.add)
are automatically applied to the data before it is available to
user in .data.
"""
# set options used by loading routine based upon user input
if date is not None:
self._set_load_parameters(date=date, fid=None)
# increment
inc = pds.DateOffset(days=1)
curr = date
elif (yr is not None) & (doy is not None):
date = pds.datetime(yr, 1, 1) + pds.DateOffset(days=(doy-1))
self._set_load_parameters(date=date, fid=None)
# increment
inc = pds.DateOffset(days=1)
curr = self.date
elif fname is not None:
# date will have to be set later by looking at the data
self._set_load_parameters(date=None,
fid=self.files.get_index(fname))
# increment one file at a time
inc = 1
curr = self._fid.copy()
elif fid is not None:
self._set_load_parameters(date=None, fid=fid)
# increment one file at a time
inc = 1
curr = fid
else:
estr = 'Must supply a yr,doy pair, or datetime object, or filename'
estr = '{:s} to load data from.'.format(estr)
raise TypeError(estr)
self.orbits._reset()
# if pad or multi_file_day is true, need to have a three day/file load
loop_pad = self.pad if self.pad is not None else pds.DateOffset(seconds=0)
if (self.pad is not None) | self.multi_file_day:
if self._next_data.empty & self._prev_data.empty:
# data has not already been loaded for previous and next days
# load data for all three
print('Initializing three day/file window')
# using current date or fid
self._prev_data, self._prev_meta = self._load_prev()
self._curr_data, self._curr_meta = \
self._load_data(date=self.date, fid=self._fid)
self._next_data, self._next_meta = self._load_next()
else:
# moving forward in time
if self._next_data_track == curr:
del self._prev_data
self._prev_data = self._curr_data
self._prev_meta = self._curr_meta
self._curr_data = self._next_data
self._curr_meta = self._next_meta
self._next_data, self._next_meta = self._load_next()
# moving backward in time
elif self._prev_data_track == curr:
del self._next_data
self._next_data = self._curr_data
self._next_meta = self._curr_meta
self._curr_data = self._prev_data
self._curr_meta = self._prev_meta
self._prev_data, self._prev_meta = self._load_prev()
# jumped in time/or switched from filebased to date based access
else:
del self._prev_data
del self._curr_data
del self._next_data
self._prev_data, self._prev_meta = self._load_prev()
self._curr_data, self._curr_meta = \
self._load_data(date=self.date, fid=self._fid)
self._next_data, self._next_meta = self._load_next()
# make sure datetime indices for all data is monotonic
if not self._prev_data.index.is_monotonic_increasing:
self._prev_data.sort_index(inplace=True)
if not self._curr_data.index.is_monotonic_increasing:
self._curr_data.sort_index(inplace=True)
if not self._next_data.index.is_monotonic_increasing:
self._next_data.sort_index(inplace=True)
# make tracking indexes consistent with new loads
self._next_data_track = curr + inc
self._prev_data_track = curr - inc
# attach data to object
if not self._curr_data.empty:
self.data = self._curr_data.copy()
self.meta = self._curr_meta.copy()
else:
self.data = DataFrame(None)
# line below removed as it would delete previous meta, if any
# if you end a seasonal analysis with a day with no data, then
# no meta: self.meta = _meta.Meta()
# multi file days can extend past a single day, only want data from
# specific date if loading by day
# set up times for the possible data padding coming up
if self._load_by_date:
#print ('double trouble')
first_time = self.date
first_pad = self.date - loop_pad
last_time = self.date + pds.DateOffset(days=1)
last_pad = self.date + pds.DateOffset(days=1) + loop_pad
want_last_pad = False
# loading by file, can't be a multi_file-day flag situation
elif (not self._load_by_date) and (not self.multi_file_day):
#print ('single trouble')
first_time = self._curr_data.index[0]
first_pad = first_time - loop_pad
last_time = self._curr_data.index[-1]
last_pad = last_time + loop_pad
want_last_pad = True
else:
raise ValueError("multi_file_day and loading by date are " +
"effectively equivalent. Can't have " +
"multi_file_day and load by file.")
#print (first_pad, first_time, last_time, last_pad)
# pad data based upon passed parameter
if (not self._prev_data.empty) & (not self.data.empty):
padLeft = self._prev_data.loc[first_pad : self.data.index[0]]
if len(padLeft) > 0:
if (padLeft.index[-1] == self.data.index[0]) :
padLeft = padLeft.iloc[:-1, :]
self.data = pds.concat([padLeft, self.data])
if (not self._next_data.empty) & (not self.data.empty):
padRight = self._next_data.loc[self.data.index[-1] : last_pad]
if len(padRight) > 0:
if (padRight.index[0] == self.data.index[-1]) :
padRight = padRight.iloc[1:, :]
self.data = pds.concat([self.data, padRight])
self.data = self.data.ix[first_pad : last_pad]
# want exclusive end slicing behavior from above
if not self.empty:
if (self.data.index[-1] == last_pad) & (not want_last_pad):
self.data = self.data.iloc[:-1, :]
## drop any possible duplicate index times
##self.data.drop_duplicates(inplace=True)
#self.data = self.data[~self.data.index.duplicated()]
# if self.pad is False, load single day
else:
self.data, meta = self._load_data(date=self.date, fid=self._fid)
if not self.data.empty:
self.meta = meta
# check if load routine actually returns meta
if self.meta.data.empty:
self.meta[self.data.columns] = {self.name_label: self.data.columns,
self.units_label: [''] *
len(self.data.columns)}
# if loading by file set the yr, doy, and date
if not self._load_by_date:
if self.pad is not None:
temp = first_time
else:
temp = self.data.index[0]
self.date = pds.datetime(temp.year, temp.month, temp.day)
self.yr, self.doy = utils.getyrdoy(self.date)
if not self.data.empty:
self._default_rtn(self)
# clean
if (not self.data.empty) & (self.clean_level != 'none'):
self._clean_rtn(self)
# apply custom functions
if not self.data.empty:
self.custom._apply_all(self)
# remove the excess padding, if any applied
if (self.pad is not None) & (not self.data.empty) & (not verifyPad):
self.data = self.data[first_time: last_time]
if not self.empty:
if (self.data.index[-1] == last_time) & (not want_last_pad):
self.data = self.data.iloc[:-1, :]
# transfer any extra attributes in meta to the Instrument object
self.meta.transfer_attributes_to_instrument(self)
sys.stdout.flush()
return | Load instrument data into Instrument object .data.
Parameters
----------
yr : integer
year for desired data
doy : integer
day of year
date : datetime object
date to load
fname : 'string'
filename to be loaded
verifyPad : boolean
if True, padding data not removed (debug purposes)
Returns
--------
Void. Data is added to self.data
Note
----
Loads data for a chosen instrument into .data. Any functions chosen
by the user and added to the custom processing queue (.custom.add)
are automatically applied to the data before it is available to
user in .data. | Below is the instruction that describes the task:
### Input:
Load instrument data into Instrument object .data.
Parameters
----------
yr : integer
year for desired data
doy : integer
day of year
date : datetime object
date to load
fname : 'string'
filename to be loaded
verifyPad : boolean
if True, padding data not removed (debug purposes)
Returns
--------
Void. Data is added to self.data
Note
----
Loads data for a chosen instrument into .data. Any functions chosen
by the user and added to the custom processing queue (.custom.add)
are automatically applied to the data before it is available to
user in .data.
### Response:
def load(self, yr=None, doy=None, date=None, fname=None, fid=None,
verifyPad=False):
"""Load instrument data into Instrument object .data.
Parameters
----------
yr : integer
year for desired data
doy : integer
day of year
date : datetime object
date to load
fname : 'string'
filename to be loaded
verifyPad : boolean
if True, padding data not removed (debug purposes)
Returns
--------
Void. Data is added to self.data
Note
----
Loads data for a chosen instrument into .data. Any functions chosen
by the user and added to the custom processing queue (.custom.add)
are automatically applied to the data before it is available to
user in .data.
"""
# set options used by loading routine based upon user input
if date is not None:
self._set_load_parameters(date=date, fid=None)
# increment
inc = pds.DateOffset(days=1)
curr = date
elif (yr is not None) & (doy is not None):
date = pds.datetime(yr, 1, 1) + pds.DateOffset(days=(doy-1))
self._set_load_parameters(date=date, fid=None)
# increment
inc = pds.DateOffset(days=1)
curr = self.date
elif fname is not None:
# date will have to be set later by looking at the data
self._set_load_parameters(date=None,
fid=self.files.get_index(fname))
# increment one file at a time
inc = 1
curr = self._fid.copy()
elif fid is not None:
self._set_load_parameters(date=None, fid=fid)
# increment one file at a time
inc = 1
curr = fid
else:
estr = 'Must supply a yr,doy pair, or datetime object, or filename'
estr = '{:s} to load data from.'.format(estr)
raise TypeError(estr)
self.orbits._reset()
# if pad or multi_file_day is true, need to have a three day/file load
loop_pad = self.pad if self.pad is not None else pds.DateOffset(seconds=0)
if (self.pad is not None) | self.multi_file_day:
if self._next_data.empty & self._prev_data.empty:
# data has not already been loaded for previous and next days
# load data for all three
print('Initializing three day/file window')
# using current date or fid
self._prev_data, self._prev_meta = self._load_prev()
self._curr_data, self._curr_meta = \
self._load_data(date=self.date, fid=self._fid)
self._next_data, self._next_meta = self._load_next()
else:
# moving forward in time
if self._next_data_track == curr:
del self._prev_data
self._prev_data = self._curr_data
self._prev_meta = self._curr_meta
self._curr_data = self._next_data
self._curr_meta = self._next_meta
self._next_data, self._next_meta = self._load_next()
# moving backward in time
elif self._prev_data_track == curr:
del self._next_data
self._next_data = self._curr_data
self._next_meta = self._curr_meta
self._curr_data = self._prev_data
self._curr_meta = self._prev_meta
self._prev_data, self._prev_meta = self._load_prev()
# jumped in time/or switched from filebased to date based access
else:
del self._prev_data
del self._curr_data
del self._next_data
self._prev_data, self._prev_meta = self._load_prev()
self._curr_data, self._curr_meta = \
self._load_data(date=self.date, fid=self._fid)
self._next_data, self._next_meta = self._load_next()
# make sure datetime indices for all data is monotonic
if not self._prev_data.index.is_monotonic_increasing:
self._prev_data.sort_index(inplace=True)
if not self._curr_data.index.is_monotonic_increasing:
self._curr_data.sort_index(inplace=True)
if not self._next_data.index.is_monotonic_increasing:
self._next_data.sort_index(inplace=True)
# make tracking indexes consistent with new loads
self._next_data_track = curr + inc
self._prev_data_track = curr - inc
# attach data to object
if not self._curr_data.empty:
self.data = self._curr_data.copy()
self.meta = self._curr_meta.copy()
else:
self.data = DataFrame(None)
# line below removed as it would delete previous meta, if any
# if you end a seasonal analysis with a day with no data, then
# no meta: self.meta = _meta.Meta()
# multi file days can extend past a single day, only want data from
# specific date if loading by day
# set up times for the possible data padding coming up
if self._load_by_date:
#print ('double trouble')
first_time = self.date
first_pad = self.date - loop_pad
last_time = self.date + pds.DateOffset(days=1)
last_pad = self.date + pds.DateOffset(days=1) + loop_pad
want_last_pad = False
# loading by file, can't be a multi_file-day flag situation
elif (not self._load_by_date) and (not self.multi_file_day):
#print ('single trouble')
first_time = self._curr_data.index[0]
first_pad = first_time - loop_pad
last_time = self._curr_data.index[-1]
last_pad = last_time + loop_pad
want_last_pad = True
else:
raise ValueError("multi_file_day and loading by date are " +
"effectively equivalent. Can't have " +
"multi_file_day and load by file.")
#print (first_pad, first_time, last_time, last_pad)
# pad data based upon passed parameter
if (not self._prev_data.empty) & (not self.data.empty):
padLeft = self._prev_data.loc[first_pad : self.data.index[0]]
if len(padLeft) > 0:
if (padLeft.index[-1] == self.data.index[0]) :
padLeft = padLeft.iloc[:-1, :]
self.data = pds.concat([padLeft, self.data])
if (not self._next_data.empty) & (not self.data.empty):
padRight = self._next_data.loc[self.data.index[-1] : last_pad]
if len(padRight) > 0:
if (padRight.index[0] == self.data.index[-1]) :
padRight = padRight.iloc[1:, :]
self.data = pds.concat([self.data, padRight])
self.data = self.data.ix[first_pad : last_pad]
# want exclusive end slicing behavior from above
if not self.empty:
if (self.data.index[-1] == last_pad) & (not want_last_pad):
self.data = self.data.iloc[:-1, :]
## drop any possible duplicate index times
##self.data.drop_duplicates(inplace=True)
#self.data = self.data[~self.data.index.duplicated()]
# if self.pad is False, load single day
else:
self.data, meta = self._load_data(date=self.date, fid=self._fid)
if not self.data.empty:
self.meta = meta
# check if load routine actually returns meta
if self.meta.data.empty:
self.meta[self.data.columns] = {self.name_label: self.data.columns,
self.units_label: [''] *
len(self.data.columns)}
# if loading by file set the yr, doy, and date
if not self._load_by_date:
if self.pad is not None:
temp = first_time
else:
temp = self.data.index[0]
self.date = pds.datetime(temp.year, temp.month, temp.day)
self.yr, self.doy = utils.getyrdoy(self.date)
if not self.data.empty:
self._default_rtn(self)
# clean
if (not self.data.empty) & (self.clean_level != 'none'):
self._clean_rtn(self)
# apply custom functions
if not self.data.empty:
self.custom._apply_all(self)
# remove the excess padding, if any applied
if (self.pad is not None) & (not self.data.empty) & (not verifyPad):
self.data = self.data[first_time: last_time]
if not self.empty:
if (self.data.index[-1] == last_time) & (not want_last_pad):
self.data = self.data.iloc[:-1, :]
# transfer any extra attributes in meta to the Instrument object
self.meta.transfer_attributes_to_instrument(self)
sys.stdout.flush()
return |
def get(self, event):
"""Return a single schema"""
self.log("Schemarequest for", event.data, "from",
event.user, lvl=debug)
if event.data in schemastore:
response = {
'component': 'hfos.events.schemamanager',
'action': 'get',
'data': l10n_schemastore[event.client.language][event.data]
}
self.fireEvent(send(event.client.uuid, response))
else:
self.log("Unavailable schema requested!", lvl=warn) | Return a single schema | Below is the instruction that describes the task:
### Input:
Return a single schema
### Response:
def get(self, event):
"""Return a single schema"""
self.log("Schemarequest for", event.data, "from",
event.user, lvl=debug)
if event.data in schemastore:
response = {
'component': 'hfos.events.schemamanager',
'action': 'get',
'data': l10n_schemastore[event.client.language][event.data]
}
self.fireEvent(send(event.client.uuid, response))
else:
self.log("Unavailable schema requested!", lvl=warn) |
def mangle_coverage(local_path, log):
"""Edit .coverage file substituting Windows file paths to Linux paths.
:param str local_path: Destination path to save file to.
:param logging.Logger log: Logger for this function. Populated by with_log() decorator.
"""
# Read the file, or return if not a .coverage file.
with open(local_path, mode='rb') as handle:
if handle.read(13) != b'!coverage.py:':
log.debug('File %s not a coverage file.', local_path)
return
handle.seek(0)
# I'm lazy, reading all of this into memory. What could possibly go wrong?
file_contents = handle.read(52428800).decode('utf-8') # 50 MiB limit, surely this is enough?
# Substitute paths.
for windows_path in set(REGEX_MANGLE.findall(file_contents)):
unix_relative_path = windows_path.replace(r'\\', '/').split('/', 3)[-1]
unix_absolute_path = os.path.abspath(unix_relative_path)
if not os.path.isfile(unix_absolute_path):
log.debug('Windows path: %s', windows_path)
log.debug('Unix relative path: %s', unix_relative_path)
log.error('No such file: %s', unix_absolute_path)
raise HandledError
file_contents = file_contents.replace(windows_path, unix_absolute_path)
# Write.
with open(local_path, 'w') as handle:
handle.write(file_contents) | Edit .coverage file substituting Windows file paths to Linux paths.
:param str local_path: Destination path to save file to.
:param logging.Logger log: Logger for this function. Populated by with_log() decorator. | Below is the instruction that describes the task:
### Input:
Edit .coverage file substituting Windows file paths to Linux paths.
:param str local_path: Destination path to save file to.
:param logging.Logger log: Logger for this function. Populated by with_log() decorator.
### Response:
def mangle_coverage(local_path, log):
"""Edit .coverage file substituting Windows file paths to Linux paths.
:param str local_path: Destination path to save file to.
:param logging.Logger log: Logger for this function. Populated by with_log() decorator.
"""
# Read the file, or return if not a .coverage file.
with open(local_path, mode='rb') as handle:
if handle.read(13) != b'!coverage.py:':
log.debug('File %s not a coverage file.', local_path)
return
handle.seek(0)
# I'm lazy, reading all of this into memory. What could possibly go wrong?
file_contents = handle.read(52428800).decode('utf-8') # 50 MiB limit, surely this is enough?
# Substitute paths.
for windows_path in set(REGEX_MANGLE.findall(file_contents)):
unix_relative_path = windows_path.replace(r'\\', '/').split('/', 3)[-1]
unix_absolute_path = os.path.abspath(unix_relative_path)
if not os.path.isfile(unix_absolute_path):
log.debug('Windows path: %s', windows_path)
log.debug('Unix relative path: %s', unix_relative_path)
log.error('No such file: %s', unix_absolute_path)
raise HandledError
file_contents = file_contents.replace(windows_path, unix_absolute_path)
# Write.
with open(local_path, 'w') as handle:
handle.write(file_contents) |
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
return ESCAPE_DCT[match.group(0)]
return u'"' + ESCAPE.sub(replace, s) + u'"' | Return a JSON representation of a Python string | Below is the instruction that describes the task:
### Input:
Return a JSON representation of a Python string
### Response:
def encode_basestring(s):
"""Return a JSON representation of a Python string
"""
if isinstance(s, str) and HAS_UTF8.search(s) is not None:
s = s.decode('utf-8')
def replace(match):
return ESCAPE_DCT[match.group(0)]
return u'"' + ESCAPE.sub(replace, s) + u'"' |
def _bucket_time(self, event_time):
"""
The seconds since epoch that represent a computed bucket.
An event bucket is the time of the earliest possible event for
that `bucket_width`. Example: if `bucket_width =
timedelta(minutes=10)`, bucket times will be the number of seconds
since epoch at 12:00, 12:10, ... on each day.
"""
event_time = kronos_time_to_epoch_time(event_time)
return event_time - (event_time % self._bucket_width) | The seconds since epoch that represent a computed bucket.
An event bucket is the time of the earliest possible event for
that `bucket_width`. Example: if `bucket_width =
timedelta(minutes=10)`, bucket times will be the number of seconds
since epoch at 12:00, 12:10, ... on each day. | Below is the instruction that describes the task:
### Input:
The seconds since epoch that represent a computed bucket.
An event bucket is the time of the earliest possible event for
that `bucket_width`. Example: if `bucket_width =
timedelta(minutes=10)`, bucket times will be the number of seconds
since epoch at 12:00, 12:10, ... on each day.
### Response:
def _bucket_time(self, event_time):
"""
The seconds since epoch that represent a computed bucket.
An event bucket is the time of the earliest possible event for
that `bucket_width`. Example: if `bucket_width =
timedelta(minutes=10)`, bucket times will be the number of seconds
since epoch at 12:00, 12:10, ... on each day.
"""
event_time = kronos_time_to_epoch_time(event_time)
return event_time - (event_time % self._bucket_width) |
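Leaving aside the kronos-to-epoch conversion (whose helper is not shown here), the bucketing itself is plain modular arithmetic; a self-contained sketch using the docstring's hypothetical 10-minute width:
# Sketch: floor an epoch timestamp to the start of its 600-second bucket.
bucket_width = 600            # 10 minutes, matching the docstring example
event_time = 1700000123       # hypothetical epoch seconds
bucket_time = event_time - (event_time % bucket_width)
assert bucket_time <= event_time and bucket_time % bucket_width == 0
print(bucket_time)            # 1699999800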
def derive_iobject_type(self, embedding_ns, embedded_ns, elt_name):
"""
Derive type of information object stemming from an embedded element
based on namespace information of embedding element, the embedded
element itself, and the name of the element.
"""
# Extract namespace-information
ns_info = search_by_re_list(self.RE_LIST_NS_TYPE_FROM_NS_URL, self.namespace_dict.get(embedding_ns, ""))
if not ns_info:
ns_info = {}
# This should yield the following information:
# - For namespace of an Cybox Object such as http://cybox.mitre.org/objects#AddressObject-2:
# - iotype_ns = http://cybox.mitre.org/objects#AddressObject
# - family = cybox.mitre.org
# - family_tag = cybox
# - type = AddressObject
# - revision = 2
# - For a base namespace such as http://cybox.mitre.org/common-2:
# - iotype_ns = http://cybox.mitre.org/common
# - family = cybox.mitre.org
# - family_tag = cybox
# - type = common
# - revision = 2
iobject_family_name = ns_info.get('family',None)
if not iobject_family_name:
iobject_family_name = ""
family_info = {}
if ns_info.get('family_tag',None) in ['stix', 'cybox']:
family_info = search_by_re_list(self.RE_LIST_NS_TYPE_FROM_NS_URL,
self.namespace_dict.get(ns_info['family_tag'], ""))
if family_info:
iobject_family_revision_name = family_info["revision"]
else:
iobject_family_revision_name = None
else:
iobject_family_revision_name = ns_info.get("revision",None)
if not iobject_family_revision_name:
iobject_family_revision_name = ''
# We take the object type from the ``xsi:type`` attribute
# given as in the following example::
# <cybox:Properties xsi:type="EmailMessageObj:EmailMessageObjectType">
# <cybox:Properties xsi:type="AddrObj:AddressObjectType" category="ipv4-addr">
#
if embedded_ns:
namespace_uri = self.namespace_dict.get(embedded_ns, "")
type_info = search_by_re_list(self.RE_LIST_NS_TYPE_FROM_NS_URL, namespace_uri)
if not type_info:
type_info = {}
if type_info and type_info.get('type',None) in ['common', 'cybox', 'stix']:
iobject_type_name = elt_name
iobject_type_namespace_uri = ns_info['iotype_ns']
iobject_type_revision_name = ns_info['revision']
else:
iobject_type_namespace_uri = type_info.get('iotype_ns',"%s/%s" % (dingos.DINGOS_MISSING_ID_NAMESPACE_URI_PREFIX,embedded_ns))
iobject_type_name = type_info.get('type',embedded_ns)#.split('Object')[0]
iobject_type_revision_name = type_info.get('revision','')
else:
iobject_type_name = elt_name
iobject_type_revision_name = iobject_family_revision_name
iobject_type_namespace_uri = ns_info.get("iotype_ns", "%s/%s" % (dingos.DINGOS_MISSING_ID_NAMESPACE_URI_PREFIX,elt_name))
if not iobject_type_revision_name:
iobject_type_revision_name = ''
logger.debug("Results of datatype extraction for ns %s, embedded ns %s and element name %s" % (
embedding_ns, embedded_ns, elt_name))
logger.debug("Family Name: %s" % iobject_family_name)
logger.debug("Family Revision %s" % iobject_family_revision_name)
logger.debug("Type Name %s" % iobject_type_name)
logger.debug("Type NS URI %s" % iobject_type_namespace_uri)
logger.debug("Type Revision %s" % iobject_type_revision_name)
return {'iobject_type_name': iobject_type_name,
'iobject_type_revision_name': iobject_type_revision_name,
'iobject_type_namespace_uri': iobject_type_namespace_uri,
'iobject_family_name': iobject_family_name,
'iobject_family_revision_name': iobject_family_revision_name} | Derive type of information object stemming from an embedded element
based on namespace information of embedding element, the embedded
element itself, and the name of the element. | Below is the instruction that describes the task:
### Input:
Derive type of information object stemming from an embedded element
based on namespace information of embedding element, the embedded
element itself, and the name of the element.
### Response:
def derive_iobject_type(self, embedding_ns, embedded_ns, elt_name):
"""
Derive type of information object stemming from an embedded element
based on namespace information of embedding element, the embedded
element itself, and the name of the element.
"""
# Extract namespace-information
ns_info = search_by_re_list(self.RE_LIST_NS_TYPE_FROM_NS_URL, self.namespace_dict.get(embedding_ns, ""))
if not ns_info:
ns_info = {}
# This should yield the following information:
# - For namespace of an Cybox Object such as http://cybox.mitre.org/objects#AddressObject-2:
# - iotype_ns = http://cybox.mitre.org/objects#AddressObject
# - family = cybox.mitre.org
# - family_tag = cybox
# - type = AddressObject
# - revision = 2
# - For a base namespace such as http://cybox.mitre.org/common-2:
# - iotype_ns = http://cybox.mitre.org/common
# - family = cybox.mitre.org
# - family_tag = cybox
# - type = common
# - revision = 2
iobject_family_name = ns_info.get('family',None)
if not iobject_family_name:
iobject_family_name = ""
family_info = {}
if ns_info.get('family_tag',None) in ['stix', 'cybox']:
family_info = search_by_re_list(self.RE_LIST_NS_TYPE_FROM_NS_URL,
self.namespace_dict.get(ns_info['family_tag'], ""))
if family_info:
iobject_family_revision_name = family_info["revision"]
else:
iobject_family_revision_name = None
else:
iobject_family_revision_name = ns_info.get("revision",None)
if not iobject_family_revision_name:
iobject_family_revision_name = ''
# We take the object type from the ``xsi:type`` attribute
# given as in the following example::
# <cybox:Properties xsi:type="EmailMessageObj:EmailMessageObjectType">
# <cybox:Properties xsi:type="AddrObj:AddressObjectType" category="ipv4-addr">
#
if embedded_ns:
namespace_uri = self.namespace_dict.get(embedded_ns, "")
type_info = search_by_re_list(self.RE_LIST_NS_TYPE_FROM_NS_URL, namespace_uri)
if not type_info:
type_info = {}
if type_info and type_info.get('type',None) in ['common', 'cybox', 'stix']:
iobject_type_name = elt_name
iobject_type_namespace_uri = ns_info['iotype_ns']
iobject_type_revision_name = ns_info['revision']
else:
iobject_type_namespace_uri = type_info.get('iotype_ns',"%s/%s" % (dingos.DINGOS_MISSING_ID_NAMESPACE_URI_PREFIX,embedded_ns))
iobject_type_name = type_info.get('type',embedded_ns)#.split('Object')[0]
iobject_type_revision_name = type_info.get('revision','')
else:
iobject_type_name = elt_name
iobject_type_revision_name = iobject_family_revision_name
iobject_type_namespace_uri = ns_info.get("iotype_ns", "%s/%s" % (dingos.DINGOS_MISSING_ID_NAMESPACE_URI_PREFIX,elt_name))
if not iobject_type_revision_name:
iobject_type_revision_name = ''
logger.debug("Results of datatype extraction for ns %s, embedded ns %s and element name %s" % (
embedding_ns, embedded_ns, elt_name))
logger.debug("Family Name: %s" % iobject_family_name)
logger.debug("Family Revision %s" % iobject_family_revision_name)
logger.debug("Type Name %s" % iobject_type_name)
logger.debug("Type NS URI %s" % iobject_type_namespace_uri)
logger.debug("Type Revision %s" % iobject_type_revision_name)
return {'iobject_type_name': iobject_type_name,
'iobject_type_revision_name': iobject_type_revision_name,
'iobject_type_namespace_uri': iobject_type_namespace_uri,
'iobject_family_name': iobject_family_name,
'iobject_family_revision_name': iobject_family_revision_name} |
def _prm_write_shared_array(self, key, data, hdf5_group, full_name, flag, **kwargs):
"""Creates an array that can be used with an HDF5 array object"""
if flag == HDF5StorageService.ARRAY:
self._prm_write_into_array(key, data, hdf5_group, full_name, **kwargs)
elif flag in (HDF5StorageService.CARRAY,
HDF5StorageService.EARRAY,
HDF5StorageService.VLARRAY):
self._prm_write_into_other_array(key, data, hdf5_group, full_name,
flag=flag, **kwargs)
else:
raise RuntimeError('Flag `%s` of hdf5 data `%s` of `%s` not understood' %
(flag, key, full_name))
self._hdf5file.flush() | Creates an array that can be used with an HDF5 array object | Below is the instruction that describes the task:
### Input:
Creates an array that can be used with an HDF5 array object
### Response:
def _prm_write_shared_array(self, key, data, hdf5_group, full_name, flag, **kwargs):
"""Creates an array that can be used with an HDF5 array object"""
if flag == HDF5StorageService.ARRAY:
self._prm_write_into_array(key, data, hdf5_group, full_name, **kwargs)
elif flag in (HDF5StorageService.CARRAY,
HDF5StorageService.EARRAY,
HDF5StorageService.VLARRAY):
self._prm_write_into_other_array(key, data, hdf5_group, full_name,
flag=flag, **kwargs)
else:
raise RuntimeError('Flag `%s` of hdf5 data `%s` of `%s` not understood' %
(flag, key, full_name))
self._hdf5file.flush() |
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr) | Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard | Below is the instruction that describes the task:
### Input:
Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
### Response:
def put_job_into(self, tube_name, data, pri=65536, delay=0, ttr=120):
"""Insert a new job into a specific queue. Wrapper around :func:`put_job`.
:param tube_name: Tube name
:type tube_name: str
:param data: Job body
:type data: Text (either str which will be encoded as utf-8, or bytes which are already utf-8
:param pri: Priority for the job
:type pri: int
:param delay: Delay in seconds before the job should be placed on the ready queue
:type delay: int
:param ttr: Time to reserve (how long a worker may work on this job before we assume the worker is blocked
and give the job to another worker
:type ttr: int
.. seealso::
:func:`put_job()`
Put a job into whatever the current tube is
:func:`using()`
Insert a job using an external guard
"""
with self.using(tube_name) as inserter:
return inserter.put_job(data=data, pri=pri, delay=delay, ttr=ttr) |
def volume_to_distance_with_errors(vol, vol_err):
""" Return the distance and standard deviation upper and lower bounds
Parameters
----------
vol: float
vol_err: float
Returns
-------
dist: float
ehigh: float
elow: float
"""
dist = (vol * 3.0/4.0/numpy.pi) ** (1.0/3.0)
ehigh = ((vol + vol_err) * 3.0/4.0/numpy.pi) ** (1.0/3.0) - dist
delta = numpy.where(vol >= vol_err, vol - vol_err, 0)
elow = dist - (delta * 3.0/4.0/numpy.pi) ** (1.0/3.0)
return dist, ehigh, elow | Return the distance and standard deviation upper and lower bounds
Parameters
----------
vol: float
vol_err: float
Returns
-------
dist: float
ehigh: float
elow: float | Below is the the instruction that describes the task:
### Input:
Return the distance and standard deviation upper and lower bounds
Parameters
----------
vol: float
vol_err: float
Returns
-------
dist: float
ehigh: float
elow: float
### Response:
def volume_to_distance_with_errors(vol, vol_err):
""" Return the distance and standard deviation upper and lower bounds
Parameters
----------
vol: float
vol_err: float
Returns
-------
dist: float
ehigh: float
elow: float
"""
dist = (vol * 3.0/4.0/numpy.pi) ** (1.0/3.0)
ehigh = ((vol + vol_err) * 3.0/4.0/numpy.pi) ** (1.0/3.0) - dist
delta = numpy.where(vol >= vol_err, vol - vol_err, 0)
elow = dist - (delta * 3.0/4.0/numpy.pi) ** (1.0/3.0)
return dist, ehigh, elow |
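A quick numeric check of the inverse-volume formula above; the function is assumed to be in scope, and the 10% volume error is an arbitrary illustrative value.

import numpy

vol = 4.0 / 3.0 * numpy.pi * 100.0 ** 3          # sphere of radius 100
vol_err = 0.1 * vol
dist, ehigh, elow = volume_to_distance_with_errors(numpy.array([vol]),
                                                   numpy.array([vol_err]))
print(dist)    # [ 100.]
print(ehigh)   # ~[ 3.23]  upper-bound offset
print(elow)    # ~[ 3.45]  lower-bound offset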
def _tr_paren(line_info):
"Translate lines escaped with: /"
return '%s%s(%s)' % (line_info.pre, line_info.ifun,
", ".join(line_info.the_rest.split())) | Translate lines escaped with: / | Below is the the instruction that describes the task:
### Input:
Translate lines escaped with: /
### Response:
def _tr_paren(line_info):
"Translate lines escaped with: /"
return '%s%s(%s)' % (line_info.pre, line_info.ifun,
", ".join(line_info.the_rest.split())) |
def _CreateRoutePatternsFolder(self, parent, route,
style_id=None, visible=True):
"""Create a KML Folder containing placemarks for each pattern in the route.
A pattern is a sequence of stops used by one of the trips in the route.
    If there are no patterns for the route then no folder is created and None
is returned.
Args:
parent: The parent ElementTree.Element instance.
route: The transitfeed.Route instance.
style_id: The id of a style to use if not None.
visible: Whether the folder is initially visible or not.
Returns:
The Folder ElementTree.Element instance or None if there are no patterns.
"""
pattern_id_to_trips = route.GetPatternIdTripDict()
if not pattern_id_to_trips:
return None
# sort by number of trips using the pattern
pattern_trips = pattern_id_to_trips.values()
pattern_trips.sort(lambda a, b: cmp(len(b), len(a)))
folder = self._CreateFolder(parent, 'Patterns', visible)
for n, trips in enumerate(pattern_trips):
trip_ids = [trip.trip_id for trip in trips]
name = 'Pattern %d (trips: %d)' % (n+1, len(trips))
description = 'Trips using this pattern (%d in total): %s' % (
len(trips), ', '.join(trip_ids))
placemark = self._CreatePlacemark(folder, name, style_id, visible,
description)
coordinates = [(stop.stop_lon, stop.stop_lat)
for stop in trips[0].GetPattern()]
self._CreateLineString(placemark, coordinates)
return folder | Create a KML Folder containing placemarks for each pattern in the route.
A pattern is a sequence of stops used by one of the trips in the route.
If there are no patterns for the route then no folder is created and None
is returned.
Args:
parent: The parent ElementTree.Element instance.
route: The transitfeed.Route instance.
style_id: The id of a style to use if not None.
visible: Whether the folder is initially visible or not.
Returns:
The Folder ElementTree.Element instance or None if there are no patterns. | Below is the the instruction that describes the task:
### Input:
Create a KML Folder containing placemarks for each pattern in the route.
A pattern is a sequence of stops used by one of the trips in the route.
If there are no patterns for the route then no folder is created and None
is returned.
Args:
parent: The parent ElementTree.Element instance.
route: The transitfeed.Route instance.
style_id: The id of a style to use if not None.
visible: Whether the folder is initially visible or not.
Returns:
The Folder ElementTree.Element instance or None if there are no patterns.
### Response:
def _CreateRoutePatternsFolder(self, parent, route,
style_id=None, visible=True):
"""Create a KML Folder containing placemarks for each pattern in the route.
A pattern is a sequence of stops used by one of the trips in the route.
    If there are no patterns for the route then no folder is created and None
is returned.
Args:
parent: The parent ElementTree.Element instance.
route: The transitfeed.Route instance.
style_id: The id of a style to use if not None.
visible: Whether the folder is initially visible or not.
Returns:
The Folder ElementTree.Element instance or None if there are no patterns.
"""
pattern_id_to_trips = route.GetPatternIdTripDict()
if not pattern_id_to_trips:
return None
# sort by number of trips using the pattern
pattern_trips = pattern_id_to_trips.values()
pattern_trips.sort(lambda a, b: cmp(len(b), len(a)))
folder = self._CreateFolder(parent, 'Patterns', visible)
for n, trips in enumerate(pattern_trips):
trip_ids = [trip.trip_id for trip in trips]
name = 'Pattern %d (trips: %d)' % (n+1, len(trips))
description = 'Trips using this pattern (%d in total): %s' % (
len(trips), ', '.join(trip_ids))
placemark = self._CreatePlacemark(folder, name, style_id, visible,
description)
coordinates = [(stop.stop_lon, stop.stop_lat)
for stop in trips[0].GetPattern()]
self._CreateLineString(placemark, coordinates)
return folder |
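The pattern sort above relies on Python 2's cmp-style list.sort, which no longer exists in Python 3. A small hedged sketch of the same most-trips-first ordering in Python 3, using toy data rather than real transitfeed objects:

pattern_id_to_trips = {"p1": ["t1"], "p2": ["t2", "t3", "t4"], "p3": ["t5", "t6"]}
# Equivalent of pattern_trips.sort(lambda a, b: cmp(len(b), len(a))) on Python 3:
pattern_trips = sorted(pattern_id_to_trips.values(), key=len, reverse=True)
print([len(trips) for trips in pattern_trips])   # [3, 2, 1]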
def get_metaData(self, dcmlist, series_number):
"""
Get metadata.
Voxel size is obtained from PixelSpacing and difference of
        SliceLocation of two neighboring slices (the first has index ifile).
        Files in dcmlist are used.
"""
# if dcmlist is None:
# dcmlist = self.files_in_serie
        # number of the slice from which to extract metadata information
ifile = 0
if len(dcmlist) == 0:
return {}
logger.debug("Filename: " + dcmlist[ifile])
data1 = self._read_file(dcmlist[ifile])
try:
# try to get difference from the beginning and also from the end
voxeldepth = self._get_slice_location_difference(dcmlist, ifile)
voxeldepth_end = self._get_slice_location_difference(dcmlist, -2)
if voxeldepth != voxeldepth_end:
logger.warning("Depth of slices is not the same in beginning and end of the sequence")
voxeldepth_1 = self._get_slice_location_difference(dcmlist, 1)
voxeldepth = np.median([voxeldepth, voxeldepth_end, voxeldepth_1])
# head1, teil1 = os.path.split(dcmlist[ifile])
# head2, teil2 = os.path.split(dcmlist[ifile + 1])
#
# data2 = self._read_file(dcmlist[ifile + 1])
# loc1 = get_slice_location(data1, teil1)
# loc2 = get_slice_location(data2, teil2)
# voxeldepth = float(np.abs(loc1 - loc2))
except Exception:
logger.warning('Problem with voxel depth. Using SliceThickness')
logger.debug(traceback.format_exc())
# + ' SeriesNumber: ' + str(data1.SeriesNumber))
try:
voxeldepth = float(data1.SliceThickness)
except Exception:
            logger.warning('Problem with SliceThickness, setting zero. '
+ traceback.format_exc())
voxeldepth = 0
try:
pixelsize_mm = data1.PixelSpacing
except:
logger.warning('Problem with PixelSpacing. Using [1,1]')
pixelsize_mm = [1, 1]
voxelsize_mm = [
voxeldepth,
float(pixelsize_mm[0]),
float(pixelsize_mm[1]),
]
metadata = {'voxelsize_mm': voxelsize_mm,
'Modality': data1.Modality,
'SeriesNumber': series_number
}
try:
metadata['SeriesDescription'] = data1.SeriesDescription
except:
logger.info(
'Problem with tag SeriesDescription, SeriesNumber: ' +
str(data1.SeriesNumber))
try:
metadata['ImageComments'] = data1.ImageComments
except:
logger.info(
'Problem with tag ImageComments')
# , SeriesNumber: ' +
# str(data1.SeriesNumber))
try:
metadata['Modality'] = data1.Modality
except:
logger.info(
'Problem with tag Modality')
# SeriesNumber: ' +
# str(data1.SeriesNumber))
metadata = attr_to_dict(data1, "AcquisitionDate", metadata)
metadata = attr_to_dict(data1, "StudyDate", metadata)
metadata = attr_to_dict(data1, "StudyID", metadata)
metadata = attr_to_dict(data1, "StudyDescription", metadata)
metadata = attr_to_dict(data1, "RequestedProcedureDescription", metadata)
metadata = attr_to_dict(data1, "PatientSex", metadata)
metadata = attr_to_dict(data1, "PatientAge", metadata)
metadata = attr_to_dict(data1, "PatientID", metadata)
metadata = attr_to_dict(data1, "PatientName", metadata)
# metadata = attr_to_dict(data1, "AcquisitionTime", metadata)
metadata['dcmfilelist'] = dcmlist
return metadata | Get metadata.
Voxel size is obtained from PixelSpacing and difference of
SliceLocation of two neighboring slices (the first has index ifile).
Files in dcmlist are used. | Below is the the instruction that describes the task:
### Input:
Get metadata.
Voxel size is obtained from PixelSpacing and difference of
SliceLocation of two neighboring slices (the first has index ifile).
Files in dcmlist are used.
### Response:
def get_metaData(self, dcmlist, series_number):
"""
Get metadata.
Voxel size is obtained from PixelSpacing and difference of
        SliceLocation of two neighboring slices (the first has index ifile).
        Files in dcmlist are used.
"""
# if dcmlist is None:
# dcmlist = self.files_in_serie
        # number of the slice from which to extract metadata information
ifile = 0
if len(dcmlist) == 0:
return {}
logger.debug("Filename: " + dcmlist[ifile])
data1 = self._read_file(dcmlist[ifile])
try:
# try to get difference from the beginning and also from the end
voxeldepth = self._get_slice_location_difference(dcmlist, ifile)
voxeldepth_end = self._get_slice_location_difference(dcmlist, -2)
if voxeldepth != voxeldepth_end:
logger.warning("Depth of slices is not the same in beginning and end of the sequence")
voxeldepth_1 = self._get_slice_location_difference(dcmlist, 1)
voxeldepth = np.median([voxeldepth, voxeldepth_end, voxeldepth_1])
# head1, teil1 = os.path.split(dcmlist[ifile])
# head2, teil2 = os.path.split(dcmlist[ifile + 1])
#
# data2 = self._read_file(dcmlist[ifile + 1])
# loc1 = get_slice_location(data1, teil1)
# loc2 = get_slice_location(data2, teil2)
# voxeldepth = float(np.abs(loc1 - loc2))
except Exception:
logger.warning('Problem with voxel depth. Using SliceThickness')
logger.debug(traceback.format_exc())
# + ' SeriesNumber: ' + str(data1.SeriesNumber))
try:
voxeldepth = float(data1.SliceThickness)
except Exception:
            logger.warning('Problem with SliceThickness, setting zero. '
+ traceback.format_exc())
voxeldepth = 0
try:
pixelsize_mm = data1.PixelSpacing
except:
logger.warning('Problem with PixelSpacing. Using [1,1]')
pixelsize_mm = [1, 1]
voxelsize_mm = [
voxeldepth,
float(pixelsize_mm[0]),
float(pixelsize_mm[1]),
]
metadata = {'voxelsize_mm': voxelsize_mm,
'Modality': data1.Modality,
'SeriesNumber': series_number
}
try:
metadata['SeriesDescription'] = data1.SeriesDescription
except:
logger.info(
'Problem with tag SeriesDescription, SeriesNumber: ' +
str(data1.SeriesNumber))
try:
metadata['ImageComments'] = data1.ImageComments
except:
logger.info(
'Problem with tag ImageComments')
# , SeriesNumber: ' +
# str(data1.SeriesNumber))
try:
metadata['Modality'] = data1.Modality
except:
logger.info(
'Problem with tag Modality')
# SeriesNumber: ' +
# str(data1.SeriesNumber))
metadata = attr_to_dict(data1, "AcquisitionDate", metadata)
metadata = attr_to_dict(data1, "StudyDate", metadata)
metadata = attr_to_dict(data1, "StudyID", metadata)
metadata = attr_to_dict(data1, "StudyDescription", metadata)
metadata = attr_to_dict(data1, "RequestedProcedureDescription", metadata)
metadata = attr_to_dict(data1, "PatientSex", metadata)
metadata = attr_to_dict(data1, "PatientAge", metadata)
metadata = attr_to_dict(data1, "PatientID", metadata)
metadata = attr_to_dict(data1, "PatientName", metadata)
# metadata = attr_to_dict(data1, "AcquisitionTime", metadata)
metadata['dcmfilelist'] = dcmlist
return metadata |
def horizon_main_nav(context):
"""Generates top-level dashboard navigation entries."""
if 'request' not in context:
return {}
current_dashboard = context['request'].horizon.get('dashboard', None)
dashboards = []
for dash in Horizon.get_dashboards():
if dash.can_access(context):
if callable(dash.nav) and dash.nav(context):
dashboards.append(dash)
elif dash.nav:
dashboards.append(dash)
return {'components': dashboards,
'user': context['request'].user,
'current': current_dashboard,
'request': context['request']} | Generates top-level dashboard navigation entries. | Below is the the instruction that describes the task:
### Input:
Generates top-level dashboard navigation entries.
### Response:
def horizon_main_nav(context):
"""Generates top-level dashboard navigation entries."""
if 'request' not in context:
return {}
current_dashboard = context['request'].horizon.get('dashboard', None)
dashboards = []
for dash in Horizon.get_dashboards():
if dash.can_access(context):
if callable(dash.nav) and dash.nav(context):
dashboards.append(dash)
elif dash.nav:
dashboards.append(dash)
return {'components': dashboards,
'user': context['request'].user,
'current': current_dashboard,
'request': context['request']} |
def mailto_to_envelope(mailto_str):
"""
Interpret mailto-string into a :class:`alot.db.envelope.Envelope`
"""
from alot.db.envelope import Envelope
headers, body = parse_mailto(mailto_str)
return Envelope(bodytext=body, headers=headers) | Interpret mailto-string into a :class:`alot.db.envelope.Envelope` | Below is the the instruction that describes the task:
### Input:
Interpret mailto-string into a :class:`alot.db.envelope.Envelope`
### Response:
def mailto_to_envelope(mailto_str):
"""
Interpret mailto-string into a :class:`alot.db.envelope.Envelope`
"""
from alot.db.envelope import Envelope
headers, body = parse_mailto(mailto_str)
return Envelope(bodytext=body, headers=headers) |
def quantile_gaussianize(x):
"""Normalize a sequence of values via rank and Normal c.d.f.
Args:
x (array_like): sequence of values.
Returns:
Gaussian-normalized values.
Example:
.. doctest::
>>> from scipy_sugar.stats import quantile_gaussianize
>>> print(quantile_gaussianize([-1, 0, 2]))
[-0.67448975 0. 0.67448975]
"""
from scipy.stats import norm, rankdata
x = asarray(x, float).copy()
ok = isfinite(x)
x[ok] *= -1
y = empty_like(x)
y[ok] = rankdata(x[ok])
y[ok] = norm.isf(y[ok] / (sum(ok) + 1))
y[~ok] = x[~ok]
return y | Normalize a sequence of values via rank and Normal c.d.f.
Args:
x (array_like): sequence of values.
Returns:
Gaussian-normalized values.
Example:
.. doctest::
>>> from scipy_sugar.stats import quantile_gaussianize
>>> print(quantile_gaussianize([-1, 0, 2]))
[-0.67448975 0. 0.67448975] | Below is the the instruction that describes the task:
### Input:
Normalize a sequence of values via rank and Normal c.d.f.
Args:
x (array_like): sequence of values.
Returns:
Gaussian-normalized values.
Example:
.. doctest::
>>> from scipy_sugar.stats import quantile_gaussianize
>>> print(quantile_gaussianize([-1, 0, 2]))
[-0.67448975 0. 0.67448975]
### Response:
def quantile_gaussianize(x):
"""Normalize a sequence of values via rank and Normal c.d.f.
Args:
x (array_like): sequence of values.
Returns:
Gaussian-normalized values.
Example:
.. doctest::
>>> from scipy_sugar.stats import quantile_gaussianize
>>> print(quantile_gaussianize([-1, 0, 2]))
[-0.67448975 0. 0.67448975]
"""
from scipy.stats import norm, rankdata
x = asarray(x, float).copy()
ok = isfinite(x)
x[ok] *= -1
y = empty_like(x)
y[ok] = rankdata(x[ok])
y[ok] = norm.isf(y[ok] / (sum(ok) + 1))
y[~ok] = x[~ok]
return y |
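One property worth noting: the isfinite mask means non-finite entries pass through unchanged while only the finite values are rank-transformed. An illustrative check, assuming the function above is in scope:

import numpy as np

y = quantile_gaussianize([-1.0, 0.0, np.nan, 2.0])
print(y)   # [-0.67448975  0.          nan  0.67448975]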
def handle(self, *args, **options):
""" Purge notifications """
        # retrieve old notifications
notifications = self.retrieve_old_notifications()
count = len(notifications)
if count > 0:
self.output('found %d notifications to purge...' % count)
notifications.delete()
self.output('%d notifications deleted successfully.' % count)
else:
self.output('there are no old notifications to purge') | Purge notifications | Below is the the instruction that describes the task:
### Input:
Purge notifications
### Response:
def handle(self, *args, **options):
""" Purge notifications """
        # retrieve old notifications
notifications = self.retrieve_old_notifications()
count = len(notifications)
if count > 0:
self.output('found %d notifications to purge...' % count)
notifications.delete()
self.output('%d notifications deleted successfully.' % count)
else:
self.output('there are no old notifications to purge') |
def _get_token_possibilities(cls, token, mode):
"""
Returns all possible component types of a token without regard to its context. For example "26" could be year,
date or minute, but can't be a month or an hour.
:param token: the token to classify
:param mode: the parse mode
:return: the dict of possible types and values if token was of that type
"""
token = token.lower().strip()
possibilities = {}
try:
as_int = int(token)
if mode != Mode.TIME:
if 1 <= as_int <= 9999 and (len(token) == 2 or len(token) == 4):
possibilities[Component.YEAR] = as_int
if 1 <= as_int <= 12:
possibilities[Component.MONTH] = as_int
if 1 <= as_int <= 31:
possibilities[Component.DAY] = as_int
if mode != Mode.DATE:
if 0 <= as_int <= 23:
possibilities[Component.HOUR] = as_int
if 0 <= as_int <= 59:
possibilities[Component.MINUTE] = as_int
if 0 <= as_int <= 59:
possibilities[Component.SECOND] = as_int
if len(token) == 3 or len(token) == 6 or len(token) == 9:
nano = 0
if len(token) == 3: # millisecond precision
nano = as_int * 1000000
elif len(token) == 6: # microsecond precision
nano = as_int * 1000
elif len(token) == 9:
nano = as_int
possibilities[Component.NANO] = nano
if len(token) == 4:
hour = as_int // 100
minute = as_int - (hour * 100)
if 1 <= hour <= 24 and 1 <= minute <= 59:
possibilities[Component.HOUR_AND_MINUTE] = as_int
except ValueError:
if mode != Mode.TIME:
# could it be a month alias?
month = MONTHS_BY_ALIAS.get(token, None)
if month is not None:
possibilities[Component.MONTH] = month
if mode != Mode.DATE:
# could it be an AM/PM marker?
is_am_marker = token == "am"
is_pm_marker = token == "pm"
if is_am_marker or is_pm_marker:
possibilities[Component.AM_PM] = cls.AM if is_am_marker else cls.PM
# offset parsing is limited to Z meaning UTC for now
if token == "z":
possibilities[Component.OFFSET] = 0
return possibilities | Returns all possible component types of a token without regard to its context. For example "26" could be year,
date or minute, but can't be a month or an hour.
:param token: the token to classify
:param mode: the parse mode
:return: the dict of possible types and values if token was of that type | Below is the the instruction that describes the task:
### Input:
Returns all possible component types of a token without regard to its context. For example "26" could be year,
date or minute, but can't be a month or an hour.
:param token: the token to classify
:param mode: the parse mode
:return: the dict of possible types and values if token was of that type
### Response:
def _get_token_possibilities(cls, token, mode):
"""
Returns all possible component types of a token without regard to its context. For example "26" could be year,
date or minute, but can't be a month or an hour.
:param token: the token to classify
:param mode: the parse mode
:return: the dict of possible types and values if token was of that type
"""
token = token.lower().strip()
possibilities = {}
try:
as_int = int(token)
if mode != Mode.TIME:
if 1 <= as_int <= 9999 and (len(token) == 2 or len(token) == 4):
possibilities[Component.YEAR] = as_int
if 1 <= as_int <= 12:
possibilities[Component.MONTH] = as_int
if 1 <= as_int <= 31:
possibilities[Component.DAY] = as_int
if mode != Mode.DATE:
if 0 <= as_int <= 23:
possibilities[Component.HOUR] = as_int
if 0 <= as_int <= 59:
possibilities[Component.MINUTE] = as_int
if 0 <= as_int <= 59:
possibilities[Component.SECOND] = as_int
if len(token) == 3 or len(token) == 6 or len(token) == 9:
nano = 0
if len(token) == 3: # millisecond precision
nano = as_int * 1000000
elif len(token) == 6: # microsecond precision
nano = as_int * 1000
elif len(token) == 9:
nano = as_int
possibilities[Component.NANO] = nano
if len(token) == 4:
hour = as_int // 100
minute = as_int - (hour * 100)
if 1 <= hour <= 24 and 1 <= minute <= 59:
possibilities[Component.HOUR_AND_MINUTE] = as_int
except ValueError:
if mode != Mode.TIME:
# could it be a month alias?
month = MONTHS_BY_ALIAS.get(token, None)
if month is not None:
possibilities[Component.MONTH] = month
if mode != Mode.DATE:
# could it be an AM/PM marker?
is_am_marker = token == "am"
is_pm_marker = token == "pm"
if is_am_marker or is_pm_marker:
possibilities[Component.AM_PM] = cls.AM if is_am_marker else cls.PM
# offset parsing is limited to Z meaning UTC for now
if token == "z":
possibilities[Component.OFFSET] = 0
return possibilities |
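Illustrative expectations for the classifier above, following the rules in the code. The class name DateParser is a hypothetical stand-in for whatever class owns this classmethod; Mode and Component are the enums referenced in the code.

# "26" is two digits, so it can be a year, day, minute or second,
# but never a month (>12) or an hour (>23):
DateParser._get_token_possibilities("26", Mode.DATE)
# -> {Component.YEAR: 26, Component.DAY: 26}
DateParser._get_token_possibilities("26", Mode.TIME)
# -> {Component.MINUTE: 26, Component.SECOND: 26}
DateParser._get_token_possibilities("z", Mode.TIME)
# -> {Component.OFFSET: 0}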
def _read_config_list():
"""
    Read the configuration list.
    """
    # 'a+' keeps the create-if-missing behaviour of 'w+' without truncating the file
    with codecs.open('conf.ini', 'a+', encoding='utf-8') as f1:
        f1.seek(0)
        conf_list = [conf for conf in f1.read().split('\n') if conf != '']
    return conf_list | Read the configuration list. | Below is the the instruction that describes the task:
### Input:
Read the configuration list.
### Response:
def _read_config_list():
"""
    Read the configuration list.
    """
    # 'a+' keeps the create-if-missing behaviour of 'w+' without truncating the file
    with codecs.open('conf.ini', 'a+', encoding='utf-8') as f1:
        f1.seek(0)
        conf_list = [conf for conf in f1.read().split('\n') if conf != '']
    return conf_list
def parse_answers(html_question):
"""Parse the answers of a given HTML question.
    The method parses the answers related to a given HTML question,
as well as all the comments related to the answer.
:param html_question: raw HTML question element
:returns: a list with the answers
"""
def parse_answer_container(update_info):
"""Parse the answer info container of a given HTML question.
The method parses the information available in the answer information
container. The container can have up to 2 elements: the first one
        contains the information related to the user who generated the question
        and the date (if any). The second one contains the date of the update,
        and the user who updated it (if not the same user who generated the question).
:param update_info: beautiful soup update_info container element
:returns: an object with the parsed information
"""
container_info = {}
created = update_info[0]
answered_at = created.abbr.attrs["title"]
# Convert date to UNIX timestamp
container_info['added_at'] = str(str_to_datetime(answered_at).timestamp())
container_info['answered_by'] = AskbotParser.parse_user_info(created)
try:
update_info[1]
except IndexError:
pass
else:
updated = update_info[1]
updated_at = updated.abbr.attrs["title"]
# Convert date to UNIX timestamp
container_info['updated_at'] = str(str_to_datetime(updated_at).timestamp())
if AskbotParser.parse_user_info(updated):
container_info['updated_by'] = AskbotParser.parse_user_info(updated)
return container_info
answer_list = []
# Select all the answers
bs_question = bs4.BeautifulSoup(html_question, "html.parser")
bs_answers = bs_question.select("div.answer")
for bs_answer in bs_answers:
answer_id = bs_answer.attrs["data-post-id"]
votes_element = bs_answer.select("div.vote-number")[0].text
accepted_answer = bs_answer.select("div.answer-img-accept")[0].get('title').endswith("correct")
# Select the body of the answer
body = bs_answer.select("div.post-body")
# Get the user information container and parse it
update_info = body[0].select("div.post-update-info")
answer_container = parse_answer_container(update_info)
# Remove the update-info-container div to be able to get the body
body[0].div.extract().select("div.post-update-info-container")
# Override the body with a clean one
body = body[0].get_text(strip=True)
# Generate the answer object
answer = {'id': answer_id,
'score': votes_element,
'summary': body,
'accepted': accepted_answer
}
# Update the object with the information in the answer container
answer.update(answer_container)
answer_list.append(answer)
return answer_list | Parse the answers of a given HTML question.
The method parses the answers related to a given HTML question,
as well as all the comments related to the answer.
:param html_question: raw HTML question element
:returns: a list with the answers | Below is the the instruction that describes the task:
### Input:
Parse the answers of a given HTML question.
The method parses the answers related to a given HTML question,
as well as all the comments related to the answer.
:param html_question: raw HTML question element
:returns: a list with the answers
### Response:
def parse_answers(html_question):
"""Parse the answers of a given HTML question.
    The method parses the answers related to a given HTML question,
as well as all the comments related to the answer.
:param html_question: raw HTML question element
:returns: a list with the answers
"""
def parse_answer_container(update_info):
"""Parse the answer info container of a given HTML question.
The method parses the information available in the answer information
container. The container can have up to 2 elements: the first one
        contains the information related to the user who generated the question
        and the date (if any). The second one contains the date of the update,
        and the user who updated it (if not the same user who generated the question).
:param update_info: beautiful soup update_info container element
:returns: an object with the parsed information
"""
container_info = {}
created = update_info[0]
answered_at = created.abbr.attrs["title"]
# Convert date to UNIX timestamp
container_info['added_at'] = str(str_to_datetime(answered_at).timestamp())
container_info['answered_by'] = AskbotParser.parse_user_info(created)
try:
update_info[1]
except IndexError:
pass
else:
updated = update_info[1]
updated_at = updated.abbr.attrs["title"]
# Convert date to UNIX timestamp
container_info['updated_at'] = str(str_to_datetime(updated_at).timestamp())
if AskbotParser.parse_user_info(updated):
container_info['updated_by'] = AskbotParser.parse_user_info(updated)
return container_info
answer_list = []
# Select all the answers
bs_question = bs4.BeautifulSoup(html_question, "html.parser")
bs_answers = bs_question.select("div.answer")
for bs_answer in bs_answers:
answer_id = bs_answer.attrs["data-post-id"]
votes_element = bs_answer.select("div.vote-number")[0].text
accepted_answer = bs_answer.select("div.answer-img-accept")[0].get('title').endswith("correct")
# Select the body of the answer
body = bs_answer.select("div.post-body")
# Get the user information container and parse it
update_info = body[0].select("div.post-update-info")
answer_container = parse_answer_container(update_info)
# Remove the update-info-container div to be able to get the body
body[0].div.extract().select("div.post-update-info-container")
# Override the body with a clean one
body = body[0].get_text(strip=True)
# Generate the answer object
answer = {'id': answer_id,
'score': votes_element,
'summary': body,
'accepted': accepted_answer
}
# Update the object with the information in the answer container
answer.update(answer_container)
answer_list.append(answer)
return answer_list |
def callback_read_char(self):
u'''Reads a character and informs the readline callback interface when a line is received'''
if self.keyboard_poll():
line = self.get_line_buffer() + u'\n'
# however there is another newline added by
# self.mode.readline_setup(prompt) which is called by callback_handler_install
# this differs from GNU readline
self.add_history(self.mode.l_buffer)
# TADA:
self.callback(line) | u'''Reads a character and informs the readline callback interface when a line is received | Below is the the instruction that describes the task:
### Input:
u'''Reads a character and informs the readline callback interface when a line is received
### Response:
def callback_read_char(self):
u'''Reads a character and informs the readline callback interface when a line is received'''
if self.keyboard_poll():
line = self.get_line_buffer() + u'\n'
# however there is another newline added by
# self.mode.readline_setup(prompt) which is called by callback_handler_install
# this differs from GNU readline
self.add_history(self.mode.l_buffer)
# TADA:
self.callback(line) |
def pull_raw(url, name, verify=False):
'''
Execute a ``machinectl pull-raw`` to download a .qcow2 or raw disk image,
and add it to /var/lib/machines as a new container.
.. note::
**Requires systemd >= 219**
url
URL from which to download the container
name
Name for the new container
verify : False
Perform signature or checksum verification on the container. See the
``machinectl(1)`` man page (section titled "Image Transfer Commands")
for more information on requirements for image verification. To perform
signature verification, use ``verify=signature``. For checksum
verification, use ``verify=checksum``. By default, no verification will
be performed.
CLI Examples:
.. code-block:: bash
salt myminion nspawn.pull_raw http://ftp.halifax.rwth-aachen.de/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.raw.xz fedora21
'''
return _pull_image('raw', url, name, verify=verify) | Execute a ``machinectl pull-raw`` to download a .qcow2 or raw disk image,
and add it to /var/lib/machines as a new container.
.. note::
**Requires systemd >= 219**
url
URL from which to download the container
name
Name for the new container
verify : False
Perform signature or checksum verification on the container. See the
``machinectl(1)`` man page (section titled "Image Transfer Commands")
for more information on requirements for image verification. To perform
signature verification, use ``verify=signature``. For checksum
verification, use ``verify=checksum``. By default, no verification will
be performed.
CLI Examples:
.. code-block:: bash
salt myminion nspawn.pull_raw http://ftp.halifax.rwth-aachen.de/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.raw.xz fedora21 | Below is the the instruction that describes the task:
### Input:
Execute a ``machinectl pull-raw`` to download a .qcow2 or raw disk image,
and add it to /var/lib/machines as a new container.
.. note::
**Requires systemd >= 219**
url
URL from which to download the container
name
Name for the new container
verify : False
Perform signature or checksum verification on the container. See the
``machinectl(1)`` man page (section titled "Image Transfer Commands")
for more information on requirements for image verification. To perform
signature verification, use ``verify=signature``. For checksum
verification, use ``verify=checksum``. By default, no verification will
be performed.
CLI Examples:
.. code-block:: bash
salt myminion nspawn.pull_raw http://ftp.halifax.rwth-aachen.de/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.raw.xz fedora21
### Response:
def pull_raw(url, name, verify=False):
'''
Execute a ``machinectl pull-raw`` to download a .qcow2 or raw disk image,
and add it to /var/lib/machines as a new container.
.. note::
**Requires systemd >= 219**
url
URL from which to download the container
name
Name for the new container
verify : False
Perform signature or checksum verification on the container. See the
``machinectl(1)`` man page (section titled "Image Transfer Commands")
for more information on requirements for image verification. To perform
signature verification, use ``verify=signature``. For checksum
verification, use ``verify=checksum``. By default, no verification will
be performed.
CLI Examples:
.. code-block:: bash
salt myminion nspawn.pull_raw http://ftp.halifax.rwth-aachen.de/fedora/linux/releases/21/Cloud/Images/x86_64/Fedora-Cloud-Base-20141203-21.x86_64.raw.xz fedora21
'''
return _pull_image('raw', url, name, verify=verify) |
def countdown_timer(seconds=10):
"""Show a simple countdown progress bar
Parameters
----------
seconds
Period of time the progress bar takes to reach zero.
"""
tick = 0.1 # seconds
n_ticks = int(seconds / tick)
widgets = ['Pause for panic: ', progressbar.ETA(), ' ', progressbar.Bar()]
pbar = progressbar.ProgressBar(
widgets=widgets, max_value=n_ticks
).start()
for i in range(n_ticks):
pbar.update(i)
sleep(tick)
pbar.finish() | Show a simple countdown progress bar
Parameters
----------
seconds
Period of time the progress bar takes to reach zero. | Below is the the instruction that describes the task:
### Input:
Show a simple countdown progress bar
Parameters
----------
seconds
Period of time the progress bar takes to reach zero.
### Response:
def countdown_timer(seconds=10):
"""Show a simple countdown progress bar
Parameters
----------
seconds
Period of time the progress bar takes to reach zero.
"""
tick = 0.1 # seconds
n_ticks = int(seconds / tick)
widgets = ['Pause for panic: ', progressbar.ETA(), ' ', progressbar.Bar()]
pbar = progressbar.ProgressBar(
widgets=widgets, max_value=n_ticks
).start()
for i in range(n_ticks):
pbar.update(i)
sleep(tick)
pbar.finish() |
def variance(self):
'''
Compute variance.
Returns:
variance.
'''
alpha = self.__success + self.__default_alpha
beta = self.__failure + self.__default_beta
try:
            # Var[Beta(alpha, beta)] = alpha*beta / ((alpha+beta)**2 * (alpha+beta+1))
            variance = alpha * beta / (((alpha + beta) ** 2) * (alpha + beta + 1))
except ZeroDivisionError:
variance = 0.0
return variance | Compute variance.
Returns:
variance. | Below is the the instruction that describes the task:
### Input:
Compute variance.
Returns:
variance.
### Response:
def variance(self):
'''
Compute variance.
Returns:
variance.
'''
alpha = self.__success + self.__default_alpha
beta = self.__failure + self.__default_beta
try:
            # Var[Beta(alpha, beta)] = alpha*beta / ((alpha+beta)**2 * (alpha+beta+1))
            variance = alpha * beta / (((alpha + beta) ** 2) * (alpha + beta + 1))
except ZeroDivisionError:
variance = 0.0
return variance |
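With the correction above, the value is the standard Beta-distribution variance alpha*beta / ((alpha+beta)^2 * (alpha+beta+1)), which can be cross-checked against SciPy; the counts and priors below are illustrative only.

from scipy.stats import beta as beta_dist

successes, failures = 7, 3
default_alpha = default_beta = 1
a = successes + default_alpha
b = failures + default_beta
manual = (a * b) / float(((a + b) ** 2) * (a + b + 1))
print(manual)                # 0.017094017094017096
print(beta_dist.var(a, b))   # same value from SciPy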
def _check_field_value(field_value, pattern):
"""Check a song metadata field value for a pattern."""
if isinstance(field_value, list):
return any(re.search(pattern, str(value), re.I) for value in field_value)
else:
return re.search(pattern, str(field_value), re.I) | Check a song metadata field value for a pattern. | Below is the the instruction that describes the task:
### Input:
Check a song metadata field value for a pattern.
### Response:
def _check_field_value(field_value, pattern):
"""Check a song metadata field value for a pattern."""
if isinstance(field_value, list):
return any(re.search(pattern, str(value), re.I) for value in field_value)
else:
return re.search(pattern, str(field_value), re.I) |
def es_query_template(path):
"""
RETURN TEMPLATE AND PATH-TO-FILTER AS A 2-TUPLE
:param path: THE NESTED PATH (NOT INCLUDING TABLE NAME)
:return: (es_query, es_filters) TUPLE
"""
if not is_text(path):
Log.error("expecting path to be a string")
if path != ".":
f0 = {}
f1 = {}
output = wrap({
"query": es_and([
f0,
{"nested": {
"path": path,
"query": f1,
"inner_hits": {"size": 100000}
}}
]),
"from": 0,
"size": 0,
"sort": []
})
return output, wrap([f0, f1])
else:
f0 = {}
output = wrap({
"query": es_and([f0]),
"from": 0,
"size": 0,
"sort": []
})
return output, wrap([f0]) | RETURN TEMPLATE AND PATH-TO-FILTER AS A 2-TUPLE
:param path: THE NESTED PATH (NOT INCLUDING TABLE NAME)
:return: (es_query, es_filters) TUPLE | Below is the the instruction that describes the task:
### Input:
RETURN TEMPLATE AND PATH-TO-FILTER AS A 2-TUPLE
:param path: THE NESTED PATH (NOT INCLUDING TABLE NAME)
:return: (es_query, es_filters) TUPLE
### Response:
def es_query_template(path):
"""
RETURN TEMPLATE AND PATH-TO-FILTER AS A 2-TUPLE
:param path: THE NESTED PATH (NOT INCLUDING TABLE NAME)
:return: (es_query, es_filters) TUPLE
"""
if not is_text(path):
Log.error("expecting path to be a string")
if path != ".":
f0 = {}
f1 = {}
output = wrap({
"query": es_and([
f0,
{"nested": {
"path": path,
"query": f1,
"inner_hits": {"size": 100000}
}}
]),
"from": 0,
"size": 0,
"sort": []
})
return output, wrap([f0, f1])
else:
f0 = {}
output = wrap({
"query": es_and([f0]),
"from": 0,
"size": 0,
"sort": []
})
return output, wrap([f0]) |
def TaxonomicAmendmentStore(repos_dict=None,
repos_par=None,
with_caching=True,
assumed_doc_version=None,
git_ssh=None,
pkey=None,
git_action_class=TaxonomicAmendmentsGitAction,
mirror_info=None,
infrastructure_commit_author='OpenTree API <[email protected]>'):
"""Factory function for a _TaxonomicAmendmentStore object.
A wrapper around the _TaxonomicAmendmentStore class instantiation for
the most common use case: a singleton _TaxonomicAmendmentStore.
If you need distinct _TaxonomicAmendmentStore objects, you'll need to
call that class directly.
"""
global _THE_TAXONOMIC_AMENDMENT_STORE
if _THE_TAXONOMIC_AMENDMENT_STORE is None:
_THE_TAXONOMIC_AMENDMENT_STORE = _TaxonomicAmendmentStore(repos_dict=repos_dict,
repos_par=repos_par,
with_caching=with_caching,
assumed_doc_version=assumed_doc_version,
git_ssh=git_ssh,
pkey=pkey,
git_action_class=git_action_class,
mirror_info=mirror_info,
infrastructure_commit_author=infrastructure_commit_author)
return _THE_TAXONOMIC_AMENDMENT_STORE | Factory function for a _TaxonomicAmendmentStore object.
A wrapper around the _TaxonomicAmendmentStore class instantiation for
the most common use case: a singleton _TaxonomicAmendmentStore.
If you need distinct _TaxonomicAmendmentStore objects, you'll need to
call that class directly. | Below is the the instruction that describes the task:
### Input:
Factory function for a _TaxonomicAmendmentStore object.
A wrapper around the _TaxonomicAmendmentStore class instantiation for
the most common use case: a singleton _TaxonomicAmendmentStore.
If you need distinct _TaxonomicAmendmentStore objects, you'll need to
call that class directly.
### Response:
def TaxonomicAmendmentStore(repos_dict=None,
repos_par=None,
with_caching=True,
assumed_doc_version=None,
git_ssh=None,
pkey=None,
git_action_class=TaxonomicAmendmentsGitAction,
mirror_info=None,
infrastructure_commit_author='OpenTree API <[email protected]>'):
"""Factory function for a _TaxonomicAmendmentStore object.
A wrapper around the _TaxonomicAmendmentStore class instantiation for
the most common use case: a singleton _TaxonomicAmendmentStore.
If you need distinct _TaxonomicAmendmentStore objects, you'll need to
call that class directly.
"""
global _THE_TAXONOMIC_AMENDMENT_STORE
if _THE_TAXONOMIC_AMENDMENT_STORE is None:
_THE_TAXONOMIC_AMENDMENT_STORE = _TaxonomicAmendmentStore(repos_dict=repos_dict,
repos_par=repos_par,
with_caching=with_caching,
assumed_doc_version=assumed_doc_version,
git_ssh=git_ssh,
pkey=pkey,
git_action_class=git_action_class,
mirror_info=mirror_info,
infrastructure_commit_author=infrastructure_commit_author)
return _THE_TAXONOMIC_AMENDMENT_STORE |
def cli(parser):
'''
Currently a cop-out -- just calls easy_install
'''
parser.add_argument('-n', '--dry-run', action='store_true', help='Print uninstall actions without running')
parser.add_argument('packages', nargs='+', help='Packages to install')
opts = parser.parse_args()
for package in opts.packages:
install(package, execute=not opts.dry_run) | Currently a cop-out -- just calls easy_install | Below is the the instruction that describes the task:
### Input:
Currently a cop-out -- just calls easy_install
### Response:
def cli(parser):
'''
Currently a cop-out -- just calls easy_install
'''
parser.add_argument('-n', '--dry-run', action='store_true', help='Print uninstall actions without running')
parser.add_argument('packages', nargs='+', help='Packages to install')
opts = parser.parse_args()
for package in opts.packages:
install(package, execute=not opts.dry_run) |
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res | Return the cached item, item represents a label indexer. | Below is the the instruction that describes the task:
### Input:
Return the cached item, item represents a label indexer.
### Response:
def _get_item_cache(self, item):
"""Return the cached item, item represents a label indexer."""
cache = self._item_cache
res = cache.get(item)
if res is None:
values = self._data.get(item)
res = self._box_item_values(item, values)
cache[item] = res
res._set_as_cached(item, self)
# for a chain
res._is_copy = self._is_copy
return res |
def meta_set(self, key, metafield, value):
''' Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. '''
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key] | Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. | Below is the the instruction that describes the task:
### Input:
Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys.
### Response:
def meta_set(self, key, metafield, value):
''' Set the meta field for a key to a new value. This triggers the
on-change handler for existing keys. '''
self._meta.setdefault(key, {})[metafield] = value
if key in self:
self[key] = self[key] |
def bbox_transform(ex_rois, gt_rois, box_stds):
"""
compute bounding box regression targets from ex_rois to gt_rois
:param ex_rois: [N, 4]
:param gt_rois: [N, 4]
:return: [N, 4]
"""
assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * (ex_widths - 1.0)
ex_ctr_y = ex_rois[:, 1] + 0.5 * (ex_heights - 1.0)
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * (gt_widths - 1.0)
gt_ctr_y = gt_rois[:, 1] + 0.5 * (gt_heights - 1.0)
targets_dx = (gt_ctr_x - ex_ctr_x) / (ex_widths + 1e-14) / box_stds[0]
targets_dy = (gt_ctr_y - ex_ctr_y) / (ex_heights + 1e-14) / box_stds[1]
targets_dw = np.log(gt_widths / ex_widths) / box_stds[2]
targets_dh = np.log(gt_heights / ex_heights) / box_stds[3]
targets = np.vstack((targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets | compute bounding box regression targets from ex_rois to gt_rois
:param ex_rois: [N, 4]
:param gt_rois: [N, 4]
:return: [N, 4] | Below is the the instruction that describes the task:
### Input:
compute bounding box regression targets from ex_rois to gt_rois
:param ex_rois: [N, 4]
:param gt_rois: [N, 4]
:return: [N, 4]
### Response:
def bbox_transform(ex_rois, gt_rois, box_stds):
"""
compute bounding box regression targets from ex_rois to gt_rois
:param ex_rois: [N, 4]
:param gt_rois: [N, 4]
:return: [N, 4]
"""
assert ex_rois.shape[0] == gt_rois.shape[0], 'inconsistent rois number'
ex_widths = ex_rois[:, 2] - ex_rois[:, 0] + 1.0
ex_heights = ex_rois[:, 3] - ex_rois[:, 1] + 1.0
ex_ctr_x = ex_rois[:, 0] + 0.5 * (ex_widths - 1.0)
ex_ctr_y = ex_rois[:, 1] + 0.5 * (ex_heights - 1.0)
gt_widths = gt_rois[:, 2] - gt_rois[:, 0] + 1.0
gt_heights = gt_rois[:, 3] - gt_rois[:, 1] + 1.0
gt_ctr_x = gt_rois[:, 0] + 0.5 * (gt_widths - 1.0)
gt_ctr_y = gt_rois[:, 1] + 0.5 * (gt_heights - 1.0)
targets_dx = (gt_ctr_x - ex_ctr_x) / (ex_widths + 1e-14) / box_stds[0]
targets_dy = (gt_ctr_y - ex_ctr_y) / (ex_heights + 1e-14) / box_stds[1]
targets_dw = np.log(gt_widths / ex_widths) / box_stds[2]
targets_dh = np.log(gt_heights / ex_heights) / box_stds[3]
targets = np.vstack((targets_dx, targets_dy, targets_dw, targets_dh)).transpose()
return targets |
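A tiny numeric check of the target encoding above (bbox_transform is assumed to be in scope): a 100x100 proposal shifted 10 px in x relative to the ground truth gives dx = 0.1 / box_stds[0] = 1 and zero for the remaining targets.

import numpy as np

ex_rois = np.array([[0.0, 0.0, 99.0, 99.0]])     # proposal, 100x100
gt_rois = np.array([[10.0, 0.0, 109.0, 99.0]])   # ground truth, shifted 10 px in x
box_stds = (0.1, 0.1, 0.2, 0.2)
print(bbox_transform(ex_rois, gt_rois, box_stds))   # [[ 1.  0.  0.  0.]]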
def p_sysargs(self, p):
'sysargs : sysargs COMMA sysarg'
p[0] = p[1] + (p[3],)
p.set_lineno(0, p.lineno(1)) | sysargs : sysargs COMMA sysarg | Below is the the instruction that describes the task:
### Input:
sysargs : sysargs COMMA sysarg
### Response:
def p_sysargs(self, p):
'sysargs : sysargs COMMA sysarg'
p[0] = p[1] + (p[3],)
p.set_lineno(0, p.lineno(1)) |
def get_winner_number(self):
"""!
        @brief Calculates the number of winners at the last step of the learning process.
        @return (uint) Number of winners.
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
winner_number = 0
for i in range(self._size):
if self._award[i] > 0:
winner_number += 1
return winner_number | !
@brief Calculates the number of winners at the last step of the learning process.
@return (uint) Number of winners. | Below is the the instruction that describes the task:
### Input:
!
@brief Calculates the number of winners at the last step of the learning process.
@return (uint) Number of winners.
### Response:
def get_winner_number(self):
"""!
        @brief Calculates the number of winners at the last step of the learning process.
        @return (uint) Number of winners.
"""
if self.__ccore_som_pointer is not None:
self._award = wrapper.som_get_awards(self.__ccore_som_pointer)
winner_number = 0
for i in range(self._size):
if self._award[i] > 0:
winner_number += 1
return winner_number |
def call_on_each_endpoint(self, callback):
"""Find all server endpoints defined in the swagger spec and calls 'callback' for each,
with an instance of EndpointData as argument.
"""
if 'paths' not in self.swagger_dict:
return
for path, d in list(self.swagger_dict['paths'].items()):
for method, op_spec in list(d.items()):
data = EndpointData(path, method)
# Which server method handles this endpoint?
if 'x-bind-server' not in op_spec:
if 'x-no-bind-server' in op_spec:
# That route should not be auto-generated
log.info("Skipping generation of %s %s" % (method, path))
continue
else:
raise Exception("Swagger api defines no x-bind-server for %s %s" % (method, path))
data.handler_server = op_spec['x-bind-server']
                # Make sure that endpoint produces exactly one type: 'application/json' or 'text/html'
if 'produces' not in op_spec:
raise Exception("Swagger api has no 'produces' section for %s %s" % (method, path))
if len(op_spec['produces']) != 1:
raise Exception("Expecting only one type under 'produces' for %s %s" % (method, path))
if op_spec['produces'][0] == 'application/json':
data.produces_json = True
elif op_spec['produces'][0] == 'text/html':
data.produces_html = True
else:
raise Exception("Only 'application/json' or 'text/html' are supported. See %s %s" % (method, path))
# Which client method handles this endpoint?
if 'x-bind-client' in op_spec:
data.handler_client = op_spec['x-bind-client']
# Should we decorate the server handler?
if 'x-decorate-server' in op_spec:
data.decorate_server = op_spec['x-decorate-server']
# Should we manipulate the requests parameters?
if 'x-decorate-request' in op_spec:
data.decorate_request = op_spec['x-decorate-request']
# Generate a bravado-core operation object
data.operation = Operation.from_spec(self.spec, path, method, op_spec)
# Figure out how parameters are passed: one json in body? one or
# more values in query?
if 'parameters' in op_spec:
params = op_spec['parameters']
for p in params:
if p['in'] == 'body':
data.param_in_body = True
if p['in'] == 'query':
data.param_in_query = True
if p['in'] == 'path':
data.param_in_path = True
if data.param_in_path:
# Substitute {...} with <...> in path, to make a Flask friendly path
data.path = data.path.replace('{', '<').replace('}', '>')
if data.param_in_body and data.param_in_query:
raise Exception("Cannot support params in both body and param (%s %s)" % (method, path))
else:
data.no_params = True
            callback(data) | Find all server endpoints defined in the swagger spec and call 'callback' for each,
with an instance of EndpointData as argument. | Below is the the instruction that describes the task:
### Input:
Find all server endpoints defined in the swagger spec and call 'callback' for each,
with an instance of EndpointData as argument.
### Response:
def call_on_each_endpoint(self, callback):
"""Find all server endpoints defined in the swagger spec and calls 'callback' for each,
with an instance of EndpointData as argument.
"""
if 'paths' not in self.swagger_dict:
return
for path, d in list(self.swagger_dict['paths'].items()):
for method, op_spec in list(d.items()):
data = EndpointData(path, method)
# Which server method handles this endpoint?
if 'x-bind-server' not in op_spec:
if 'x-no-bind-server' in op_spec:
# That route should not be auto-generated
log.info("Skipping generation of %s %s" % (method, path))
continue
else:
raise Exception("Swagger api defines no x-bind-server for %s %s" % (method, path))
data.handler_server = op_spec['x-bind-server']
                # Make sure that endpoint produces exactly one type: 'application/json' or 'text/html'
if 'produces' not in op_spec:
raise Exception("Swagger api has no 'produces' section for %s %s" % (method, path))
if len(op_spec['produces']) != 1:
raise Exception("Expecting only one type under 'produces' for %s %s" % (method, path))
if op_spec['produces'][0] == 'application/json':
data.produces_json = True
elif op_spec['produces'][0] == 'text/html':
data.produces_html = True
else:
raise Exception("Only 'application/json' or 'text/html' are supported. See %s %s" % (method, path))
# Which client method handles this endpoint?
if 'x-bind-client' in op_spec:
data.handler_client = op_spec['x-bind-client']
# Should we decorate the server handler?
if 'x-decorate-server' in op_spec:
data.decorate_server = op_spec['x-decorate-server']
# Should we manipulate the requests parameters?
if 'x-decorate-request' in op_spec:
data.decorate_request = op_spec['x-decorate-request']
# Generate a bravado-core operation object
data.operation = Operation.from_spec(self.spec, path, method, op_spec)
# Figure out how parameters are passed: one json in body? one or
# more values in query?
if 'parameters' in op_spec:
params = op_spec['parameters']
for p in params:
if p['in'] == 'body':
data.param_in_body = True
if p['in'] == 'query':
data.param_in_query = True
if p['in'] == 'path':
data.param_in_path = True
if data.param_in_path:
# Substitute {...} with <...> in path, to make a Flask friendly path
data.path = data.path.replace('{', '<').replace('}', '>')
if data.param_in_body and data.param_in_query:
raise Exception("Cannot support params in both body and param (%s %s)" % (method, path))
else:
data.no_params = True
callback(data) |
def solve(self, reaction_1, reaction_2):
"""Return the flux coupling between two reactions
The flux coupling is returned as a tuple indicating the minimum and
maximum value of the v1/v2 reaction flux ratio. A value of None as
either the minimum or maximum indicates that the interval is unbounded
in that direction.
"""
# Update objective for reaction_1
self._prob.set_objective(self._vbow(reaction_1))
# Update constraint for reaction_2
if self._reaction_constr is not None:
self._reaction_constr.delete()
self._reaction_constr, = self._prob.add_linear_constraints(
self._vbow(reaction_2) == 1)
results = []
for sense in (lp.ObjectiveSense.Minimize, lp.ObjectiveSense.Maximize):
try:
result = self._prob.solve(sense)
except lp.SolverError:
results.append(None)
else:
results.append(result.get_value(self._vbow(reaction_1)))
return tuple(results) | Return the flux coupling between two reactions
The flux coupling is returned as a tuple indicating the minimum and
maximum value of the v1/v2 reaction flux ratio. A value of None as
either the minimum or maximum indicates that the interval is unbounded
in that direction. | Below is the the instruction that describes the task:
### Input:
Return the flux coupling between two reactions
The flux coupling is returned as a tuple indicating the minimum and
maximum value of the v1/v2 reaction flux ratio. A value of None as
either the minimum or maximum indicates that the interval is unbounded
in that direction.
### Response:
def solve(self, reaction_1, reaction_2):
"""Return the flux coupling between two reactions
The flux coupling is returned as a tuple indicating the minimum and
maximum value of the v1/v2 reaction flux ratio. A value of None as
either the minimum or maximum indicates that the interval is unbounded
in that direction.
"""
# Update objective for reaction_1
self._prob.set_objective(self._vbow(reaction_1))
# Update constraint for reaction_2
if self._reaction_constr is not None:
self._reaction_constr.delete()
self._reaction_constr, = self._prob.add_linear_constraints(
self._vbow(reaction_2) == 1)
results = []
for sense in (lp.ObjectiveSense.Minimize, lp.ObjectiveSense.Maximize):
try:
result = self._prob.solve(sense)
except lp.SolverError:
results.append(None)
else:
results.append(result.get_value(self._vbow(reaction_1)))
return tuple(results) |
def clear_alarms(alarm):
'''
    Clear (acknowledge) an alarm event. The arguments are ``all`` for all current
    alarms, a specific alarm number (e.g. ``1``), or an alarm string identifier
    (e.g. ``MGMT_ALARM_PROXY_CONFIG_ERROR``).
.. code-block:: bash
salt '*' trafficserver.clear_alarms [all | #event | name]
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('alarm', 'clear', alarm)
else:
cmd = _traffic_line('--clear_alarms', alarm)
    return _subprocess(cmd) | Clear (acknowledge) an alarm event. The arguments are ``all`` for all current
alarms, a specific alarm number (e.g. ``1``), or an alarm string identifier
(e.g. ``MGMT_ALARM_PROXY_CONFIG_ERROR``).
.. code-block:: bash
salt '*' trafficserver.clear_alarms [all | #event | name] | Below is the the instruction that describes the task:
### Input:
Clear (acknowledge) an alarm event. The arguments are ``all`` for all current
alarms, a specific alarm number (e.g. ``1``), or an alarm string identifier
(e.g. ``MGMT_ALARM_PROXY_CONFIG_ERROR``).
.. code-block:: bash
salt '*' trafficserver.clear_alarms [all | #event | name]
### Response:
def clear_alarms(alarm):
'''
    Clear (acknowledge) an alarm event. The arguments are ``all`` for all current
    alarms, a specific alarm number (e.g. ``1``), or an alarm string identifier
    (e.g. ``MGMT_ALARM_PROXY_CONFIG_ERROR``).
.. code-block:: bash
salt '*' trafficserver.clear_alarms [all | #event | name]
'''
if _TRAFFICCTL:
cmd = _traffic_ctl('alarm', 'clear', alarm)
else:
cmd = _traffic_line('--clear_alarms', alarm)
return _subprocess(cmd) |
def uinit(self, ushape):
"""Return initialiser for working variable U."""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
# NB: still needs to be worked out.
return np.zeros(ushape, dtype=self.dtype) | Return initialiser for working variable U. | Below is the the instruction that describes the task:
### Input:
Return initialiser for working variable U.
### Response:
def uinit(self, ushape):
"""Return initialiser for working variable U."""
if self.opt['Y0'] is None:
return np.zeros(ushape, dtype=self.dtype)
else:
# If initial Y is non-zero, initial U is chosen so that
# the relevant dual optimality criterion (see (3.10) in
# boyd-2010-distributed) is satisfied.
# NB: still needs to be worked out.
return np.zeros(ushape, dtype=self.dtype) |
def _make_operator(method_name):
"""Return an operator method that takes parameters of type
:class:`Dimension`, evaluates them, and delegates to the :class:`float`
operator with name `method_name`"""
def operator(self, other):
"""Operator delegating to the :class:`float` method `method_name`"""
float_operator = getattr(float, method_name)
try:
float_other = float(other)
except (ValueError, TypeError):
return False
return float_operator(float(self), float_other)
return operator | Return an operator method that takes parameters of type
:class:`Dimension`, evaluates them, and delegates to the :class:`float`
operator with name `method_name` | Below is the the instruction that describes the task:
### Input:
Return an operator method that takes parameters of type
:class:`Dimension`, evaluates them, and delegates to the :class:`float`
operator with name `method_name`
### Response:
def _make_operator(method_name):
"""Return an operator method that takes parameters of type
:class:`Dimension`, evaluates them, and delegates to the :class:`float`
operator with name `method_name`"""
def operator(self, other):
"""Operator delegating to the :class:`float` method `method_name`"""
float_operator = getattr(float, method_name)
try:
float_other = float(other)
except (ValueError, TypeError):
return False
return float_operator(float(self), float_other)
return operator |
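Illustrative use of the factory above: attach generated operators to a small float-convertible value type. The Quantity class here is a toy stand-in, not the real Dimension type; _make_operator is assumed to be in scope.

class Quantity(object):
    """Toy value type; all the generated operators need is float() conversion."""
    def __init__(self, value):
        self.value = value
    def __float__(self):
        return float(self.value)
    __lt__ = _make_operator('__lt__')
    __ge__ = _make_operator('__ge__')

print(Quantity(2) < Quantity(3))    # True
print(Quantity(2) >= "oops")        # False: unconvertible operand compares as False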
def get_out_ip_addr(cls, tenant_id):
"""Retrieves the 'out' service subnet attributes. """
if tenant_id not in cls.serv_obj_dict:
LOG.error("Fabric not prepared for tenant %s", tenant_id)
return
tenant_obj = cls.serv_obj_dict.get(tenant_id)
return tenant_obj.get_out_ip_addr() | Retrieves the 'out' service subnet attributes. | Below is the the instruction that describes the task:
### Input:
Retrieves the 'out' service subnet attributes.
### Response:
def get_out_ip_addr(cls, tenant_id):
"""Retrieves the 'out' service subnet attributes. """
if tenant_id not in cls.serv_obj_dict:
LOG.error("Fabric not prepared for tenant %s", tenant_id)
return
tenant_obj = cls.serv_obj_dict.get(tenant_id)
return tenant_obj.get_out_ip_addr() |
def destroy(self):
"""Destroys a session completely, by deleting all keys and removing it
from the internal store immediately.
This allows removing a session for security reasons, e.g. a login
stored in a session will cease to exist if the session is destroyed.
"""
for k in list(self.keys()):
del self[k]
if getattr(self, 'sid_s', None):
current_app.kvsession_store.delete(self.sid_s)
self.sid_s = None
self.modified = False
self.new = False | Destroys a session completely, by deleting all keys and removing it
from the internal store immediately.
This allows removing a session for security reasons, e.g. a login
stored in a session will cease to exist if the session is destroyed. | Below is the the instruction that describes the task:
### Input:
Destroys a session completely, by deleting all keys and removing it
from the internal store immediately.
This allows removing a session for security reasons, e.g. a login
stored in a session will cease to exist if the session is destroyed.
### Response:
def destroy(self):
"""Destroys a session completely, by deleting all keys and removing it
from the internal store immediately.
This allows removing a session for security reasons, e.g. a login
stored in a session will cease to exist if the session is destroyed.
"""
for k in list(self.keys()):
del self[k]
if getattr(self, 'sid_s', None):
current_app.kvsession_store.delete(self.sid_s)
self.sid_s = None
self.modified = False
self.new = False |
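A short usage sketch: the snippet's use of current_app.kvsession_store suggests Flask-KVSession, so the wiring below assumes that extension plus a simplekv store; the logout route is illustrative, not taken from the original project.

from flask import Flask, redirect, session
from simplekv.memory import DictStore            # assumed in-memory backend for the example
from flask_kvsession import KVSessionExtension   # assumed extension that installs this session class

app = Flask(__name__)
app.secret_key = 'change-me'
KVSessionExtension(DictStore(), app)  # replaces Flask's cookie session with the KV-backed one

@app.route('/logout')
def logout():
    # Wipes every key and deletes the server-side record, so the old
    # session id cannot be replayed after logout.
    session.destroy()
    return redirect('/')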
def spread_value(value: Decimal, spread_p: Decimal) -> Tuple[Decimal, Decimal]:
"""Returns a lower and upper value separated by a spread percentage"""
upper = value * (1 + spread_p)
lower = value / (1 + spread_p)
return lower, upper | Returns a lower and upper value separated by a spread percentage | Below is the the instruction that describes the task:
### Input:
Returns a lower and upper value separated by a spread percentage
### Response:
def spread_value(value: Decimal, spread_p: Decimal) -> Tuple[Decimal, Decimal]:
"""Returns a lower and upper value separated by a spread percentage"""
upper = value * (1 + spread_p)
lower = value / (1 + spread_p)
return lower, upper |
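A quick worked example of the helper above; the numbers are arbitrary.

from decimal import Decimal

mid_price = Decimal('100')
spread = Decimal('0.02')  # 2 % multiplicative spread factor applied on each side

bid, ask = spread_value(mid_price, spread)
# ask = 100 * 1.02 = 102, bid = 100 / 1.02 ≈ 98.0392
print(bid, ask)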
def prepare_relationship(config, model_name, raml_resource):
""" Create referenced model if it doesn't exist.
When preparing a relationship, we check to see if the model that will be
referenced already exists. If not, it is created so that it will be possible
to use it in a relationship. Thus the first usage of this model in RAML file
must provide its schema in POST method resource body schema.
:param model_name: Name of model which should be generated.
:param raml_resource: Instance of ramlfications.raml.ResourceNode for
which :model_name: will be defined.
"""
if get_existing_model(model_name) is None:
plural_route = '/' + pluralize(model_name.lower())
route = '/' + model_name.lower()
for res in raml_resource.root.resources:
if res.method.upper() != 'POST':
continue
if res.path.endswith(plural_route) or res.path.endswith(route):
break
else:
raise ValueError('Model `{}` used in relationship is not '
'defined'.format(model_name))
setup_data_model(config, res, model_name) | Create referenced model if it doesn't exist.
When preparing a relationship, we check to see if the model that will be
referenced already exists. If not, it is created so that it will be possible
to use it in a relationship. Thus the first usage of this model in RAML file
must provide its schema in POST method resource body schema.
:param model_name: Name of model which should be generated.
:param raml_resource: Instance of ramlfications.raml.ResourceNode for
which :model_name: will be defined. | Below is the the instruction that describes the task:
### Input:
Create referenced model if it doesn't exist.
When preparing a relationship, we check to see if the model that will be
referenced already exists. If not, it is created so that it will be possible
to use it in a relationship. Thus the first usage of this model in RAML file
must provide its schema in POST method resource body schema.
:param model_name: Name of model which should be generated.
:param raml_resource: Instance of ramlfications.raml.ResourceNode for
which :model_name: will be defined.
### Response:
def prepare_relationship(config, model_name, raml_resource):
""" Create referenced model if it doesn't exist.
When preparing a relationship, we check to see if the model that will be
referenced already exists. If not, it is created so that it will be possible
to use it in a relationship. Thus the first usage of this model in RAML file
must provide its schema in POST method resource body schema.
:param model_name: Name of model which should be generated.
:param raml_resource: Instance of ramlfications.raml.ResourceNode for
which :model_name: will be defined.
"""
if get_existing_model(model_name) is None:
plural_route = '/' + pluralize(model_name.lower())
route = '/' + model_name.lower()
for res in raml_resource.root.resources:
if res.method.upper() != 'POST':
continue
if res.path.endswith(plural_route) or res.path.endswith(route):
break
else:
raise ValueError('Model `{}` used in relationship is not '
'defined'.format(model_name))
setup_data_model(config, res, model_name) |
def new_rsa_key(key_size=2048, kid='', use='', public_exponent=65537):
"""
Creates a new RSA key pair and wraps it in a
:py:class:`cryptojwt.jwk.rsa.RSAKey` instance
:param key_size: The size of the key
:param kid: The key ID
:param use: What the key is supposed to be used for. 2 choices 'sig'/'enc'
:param public_exponent: The value of the public exponent.
:return: A :py:class:`cryptojwt.jwk.rsa.RSAKey` instance
"""
_key = rsa.generate_private_key(public_exponent=public_exponent,
key_size=key_size,
backend=default_backend())
_rk = RSAKey(priv_key=_key, use=use, kid=kid)
if not kid:
_rk.add_kid()
return _rk | Creates a new RSA key pair and wraps it in a
:py:class:`cryptojwt.jwk.rsa.RSAKey` instance
:param key_size: The size of the key
:param kid: The key ID
:param use: What the key is supposed to be used for. 2 choices 'sig'/'enc'
:param public_exponent: The value of the public exponent.
:return: A :py:class:`cryptojwt.jwk.rsa.RSAKey` instance | Below is the the instruction that describes the task:
### Input:
Creates a new RSA key pair and wraps it in a
:py:class:`cryptojwt.jwk.rsa.RSAKey` instance
:param key_size: The size of the key
:param kid: The key ID
:param use: What the key is supposed to be used for. 2 choices 'sig'/'enc'
:param public_exponent: The value of the public exponent.
:return: A :py:class:`cryptojwt.jwk.rsa.RSAKey` instance
### Response:
def new_rsa_key(key_size=2048, kid='', use='', public_exponent=65537):
"""
Creates a new RSA key pair and wraps it in a
:py:class:`cryptojwt.jwk.rsa.RSAKey` instance
:param key_size: The size of the key
:param kid: The key ID
:param use: What the key is supposed to be used for. 2 choices 'sig'/'enc'
:param public_exponent: The value of the public exponent.
:return: A :py:class:`cryptojwt.jwk.rsa.RSAKey` instance
"""
_key = rsa.generate_private_key(public_exponent=public_exponent,
key_size=key_size,
backend=default_backend())
_rk = RSAKey(priv_key=_key, use=use, kid=kid)
if not kid:
_rk.add_kid()
return _rk |
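A small usage sketch of the factory above, assuming the cryptojwt package (which provides RSAKey and the cryptography backend used in the snippet) is installed.

# A 2048-bit signing key; a kid is derived automatically because none was given.
sig_key = new_rsa_key(use='sig')
print(sig_key.kid, sig_key.use)   # auto-generated kid and 'sig'

# Explicit kid for an encryption key.
enc_key = new_rsa_key(kid='enc-key-1', use='enc')
print(enc_key.kid)                # 'enc-key-1'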
def iter_predecessors(self, graph, dest, branch, turn, tick, *, forward=None):
"""Iterate over predecessors to a given destination node at a given time."""
if self.db._no_kc:
yield from self._adds_dels_sucpred(self.predecessors[graph, dest], branch, turn, tick)[0]
return
if forward is None:
forward = self.db._forward
yield from self._get_origcache(graph, dest, branch, turn, tick, forward=forward) | Iterate over predecessors to a given destination node at a given time. | Below is the the instruction that describes the task:
### Input:
Iterate over predecessors to a given destination node at a given time.
### Response:
def iter_predecessors(self, graph, dest, branch, turn, tick, *, forward=None):
"""Iterate over predecessors to a given destination node at a given time."""
if self.db._no_kc:
yield from self._adds_dels_sucpred(self.predecessors[graph, dest], branch, turn, tick)[0]
return
if forward is None:
forward = self.db._forward
yield from self._get_origcache(graph, dest, branch, turn, tick, forward=forward) |
def bidi(request):
"""Adds to the context BiDi related variables
LANGUAGE_DIRECTION -- Direction of current language ('ltr' or 'rtl')
LANGUAGE_START -- Start of language layout ('right' for rtl, 'left'
for 'ltr')
LANGUAGE_END -- End of language layout ('left' for rtl, 'right'
for 'ltr')
LANGUAGE_MARKER -- Language marker entity ('‏' for rtl, '&lrm'
for ltr)
"""
from django.utils import translation
from django.utils.safestring import mark_safe
if translation.get_language_bidi():
extra_context = {
'LANGUAGE_DIRECTION':'rtl',
'LANGUAGE_START':'right',
'LANGUAGE_END':'left',
'LANGUAGE_MARKER': mark_safe('‏'),
}
else:
extra_context = {
'LANGUAGE_DIRECTION':'ltr',
'LANGUAGE_START':'left',
'LANGUAGE_END':'right',
'LANGUAGE_MARKER': mark_safe('‎'),
}
return extra_context | Adds to the context BiDi related variables
LANGUAGE_DIRECTION -- Direction of current language ('ltr' or 'rtl')
LANGUAGE_START -- Start of language layout ('right' for rtl, 'left'
for 'ltr')
LANGUAGE_END -- End of language layout ('left' for rtl, 'right'
for 'ltr')
LANGUAGE_MARKER -- Language marker entity ('‏' for rtl, '&lrm'
for ltr) | Below is the the instruction that describes the task:
### Input:
Adds to the context BiDi related variables
LANGUAGE_DIRECTION -- Direction of current language ('ltr' or 'rtl')
LANGUAGE_START -- Start of language layout ('right' for rtl, 'left'
for 'ltr')
LANGUAGE_END -- End of language layout ('left' for rtl, 'right'
for 'ltr')
LANGUAGE_MARKER -- Language marker entity ('‏' for rtl, '&lrm'
for ltr)
### Response:
def bidi(request):
"""Adds to the context BiDi related variables
LANGUAGE_DIRECTION -- Direction of current language ('ltr' or 'rtl')
LANGUAGE_START -- Start of language layout ('right' for rtl, 'left'
for 'ltr')
LANGUAGE_END -- End of language layout ('left' for rtl, 'right'
for 'ltr')
LANGUAGE_MARKER -- Language marker entity ('‏' for rtl, '&lrm'
for ltr)
"""
from django.utils import translation
from django.utils.safestring import mark_safe
if translation.get_language_bidi():
extra_context = {
'LANGUAGE_DIRECTION':'rtl',
'LANGUAGE_START':'right',
'LANGUAGE_END':'left',
'LANGUAGE_MARKER': mark_safe('‏'),
}
else:
extra_context = {
'LANGUAGE_DIRECTION':'ltr',
'LANGUAGE_START':'left',
'LANGUAGE_END':'right',
'LANGUAGE_MARKER': mark_safe('‎'),
}
return extra_context |
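To expose these variables to templates, the context processor has to be registered in the Django settings; a minimal sketch follows, where the dotted path 'myproject.context_processors.bidi' is a placeholder for wherever the function above actually lives. Templates can then reference {{ LANGUAGE_DIRECTION }}, {{ LANGUAGE_START }} and the other variables directly.

# settings.py (fragment)
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.request',
                # placeholder path: point this at the module containing bidi()
                'myproject.context_processors.bidi',
            ],
        },
    },
]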
def _int_to_pos(self, flat_position):
"""Returns x, y from flat_position integer.
Args:
flat_position: flattened position integer
Returns: x, y
"""
return flat_position % self.env.action_space.screen_shape[0],\
flat_position % self.env.action_space.screen_shape[1] | Returns x, y from flat_position integer.
Args:
flat_position: flattened position integer
Returns: x, y | Below is the the instruction that describes the task:
### Input:
Returns x, y from flat_position integer.
Args:
flat_position: flattened position integer
Returns: x, y
### Response:
def _int_to_pos(self, flat_position):
"""Returns x, y from flat_position integer.
Args:
flat_position: flattened position integer
Returns: x, y
"""
return flat_position % self.env.action_space.screen_shape[0],\
flat_position % self.env.action_space.screen_shape[1] |
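For comparison, the conventional row-major conversion from a flat index to (x, y) uses divmod instead of taking the remainder on both axes; the standalone helper below is hypothetical and independent of the environment class in the snippet.

def flat_to_xy(flat_position, width, height):
    """Convert a flat index into (x, y) on a width x height grid laid out row-major."""
    assert 0 <= flat_position < width * height
    y, x = divmod(flat_position, width)  # row index first, then column within that row
    return x, y


# Example: on a 4 x 3 grid, index 7 is column 3 of row 1.
assert flat_to_xy(7, width=4, height=3) == (3, 1)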
def _decrypt(self, fp, password=None):
"""
Internal decryption function
Uses either the password argument for the decryption,
or, if not supplied, the password field of the object
:param fp: a file object or similar which supports the readline and read methods
:rtype: Proxy
"""
if AES is None:
raise ImportError("PyCrypto required")
if password is None:
password = self.password
if password is None:
raise ValueError(
"Password need to be provided to extract encrypted archives")
# read the PBKDF2 parameters
# salt
user_salt = fp.readline().strip()
user_salt = binascii.a2b_hex(user_salt)
# checksum salt
ck_salt = fp.readline().strip()
ck_salt = binascii.a2b_hex(ck_salt)
# hashing rounds
rounds = fp.readline().strip()
rounds = int(rounds)
# encryption IV
iv = fp.readline().strip()
iv = binascii.a2b_hex(iv)
# encrypted master key
master_key = fp.readline().strip()
master_key = binascii.a2b_hex(master_key)
# generate key for decrypting the master key
user_key = PBKDF2(password, user_salt, dkLen=256 // 8, count=rounds)
# decrypt the master key and iv
cipher = AES.new(user_key,
mode=AES.MODE_CBC,
IV=iv)
master_key = bytearray(cipher.decrypt(master_key))
# format: <len IV: 1 byte><IV: n bytes><len key: 1 byte><key: m bytes><len checksum: 1 byte><checksum: k bytes>
# get IV
l = master_key.pop(0)
master_iv = bytes(master_key[:l])
master_key = master_key[l:]
# get key
l = master_key.pop(0)
mk = bytes(master_key[:l])
master_key = master_key[l:]
# get checksum
l = master_key.pop(0)
master_ck = bytes(master_key[:l])
# double encode utf8
utf8mk = self.encode_utf8(mk)
# calculate checksum by using PBKDF2
calc_ck = PBKDF2(utf8mk, ck_salt, dkLen=256//8, count=rounds)
assert calc_ck == master_ck
# install decryption key
cipher = AES.new(mk,
mode=AES.MODE_CBC,
IV=master_iv)
off = fp.tell()
fp.seek(0, 2)
length = fp.tell() - off
fp.seek(off)
if self.stream:
# decryption transformer for Proxy class
def decrypt(data):
data = bytearray(cipher.decrypt(data))
if fp.tell() - off >= length:
# check padding (PKCS#7)
pad = data[-1]
assert data.endswith(bytearray([pad] * pad)), "Expected {!r} got {!r}".format(bytearray([pad] * pad), data[-pad:])
data = data[:-pad]
return data
return Proxy(decrypt, fp, cipher.block_size)
else:
data = bytearray(cipher.decrypt(fp.read()))
pad = data[-1]
assert data.endswith(bytearray([pad] * pad)), "Expected {!r} got {!r}".format(bytearray([pad] * pad), data[-pad:])
data = data[:-pad]
return io.BytesIO(data) | Internal decryption function
Uses either the password argument for the decryption,
or, if not supplied, the password field of the object
:param fp: a file object or similar which supports the readline and read methods
:rtype: Proxy | Below is the the instruction that describes the task:
### Input:
Internal decryption function
Uses either the password argument for the decryption,
or, if not supplied, the password field of the object
:param fp: a file object or similar which supports the readline and read methods
:rtype: Proxy
### Response:
def _decrypt(self, fp, password=None):
"""
Internal decryption function
Uses either the password argument for the decryption,
or, if not supplied, the password field of the object
:param fp: a file object or similar which supports the readline and read methods
:rtype: Proxy
"""
if AES is None:
raise ImportError("PyCrypto required")
if password is None:
password = self.password
if password is None:
raise ValueError(
"Password need to be provided to extract encrypted archives")
# read the PBKDF2 parameters
# salt
user_salt = fp.readline().strip()
user_salt = binascii.a2b_hex(user_salt)
# checksum salt
ck_salt = fp.readline().strip()
ck_salt = binascii.a2b_hex(ck_salt)
# hashing rounds
rounds = fp.readline().strip()
rounds = int(rounds)
# encryption IV
iv = fp.readline().strip()
iv = binascii.a2b_hex(iv)
# encrypted master key
master_key = fp.readline().strip()
master_key = binascii.a2b_hex(master_key)
# generate key for decrypting the master key
user_key = PBKDF2(password, user_salt, dkLen=256 // 8, count=rounds)
# decrypt the master key and iv
cipher = AES.new(user_key,
mode=AES.MODE_CBC,
IV=iv)
master_key = bytearray(cipher.decrypt(master_key))
# format: <len IV: 1 byte><IV: n bytes><len key: 1 byte><key: m bytes><len checksum: 1 byte><checksum: k bytes>
# get IV
l = master_key.pop(0)
master_iv = bytes(master_key[:l])
master_key = master_key[l:]
# get key
l = master_key.pop(0)
mk = bytes(master_key[:l])
master_key = master_key[l:]
# get checksum
l = master_key.pop(0)
master_ck = bytes(master_key[:l])
# double encode utf8
utf8mk = self.encode_utf8(mk)
# calculate checksum by using PBKDF2
calc_ck = PBKDF2(utf8mk, ck_salt, dkLen=256//8, count=rounds)
assert calc_ck == master_ck
# install decryption key
cipher = AES.new(mk,
mode=AES.MODE_CBC,
IV=master_iv)
off = fp.tell()
fp.seek(0, 2)
length = fp.tell() - off
fp.seek(off)
if self.stream:
# decryption transformer for Proxy class
def decrypt(data):
data = bytearray(cipher.decrypt(data))
if fp.tell() - off >= length:
# check padding (PKCS#7)
pad = data[-1]
assert data.endswith(bytearray([pad] * pad)), "Expected {!r} got {!r}".format(bytearray([pad] * pad), data[-pad:])
data = data[:-pad]
return data
return Proxy(decrypt, fp, cipher.block_size)
else:
data = bytearray(cipher.decrypt(fp.read()))
pad = data[-1]
assert data.endswith(bytearray([pad] * pad)), "Expected {!r} got {!r}".format(bytearray([pad] * pad), data[-pad:])
data = data[:-pad]
return io.BytesIO(data) |
def array(
item_processor, # type: Processor
alias=None, # type: Optional[Text]
nested=None, # type: Optional[Text]
omit_empty=False, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create an array processor that can be used to parse and serialize array data.
XML arrays may be nested within an array element, or they may be embedded
within their parent. A nested array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<nested-array>
<array-item>0</array-item>
<array-item>1</array-item>
</nested-array>
</root-element>
The corresponding embedded array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<array-item>0</array-item>
<array-item>1</array-item>
</root-element>
An array is considered required when its item processor is configured as being
required.
:param item_processor: A declxml processor object for the items of the array.
:param alias: If specified, the name given to the array when read from XML.
If not specified, then the name of the item processor is used instead.
:param nested: If the array is a nested array, then this should be the name of
the element under which all array items are located. If not specified, then
the array is treated as an embedded array. Can also be specified using supported
XPath syntax.
:param omit_empty: If True, then nested arrays will be omitted when serializing if
they are empty. Only valid when nested is specified. Note that an empty array
may only be omitted if it is not itself contained within an array. That is,
for an array of arrays, any empty arrays in the outer array will always be
serialized to prevent information about the original array from being lost
when serializing.
:param hooks: A Hooks object.
:return: A declxml processor object.
"""
processor = _Array(item_processor, alias, nested, omit_empty)
return _processor_wrap_if_hooks(processor, hooks) | Create an array processor that can be used to parse and serialize array data.
XML arrays may be nested within an array element, or they may be embedded
within their parent. A nested array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<nested-array>
<array-item>0</array-item>
<array-item>1</array-item>
</nested-array>
</root-element>
The corresponding embedded array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<array-item>0</array-item>
<array-item>1</array-item>
</root-element>
An array is considered required when its item processor is configured as being
required.
:param item_processor: A declxml processor object for the items of the array.
:param alias: If specified, the name given to the array when read from XML.
If not specified, then the name of the item processor is used instead.
:param nested: If the array is a nested array, then this should be the name of
the element under which all array items are located. If not specified, then
the array is treated as an embedded array. Can also be specified using supported
XPath syntax.
:param omit_empty: If True, then nested arrays will be omitted when serializing if
they are empty. Only valid when nested is specified. Note that an empty array
may only be omitted if it is not itself contained within an array. That is,
for an array of arrays, any empty arrays in the outer array will always be
serialized to prevent information about the original array from being lost
when serializing.
:param hooks: A Hooks object.
:return: A declxml processor object. | Below is the the instruction that describes the task:
### Input:
Create an array processor that can be used to parse and serialize array data.
XML arrays may be nested within an array element, or they may be embedded
within their parent. A nested array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<nested-array>
<array-item>0</array-item>
<array-item>1</array-item>
</nested-array>
</root-element>
The corresponding embedded array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<array-item>0</array-item>
<array-item>1</array-item>
</root-element>
An array is considered required when its item processor is configured as being
required.
:param item_processor: A declxml processor object for the items of the array.
:param alias: If specified, the name given to the array when read from XML.
If not specified, then the name of the item processor is used instead.
:param nested: If the array is a nested array, then this should be the name of
the element under which all array items are located. If not specified, then
the array is treated as an embedded array. Can also be specified using supported
XPath syntax.
:param omit_empty: If True, then nested arrays will be omitted when serializing if
they are empty. Only valid when nested is specified. Note that an empty array
may only be omitted if it is not itself contained within an array. That is,
for an array of arrays, any empty arrays in the outer array will always be
serialized to prevent information about the original array from being lost
when serializing.
:param hooks: A Hooks object.
:return: A declxml processor object.
### Response:
def array(
item_processor, # type: Processor
alias=None, # type: Optional[Text]
nested=None, # type: Optional[Text]
omit_empty=False, # type: bool
hooks=None # type: Optional[Hooks]
):
# type: (...) -> RootProcessor
"""
Create an array processor that can be used to parse and serialize array data.
XML arrays may be nested within an array element, or they may be embedded
within their parent. A nested array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<nested-array>
<array-item>0</array-item>
<array-item>1</array-item>
</nested-array>
</root-element>
The corresponding embedded array would look like:
.. sourcecode:: xml
<root-element>
<some-element>ABC</some-element>
<array-item>0</array-item>
<array-item>1</array-item>
</root-element>
An array is considered required when its item processor is configured as being
required.
:param item_processor: A declxml processor object for the items of the array.
:param alias: If specified, the name given to the array when read from XML.
If not specified, then the name of the item processor is used instead.
:param nested: If the array is a nested array, then this should be the name of
the element under which all array items are located. If not specified, then
the array is treated as an embedded array. Can also be specified using supported
XPath syntax.
:param omit_empty: If True, then nested arrays will be omitted when serializing if
they are empty. Only valid when nested is specified. Note that an empty array
may only be omitted if it is not itself contained within an array. That is,
for an array of arrays, any empty arrays in the outer array will always be
serialized to prevent information about the original array from being lost
when serializing.
:param hooks: A Hooks object.
:return: A declxml processor object.
"""
processor = _Array(item_processor, alias, nested, omit_empty)
return _processor_wrap_if_hooks(processor, hooks) |
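A usage sketch for the nested-array layout from the docstring. It assumes declxml is importable and that its dictionary/string/integer processors and parse_from_string behave as in declxml's documented API; treat the exact calls as assumptions rather than guarantees.

import declxml as xml  # assumed import alias

# Processor mirroring the nested-array XML shown in the docstring above.
processor = xml.dictionary('root-element', [
    xml.string('some-element'),
    xml.array(xml.integer('array-item'), nested='nested-array'),
])

document = """
<root-element>
    <some-element>ABC</some-element>
    <nested-array>
        <array-item>0</array-item>
        <array-item>1</array-item>
    </nested-array>
</root-element>
"""

value = xml.parse_from_string(processor, document)
# Expected shape: {'some-element': 'ABC', 'nested-array': [0, 1]}
print(value)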
def rand_ssn():
"""Random SSN. (9 digits)
Example::
>>> rand_ssn()
295-50-0178
"""
return "%s-%s-%s" % (rand_str(3, string.digits),
rand_str(2, string.digits),
rand_str(4, string.digits)) | Random SSN. (9 digits)
Example::
>>> rand_ssn()
295-50-0178 | Below is the the instruction that describes the task:
### Input:
Random SSN. (9 digits)
Example::
>>> rand_ssn()
295-50-0178
### Response:
def rand_ssn():
"""Random SSN. (9 digits)
Example::
>>> rand_ssn()
295-50-0178
"""
return "%s-%s-%s" % (rand_str(3, string.digits),
rand_str(2, string.digits),
rand_str(4, string.digits)) |
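The snippet relies on a rand_str helper that is not shown in this record; a minimal stand-in consistent with how it is called (a length followed by an allowed-character pool) might look like this, purely as an assumption for illustration.

import random
import string

def rand_str(length, allowed):
    """Hypothetical helper: random string of `length` characters drawn from `allowed`."""
    return ''.join(random.choice(allowed) for _ in range(length))

# With that in place, rand_ssn() yields strings such as '295-50-0178'.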
def update(self):
"""update performs the analysis, then runs the forecast using the updated self.parensemble.
This can be called repeatedly to iterate..."""
parensemble = self.analysis_evensen()
obsensemble = self.forecast(parensemble=parensemble)
# todo: check for phi improvement
if True:
self.obsensemble = obsensemble
self.parensemble = parensemble
self.iter_num += 1 | update performs the analysis, then runs the forecast using the updated self.parensemble.
This can be called repeatedly to iterate... | Below is the the instruction that describes the task:
### Input:
update performs the analysis, then runs the forecast using the updated self.parensemble.
This can be called repeatedly to iterate...
### Response:
def update(self):
"""update performs the analysis, then runs the forecast using the updated self.parensemble.
This can be called repeatedly to iterate..."""
parensemble = self.analysis_evensen()
obsensemble = self.forecast(parensemble=parensemble)
# todo: check for phi improvement
if True:
self.obsensemble = obsensemble
self.parensemble = parensemble
self.iter_num += 1 |
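Since each call performs one analysis/forecast cycle, iterating the smoother is just a loop over update(); the sketch below assumes an already-initialised instance that exposes this method and the iter_num counter.

def run_smoother(es, num_iterations=5):
    """Iterate an ensemble smoother exposing update() and iter_num (assumed interface)."""
    for _ in range(num_iterations):
        es.update()  # one analysis step followed by a forecast with the updated parameters
    return es.iter_num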
def create_widget(self, place, type, file=None, **kwargs):
'''
Create a widget object based on given arguments.
If file object is provided, callable arguments will be resolved:
its return value will be used after calling them with file as first
parameter.
All extra `kwargs` parameters will be passed to widget constructor.
:param place: place hint where widget should be shown.
:type place: str
:param type: widget type name as taken from :attr:`widget_types` dict
keys.
:type type: str
:param file: optional file object for widget attribute resolving
:type file: browsepy.files.Node or None
:returns: widget instance
:rtype: object
'''
widget_class = self.widget_types.get(type, self.widget_types['base'])
kwargs.update(place=place, type=type)
try:
element = widget_class(**kwargs)
except TypeError as e:
message = e.args[0] if e.args else ''
if (
'unexpected keyword argument' in message or
'required positional argument' in message
):
raise WidgetParameterException(
'type %s; %s; available: %r'
% (type, message, widget_class._fields)
)
raise e
if file and any(map(callable, element)):
return self._resolve_widget(file, element)
return element | Create a widget object based on given arguments.
If file object is provided, callable arguments will be resolved:
its return value will be used after calling them with file as first
parameter.
All extra `kwargs` parameters will be passed to widget constructor.
:param place: place hint where widget should be shown.
:type place: str
:param type: widget type name as taken from :attr:`widget_types` dict
keys.
:type type: str
:param file: optional file object for widget attribute resolving
:type file: browsepy.files.Node or None
:returns: widget instance
:rtype: object | Below is the the instruction that describes the task:
### Input:
Create a widget object based on given arguments.
If file object is provided, callable arguments will be resolved:
its return value will be used after calling them with file as first
parameter.
All extra `kwargs` parameters will be passed to widget constructor.
:param place: place hint where widget should be shown.
:type place: str
:param type: widget type name as taken from :attr:`widget_types` dict
keys.
:type type: str
:param file: optional file object for widget attribute resolving
:type file: browsepy.files.Node or None
:returns: widget instance
:rtype: object
### Response:
def create_widget(self, place, type, file=None, **kwargs):
'''
Create a widget object based on given arguments.
If file object is provided, callable arguments will be resolved:
its return value will be used after calling them with file as first
parameter.
All extra `kwargs` parameters will be passed to widget constructor.
:param place: place hint where widget should be shown.
:type place: str
:param type: widget type name as taken from :attr:`widget_types` dict
keys.
:type type: str
:param file: optional file object for widget attribute resolving
:type file: browsepy.files.Node or None
:returns: widget instance
:rtype: object
'''
widget_class = self.widget_types.get(type, self.widget_types['base'])
kwargs.update(place=place, type=type)
try:
element = widget_class(**kwargs)
except TypeError as e:
message = e.args[0] if e.args else ''
if (
'unexpected keyword argument' in message or
'required positional argument' in message
):
raise WidgetParameterException(
'type %s; %s; available: %r'
% (type, message, widget_class._fields)
)
raise e
if file and any(map(callable, element)):
return self._resolve_widget(file, element)
return element |
def export_element(bpmn_graph, export_elements, node, nodes_classification, order=0, prefix="", condition="",
who="", add_join=False):
"""
Export a node with "Element" classification (task, subprocess or gateway)
:param bpmn_graph: an instance of BpmnDiagramGraph class,
:param export_elements: a dictionary object. The key is a node ID, value is a dictionary of parameters that
will be used in exported CSV document,
:param node: networkx.Node object,
:param nodes_classification: dictionary of classification labels. Key - node id. Value - a list of labels,
:param order: the order param of exported node,
:param prefix: the prefix of exported node - if the task appears after some gateway, the prefix will identify
the branch
:param condition: the condition param of exported node,
:param who: the who param of exported node,
:param add_join: boolean flag. Used to indicate if "Join" element should be added to CSV.
:return: None or the next node object if the exported node was a gateway join.
"""
node_type = node[1][consts.Consts.type]
node_classification = nodes_classification[node[0]]
outgoing_flows = node[1].get(consts.Consts.outgoing_flow)
if node_type != consts.Consts.parallel_gateway and consts.Consts.default in node[1] \
and node[1][consts.Consts.default] is not None:
default_flow_id = node[1][consts.Consts.default]
else:
default_flow_id = None
if BpmnDiagramGraphCsvExport.classification_join in node_classification and not add_join:
# If the node is a join, then retract the recursion back to the split.
# In case of activity - return current node. In case of gateway - return outgoing node
# (we are making assumption that join has only one outgoing node)
if node_type == consts.Consts.task or node_type == consts.Consts.subprocess:
return node
else:
outgoing_flow_id = outgoing_flows[0]
outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id)
outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref])
return outgoing_node
else:
if node_type == consts.Consts.task:
export_elements.append({"Order": prefix + str(order), "Activity": node[1][consts.Consts.node_name],
"Condition": condition, "Who": who, "Subprocess": "", "Terminated": ""})
elif node_type == consts.Consts.subprocess:
export_elements.append({"Order": prefix + str(order), "Activity": node[1][consts.Consts.node_name],
"Condition": condition, "Who": who, "Subprocess": "yes", "Terminated": ""})
if BpmnDiagramGraphCsvExport.classification_split in node_classification:
next_node = None
alphabet_suffix_index = 0
for outgoing_flow_id in outgoing_flows:
outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id)
outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref])
# This will work only up to 26 outgoing flows
suffix = string.ascii_lowercase[alphabet_suffix_index]
next_prefix = prefix + str(order) + suffix
alphabet_suffix_index += 1
# parallel gateway does not use conditions
if node_type != consts.Consts.parallel_gateway and consts.Consts.name in outgoing_flow[2] \
and outgoing_flow[2][consts.Consts.name] is not None:
condition = outgoing_flow[2][consts.Consts.name]
else:
condition = ""
if BpmnDiagramGraphCsvExport.classification_join in nodes_classification[outgoing_node[0]]:
export_elements.append(
{"Order": next_prefix + str(1), "Activity": "goto " + prefix + str(order + 1),
"Condition": condition, "Who": who, "Subprocess": "", "Terminated": ""})
elif outgoing_flow_id == default_flow_id:
tmp_next_node = BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node,
nodes_classification, 1, next_prefix, "else",
who)
if tmp_next_node is not None:
next_node = tmp_next_node
else:
tmp_next_node = BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node,
nodes_classification, 1, next_prefix,
condition, who)
if tmp_next_node is not None:
next_node = tmp_next_node
if next_node is not None:
return BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, next_node,
nodes_classification, order=(order + 1), prefix=prefix,
who=who, add_join=True)
elif len(outgoing_flows) == 1:
outgoing_flow_id = outgoing_flows[0]
outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id)
outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref])
return BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node,
nodes_classification, order=(order + 1), prefix=prefix,
who=who)
else:
return None | Export a node with "Element" classification (task, subprocess or gateway)
:param bpmn_graph: an instance of BpmnDiagramGraph class,
:param export_elements: a dictionary object. The key is a node ID, value is a dictionary of parameters that
will be used in exported CSV document,
:param node: networkx.Node object,
:param nodes_classification: dictionary of classification labels. Key - node id. Value - a list of labels,
:param order: the order param of exported node,
:param prefix: the prefix of exported node - if the task appears after some gateway, the prefix will identify
the branch
:param condition: the condition param of exported node,
:param who: the who param of exported node,
:param add_join: boolean flag. Used to indicate if "Join" element should be added to CSV.
:return: None or the next node object if the exported node was a gateway join. | Below is the the instruction that describes the task:
### Input:
Export a node with "Element" classification (task, subprocess or gateway)
:param bpmn_graph: an instance of BpmnDiagramGraph class,
:param export_elements: a dictionary object. The key is a node ID, value is a dictionary of parameters that
will be used in exported CSV document,
:param node: networkx.Node object,
:param nodes_classification: dictionary of classification labels. Key - node id. Value - a list of labels,
:param order: the order param of exported node,
:param prefix: the prefix of exported node - if the task appears after some gateway, the prefix will identify
the branch
:param condition: the condition param of exported node,
:param who: the who param of exported node,
:param add_join: boolean flag. Used to indicate if "Join" element should be added to CSV.
:return: None or the next node object if the exported node was a gateway join.
### Response:
def export_element(bpmn_graph, export_elements, node, nodes_classification, order=0, prefix="", condition="",
who="", add_join=False):
"""
Export a node with "Element" classification (task, subprocess or gateway)
:param bpmn_graph: an instance of BpmnDiagramGraph class,
:param export_elements: a dictionary object. The key is a node ID, value is a dictionary of parameters that
will be used in exported CSV document,
:param node: networkx.Node object,
:param nodes_classification: dictionary of classification labels. Key - node id. Value - a list of labels,
:param order: the order param of exported node,
:param prefix: the prefix of exported node - if the task appears after some gateway, the prefix will identify
the branch
:param condition: the condition param of exported node,
:param who: the who param of exported node,
:param add_join: boolean flag. Used to indicate if "Join" element should be added to CSV.
:return: None or the next node object if the exported node was a gateway join.
"""
node_type = node[1][consts.Consts.type]
node_classification = nodes_classification[node[0]]
outgoing_flows = node[1].get(consts.Consts.outgoing_flow)
if node_type != consts.Consts.parallel_gateway and consts.Consts.default in node[1] \
and node[1][consts.Consts.default] is not None:
default_flow_id = node[1][consts.Consts.default]
else:
default_flow_id = None
if BpmnDiagramGraphCsvExport.classification_join in node_classification and not add_join:
# If the node is a join, then retract the recursion back to the split.
# In case of activity - return current node. In case of gateway - return outgoing node
# (we are making assumption that join has only one outgoing node)
if node_type == consts.Consts.task or node_type == consts.Consts.subprocess:
return node
else:
outgoing_flow_id = outgoing_flows[0]
outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id)
outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref])
return outgoing_node
else:
if node_type == consts.Consts.task:
export_elements.append({"Order": prefix + str(order), "Activity": node[1][consts.Consts.node_name],
"Condition": condition, "Who": who, "Subprocess": "", "Terminated": ""})
elif node_type == consts.Consts.subprocess:
export_elements.append({"Order": prefix + str(order), "Activity": node[1][consts.Consts.node_name],
"Condition": condition, "Who": who, "Subprocess": "yes", "Terminated": ""})
if BpmnDiagramGraphCsvExport.classification_split in node_classification:
next_node = None
alphabet_suffix_index = 0
for outgoing_flow_id in outgoing_flows:
outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id)
outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref])
# This will work only up to 26 outgoing flows
suffix = string.ascii_lowercase[alphabet_suffix_index]
next_prefix = prefix + str(order) + suffix
alphabet_suffix_index += 1
# parallel gateway does not use conditions
if node_type != consts.Consts.parallel_gateway and consts.Consts.name in outgoing_flow[2] \
and outgoing_flow[2][consts.Consts.name] is not None:
condition = outgoing_flow[2][consts.Consts.name]
else:
condition = ""
if BpmnDiagramGraphCsvExport.classification_join in nodes_classification[outgoing_node[0]]:
export_elements.append(
{"Order": next_prefix + str(1), "Activity": "goto " + prefix + str(order + 1),
"Condition": condition, "Who": who, "Subprocess": "", "Terminated": ""})
elif outgoing_flow_id == default_flow_id:
tmp_next_node = BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node,
nodes_classification, 1, next_prefix, "else",
who)
if tmp_next_node is not None:
next_node = tmp_next_node
else:
tmp_next_node = BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node,
nodes_classification, 1, next_prefix,
condition, who)
if tmp_next_node is not None:
next_node = tmp_next_node
if next_node is not None:
return BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, next_node,
nodes_classification, order=(order + 1), prefix=prefix,
who=who, add_join=True)
elif len(outgoing_flows) == 1:
outgoing_flow_id = outgoing_flows[0]
outgoing_flow = bpmn_graph.get_flow_by_id(outgoing_flow_id)
outgoing_node = bpmn_graph.get_node_by_id(outgoing_flow[2][consts.Consts.target_ref])
return BpmnDiagramGraphCsvExport.export_node(bpmn_graph, export_elements, outgoing_node,
nodes_classification, order=(order + 1), prefix=prefix,
who=who)
else:
return None |
def partial_autocorrelation(x, param):
"""
Calculates the value of the partial autocorrelation function at the given lag. The lag `k` partial autocorrelation
of a time series :math:`\\lbrace x_t, t = 1 \\ldots T \\rbrace` equals the partial correlation of :math:`x_t` and
:math:`x_{t-k}`, adjusted for the intermediate variables
:math:`\\lbrace x_{t-1}, \\ldots, x_{t-k+1} \\rbrace` ([1]).
Following [2], it can be defined as
.. math::
\\alpha_k = \\frac{ Cov(x_t, x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1})}
{\\sqrt{ Var(x_t | x_{t-1}, \\ldots, x_{t-k+1}) Var(x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1} )}}
with (a) :math:`x_t = f(x_{t-1}, \\ldots, x_{t-k+1})` and (b) :math:`x_{t-k} = f(x_{t-1}, \\ldots, x_{t-k+1})`
being AR(k-1) models that can be fitted by OLS. Be aware that in (a), the regression is done on past values to
predict :math:`x_t` whereas in (b), future values are used to calculate the past value :math:`x_{t-k}`.
It is said in [1] that "for an AR(p), the partial autocorrelations [ :math:`\\alpha_k` ] will be nonzero for `k<=p`
and zero for `k>p`."
With this property, it is used to determine the lag of an AR-Process.
.. rubric:: References
| [1] Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015).
| Time series analysis: forecasting and control. John Wiley & Sons.
| [2] https://onlinecourses.science.psu.edu/stat510/node/62
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"lag": val} with int val indicating the lag to be returned
:type param: list
:return: the value of this feature
:return type: float
"""
# Check the difference between demanded lags by param and possible lags to calculate (depends on len(x))
max_demanded_lag = max([lag["lag"] for lag in param])
n = len(x)
# Check if list is too short to make calculations
if n <= 1:
pacf_coeffs = [np.nan] * (max_demanded_lag + 1)
else:
if (n <= max_demanded_lag):
max_lag = n - 1
else:
max_lag = max_demanded_lag
pacf_coeffs = list(pacf(x, method="ld", nlags=max_lag))
pacf_coeffs = pacf_coeffs + [np.nan] * max(0, (max_demanded_lag - max_lag))
return [("lag_{}".format(lag["lag"]), pacf_coeffs[lag["lag"]]) for lag in param] | Calculates the value of the partial autocorrelation function at the given lag. The lag `k` partial autocorrelation
of a time series :math:`\\lbrace x_t, t = 1 \\ldots T \\rbrace` equals the partial correlation of :math:`x_t` and
:math:`x_{t-k}`, adjusted for the intermediate variables
:math:`\\lbrace x_{t-1}, \\ldots, x_{t-k+1} \\rbrace` ([1]).
Following [2], it can be defined as
.. math::
\\alpha_k = \\frac{ Cov(x_t, x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1})}
{\\sqrt{ Var(x_t | x_{t-1}, \\ldots, x_{t-k+1}) Var(x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1} )}}
with (a) :math:`x_t = f(x_{t-1}, \\ldots, x_{t-k+1})` and (b) :math:`x_{t-k} = f(x_{t-1}, \\ldots, x_{t-k+1})`
being AR(k-1) models that can be fitted by OLS. Be aware that in (a), the regression is done on past values to
predict :math:`x_t` whereas in (b), future values are used to calculate the past value :math:`x_{t-k}`.
It is said in [1] that "for an AR(p), the partial autocorrelations [ :math:`\\alpha_k` ] will be nonzero for `k<=p`
and zero for `k>p`."
With this property, it is used to determine the lag of an AR-Process.
.. rubric:: References
| [1] Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015).
| Time series analysis: forecasting and control. John Wiley & Sons.
| [2] https://onlinecourses.science.psu.edu/stat510/node/62
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"lag": val} with int val indicating the lag to be returned
:type param: list
:return: the value of this feature
:return type: float | Below is the the instruction that describes the task:
### Input:
Calculates the value of the partial autocorrelation function at the given lag. The lag `k` partial autocorrelation
of a time series :math:`\\lbrace x_t, t = 1 \\ldots T \\rbrace` equals the partial correlation of :math:`x_t` and
:math:`x_{t-k}`, adjusted for the intermediate variables
:math:`\\lbrace x_{t-1}, \\ldots, x_{t-k+1} \\rbrace` ([1]).
Following [2], it can be defined as
.. math::
\\alpha_k = \\frac{ Cov(x_t, x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1})}
{\\sqrt{ Var(x_t | x_{t-1}, \\ldots, x_{t-k+1}) Var(x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1} )}}
with (a) :math:`x_t = f(x_{t-1}, \\ldots, x_{t-k+1})` and (b) :math:`x_{t-k} = f(x_{t-1}, \\ldots, x_{t-k+1})`
being AR(k-1) models that can be fitted by OLS. Be aware that in (a), the regression is done on past values to
predict :math:`x_t` whereas in (b), future values are used to calculate the past value :math:`x_{t-k}`.
It is said in [1] that "for an AR(p), the partial autocorrelations [ :math:`\\alpha_k` ] will be nonzero for `k<=p`
and zero for `k>p`."
With this property, it is used to determine the lag of an AR-Process.
.. rubric:: References
| [1] Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015).
| Time series analysis: forecasting and control. John Wiley & Sons.
| [2] https://onlinecourses.science.psu.edu/stat510/node/62
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"lag": val} with int val indicating the lag to be returned
:type param: list
:return: the value of this feature
:return type: float
### Response:
def partial_autocorrelation(x, param):
"""
Calculates the value of the partial autocorrelation function at the given lag. The lag `k` partial autocorrelation
of a time series :math:`\\lbrace x_t, t = 1 \\ldots T \\rbrace` equals the partial correlation of :math:`x_t` and
:math:`x_{t-k}`, adjusted for the intermediate variables
:math:`\\lbrace x_{t-1}, \\ldots, x_{t-k+1} \\rbrace` ([1]).
Following [2], it can be defined as
.. math::
\\alpha_k = \\frac{ Cov(x_t, x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1})}
{\\sqrt{ Var(x_t | x_{t-1}, \\ldots, x_{t-k+1}) Var(x_{t-k} | x_{t-1}, \\ldots, x_{t-k+1} )}}
with (a) :math:`x_t = f(x_{t-1}, \\ldots, x_{t-k+1})` and (b) :math:`x_{t-k} = f(x_{t-1}, \\ldots, x_{t-k+1})`
being AR(k-1) models that can be fitted by OLS. Be aware that in (a), the regression is done on past values to
predict :math:`x_t` whereas in (b), future values are used to calculate the past value :math:`x_{t-k}`.
It is said in [1] that "for an AR(p), the partial autocorrelations [ :math:`\\alpha_k` ] will be nonzero for `k<=p`
and zero for `k>p`."
With this property, it is used to determine the lag of an AR-Process.
.. rubric:: References
| [1] Box, G. E., Jenkins, G. M., Reinsel, G. C., & Ljung, G. M. (2015).
| Time series analysis: forecasting and control. John Wiley & Sons.
| [2] https://onlinecourses.science.psu.edu/stat510/node/62
:param x: the time series to calculate the feature of
:type x: numpy.ndarray
:param param: contains dictionaries {"lag": val} with int val indicating the lag to be returned
:type param: list
:return: the value of this feature
:return type: float
"""
# Check the difference between demanded lags by param and possible lags to calculate (depends on len(x))
max_demanded_lag = max([lag["lag"] for lag in param])
n = len(x)
# Check if list is too short to make calculations
if n <= 1:
pacf_coeffs = [np.nan] * (max_demanded_lag + 1)
else:
if (n <= max_demanded_lag):
max_lag = n - 1
else:
max_lag = max_demanded_lag
pacf_coeffs = list(pacf(x, method="ld", nlags=max_lag))
pacf_coeffs = pacf_coeffs + [np.nan] * max(0, (max_demanded_lag - max_lag))
return [("lag_{}".format(lag["lag"]), pacf_coeffs[lag["lag"]]) for lag in param] |
def get(self, key=None, **kwargs):
"""
Ensures that only one result is returned from DB and raises an exception otherwise.
Can work in 3 different ways.
- If no argument is given, only does "ensuring about one and only object" job.
- If key given as only argument, retrieves the object from DB.
- if query filters given, implicitly calls filter() method.
Raises:
MultipleObjectsReturned: If more than one (1) record is returned.
"""
clone = copy.deepcopy(self)
# If we are in a slice, adjust the start and rows
if self._start:
clone.adapter.set_params(start=self._start)
if self._rows:
clone.adapter.set_params(rows=self._rows)
if key:
data, key = clone.adapter.get(key)
elif kwargs:
data, key = clone.filter(**kwargs).adapter.get()
else:
data, key = clone.adapter.get()
if clone._cfg['rtype'] == ReturnType.Object:
return data, key
return self._make_model(data, key) | Ensures that only one result is returned from DB and raises an exception otherwise.
Can work in 3 different ways.
- If no argument is given, only does "ensuring about one and only object" job.
- If key given as only argument, retrieves the object from DB.
- if query filters given, implicitly calls filter() method.
Raises:
MultipleObjectsReturned: If more than one (1) record is returned. | Below is the the instruction that describes the task:
### Input:
Ensures that only one result is returned from DB and raises an exception otherwise.
Can work in 3 different ways.
- If no argument is given, only does "ensuring about one and only object" job.
- If key given as only argument, retrieves the object from DB.
- if query filters given, implicitly calls filter() method.
Raises:
MultipleObjectsReturned: If more than one (1) record is returned.
### Response:
def get(self, key=None, **kwargs):
"""
Ensures that only one result is returned from DB and raises an exception otherwise.
Can work in 3 different ways.
- If no argument is given, only does "ensuring about one and only object" job.
- If key given as only argument, retrieves the object from DB.
- if query filters given, implicitly calls filter() method.
Raises:
MultipleObjectsReturned: If more than one (1) record is returned.
"""
clone = copy.deepcopy(self)
# If we are in a slice, adjust the start and rows
if self._start:
clone.adapter.set_params(start=self._start)
if self._rows:
clone.adapter.set_params(rows=self._rows)
if key:
data, key = clone.adapter.get(key)
elif kwargs:
data, key = clone.filter(**kwargs).adapter.get()
else:
data, key = clone.adapter.get()
if clone._cfg['rtype'] == ReturnType.Object:
return data, key
return self._make_model(data, key) |
def listar_por_equipamento(self, id_equipment):
"""List all Script related Equipment.
:param id_equipment: Identifier of the Equipment. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{script': [{‘id’: < id >,
‘nome’: < nome >,
‘descricao’: < descricao >,
‘id_tipo_roteiro’: < id_tipo_roteiro >,
‘nome_tipo_roteiro’: < nome_tipo_roteiro >,
‘descricao_tipo_roteiro’: < descricao_tipo_roteiro >}, ...more Script...]}
:raise InvalidParameterError: The identifier of Equipment is null and invalid.
:raise EquipamentoNaoExisteError: Equipment not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_equipment):
raise InvalidParameterError(
u'The identifier of Equipment is invalid or was not informed.')
url = 'script/equipment/' + str(id_equipment) + '/'
code, map = self.submit(None, 'GET', url)
key = 'script'
return get_list_map(self.response(code, map, [key]), key) | List all Scripts related to the Equipment.
:param id_equipment: Identifier of the Equipment. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{script': [{‘id’: < id >,
‘nome’: < nome >,
‘descricao’: < descricao >,
‘id_tipo_roteiro’: < id_tipo_roteiro >,
‘nome_tipo_roteiro’: < nome_tipo_roteiro >,
‘descricao_tipo_roteiro’: < descricao_tipo_roteiro >}, ...more Script...]}
:raise InvalidParameterError: The identifier of Equipment is null and invalid.
:raise EquipamentoNaoExisteError: Equipment not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response. | Below is the the instruction that describes the task:
### Input:
List all Scripts related to the Equipment.
:param id_equipment: Identifier of the Equipment. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{script': [{‘id’: < id >,
‘nome’: < nome >,
‘descricao’: < descricao >,
‘id_tipo_roteiro’: < id_tipo_roteiro >,
‘nome_tipo_roteiro’: < nome_tipo_roteiro >,
‘descricao_tipo_roteiro’: < descricao_tipo_roteiro >}, ...more Script...]}
:raise InvalidParameterError: The identifier of Equipment is null and invalid.
:raise EquipamentoNaoExisteError: Equipment not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
### Response:
def listar_por_equipamento(self, id_equipment):
"""List all Script related Equipment.
:param id_equipment: Identifier of the Equipment. Integer value and greater than zero.
:return: Dictionary with the following structure:
::
{script': [{‘id’: < id >,
‘nome’: < nome >,
‘descricao’: < descricao >,
‘id_tipo_roteiro’: < id_tipo_roteiro >,
‘nome_tipo_roteiro’: < nome_tipo_roteiro >,
‘descricao_tipo_roteiro’: < descricao_tipo_roteiro >}, ...more Script...]}
:raise InvalidParameterError: The identifier of Equipment is null and invalid.
:raise EquipamentoNaoExisteError: Equipment not registered.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
"""
if not is_valid_int_param(id_equipment):
raise InvalidParameterError(
u'The identifier of Equipment is invalid or was not informed.')
url = 'script/equipment/' + str(id_equipment) + '/'
code, map = self.submit(None, 'GET', url)
key = 'script'
return get_list_map(self.response(code, map, [key]), key) |
def enter_event_loop(self):
"""\
Main loop of the IRCConnection - reads from the socket and dispatches
based on regex matching
"""
patterns = self.dispatch_patterns()
self.logger.debug('entering receive loop')
while 1:
try:
data = self._sock_file.readline()
except socket.error:
data = None
if not data:
self.logger.info('server closed connection')
self.close()
return True
data = data.rstrip()
for pattern, callback in patterns:
match = pattern.match(data)
if match:
callback(**match.groupdict()) | \
Main loop of the IRCConnection - reads from the socket and dispatches
based on regex matching | Below is the the instruction that describes the task:
### Input:
\
Main loop of the IRCConnection - reads from the socket and dispatches
based on regex matching
### Response:
def enter_event_loop(self):
"""\
Main loop of the IRCConnection - reads from the socket and dispatches
based on regex matching
"""
patterns = self.dispatch_patterns()
self.logger.debug('entering receive loop')
while 1:
try:
data = self._sock_file.readline()
except socket.error:
data = None
if not data:
self.logger.info('server closed connection')
self.close()
return True
data = data.rstrip()
for pattern, callback in patterns:
match = pattern.match(data)
if match:
callback(**match.groupdict()) |
def create_page(cls, webdriver=None, **kwargs):
"""Class method short cut to call PageFactory on itself. Use it to instantiate
this PageObject using a webdriver.
Args:
webdriver (Webdriver): Instance of Selenium Webdriver.
Returns:
PageObject
Raises:
InvalidPageError
"""
if not webdriver:
webdriver = WTF_WEBDRIVER_MANAGER.get_driver()
return PageFactory.create_page(cls, webdriver=webdriver, **kwargs) | Class method short cut to call PageFactory on itself. Use it to instantiate
this PageObject using a webdriver.
Args:
webdriver (Webdriver): Instance of Selenium Webdriver.
Returns:
PageObject
Raises:
InvalidPageError | Below is the the instruction that describes the task:
### Input:
Class method short cut to call PageFactory on itself. Use it to instantiate
this PageObject using a webdriver.
Args:
webdriver (Webdriver): Instance of Selenium Webdriver.
Returns:
PageObject
Raises:
InvalidPageError
### Response:
def create_page(cls, webdriver=None, **kwargs):
"""Class method short cut to call PageFactory on itself. Use it to instantiate
this PageObject using a webdriver.
Args:
webdriver (Webdriver): Instance of Selenium Webdriver.
Returns:
PageObject
Raises:
InvalidPageError
"""
if not webdriver:
webdriver = WTF_WEBDRIVER_MANAGER.get_driver()
return PageFactory.create_page(cls, webdriver=webdriver, **kwargs) |
def indicate_fulfillment_ficon(self, control_unit, unit_address):
"""
TODO: Add ControlUnit objects etc for FICON support.
Indicate completion of :term:`fulfillment` for this ECKD (=FICON)
storage volume and provide identifying information (control unit
and unit address) about the actual storage volume on the storage
subsystem.
Manually indicating fulfillment is required for all ECKD volumes,
because they are not auto-discovered by the CPC.
This method performs the "Fulfill FICON Storage Volume" HMC operation.
Upon successful completion of this operation, the "fulfillment-state"
property of this storage volume object will have been set to
"complete". That is necessary for the CPC to be able to address and
connect to the volume.
If the "fulfillment-state" properties of all storage volumes in the
owning storage group are "complete", the owning storage group's
"fulfillment-state" property will also be set to "complete".
Parameters:
control_unit (:class:`~zhmcclient.ControlUnit`):
Logical control unit (LCU) in which the backing ECKD volume is
defined.
unit_address (:term:`string`):
Unit address of the backing ECKD volume within its logical control
unit,
as a hexadecimal number of up to 2 characters in any lexical case.
Authorization requirements:
* Object-access permission to the storage group owning this storage
volume.
* Task permission to the "Configure Storage - Storage Administrator"
task.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
# The operation requires exactly 2 characters in lower case
unit_address_2 = format(int(unit_address, 16), '02x')
body = {
'control-unit-uri': control_unit.uri,
'unit-address': unit_address_2,
}
self.manager.session.post(
self.uri + '/operations/fulfill-ficon-storage-volume',
body=body) | TODO: Add ControlUnit objects etc for FICON support.
Indicate completion of :term:`fulfillment` for this ECKD (=FICON)
storage volume and provide identifying information (control unit
and unit address) about the actual storage volume on the storage
subsystem.
Manually indicating fulfillment is required for all ECKD volumes,
because they are not auto-discovered by the CPC.
This method performs the "Fulfill FICON Storage Volume" HMC operation.
Upon successful completion of this operation, the "fulfillment-state"
property of this storage volume object will have been set to
"complete". That is necessary for the CPC to be able to address and
connect to the volume.
If the "fulfillment-state" properties of all storage volumes in the
owning storage group are "complete", the owning storage group's
"fulfillment-state" property will also be set to "complete".
Parameters:
control_unit (:class:`~zhmcclient.ControlUnit`):
Logical control unit (LCU) in which the backing ECKD volume is
defined.
unit_address (:term:`string`):
Unit address of the backing ECKD volume within its logical control
unit,
as a hexadecimal number of up to 2 characters in any lexical case.
Authorization requirements:
* Object-access permission to the storage group owning this storage
volume.
* Task permission to the "Configure Storage - Storage Administrator"
task.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError` | Below is the the instruction that describes the task:
### Input:
TODO: Add ControlUnit objects etc for FICON support.
Indicate completion of :term:`fulfillment` for this ECKD (=FICON)
storage volume and provide identifying information (control unit
and unit address) about the actual storage volume on the storage
subsystem.
Manually indicating fulfillment is required for all ECKD volumes,
because they are not auto-discovered by the CPC.
This method performs the "Fulfill FICON Storage Volume" HMC operation.
Upon successful completion of this operation, the "fulfillment-state"
property of this storage volume object will have been set to
"complete". That is necessary for the CPC to be able to address and
connect to the volume.
If the "fulfillment-state" properties of all storage volumes in the
owning storage group are "complete", the owning storage group's
"fulfillment-state" property will also be set to "complete".
Parameters:
control_unit (:class:`~zhmcclient.ControlUnit`):
Logical control unit (LCU) in which the backing ECKD volume is
defined.
unit_address (:term:`string`):
Unit address of the backing ECKD volume within its logical control
unit,
as a hexadecimal number of up to 2 characters in any lexical case.
Authorization requirements:
* Object-access permission to the storage group owning this storage
volume.
* Task permission to the "Configure Storage - Storage Administrator"
task.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
### Response:
def indicate_fulfillment_ficon(self, control_unit, unit_address):
"""
TODO: Add ControlUnit objects etc for FICON support.
Indicate completion of :term:`fulfillment` for this ECKD (=FICON)
storage volume and provide identifying information (control unit
and unit address) about the actual storage volume on the storage
subsystem.
Manually indicating fulfillment is required for all ECKD volumes,
because they are not auto-discovered by the CPC.
This method performs the "Fulfill FICON Storage Volume" HMC operation.
Upon successful completion of this operation, the "fulfillment-state"
property of this storage volume object will have been set to
"complete". That is necessary for the CPC to be able to address and
connect to the volume.
If the "fulfillment-state" properties of all storage volumes in the
owning storage group are "complete", the owning storage group's
"fulfillment-state" property will also be set to "complete".
Parameters:
control_unit (:class:`~zhmcclient.ControlUnit`):
Logical control unit (LCU) in which the backing ECKD volume is
defined.
unit_address (:term:`string`):
Unit address of the backing ECKD volume within its logical control
unit,
as a hexadecimal number of up to 2 characters in any lexical case.
Authorization requirements:
* Object-access permission to the storage group owning this storage
volume.
* Task permission to the "Configure Storage - Storage Administrator"
task.
Raises:
:exc:`~zhmcclient.HTTPError`
:exc:`~zhmcclient.ParseError`
:exc:`~zhmcclient.AuthError`
:exc:`~zhmcclient.ConnectionError`
"""
# The operation requires exactly 2 characters in lower case
unit_address_2 = format(int(unit_address, 16), '02x')
body = {
'control-unit-uri': control_unit.uri,
'unit-address': unit_address_2,
}
self.manager.session.post(
self.uri + '/operations/fulfill-ficon-storage-volume',
body=body) |
def bread(stream):
""" Decode a file or stream to an object.
"""
if hasattr(stream, "read"):
return bdecode(stream.read())
else:
handle = open(stream, "rb")
try:
return bdecode(handle.read())
finally:
handle.close() | Decode a file or stream to an object. | Below is the the instruction that describes the task:
### Input:
Decode a file or stream to an object.
### Response:
def bread(stream):
""" Decode a file or stream to an object.
"""
if hasattr(stream, "read"):
return bdecode(stream.read())
else:
handle = open(stream, "rb")
try:
return bdecode(handle.read())
finally:
handle.close() |
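bread() accepts either an already-open file-like object or a filesystem path, a common duck-typing pattern. A rough standalone illustration of the same pattern, using json as a stand-in decoder since bdecode lives in the surrounding module:
import io
import json
def read_any(stream):
    # accept either a file-like object (anything with .read) or a path
    if hasattr(stream, 'read'):
        return json.loads(stream.read())
    with open(stream, 'rb') as handle:
        return json.loads(handle.read())
print(read_any(io.StringIO('{"a": 1}')))  # a path string would work the same way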
def minowski(h1, h2, p = 2): # 46..45..14,11..43..44 / 45 us for p=int(-inf..-24..-1,1..24..inf) / float @array, +20 us @list \w 100 bins
r"""
Minowski distance.
With :math:`p=2` equal to the Euclidean distance, with :math:`p=1` equal to the Manhattan distance,
and the Chebyshev distance implementation represents the case of :math:`p=\pm inf`.
The Minowksi distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is
defined as:
.. math::
d_p(H, H') = \left(\sum_{m=1}^M|H_m - H'_m|^p
\right)^{\frac{1}{p}}
*Attributes:*
- a real metric
*Attributes for normalized histograms:*
- :math:`d(H, H')\in[0, \sqrt[p]{2}]`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-normalized histograms:*
- :math:`d(H, H')\in[0, \infty)`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-equal histograms:*
- not applicable
Parameters
----------
h1 : sequence
The first histogram.
h2 : sequence
The second histogram.
p : float
The :math:`p` value in the Minowksi distance formula.
Returns
-------
minowski : float
Minowski distance.
Raises
------
ValueError
If ``p`` is zero.
"""
h1, h2 = __prepare_histogram(h1, h2)
if 0 == p: raise ValueError('p can not be zero')
elif int == type(p):
if p > 0 and p < 25: return __minowski_low_positive_integer_p(h1, h2, p)
elif p < 0 and p > -25: return __minowski_low_negative_integer_p(h1, h2, p)
return math.pow(scipy.sum(scipy.power(scipy.absolute(h1 - h2), p)), 1./p) | r"""
Minowski distance.
With :math:`p=2` equal to the Euclidean distance, with :math:`p=1` equal to the Manhattan distance,
and the Chebyshev distance implementation represents the case of :math:`p=\pm inf`.
The Minowksi distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is
defined as:
.. math::
d_p(H, H') = \left(\sum_{m=1}^M|H_m - H'_m|^p
\right)^{\frac{1}{p}}
*Attributes:*
- a real metric
*Attributes for normalized histograms:*
- :math:`d(H, H')\in[0, \sqrt[p]{2}]`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-normalized histograms:*
- :math:`d(H, H')\in[0, \infty)`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-equal histograms:*
- not applicable
Parameters
----------
h1 : sequence
The first histogram.
h2 : sequence
The second histogram.
p : float
The :math:`p` value in the Minowksi distance formula.
Returns
-------
minowski : float
Minowski distance.
Raises
------
ValueError
If ``p`` is zero. | Below is the the instruction that describes the task:
### Input:
r"""
Minowski distance.
With :math:`p=2` equal to the Euclidean distance, with :math:`p=1` equal to the Manhattan distance,
and the Chebyshev distance implementation represents the case of :math:`p=\pm inf`.
The Minowksi distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is
defined as:
.. math::
d_p(H, H') = \left(\sum_{m=1}^M|H_m - H'_m|^p
\right)^{\frac{1}{p}}
*Attributes:*
- a real metric
*Attributes for normalized histograms:*
- :math:`d(H, H')\in[0, \sqrt[p]{2}]`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-normalized histograms:*
- :math:`d(H, H')\in[0, \infty)`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-equal histograms:*
- not applicable
Parameters
----------
h1 : sequence
The first histogram.
h2 : sequence
The second histogram.
p : float
The :math:`p` value in the Minowksi distance formula.
Returns
-------
minowski : float
Minowski distance.
Raises
------
ValueError
If ``p`` is zero.
### Response:
def minowski(h1, h2, p = 2): # 46..45..14,11..43..44 / 45 us for p=int(-inf..-24..-1,1..24..inf) / float @array, +20 us @list \w 100 bins
r"""
Minowski distance.
With :math:`p=2` equal to the Euclidean distance, with :math:`p=1` equal to the Manhattan distance,
and the Chebyshev distance implementation represents the case of :math:`p=\pm inf`.
The Minowksi distance between two histograms :math:`H` and :math:`H'` of size :math:`m` is
defined as:
.. math::
d_p(H, H') = \left(\sum_{m=1}^M|H_m - H'_m|^p
\right)^{\frac{1}{p}}
*Attributes:*
- a real metric
*Attributes for normalized histograms:*
- :math:`d(H, H')\in[0, \sqrt[p]{2}]`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-normalized histograms:*
- :math:`d(H, H')\in[0, \infty)`
- :math:`d(H, H) = 0`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-equal histograms:*
- not applicable
Parameters
----------
h1 : sequence
The first histogram.
h2 : sequence
The second histogram.
p : float
The :math:`p` value in the Minowksi distance formula.
Returns
-------
minowski : float
Minowski distance.
Raises
------
ValueError
If ``p`` is zero.
"""
h1, h2 = __prepare_histogram(h1, h2)
if 0 == p: raise ValueError('p can not be zero')
elif int == type(p):
if p > 0 and p < 25: return __minowski_low_positive_integer_p(h1, h2, p)
elif p < 0 and p > -25: return __minowski_low_negative_integer_p(h1, h2, p)
return math.pow(scipy.sum(scipy.power(scipy.absolute(h1 - h2), p)), 1./p) |
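A quick numeric sanity check of the formula, in plain numpy and independent of the helpers used above:
import numpy as np
h1 = np.array([0.5, 0.5, 0.0])
h2 = np.array([1.0, 0.0, 0.0])
# p = 2 reduces to the Euclidean distance: sqrt(0.25 + 0.25) ~= 0.7071
d2 = np.sum(np.abs(h1 - h2) ** 2) ** (1.0 / 2)
# p = 1 reduces to the Manhattan distance: 0.5 + 0.5 = 1.0
d1 = np.sum(np.abs(h1 - h2) ** 1) ** (1.0 / 1)
print(d2, d1)
Both values stay within the stated [0, 2**(1/p)] bound for normalized histograms.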
def missing_values(self):
'''
Computes the values that must be missing from each
player's hand, based on when they have passed.
:return: a list of sets, each one containing the
values that must be missing from the
corresponding player's hand
'''
missing = [set() for _ in self.hands]
# replay the game from the beginning
board = dominoes.SkinnyBoard()
player = self.starting_player
for move in self.moves:
if move is None:
# pass - update the missing values
missing[player].update([board.left_end(), board.right_end()])
else:
# not a pass - update the board
board.add(*move)
# move on to the next player
player = next_player(player)
return missing | Computes the values that must be missing from each
player's hand, based on when they have passed.
:return: a list of sets, each one containing the
values that must be missing from the
corresponding player's hand | Below is the the instruction that describes the task:
### Input:
Computes the values that must be missing from each
player's hand, based on when they have passed.
:return: a list of sets, each one containing the
values that must be missing from the
corresponding player's hand
### Response:
def missing_values(self):
'''
Computes the values that must be missing from each
player's hand, based on when they have passed.
:return: a list of sets, each one containing the
values that must be missing from the
corresponding player's hand
'''
missing = [set() for _ in self.hands]
# replay the game from the beginning
board = dominoes.SkinnyBoard()
player = self.starting_player
for move in self.moves:
if move is None:
# pass - update the missing values
missing[player].update([board.left_end(), board.right_end()])
else:
# not a pass - update the board
board.add(*move)
# move on to the next player
player = next_player(player)
return missing |
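The inference being replayed is: whenever a player passes, both open ends of the board at that moment must be absent from that player's hand. A toy illustration of the bookkeeping, without the dominoes package (the ends and player index are made-up data):
missing = [set() for _ in range(4)]
# player 2 passes with board ends (4, 6), and again later with ends (4, 1)
for player, ends in [(2, (4, 6)), (2, (4, 1))]:
    missing[player].update(ends)
print(missing[2])  # {1, 4, 6}: values player 2 provably does not hold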
def is_defined_by_module(item, module, parent=None):
"""
Check if item is directly defined by a module.
This check may be prone to errors.
"""
flag = False
if isinstance(item, types.ModuleType):
if not hasattr(item, '__file__'):
try:
# hack for cv2 and xfeatures2d
import utool as ut
name = ut.get_modname_from_modpath(module.__file__)
flag = name in str(item)
except:
flag = False
else:
item_modpath = os.path.realpath(dirname(item.__file__))
mod_fpath = module.__file__.replace('.pyc', '.py')
if not mod_fpath.endswith('__init__.py'):
flag = False
else:
modpath = os.path.realpath(dirname(mod_fpath))
modpath = modpath.replace('.pyc', '.py')
flag = item_modpath.startswith(modpath)
elif hasattr(item, '_utinfo'):
# Capture case where there is a utool wrapper
orig_func = item._utinfo['orig_func']
flag = is_defined_by_module(orig_func, module, parent)
else:
if isinstance(item, staticmethod):
# static methods are a wrapper around a function
item = item.__func__
try:
func_globals = meta_util_six.get_funcglobals(item)
func_module_name = func_globals['__name__']
if func_module_name == 'line_profiler':
valid_names = dir(module)
if parent is not None:
valid_names += dir(parent)
if item.func_name in valid_names:
# hack to prevent small names
#if len(item.func_name) > 8:
if len(item.func_name) > 6:
flag = True
elif func_module_name == module.__name__:
flag = True
except AttributeError:
if hasattr(item, '__module__'):
flag = item.__module__ == module.__name__
return flag | Check if item is directly defined by a module.
This check may be prone to errors. | Below is the the instruction that describes the task:
### Input:
Check if item is directly defined by a module.
This check may be prone to errors.
### Response:
def is_defined_by_module(item, module, parent=None):
"""
Check if item is directly defined by a module.
This check may be prone to errors.
"""
flag = False
if isinstance(item, types.ModuleType):
if not hasattr(item, '__file__'):
try:
# hack for cv2 and xfeatures2d
import utool as ut
name = ut.get_modname_from_modpath(module.__file__)
flag = name in str(item)
except:
flag = False
else:
item_modpath = os.path.realpath(dirname(item.__file__))
mod_fpath = module.__file__.replace('.pyc', '.py')
if not mod_fpath.endswith('__init__.py'):
flag = False
else:
modpath = os.path.realpath(dirname(mod_fpath))
modpath = modpath.replace('.pyc', '.py')
flag = item_modpath.startswith(modpath)
elif hasattr(item, '_utinfo'):
# Capture case where there is a utool wrapper
orig_func = item._utinfo['orig_func']
flag = is_defined_by_module(orig_func, module, parent)
else:
if isinstance(item, staticmethod):
# static methods are a wrapper around a function
item = item.__func__
try:
func_globals = meta_util_six.get_funcglobals(item)
func_module_name = func_globals['__name__']
if func_module_name == 'line_profiler':
valid_names = dir(module)
if parent is not None:
valid_names += dir(parent)
if item.func_name in valid_names:
# hack to prevent small names
#if len(item.func_name) > 8:
if len(item.func_name) > 6:
flag = True
elif func_module_name == module.__name__:
flag = True
except AttributeError:
if hasattr(item, '__module__'):
flag = item.__module__ == module.__name__
return flag |
def describeSObject(self, sObjectsType):
'''
Describes metadata (field list and object properties) for the specified
object.
'''
self._setHeaders('describeSObject')
return self._sforce.service.describeSObject(sObjectsType) | Describes metadata (field list and object properties) for the specified
object. | Below is the the instruction that describes the task:
### Input:
Describes metadata (field list and object properties) for the specified
object.
### Response:
def describeSObject(self, sObjectsType):
'''
Describes metadata (field list and object properties) for the specified
object.
'''
self._setHeaders('describeSObject')
return self._sforce.service.describeSObject(sObjectsType) |
def can_start_on_cluster(nodes_status,
nodes,
start,
walltime):
"""Check if #nodes can be started on a given cluster.
This is intended to give a good enough approximation.
This can be used to prefilter possible reservation dates before submitting
them on oar.
"""
candidates = []
for node, status in nodes_status.items():
reservations = status.get("reservations", [])
# we search for the overlapping reservations
overlapping_reservations = []
for reservation in reservations:
queue = reservation.get("queue")
if queue == "besteffort":
# ignoring any besteffort reservation
continue
r_start = reservation.get("started_at",
reservation.get("scheduled_at"))
if r_start is None:
break
r_start = int(r_start)
r_end = r_start + int(reservation["walltime"])
# compute segment intersection
_intersect = min(r_end, start + walltime) - max(r_start, start)
if _intersect > 0:
overlapping_reservations.append(reservation)
if len(overlapping_reservations) == 0:
# this node can be accounted for a potential reservation
candidates.append(node)
if len(candidates) >= nodes:
return True
return False | Check if #nodes can be started on a given cluster.
This is intended to give a good enough approximation.
This can be used to prefilter possible reservation dates before submitting
them on oar. | Below is the the instruction that describes the task:
### Input:
Check if #nodes can be started on a given cluster.
This is intended to give a good enough approximation.
This can be used to prefilter possible reservation dates before submitting
them on oar.
### Response:
def can_start_on_cluster(nodes_status,
nodes,
start,
walltime):
"""Check if #nodes can be started on a given cluster.
This is intended to give a good enough approximation.
This can be used to prefilter possible reservation dates before submitting
them on oar.
"""
candidates = []
for node, status in nodes_status.items():
reservations = status.get("reservations", [])
# we search for the overlapping reservations
overlapping_reservations = []
for reservation in reservations:
queue = reservation.get("queue")
if queue == "besteffort":
# ignoring any besteffort reservation
continue
r_start = reservation.get("started_at",
reservation.get("scheduled_at"))
if r_start is None:
break
r_start = int(r_start)
r_end = r_start + int(reservation["walltime"])
# compute segment intersection
_intersect = min(r_end, start + walltime) - max(r_start, start)
if _intersect > 0:
overlapping_reservations.append(reservation)
if len(overlapping_reservations) == 0:
# this node can be accounted for a potential reservation
candidates.append(node)
if len(candidates) >= nodes:
return True
return False |
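The overlap test is the usual interval-intersection trick: the candidate slot [start, start + walltime) collides with a reservation [r_start, r_end) exactly when the minimum of the ends minus the maximum of the starts is positive. A small numeric check:
start, walltime = 100, 50      # candidate slot [100, 150)
r_start, r_end = 120, 200      # existing reservation [120, 200)
print(min(r_end, start + walltime) - max(r_start, start))  # 30 > 0: conflict
r_start, r_end = 160, 200      # reservation starting after the slot ends
print(min(r_end, start + walltime) - max(r_start, start))  # -10 <= 0: no conflict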
def new_from_tokens(self, *args, **kwargs):
"""
Takes in a name that has been split by spaces.
Names which are in [last, first] format need to be preprocessed.
The nickname must be in double quotes to be recognized as such.
This can take name parts in these orders:
first, middle, last, nick, suffix, honorific
first, middle, last, nick, suffix
first, middle, last, suffix, honorific
first, middle, last, honorific
first, middle, last, suffix
first, middle, last, nick
first, last, honorific
first, last, suffix
first, last, nick
first, middle, last
first, last
last
"""
if kwargs.get('allow_quoted_nicknames'):
args = [ x.strip() for x in args if not re.match(r'^[(]', x) ]
else:
args = [ x.strip() for x in args if not re.match(r'^[("]', x) ]
if len(args) > 2:
self.detect_and_fix_two_part_surname(args)
# set defaults
self.first = ''
self.last = ''
# the final few tokens should always be detectable, otherwise a last name
if len(args):
if self.is_an_honorific(args[-1]):
self.honorific = args.pop()
if not self.honorific[-1] == '.':
self.honorific += '.'
if self.is_a_suffix(args[-1]):
self.suffix = args.pop()
if re.match(r'[js]r(?!\.)', self.suffix, re.IGNORECASE):
self.suffix += '.'
if self.is_a_nickname(args[-1]):
self.nick = args.pop()
self.last = args.pop()
num_remaining_parts = len(args)
if num_remaining_parts == 3:
# if we've still got this many parts, we'll consider what's left as first name
# plus multi-part middle name
self.first = args[0]
self.middle = ' '.join(args[1:3])
elif num_remaining_parts == 2:
self.first, self.middle = args
if len(self.middle) == 1:
self.middle += '.'
elif num_remaining_parts == 1:
self.first = ' '.join(args)
if self.first and len(self.first) == 1:
self.first += '.'
return self | Takes in a name that has been split by spaces.
Names which are in [last, first] format need to be preprocessed.
The nickname must be in double quotes to be recognized as such.
This can take name parts in these orders:
first, middle, last, nick, suffix, honorific
first, middle, last, nick, suffix
first, middle, last, suffix, honorific
first, middle, last, honorific
first, middle, last, suffix
first, middle, last, nick
first, last, honorific
first, last, suffix
first, last, nick
first, middle, last
first, last
last | Below is the the instruction that describes the task:
### Input:
Takes in a name that has been split by spaces.
Names which are in [last, first] format need to be preprocessed.
The nickname must be in double quotes to be recognized as such.
This can take name parts in these orders:
first, middle, last, nick, suffix, honorific
first, middle, last, nick, suffix
first, middle, last, suffix, honorific
first, middle, last, honorific
first, middle, last, suffix
first, middle, last, nick
first, last, honorific
first, last, suffix
first, last, nick
first, middle, last
first, last
last
### Response:
def new_from_tokens(self, *args, **kwargs):
"""
Takes in a name that has been split by spaces.
Names which are in [last, first] format need to be preprocessed.
The nickname must be in double quotes to be recognized as such.
This can take name parts in these orders:
first, middle, last, nick, suffix, honorific
first, middle, last, nick, suffix
first, middle, last, suffix, honorific
first, middle, last, honorific
first, middle, last, suffix
first, middle, last, nick
first, last, honorific
first, last, suffix
first, last, nick
first, middle, last
first, last
last
"""
if kwargs.get('allow_quoted_nicknames'):
args = [ x.strip() for x in args if not re.match(r'^[(]', x) ]
else:
args = [ x.strip() for x in args if not re.match(r'^[("]', x) ]
if len(args) > 2:
self.detect_and_fix_two_part_surname(args)
# set defaults
self.first = ''
self.last = ''
# the final few tokens should always be detectable, otherwise a last name
if len(args):
if self.is_an_honorific(args[-1]):
self.honorific = args.pop()
if not self.honorific[-1] == '.':
self.honorific += '.'
if self.is_a_suffix(args[-1]):
self.suffix = args.pop()
if re.match(r'[js]r(?!\.)', self.suffix, re.IGNORECASE):
self.suffix += '.'
if self.is_a_nickname(args[-1]):
self.nick = args.pop()
self.last = args.pop()
num_remaining_parts = len(args)
if num_remaining_parts == 3:
# if we've still got this many parts, we'll consider what's left as first name
# plus multi-part middle name
self.first = args[0]
self.middle = ' '.join(args[1:3])
elif num_remaining_parts == 2:
self.first, self.middle = args
if len(self.middle) == 1:
self.middle += '.'
elif num_remaining_parts == 1:
self.first = ' '.join(args)
if self.first and len(self.first) == 1:
self.first += '.'
return self |
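The parser works from the end of the token list inwards: honorific, then suffix, then nickname, then the surname, with whatever remains split into first and middle names. A stripped-down sketch of that back-to-front strategy (the detection rule here is a naive placeholder, not the library's):
SUFFIXES = {'jr.', 'sr.', 'iii'}
def rough_split(tokens):
    tokens = list(tokens)
    suffix = tokens.pop() if tokens and tokens[-1].lower() in SUFFIXES else ''
    last = tokens.pop()
    first = tokens.pop(0) if tokens else ''
    middle = ' '.join(tokens)
    return first, middle, last, suffix
print(rough_split(['John', 'Quincy', 'Public', 'Jr.']))
# ('John', 'Quincy', 'Public', 'Jr.')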
def write(self, *data):
"""Augment write routine to keep track of current line"""
for l in data:
## print("XXX write: '%s'" % l)
for i in str(l):
if i == '\n':
self.current_line_number += 1
pass
pass
pass
return super(LineMapWalker, self).write(*data) | Augment write routine to keep track of current line | Below is the the instruction that describes the task:
### Input:
Augment write routine to keep track of current line
### Response:
def write(self, *data):
"""Augment write routine to keep track of current line"""
for l in data:
## print("XXX write: '%s'" % l)
for i in str(l):
if i == '\n':
self.current_line_number += 1
pass
pass
pass
return super(LineMapWalker, self).write(*data) |
def remove_not_valid_selections(self):
"""
update the value to remove any that are (no longer) valid
"""
value = [v for v in self.get_value() if self.valid_selection(v)]
self.set_value(value) | update the value to remove any that are (no longer) valid | Below is the the instruction that describes the task:
### Input:
update the value to remove any that are (no longer) valid
### Response:
def remove_not_valid_selections(self):
"""
update the value to remove any that are (no longer) valid
"""
value = [v for v in self.get_value() if self.valid_selection(v)]
self.set_value(value) |
def DataCopyWithOverlay(self, dcmfilelist, out_dir, overlays):
"""
Function makes 3D data from dicom file slices
:dcmfilelist list of sorted .dcm files
:overlays dictionary of binary overlays. {1:np.array([...]), 3:...}
:out_dir output directory
"""
dcmlist = dcmfilelist
# data3d = []
for i in range(len(dcmlist)):
onefile = dcmlist[i]
logger.info(onefile)
data = dicom.read_file(onefile)
for i_overlay in overlays.keys():
overlay3d = overlays[i_overlay]
data = self.encode_overlay_slice(data,
overlay3d[-1 - i, :, :],
i_overlay)
# construct output path
head, tail = os.path.split(os.path.normpath(onefile))
filename_out = os.path.join(out_dir, tail)
# save
data.save_as(filename_out) | Function makes 3D data from dicom file slices
:dcmfilelist list of sorted .dcm files
:overlays dictionary of binary overlays. {1:np.array([...]), 3:...}
:out_dir output directory | Below is the the instruction that describes the task:
### Input:
Function makes 3D data from dicom file slices
:dcmfilelist list of sorted .dcm files
:overlays dictionary of binary overlays. {1:np.array([...]), 3:...}
:out_dir output directory
### Response:
def DataCopyWithOverlay(self, dcmfilelist, out_dir, overlays):
"""
Function makes 3D data from dicom file slices
:dcmfilelist list of sorted .dcm files
:overlays dictionary of binary overlays. {1:np.array([...]), 3:...}
:out_dir output directory
"""
dcmlist = dcmfilelist
# data3d = []
for i in range(len(dcmlist)):
onefile = dcmlist[i]
logger.info(onefile)
data = dicom.read_file(onefile)
for i_overlay in overlays.keys():
overlay3d = overlays[i_overlay]
data = self.encode_overlay_slice(data,
overlay3d[-1 - i, :, :],
i_overlay)
# construct output path
head, tail = os.path.split(os.path.normpath(onefile))
filename_out = os.path.join(out_dir, tail)
# save
data.save_as(filename_out) |
def cluster(self, window):
""" Cluster the dict array, assuming it has the relevant Coinc colums,
time1, time2, stat, and timeslide_id
"""
# If no events, do nothing
pivot_ifo = self.attrs['pivot']
fixed_ifo = self.attrs['fixed']
if len(self.data['%s/time' % pivot_ifo]) == 0 or len(self.data['%s/time' % fixed_ifo]) == 0:
return self
from pycbc.events import cluster_coincs
interval = self.attrs['timeslide_interval']
cid = cluster_coincs(self.stat,
self.data['%s/time' % pivot_ifo],
self.data['%s/time' % fixed_ifo],
self.timeslide_id,
interval,
window)
return self.select(cid) | Cluster the dict array, assuming it has the relevant Coinc columns,
time1, time2, stat, and timeslide_id | Below is the the instruction that describes the task:
### Input:
Cluster the dict array, assuming it has the relevant Coinc columns,
time1, time2, stat, and timeslide_id
### Response:
def cluster(self, window):
""" Cluster the dict array, assuming it has the relevant Coinc colums,
time1, time2, stat, and timeslide_id
"""
# If no events, do nothing
pivot_ifo = self.attrs['pivot']
fixed_ifo = self.attrs['fixed']
if len(self.data['%s/time' % pivot_ifo]) == 0 or len(self.data['%s/time' % fixed_ifo]) == 0:
return self
from pycbc.events import cluster_coincs
interval = self.attrs['timeslide_interval']
cid = cluster_coincs(self.stat,
self.data['%s/time' % pivot_ifo],
self.data['%s/time' % fixed_ifo],
self.timeslide_id,
interval,
window)
return self.select(cid) |
def p_namespace(self, p):
'''namespace : NAMESPACE namespace_scope IDENTIFIER'''
p[0] = ast.Namespace(scope=p[2], name=p[3], lineno=p.lineno(1)) | namespace : NAMESPACE namespace_scope IDENTIFIER | Below is the the instruction that describes the task:
### Input:
namespace : NAMESPACE namespace_scope IDENTIFIER
### Response:
def p_namespace(self, p):
'''namespace : NAMESPACE namespace_scope IDENTIFIER'''
p[0] = ast.Namespace(scope=p[2], name=p[3], lineno=p.lineno(1)) |
def search_all(self, quiet=False):
'''a "show all" search that doesn't require a query
Parameters
==========
quiet: if quiet is True, we only are using the function to return
rows of results.
'''
results = []
for obj in self.bucket.objects.all():
subsrc = obj.Object()
# Metadata bug will capitalize all fields, workaround is to lowercase
# https://github.com/boto/boto3/issues/1709
metadata = dict((k.lower(), v) for k, v in subsrc.metadata.items())
size = ''
# MM-DD-YYYY
datestr = "%s-%s-%s" %(obj.last_modified.month,
obj.last_modified.day,
obj.last_modified.year)
if 'sizemb' in metadata:
size = '%sMB' % metadata['sizemb']
results.append([obj.key, datestr, size ])
if len(results) == 0:
bot.info("No container collections found.")
sys.exit(1)
if not quiet:
bot.info("Containers")
bot.table(results)
return results | a "show all" search that doesn't require a query
Parameters
==========
quiet: if quiet is True, we only are using the function to return
rows of results. | Below is the the instruction that describes the task:
### Input:
a "show all" search that doesn't require a query
Parameters
==========
quiet: if quiet is True, we only are using the function to return
rows of results.
### Response:
def search_all(self, quiet=False):
'''a "show all" search that doesn't require a query
Parameters
==========
quiet: if quiet is True, we only are using the function to return
rows of results.
'''
results = []
for obj in self.bucket.objects.all():
subsrc = obj.Object()
# Metadata bug will capitalize all fields, workaround is to lowercase
# https://github.com/boto/boto3/issues/1709
metadata = dict((k.lower(), v) for k, v in subsrc.metadata.items())
size = ''
# MM-DD-YYYY
datestr = "%s-%s-%s" %(obj.last_modified.month,
obj.last_modified.day,
obj.last_modified.year)
if 'sizemb' in metadata:
size = '%sMB' % metadata['sizemb']
results.append([obj.key, datestr, size ])
if len(results) == 0:
bot.info("No container collections found.")
sys.exit(1)
if not quiet:
bot.info("Containers")
bot.table(results)
return results |
def get_topclasses(cls):
"""Gets the base classes that are in pycbc."""
bases = [c for c in inspect.getmro(cls)
if c.__module__.startswith('pycbc') and c != cls]
return ', '.join(['{}.{}'.format(c.__module__, c.__name__) for c in bases]) | Gets the base classes that are in pycbc. | Below is the the instruction that describes the task:
### Input:
Gets the base classes that are in pycbc.
### Response:
def get_topclasses(cls):
"""Gets the base classes that are in pycbc."""
bases = [c for c in inspect.getmro(cls)
if c.__module__.startswith('pycbc') and c != cls]
return ', '.join(['{}.{}'.format(c.__module__, c.__name__) for c in bases]) |
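inspect.getmro walks the full method-resolution order, so the comprehension amounts to "ancestors whose module lives under pycbc, excluding the class itself". The same idea adapted to a self-contained example (filtering on __main__ instead of pycbc):
import inspect
class Base(dict):
    pass
class Child(Base):
    pass
# ancestors of Child defined in this module, excluding Child itself
bases = [c for c in inspect.getmro(Child)
         if c.__module__ == '__main__' and c is not Child]
print(', '.join('{}.{}'.format(c.__module__, c.__name__) for c in bases))
# __main__.Base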
def linear_set_layer(layer_size,
inputs,
context=None,
activation_fn=tf.nn.relu,
dropout=0.0,
name=None):
"""Basic layer type for doing funky things with sets.
Applies a linear transformation to each element in the input set.
If a context is supplied, it is concatenated with the inputs.
e.g. One can use global_pool_1d to get a representation of the set which
can then be used as the context for the next layer.
TODO: Add bias add (or control the biases used).
Args:
layer_size: Dimension to transform the input vectors to.
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
context: A tensor of shape [batch_size, context_dims] containing a global
statistic about the set.
activation_fn: The activation function to use.
dropout: Dropout probability.
name: name.
Returns:
Tensor of shape [batch_size, sequence_length, output_dims] containing the
sequences of transformed vectors.
"""
with tf.variable_scope(
name, default_name="linear_set_layer", values=[inputs]):
# Apply 1D convolution to apply linear filter to each element
# along the 2nd dimension.
outputs = conv1d(inputs, layer_size, 1, activation=None, name="set_conv")
# Apply the context if it exists.
if context is not None:
# Unfortunately tf doesn't support broadcasting via concat, but we can
# simply add the transformed context to get the same effect.
if len(context.get_shape().as_list()) == 2:
context = tf.expand_dims(context, axis=1)
cont_tfm = conv1d(
context, layer_size, 1, activation=None, name="cont_conv")
outputs += cont_tfm
if activation_fn is not None:
outputs = activation_fn(outputs)
if dropout != 0.0:
outputs = tf.nn.dropout(outputs, 1.0 - dropout)
return outputs | Basic layer type for doing funky things with sets.
Applies a linear transformation to each element in the input set.
If a context is supplied, it is concatenated with the inputs.
e.g. One can use global_pool_1d to get a representation of the set which
can then be used as the context for the next layer.
TODO: Add bias add (or control the biases used).
Args:
layer_size: Dimension to transform the input vectors to.
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
context: A tensor of shape [batch_size, context_dims] containing a global
statistic about the set.
activation_fn: The activation function to use.
dropout: Dropout probability.
name: name.
Returns:
Tensor of shape [batch_size, sequence_length, output_dims] containing the
sequences of transformed vectors. | Below is the the instruction that describes the task:
### Input:
Basic layer type for doing funky things with sets.
Applies a linear transformation to each element in the input set.
If a context is supplied, it is concatenated with the inputs.
e.g. One can use global_pool_1d to get a representation of the set which
can then be used as the context for the next layer.
TODO: Add bias add (or control the biases used).
Args:
layer_size: Dimension to transform the input vectors to.
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
context: A tensor of shape [batch_size, context_dims] containing a global
statistic about the set.
activation_fn: The activation function to use.
dropout: Dropout probability.
name: name.
Returns:
Tensor of shape [batch_size, sequence_length, output_dims] containing the
sequences of transformed vectors.
### Response:
def linear_set_layer(layer_size,
inputs,
context=None,
activation_fn=tf.nn.relu,
dropout=0.0,
name=None):
"""Basic layer type for doing funky things with sets.
Applies a linear transformation to each element in the input set.
If a context is supplied, it is concatenated with the inputs.
e.g. One can use global_pool_1d to get a representation of the set which
can then be used as the context for the next layer.
TODO: Add bias add (or control the biases used).
Args:
layer_size: Dimension to transform the input vectors to.
inputs: A tensor of shape [batch_size, sequence_length, input_dims]
containing the sequences of input vectors.
context: A tensor of shape [batch_size, context_dims] containing a global
statistic about the set.
activation_fn: The activation function to use.
dropout: Dropout probability.
name: name.
Returns:
Tensor of shape [batch_size, sequence_length, output_dims] containing the
sequences of transformed vectors.
"""
with tf.variable_scope(
name, default_name="linear_set_layer", values=[inputs]):
# Apply 1D convolution to apply linear filter to each element
# along the 2nd dimension.
outputs = conv1d(inputs, layer_size, 1, activation=None, name="set_conv")
# Apply the context if it exists.
if context is not None:
# Unfortunately tf doesn't support broadcasting via concat, but we can
# simply add the transformed context to get the same effect.
if len(context.get_shape().as_list()) == 2:
context = tf.expand_dims(context, axis=1)
cont_tfm = conv1d(
context, layer_size, 1, activation=None, name="cont_conv")
outputs += cont_tfm
if activation_fn is not None:
outputs = activation_fn(outputs)
if dropout != 0.0:
outputs = tf.nn.dropout(outputs, 1.0 - dropout)
return outputs |
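Functionally, the layer applies one shared linear map to every element of the set and, when a context vector is supplied, adds a second linear map of the context to every position (broadcast along the sequence axis). A plain-numpy sketch of that computation, ignoring dropout and using random weights purely as a shape check:
import numpy as np
batch, length, d_in, d_ctx, d_out = 2, 5, 4, 3, 8
inputs = np.random.randn(batch, length, d_in)
context = np.random.randn(batch, d_ctx)
W = np.random.randn(d_in, d_out)        # shared across all set elements
W_ctx = np.random.randn(d_ctx, d_out)   # applied to the global context
outputs = inputs @ W + (context @ W_ctx)[:, None, :]  # broadcast over length
outputs = np.maximum(outputs, 0.0)                     # relu
print(outputs.shape)                                   # (2, 5, 8)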
def _read_para_puzzle(self, code, cbit, clen, *, desc, length, version):
"""Read HIP PUZZLE parameter.
Structure of HIP PUZZLE parameter [RFC 5201][RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| #K, 1 byte | Lifetime | Opaque, 2 bytes |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Random #I, RHASH_len / 8 bytes |
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 puzzle.type Parameter Type
1 15 puzzle.critical Critical Bit
2 16 puzzle.length Length of Contents
4 32 puzzle.number Number of Verified Bits
5 40 puzzle.lifetime Lifetime
6 48 puzzle.opaque Opaque
8 64 puzzle.random Random Number
"""
if version == 1 and clen != 12:
raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format')
_numk = self._read_unpack(1)
_time = self._read_unpack(1)
_opak = self._read_fileng(2)
_rand = self._read_unpack(clen-4)
puzzle = dict(
type=desc,
critical=cbit,
length=clen,
number=_numk,
lifetime=2 ** (_time - 32),
opaque=_opak,
random=_rand,
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return puzzle | Read HIP PUZZLE parameter.
Structure of HIP PUZZLE parameter [RFC 5201][RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| #K, 1 byte | Lifetime | Opaque, 2 bytes |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Random #I, RHASH_len / 8 bytes |
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 puzzle.type Parameter Type
1 15 puzzle.critical Critical Bit
2 16 puzzle.length Length of Contents
4 32 puzzle.number Number of Verified Bits
5 40 puzzle.lifetime Lifetime
6 48 puzzle.opaque Opaque
8 64 puzzle.random Random Number | Below is the the instruction that describes the task:
### Input:
Read HIP PUZZLE parameter.
Structure of HIP PUZZLE parameter [RFC 5201][RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| #K, 1 byte | Lifetime | Opaque, 2 bytes |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Random #I, RHASH_len / 8 bytes |
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 puzzle.type Parameter Type
1 15 puzzle.critical Critical Bit
2 16 puzzle.length Length of Contents
4 32 puzzle.number Number of Verified Bits
5 40 puzzle.lifetime Lifetime
6 48 puzzle.opaque Opaque
8 64 puzzle.random Random Number
### Response:
def _read_para_puzzle(self, code, cbit, clen, *, desc, length, version):
"""Read HIP PUZZLE parameter.
Structure of HIP PUZZLE parameter [RFC 5201][RFC 7401]:
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Type | Length |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| #K, 1 byte | Lifetime | Opaque, 2 bytes |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| Random #I, RHASH_len / 8 bytes |
/ /
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
Octets Bits Name Description
0 0 puzzle.type Parameter Type
1 15 puzzle.critical Critical Bit
2 16 puzzle.length Length of Contents
4 32 puzzle.number Number of Verified Bits
5 40 puzzle.lifetime Lifetime
6 48 puzzle.opaque Opaque
8 64 puzzle.random Random Number
"""
if version == 1 and clen != 12:
raise ProtocolError(f'HIPv{version}: [Parano {code}] invalid format')
_numk = self._read_unpack(1)
_time = self._read_unpack(1)
_opak = self._read_fileng(2)
_rand = self._read_unpack(clen-4)
puzzle = dict(
type=desc,
critical=cbit,
length=clen,
number=_numk,
lifetime=2 ** (_time - 32),
opaque=_opak,
random=_rand,
)
_plen = length - clen
if _plen:
self._read_fileng(_plen)
return puzzle |
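Two details of the layout are worth spelling out: #K, Lifetime and the 2-byte Opaque field account for the fixed 4 bytes, the Random #I value fills the remaining clen - 4 bytes, and the Lifetime byte is decoded as 2**(value - 32) seconds, exactly as the code does. A quick numeric illustration of that decoding:
for raw in (32, 33, 36, 42):
    print(raw, '->', 2 ** (raw - 32), 'seconds')
# 32 -> 1, 33 -> 2, 36 -> 16, 42 -> 1024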
def xline(self):
"""
Interact with segy in crossline mode
Returns
-------
xline : Line or None
Raises
------
ValueError
If the file is unstructured
Notes
-----
.. versionadded:: 1.1
"""
if self.unstructured:
raise ValueError(self._unstructured_errmsg)
if self._xline is not None:
return self._xline
self._xline = Line(self,
self.xlines,
self._xline_length,
self._xline_stride,
self.offsets,
'crossline',
)
return self._xline | Interact with segy in crossline mode
Returns
-------
xline : Line or None
Raises
------
ValueError
If the file is unstructured
Notes
-----
.. versionadded:: 1.1 | Below is the the instruction that describes the task:
### Input:
Interact with segy in crossline mode
Returns
-------
xline : Line or None
Raises
------
ValueError
If the file is unstructured
Notes
-----
.. versionadded:: 1.1
### Response:
def xline(self):
"""
Interact with segy in crossline mode
Returns
-------
xline : Line or None
Raises
------
ValueError
If the file is unstructured
Notes
-----
.. versionadded:: 1.1
"""
if self.unstructured:
raise ValueError(self._unstructured_errmsg)
if self._xline is not None:
return self._xline
self._xline = Line(self,
self.xlines,
self._xline_length,
self._xline_stride,
self.offsets,
'crossline',
)
return self._xline |
def makeelement(E, tagname, **kwargs):
"""Internal function"""
if sys.version < '3':
try:
kwargs2 = {}
for k,v in kwargs.items():
kwargs2[k.encode('utf-8')] = v.encode('utf-8')
#return E._makeelement(tagname.encode('utf-8'), **{ k.encode('utf-8'): v.encode('utf-8') for k,v in kwargs.items() } ) #In one go fails on some older Python 2.6s
return E._makeelement(tagname.encode('utf-8'), **kwargs2 ) #pylint: disable=protected-access
except ValueError as e:
try:
#older versions of lxml may misbehave, compensate:
e = E._makeelement(tagname.encode('utf-8')) #pylint: disable=protected-access
for k,v in kwargs.items():
e.attrib[k.encode('utf-8')] = v
return e
except ValueError:
print(e,file=stderr)
print("tagname=",tagname,file=stderr)
print("kwargs=",kwargs,file=stderr)
raise e
else:
return E._makeelement(tagname,**kwargs) | Internal function | Below is the the instruction that describes the task:
### Input:
Internal function
### Response:
def makeelement(E, tagname, **kwargs):
"""Internal function"""
if sys.version < '3':
try:
kwargs2 = {}
for k,v in kwargs.items():
kwargs2[k.encode('utf-8')] = v.encode('utf-8')
#return E._makeelement(tagname.encode('utf-8'), **{ k.encode('utf-8'): v.encode('utf-8') for k,v in kwargs.items() } ) #In one go fails on some older Python 2.6s
return E._makeelement(tagname.encode('utf-8'), **kwargs2 ) #pylint: disable=protected-access
except ValueError as e:
try:
#older versions of lxml may misbehave, compensate:
e = E._makeelement(tagname.encode('utf-8')) #pylint: disable=protected-access
for k,v in kwargs.items():
e.attrib[k.encode('utf-8')] = v
return e
except ValueError:
print(e,file=stderr)
print("tagname=",tagname,file=stderr)
print("kwargs=",kwargs,file=stderr)
raise e
else:
return E._makeelement(tagname,**kwargs) |
def zero_pad_features(features: List[np.ndarray],
target_shape: tuple) -> List[np.ndarray]:
"""
Zero-pad a list of numpy arrays.
:param features: List of numpy arrays.
:param target_shape: Target shape of each numpy array in the list features. Note:
target_shape should be greater than the largest shapes in features.
:return: A list of padded numpy arrays.
"""
pad_features = []
for feature in features:
feature_shape = feature.shape
if len(feature_shape) < len(target_shape): # add extra dimensions
for i in range(len(target_shape) - len(feature_shape)):
feature = np.expand_dims(feature, axis=len(feature.shape) + 1)
feature_shape = feature.shape
elif len(feature_shape) > len(target_shape):
raise ValueError("Provided target shape must be bigger then the original "
"shape. (provided: {}, original {})".format(len(target_shape), len(feature_shape)))
diff_shape = np.subtract(target_shape, feature_shape) # pylint: disable=assignment-from-no-return
if np.any(diff_shape < 0):
raise ValueError("Provided target values must be bigger then the original "
"values for each dimension. (provided: {}, original {})".format(target_shape, feature_shape))
# pad format: ((before_1, after_1), ... (before_N, after_N))
diff_shape = [[0, d] for d in diff_shape] # pylint: disable=not-an-iterable
p = np.pad(feature, diff_shape, 'constant', constant_values=0)
pad_features.append(p)
return pad_features | Zero-pad a list of numpy arrays.
:param features: List of numpy arrays.
:param target_shape: Target shape of each numpy array in the list features. Note:
target_shape should be greater than the largest shapes in features.
:return: A list of padded numpy arrays. | Below is the the instruction that describes the task:
### Input:
Zero-pad a list of numpy arrays.
:param features: List of numpy arrays.
:param target_shape: Target shape of each numpy array in the list features. Note:
target_shape should be greater than the largest shapes in features.
:return: A list of padded numpy arrays.
### Response:
def zero_pad_features(features: List[np.ndarray],
target_shape: tuple) -> List[np.ndarray]:
"""
Zero-pad a list of numpy arrays.
:param features: List of numpy arrays.
:param target_shape: Target shape of each numpy array in the list features. Note:
target_shape should be greater than the largest shapes in features.
:return: A list of padded numpy arrays.
"""
pad_features = []
for feature in features:
feature_shape = feature.shape
if len(feature_shape) < len(target_shape): # add extra dimensions
for i in range(len(target_shape) - len(feature_shape)):
feature = np.expand_dims(feature, axis=len(feature.shape) + 1)
feature_shape = feature.shape
elif len(feature_shape) > len(target_shape):
raise ValueError("Provided target shape must be bigger then the original "
"shape. (provided: {}, original {})".format(len(target_shape), len(feature_shape)))
diff_shape = np.subtract(target_shape, feature_shape) # pylint: disable=assignment-from-no-return
if np.any(diff_shape < 0):
raise ValueError("Provided target values must be bigger then the original "
"values for each dimension. (provided: {}, original {})".format(target_shape, feature_shape))
# pad format: ((before_1, after_1), ... (before_N, after_N))
diff_shape = [[0, d] for d in diff_shape] # pylint: disable=not-an-iterable
p = np.pad(feature, diff_shape, 'constant', constant_values=0)
pad_features.append(p)
return pad_features |
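The pad-width structure handed to np.pad is a per-dimension list of [before, after] pairs, and here everything is padded at the end only. A small worked example, padding one 2x3 feature out to the 4x5 target:
import numpy as np
feature = np.arange(6).reshape(2, 3)
target_shape = (4, 5)
diff = [[0, t - s] for t, s in zip(target_shape, feature.shape)]  # [[0, 2], [0, 2]]
padded = np.pad(feature, diff, 'constant', constant_values=0)
print(padded.shape)  # (4, 5)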
def clickable(self):
"""
Property used for determining if the widget should be clickable by the user.
This is only true if the submenu of this widget is active and this widget is enabled.
The widget may be either disabled by setting this property or the :py:attr:`enabled` attribute.
"""
if not isinstance(self.submenu,Container):
return self.submenu.name == self.submenu.menu.activeSubMenu and self.submenu.menu.name == self.window.activeMenu and self.enabled
else:
return self.submenu.clickable and self.enabled | Property used for determining if the widget should be clickable by the user.
This is only true if the submenu of this widget is active and this widget is enabled.
The widget may be either disabled by setting this property or the :py:attr:`enabled` attribute. | Below is the the instruction that describes the task:
### Input:
Property used for determining if the widget should be clickable by the user.
This is only true if the submenu of this widget is active and this widget is enabled.
The widget may be either disabled by setting this property or the :py:attr:`enabled` attribute.
### Response:
def clickable(self):
"""
Property used for determining if the widget should be clickable by the user.
This is only true if the submenu of this widget is active and this widget is enabled.
The widget may be either disabled by setting this property or the :py:attr:`enabled` attribute.
"""
if not isinstance(self.submenu,Container):
return self.submenu.name == self.submenu.menu.activeSubMenu and self.submenu.menu.name == self.window.activeMenu and self.enabled
else:
return self.submenu.clickable and self.enabled |
def gen_random_company_name():
"""
Randomly generate a company name.
:returns:
* company_name: (string) company name
Example::
print('--- gen_random_company_name demo ---')
print(gen_random_company_name())
print('---')
Output::
--- gen_random_company_name demo ---
上海大升旅游质询有限责任公司
---
"""
region_info = ("北京,上海,广州,深圳,天津,成都,杭州,苏州,重庆,武汉,南京,大连,沈阳,长沙,郑州,西安,青岛,"
"无锡,济南,宁波,佛山,南通,哈尔滨,东莞,福州,长春,石家庄,烟台,合肥,唐山,常州,太原,昆明,"
"潍坊,南昌,泉州,温州,绍兴,嘉兴,厦门,贵阳,淄博,徐州,南宁,扬州,呼和浩特,鄂尔多斯,乌鲁木齐,"
"金华,台州,镇江,威海,珠海,东营,大庆,中山,盐城,包头,保定,济宁,泰州,廊坊,兰州,洛阳,宜昌,"
"沧州,临沂,泰安,鞍山,邯郸,惠州,江门,襄阳,湖州,吉林,芜湖,德州,聊城,漳州,株洲,淮安,榆林,"
"常德,咸阳,衡阳,滨州,柳州,遵义,菏泽,南阳,新乡,湛江,岳阳,郴州,许昌,连云港,枣庄,茂名,周口,"
"宿迁")
middle_word = ("泰宏本晶辉昌昌本同永康洪皇贵久圣正裕如恒长佳协义晶合优荣汇洪千东祥复昌皇久丰兴昌国裕亚大"
"荣康通仁元裕厚瑞如弘升久隆旺吉德谦长贵百久汇百伟升隆复飞佳隆浩发丰亨公荣复光福美禄欣丰大"
"祥晶宏中仁宏华隆盈旺仁顺春满美中谦瑞和圣多信合盛千亚晶祥鑫隆飞鑫优合本旺发久国汇百恒佳东"
"洪通恒大公中优广宝盈泰如合丰捷本伟华春元亚广中晶如浩仁汇亚永凯富富裕茂华中飞浩台美佳圣仁"
"成全润金庆百贵康仁茂皇东广荣宏荣新元康公升亨洪福伟永义巨国升进合耀巨润巨元发洪源寿仁发光"
"顺升凯全全辉欣成公裕康合禄兴汇顺浩贵晶捷东飞益福宏国禄元昌弘和满发巨宝生耀隆大欣昌佳本兴"
"吉生宝凯润新高和元亨巨久光益旺春巨鑫进东晶中飞兴中美丰同晶复耀进洪全兴汇宝捷伟仁安宏多庆"
"益生和干干福亚新复吉亚恒亚春德飞伟利庆华丰宏合德瑞进顺祥信合康富益全巨茂台谦厚台成福捷浩"
"信长飞长金利美昌满丰干佳美金洪昌富千和美旺旺晶春仁华中凯浩鼎泰辉新干高进辉同欣广庆吉益德"
"浩中润和春元生高进皇茂利同盈复复晶多巨圣弘捷公宝汇鑫成高新正和和巨祥光宏大丰欣恒昌昌厚合"
"庆泰丰干益和金洪复元顺捷金万辉全吉庆德瑞优长鼎顺汇顺欣飞浩荣祥光泰多春凯信进公优飞昌协美"
"多发中盈协成祥益昌汇泰春满千鼎东光优谦仁中飞生恒伟福晶宝信辉金皇升飞亨鑫安伟华元旺益大寿"
"皇元康耀久荣满协信凯谦宝巨丰正光发康康捷中源国多多康公利顺光辉如茂晶永大高成生裕裕和万干"
"飞全洪伟同发禄升欣盈高欣谦亨裕康宝复庆光皇源凯凯圣发东本辉寿捷茂和庆丰多宏亚万益公福捷升"
"福茂宝捷同复合隆中汇禄鑫中新德昌新大皇安东信瑞元皇皇洪瑞弘捷本鑫中亨亚广昌永宏润同成高利"
"台中生如百康旺巨福德春元通国成浩永康泰盛泰利生茂巨久昌佳复富隆通盈同庆皇顺如辉全旺捷皇长"
"全富广源恒鼎顺汇本百洪鼎进欣吉凯汇欣义东长禄捷浩益旺复弘昌生发伟荣高亨元聚广新复多富千中"
"兴佳升康成同贵宝生捷晶全泰全永旺发富康仁兴谦利茂亨洪佳洪元鼎全国本丰亨鑫弘富干寿春贵国成"
"盛大发久弘国大金生久高久益浩晶盈益瑞正丰百浩泰台合德昌昌美皇合隆裕东广亚国升益福旺高贵信"
"生汇多泰元厚瑞飞千顺盛如大德润新新顺润飞瑞优源宏千盛吉高大耀进信欣信利瑞荣升亨盈盛千合复"
"隆贵丰义公优荣宏广福华洪洪捷吉进盛盛裕国洪浩祥晶弘吉欣鼎德佳成和满台光复汇佳通浩昌欣康瑞"
"万亚谦兴福利千元皇瑞润禄信合长润捷中旺成金益公隆宏康亚禄隆通光广国义中优多富复盛庆千长永"
"国源安永千中正康发复协利皇亚协鑫义巨源中润旺高进巨新高协兴生福恒富国协捷盛同复巨千益长洪"
"亚欣美复康洪全高安进千汇通益美耀美台耀万康合洪禄中宏百凯华优鑫协泰兴裕欣进安茂丰光飞全飞"
"高康进同大洪永祥飞美满兴丰谦和鑫贵百洪通裕升干永升亨光德盛永金东鼎永裕佳和德仁荣辉同瑞恒"
"聚谦长广鑫金久庆国吉禄弘顺汇恒汇瑞隆洪光鼎复公鼎泰盛佳恒鼎中飞聚亚宏盈光安谦成合巨洪飞庆"
"久瑞正茂信协百生盛合国圣盛同同盈信宏禄仁大中皇宝德金台优长成成亚盛公美荣成昌久禄泰亚进台"
"辉佳凯安久本荣飞晶隆晶弘同丰辉华高光兴庆贵如耀飞仁宏欣皇洪宏金满鼎耀巨义德昌源中洪裕祥晶"
"本国金洪昌金源恒福万义久多谦高佳欣和凯本泰春贵大浩永寿昌禄金弘仁美久升亨辉久茂皇弘泰德成"
"宏美辉辉禄仁华晶春干圣长同耀光庆华晶生新辉鑫金满中千谦瑞祥昌茂复长新祥祥福同优佳恒千如兴"
"裕华凯康全贵巨旺祥捷厚贵富宏义盛谦同盛同益谦润东广千进辉升复昌聚吉飞飞元公台本华升美久长"
"庆亚升东正高弘亚庆和寿宏满万优伟浩新合聚庆万广寿东恒光圣润同高谦昌兴义仁安本捷公进康益金"
"庆正进正千辉和升本益高广中百新庆金同如鼎寿茂鼎庆茂瑞全禄辉美贵优丰益同信兴聚浩新协宝耀圣"
"晶盈飞安荣富千祥成源裕合兴佳裕旺金长禄亨本大德成亨皇通全华贵弘成福聚信福光盛丰满宏福益国"
"弘生弘源新万泰成生伟兴兴辉和大元和协通千宝协伟荣长禄晶盛欣隆新本复正盛和皇升万益高盈义裕"
"成仁巨弘千亚耀吉庆厚国新高利和润中捷亚信百合亨佳佳多信鑫永复公千佳捷元东宝协大贵本满泰长"
"协耀圣仁旺生干盛恒义多宏益协润长皇伟晶茂大辉谦多台高恒巨兴辉台华升满公升成元利利厚隆裕厚"
"高公通浩凯金皇庆新发宏大本谦升欣升华益巨益百辉亨辉成欣庆同晶瑞义久成佳利优进满康信盈东盛"
"华义公贵美宝信丰正谦旺华皇吉如鑫泰协全优福寿中生厚成生亚公弘顺千信祥和圣金华康德台顺全厚"
"协亨美万瑞美东飞万飞如长仁高全汇升宏利吉泰益发谦亚汇亚恒耀恒飞浩益通捷亨新恒百佳中成公圣"
"宏满鑫成旺禄元福凯百永东源庆耀万鼎公春昌广润全聚德旺洪隆宝伟亨合满隆进升盛东正新多进浩康"
"长合大耀和美厚如寿鑫禄德仁发庆光通义荣盈昌升荣优华国成欣大宏丰光亚复万光春鼎汇旺和辉辉伟"
"捷汇通寿耀益皇盛晶隆义同合益春通万飞弘如安信本利安复协庆吉新永久公鑫广同富源公宏台长辉耀"
"光千佳宝康祥盛富升顺亚吉皇美润仁广仁台瑞干隆美信优伟安生如成耀盛润升正升新公荣宏恒洪圣泰"
"弘升美益顺隆大生新茂复丰亚华恒仁弘富公美昌干永满汇如洪昌荣飞新谦万百丰进宝禄贵千生进大润"
"禄祥公金祥聚兴和旺盈晶百义协巨顺裕中发千辉亨美本元丰金盈盛新全国源和协富谦发万耀福大发浩"
"隆正宏升弘旺长德百发鼎金满春新成新台正弘润晶大盈茂厚富泰通厚协百源复广恒欣合圣本巨复多正"
"伟润高满凯仁凯高禄万本复信满德升茂金如富谦旺佳美盈千发宝禄进兴鼎丰圣广公进昌东润进优祥生"
"辉茂安顺正伟圣宝优庆厚新益亚鑫皇浩兴顺多生寿金益千丰旺义东光庆泰全协吉兴千瑞丰兴茂泰庆捷"
"丰升弘茂鼎润复永发多成美聚福贵合光亚聚庆大大万顺贵进光国顺飞耀佳合巨洪源祥聚百汇兴本洪荣"
"利春庆协成昌瑞同厚春百光国如升同仁佳合成复凯佳汇升鼎宝宝进洪和信昌康润源圣巨康同欣浩辉正"
"永汇泰禄弘鼎多厚和佳进荣如茂全贵祥飞祥祥汇禄合源盈如和庆利寿旺汇春盈荣洪宏凯宝润如洪金鼎"
"聚安和吉宏捷亚伟美洪元吉厚谦吉凯汇晶中义升协吉大益祥中鑫成正盛福满辉成亨福富益洪厚禄佳益"
"亨巨圣辉厚皇")
service_type = ("咨询,中介,科技服务,文化交流服务,技术服务,信息服务,零售贸易,制造,批发贸易,集团,餐饮服务,"
"餐饮管理,旅游质询,人事服务")
company_type = "股份有限公司,有限责任公司"
company_pattern = '{region_info}{middle_word}{service_type}{company_type}'
return company_pattern.format(region_info=random.choice(region_info.split(',')),
middle_word=''.join([random.choice(middle_word)
for _ in range(random.randint(2, 5))]),
service_type=random.choice(service_type.split(',')),
company_type=random.choice(company_type.split(','))) | Randomly generate a company name.
:returns:
* company_name: (string) company name
Example::
print('--- gen_random_company_name demo ---')
print(gen_random_company_name())
print('---')
Output::
--- gen_random_company_name demo ---
上海大升旅游质询有限责任公司
--- | Below is the the instruction that describes the task:
### Input:
Randomly generate a company name.
:returns:
* company_name: (string) company name
Example::
print('--- gen_random_company_name demo ---')
print(gen_random_company_name())
print('---')
Output::
--- gen_random_company_name demo ---
上海大升旅游质询有限责任公司
---
### Response:
def gen_random_company_name():
"""
Randomly generate a company name.
:returns:
* company_name: (string) company name
Example::
print('--- gen_random_company_name demo ---')
print(gen_random_company_name())
print('---')
Output::
--- gen_random_company_name demo ---
上海大升旅游质询有限责任公司
---
"""
region_info = ("北京,上海,广州,深圳,天津,成都,杭州,苏州,重庆,武汉,南京,大连,沈阳,长沙,郑州,西安,青岛,"
"无锡,济南,宁波,佛山,南通,哈尔滨,东莞,福州,长春,石家庄,烟台,合肥,唐山,常州,太原,昆明,"
"潍坊,南昌,泉州,温州,绍兴,嘉兴,厦门,贵阳,淄博,徐州,南宁,扬州,呼和浩特,鄂尔多斯,乌鲁木齐,"
"金华,台州,镇江,威海,珠海,东营,大庆,中山,盐城,包头,保定,济宁,泰州,廊坊,兰州,洛阳,宜昌,"
"沧州,临沂,泰安,鞍山,邯郸,惠州,江门,襄阳,湖州,吉林,芜湖,德州,聊城,漳州,株洲,淮安,榆林,"
"常德,咸阳,衡阳,滨州,柳州,遵义,菏泽,南阳,新乡,湛江,岳阳,郴州,许昌,连云港,枣庄,茂名,周口,"
"宿迁")
middle_word = ("泰宏本晶辉昌昌本同永康洪皇贵久圣正裕如恒长佳协义晶合优荣汇洪千东祥复昌皇久丰兴昌国裕亚大"
"荣康通仁元裕厚瑞如弘升久隆旺吉德谦长贵百久汇百伟升隆复飞佳隆浩发丰亨公荣复光福美禄欣丰大"
"祥晶宏中仁宏华隆盈旺仁顺春满美中谦瑞和圣多信合盛千亚晶祥鑫隆飞鑫优合本旺发久国汇百恒佳东"
"洪通恒大公中优广宝盈泰如合丰捷本伟华春元亚广中晶如浩仁汇亚永凯富富裕茂华中飞浩台美佳圣仁"
"成全润金庆百贵康仁茂皇东广荣宏荣新元康公升亨洪福伟永义巨国升进合耀巨润巨元发洪源寿仁发光"
"顺升凯全全辉欣成公裕康合禄兴汇顺浩贵晶捷东飞益福宏国禄元昌弘和满发巨宝生耀隆大欣昌佳本兴"
"吉生宝凯润新高和元亨巨久光益旺春巨鑫进东晶中飞兴中美丰同晶复耀进洪全兴汇宝捷伟仁安宏多庆"
"益生和干干福亚新复吉亚恒亚春德飞伟利庆华丰宏合德瑞进顺祥信合康富益全巨茂台谦厚台成福捷浩"
"信长飞长金利美昌满丰干佳美金洪昌富千和美旺旺晶春仁华中凯浩鼎泰辉新干高进辉同欣广庆吉益德"
"浩中润和春元生高进皇茂利同盈复复晶多巨圣弘捷公宝汇鑫成高新正和和巨祥光宏大丰欣恒昌昌厚合"
"庆泰丰干益和金洪复元顺捷金万辉全吉庆德瑞优长鼎顺汇顺欣飞浩荣祥光泰多春凯信进公优飞昌协美"
"多发中盈协成祥益昌汇泰春满千鼎东光优谦仁中飞生恒伟福晶宝信辉金皇升飞亨鑫安伟华元旺益大寿"
"皇元康耀久荣满协信凯谦宝巨丰正光发康康捷中源国多多康公利顺光辉如茂晶永大高成生裕裕和万干"
"飞全洪伟同发禄升欣盈高欣谦亨裕康宝复庆光皇源凯凯圣发东本辉寿捷茂和庆丰多宏亚万益公福捷升"
"福茂宝捷同复合隆中汇禄鑫中新德昌新大皇安东信瑞元皇皇洪瑞弘捷本鑫中亨亚广昌永宏润同成高利"
"台中生如百康旺巨福德春元通国成浩永康泰盛泰利生茂巨久昌佳复富隆通盈同庆皇顺如辉全旺捷皇长"
"全富广源恒鼎顺汇本百洪鼎进欣吉凯汇欣义东长禄捷浩益旺复弘昌生发伟荣高亨元聚广新复多富千中"
"兴佳升康成同贵宝生捷晶全泰全永旺发富康仁兴谦利茂亨洪佳洪元鼎全国本丰亨鑫弘富干寿春贵国成"
"盛大发久弘国大金生久高久益浩晶盈益瑞正丰百浩泰台合德昌昌美皇合隆裕东广亚国升益福旺高贵信"
"生汇多泰元厚瑞飞千顺盛如大德润新新顺润飞瑞优源宏千盛吉高大耀进信欣信利瑞荣升亨盈盛千合复"
"隆贵丰义公优荣宏广福华洪洪捷吉进盛盛裕国洪浩祥晶弘吉欣鼎德佳成和满台光复汇佳通浩昌欣康瑞"
"万亚谦兴福利千元皇瑞润禄信合长润捷中旺成金益公隆宏康亚禄隆通光广国义中优多富复盛庆千长永"
"国源安永千中正康发复协利皇亚协鑫义巨源中润旺高进巨新高协兴生福恒富国协捷盛同复巨千益长洪"
"亚欣美复康洪全高安进千汇通益美耀美台耀万康合洪禄中宏百凯华优鑫协泰兴裕欣进安茂丰光飞全飞"
"高康进同大洪永祥飞美满兴丰谦和鑫贵百洪通裕升干永升亨光德盛永金东鼎永裕佳和德仁荣辉同瑞恒"
"聚谦长广鑫金久庆国吉禄弘顺汇恒汇瑞隆洪光鼎复公鼎泰盛佳恒鼎中飞聚亚宏盈光安谦成合巨洪飞庆"
"久瑞正茂信协百生盛合国圣盛同同盈信宏禄仁大中皇宝德金台优长成成亚盛公美荣成昌久禄泰亚进台"
"辉佳凯安久本荣飞晶隆晶弘同丰辉华高光兴庆贵如耀飞仁宏欣皇洪宏金满鼎耀巨义德昌源中洪裕祥晶"
"本国金洪昌金源恒福万义久多谦高佳欣和凯本泰春贵大浩永寿昌禄金弘仁美久升亨辉久茂皇弘泰德成"
"宏美辉辉禄仁华晶春干圣长同耀光庆华晶生新辉鑫金满中千谦瑞祥昌茂复长新祥祥福同优佳恒千如兴"
"裕华凯康全贵巨旺祥捷厚贵富宏义盛谦同盛同益谦润东广千进辉升复昌聚吉飞飞元公台本华升美久长"
"庆亚升东正高弘亚庆和寿宏满万优伟浩新合聚庆万广寿东恒光圣润同高谦昌兴义仁安本捷公进康益金"
"庆正进正千辉和升本益高广中百新庆金同如鼎寿茂鼎庆茂瑞全禄辉美贵优丰益同信兴聚浩新协宝耀圣"
"晶盈飞安荣富千祥成源裕合兴佳裕旺金长禄亨本大德成亨皇通全华贵弘成福聚信福光盛丰满宏福益国"
"弘生弘源新万泰成生伟兴兴辉和大元和协通千宝协伟荣长禄晶盛欣隆新本复正盛和皇升万益高盈义裕"
"成仁巨弘千亚耀吉庆厚国新高利和润中捷亚信百合亨佳佳多信鑫永复公千佳捷元东宝协大贵本满泰长"
"协耀圣仁旺生干盛恒义多宏益协润长皇伟晶茂大辉谦多台高恒巨兴辉台华升满公升成元利利厚隆裕厚"
"高公通浩凯金皇庆新发宏大本谦升欣升华益巨益百辉亨辉成欣庆同晶瑞义久成佳利优进满康信盈东盛"
"华义公贵美宝信丰正谦旺华皇吉如鑫泰协全优福寿中生厚成生亚公弘顺千信祥和圣金华康德台顺全厚"
"协亨美万瑞美东飞万飞如长仁高全汇升宏利吉泰益发谦亚汇亚恒耀恒飞浩益通捷亨新恒百佳中成公圣"
"宏满鑫成旺禄元福凯百永东源庆耀万鼎公春昌广润全聚德旺洪隆宝伟亨合满隆进升盛东正新多进浩康"
"长合大耀和美厚如寿鑫禄德仁发庆光通义荣盈昌升荣优华国成欣大宏丰光亚复万光春鼎汇旺和辉辉伟"
"捷汇通寿耀益皇盛晶隆义同合益春通万飞弘如安信本利安复协庆吉新永久公鑫广同富源公宏台长辉耀"
"光千佳宝康祥盛富升顺亚吉皇美润仁广仁台瑞干隆美信优伟安生如成耀盛润升正升新公荣宏恒洪圣泰"
"弘升美益顺隆大生新茂复丰亚华恒仁弘富公美昌干永满汇如洪昌荣飞新谦万百丰进宝禄贵千生进大润"
"禄祥公金祥聚兴和旺盈晶百义协巨顺裕中发千辉亨美本元丰金盈盛新全国源和协富谦发万耀福大发浩"
"隆正宏升弘旺长德百发鼎金满春新成新台正弘润晶大盈茂厚富泰通厚协百源复广恒欣合圣本巨复多正"
"伟润高满凯仁凯高禄万本复信满德升茂金如富谦旺佳美盈千发宝禄进兴鼎丰圣广公进昌东润进优祥生"
"辉茂安顺正伟圣宝优庆厚新益亚鑫皇浩兴顺多生寿金益千丰旺义东光庆泰全协吉兴千瑞丰兴茂泰庆捷"
"丰升弘茂鼎润复永发多成美聚福贵合光亚聚庆大大万顺贵进光国顺飞耀佳合巨洪源祥聚百汇兴本洪荣"
"利春庆协成昌瑞同厚春百光国如升同仁佳合成复凯佳汇升鼎宝宝进洪和信昌康润源圣巨康同欣浩辉正"
"永汇泰禄弘鼎多厚和佳进荣如茂全贵祥飞祥祥汇禄合源盈如和庆利寿旺汇春盈荣洪宏凯宝润如洪金鼎"
"聚安和吉宏捷亚伟美洪元吉厚谦吉凯汇晶中义升协吉大益祥中鑫成正盛福满辉成亨福富益洪厚禄佳益"
"亨巨圣辉厚皇")
service_type = ("咨询,中介,科技服务,文化交流服务,技术服务,信息服务,零售贸易,制造,批发贸易,集团,餐饮服务,"
"餐饮管理,旅游质询,人事服务")
company_type = "股份有限公司,有限责任公司"
company_pattern = '{region_info}{middle_word}{service_type}{company_type}'
return company_pattern.format(region_info=random.choice(region_info.split(',')),
middle_word=''.join([random.choice(middle_word)
for _ in range(random.randint(2, 5))]),
service_type=random.choice(service_type.split(',')),
company_type=random.choice(company_type.split(','))) |
def data(self):
"""bytes: value data as a byte string.
Raises:
WinRegistryValueError: if the value data cannot be read.
"""
try:
return self._pyregf_value.data
except IOError as exception:
raise errors.WinRegistryValueError(
'Unable to read data from value: {0:s} with error: {1!s}'.format(
self._pyregf_value.name, exception)) | bytes: value data as a byte string.
Raises:
WinRegistryValueError: if the value data cannot be read. | Below is the the instruction that describes the task:
### Input:
bytes: value data as a byte string.
Raises:
WinRegistryValueError: if the value data cannot be read.
### Response:
def data(self):
"""bytes: value data as a byte string.
Raises:
WinRegistryValueError: if the value data cannot be read.
"""
try:
return self._pyregf_value.data
except IOError as exception:
raise errors.WinRegistryValueError(
'Unable to read data from value: {0:s} with error: {1!s}'.format(
self._pyregf_value.name, exception)) |
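A minimal usage sketch for the property above; `registry_value` is an illustrative stand-in for any object exposing it, and the `errors` module is assumed to be the same one referenced in the implementation.
# Illustrative only: read the raw bytes, tolerating unreadable values.
try:
    raw_data = registry_value.data
except errors.WinRegistryValueError as exception:
    print('Skipping unreadable value: {0!s}'.format(exception))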
def get_config(self):
"""
serialize to a dict all attributes except model weights
Returns
-------
dict
"""
self.update_network_description()
result = dict(self.__dict__)
result['_network'] = None
result['network_weights'] = None
result['network_weights_loader'] = None
result['prediction_cache'] = None
return result
| serialize to a dict all attributes except model weights
Returns
-------
dict
| Below is the instruction that describes the task:
### Input:
serialize to a dict all attributes except model weights
Returns
-------
dict
### Response:
def get_config(self):
"""
serialize to a dict all attributes except model weights
Returns
-------
dict
"""
self.update_network_description()
result = dict(self.__dict__)
result['_network'] = None
result['network_weights'] = None
result['network_weights_loader'] = None
result['prediction_cache'] = None
return result |
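A brief, hypothetical check of the behaviour described above; `predictor` stands in for any object exposing this get_config() method.
# Illustrative only: the heavyweight fields are nulled out in the returned dict.
config = predictor.get_config()
assert config['_network'] is None
assert config['network_weights'] is None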
def _initialize(self, runtime):
"""Common initializer for OsidManager and OsidProxyManager"""
if runtime is None:
raise NullArgument()
if self._my_runtime is not None:
raise IllegalState('this manager has already been initialized.')
self._my_runtime = runtime
config = runtime.get_configuration()
cf_public_key_param_id = Id('parameter:cloudFrontPublicKey@aws_adapter')
cf_private_key_param_id = Id('parameter:cloudFrontPrivateKey@aws_adapter')
cf_keypair_id_param_id = Id('parameter:cloudFrontSigningKeypairId@aws_adapter')
cf_private_key_file_param_id = Id('parameter:cloudFrontSigningPrivateKeyFile@aws_adapter')
cf_distro_param_id = Id('parameter:cloudFrontDistro@aws_adapter')
cf_distro_id_param_id = Id('parameter:cloudFrontDistroId@aws_adapter')
s3_public_key_param_id = Id('parameter:S3PublicKey@aws_adapter')
s3_private_key_param_id = Id('parameter:S3PrivateKey@aws_adapter')
s3_bucket_param_id = Id('parameter:S3Bucket@aws_adapter')
cf_public_key = config.get_value_by_parameter(cf_public_key_param_id).get_string_value()
cf_private_key = config.get_value_by_parameter(cf_private_key_param_id).get_string_value()
cf_keypair_id = config.get_value_by_parameter(cf_keypair_id_param_id).get_string_value()
cf_private_key_file = config.get_value_by_parameter(
cf_private_key_file_param_id).get_string_value()
cf_distro = config.get_value_by_parameter(cf_distro_param_id).get_string_value()
cf_distro_id = config.get_value_by_parameter(cf_distro_id_param_id).get_string_value()
s3_public_key = config.get_value_by_parameter(s3_public_key_param_id).get_string_value()
s3_private_key = config.get_value_by_parameter(s3_private_key_param_id).get_string_value()
s3_bucket = config.get_value_by_parameter(s3_bucket_param_id).get_string_value()
self._config_map['cloudfront_public_key'] = cf_public_key
self._config_map['cloudfront_private_key'] = cf_private_key
self._config_map['cloudfront_keypair_id'] = cf_keypair_id
self._config_map['cloudfront_private_key_file'] = cf_private_key_file
self._config_map['cloudfront_distro'] = cf_distro
self._config_map['cloudfront_distro_id'] = cf_distro_id
self._config_map['put_public_key'] = s3_public_key
self._config_map['put_private_key'] = s3_private_key
self._config_map['s3_bucket'] = s3_bucket
| Common initializer for OsidManager and OsidProxyManager
| Below is the instruction that describes the task:
### Input:
Common initializer for OsidManager and OsidProxyManager
### Response:
def _initialize(self, runtime):
"""Common initializer for OsidManager and OsidProxyManager"""
if runtime is None:
raise NullArgument()
if self._my_runtime is not None:
raise IllegalState('this manager has already been initialized.')
self._my_runtime = runtime
config = runtime.get_configuration()
cf_public_key_param_id = Id('parameter:cloudFrontPublicKey@aws_adapter')
cf_private_key_param_id = Id('parameter:cloudFrontPrivateKey@aws_adapter')
cf_keypair_id_param_id = Id('parameter:cloudFrontSigningKeypairId@aws_adapter')
cf_private_key_file_param_id = Id('parameter:cloudFrontSigningPrivateKeyFile@aws_adapter')
cf_distro_param_id = Id('parameter:cloudFrontDistro@aws_adapter')
cf_distro_id_param_id = Id('parameter:cloudFrontDistroId@aws_adapter')
s3_public_key_param_id = Id('parameter:S3PublicKey@aws_adapter')
s3_private_key_param_id = Id('parameter:S3PrivateKey@aws_adapter')
s3_bucket_param_id = Id('parameter:S3Bucket@aws_adapter')
cf_public_key = config.get_value_by_parameter(cf_public_key_param_id).get_string_value()
cf_private_key = config.get_value_by_parameter(cf_private_key_param_id).get_string_value()
cf_keypair_id = config.get_value_by_parameter(cf_keypair_id_param_id).get_string_value()
cf_private_key_file = config.get_value_by_parameter(
cf_private_key_file_param_id).get_string_value()
cf_distro = config.get_value_by_parameter(cf_distro_param_id).get_string_value()
cf_distro_id = config.get_value_by_parameter(cf_distro_id_param_id).get_string_value()
s3_public_key = config.get_value_by_parameter(s3_public_key_param_id).get_string_value()
s3_private_key = config.get_value_by_parameter(s3_private_key_param_id).get_string_value()
s3_bucket = config.get_value_by_parameter(s3_bucket_param_id).get_string_value()
self._config_map['cloudfront_public_key'] = cf_public_key
self._config_map['cloudfront_private_key'] = cf_private_key
self._config_map['cloudfront_keypair_id'] = cf_keypair_id
self._config_map['cloudfront_private_key_file'] = cf_private_key_file
self._config_map['cloudfront_distro'] = cf_distro
self._config_map['cloudfront_distro_id'] = cf_distro_id
self._config_map['put_public_key'] = s3_public_key
self._config_map['put_private_key'] = s3_private_key
self._config_map['s3_bucket'] = s3_bucket |
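A hedged fragment showing the parameter-lookup convention used above; `config` is assumed to be a runtime configuration object of the same kind, and the Id string follows the 'parameter:<name>@aws_adapter' pattern from the code.
# Illustrative only: fetch a single configured value the same way.
bucket_param_id = Id('parameter:S3Bucket@aws_adapter')
s3_bucket = config.get_value_by_parameter(bucket_param_id).get_string_value()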
def new(self, rows, columns=None):
"""
A shortcut for manually instantiating a new
:class:`~riak.ts_object.TsObject`
:param rows: A list of lists with timeseries data
:type rows: list
:param columns: A list of Column names and types. Optional.
:type columns: list
:rtype: :class:`~riak.ts_object.TsObject`
"""
from riak.ts_object import TsObject
return TsObject(self._client, self, rows, columns)
| A shortcut for manually instantiating a new
:class:`~riak.ts_object.TsObject`
:param rows: A list of lists with timeseries data
:type rows: list
:param columns: A list of Column names and types. Optional.
:type columns: list
:rtype: :class:`~riak.ts_object.TsObject`
| Below is the instruction that describes the task:
### Input:
A shortcut for manually instantiating a new
:class:`~riak.ts_object.TsObject`
:param rows: A list of lists with timeseries data
:type rows: list
:param columns: A list of Column names and types. Optional.
:type columns: list
:rtype: :class:`~riak.ts_object.TsObject`
### Response:
def new(self, rows, columns=None):
"""
A shortcut for manually instantiating a new
:class:`~riak.ts_object.TsObject`
:param rows: A list of lists with timeseries data
:type rows: list
:param columns: A list of Column names and types. Optional.
:type columns: list
:rtype: :class:`~riak.ts_object.TsObject`
"""
from riak.ts_object import TsObject
return TsObject(self._client, self, rows, columns) |
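A hedged usage sketch; the client, table name, and row values are illustrative, and the follow-up store() call assumes the usual Riak TS client flow rather than anything defined above.
# Illustrative only: `client` is an already-configured riak.RiakClient.
table = client.table('GeoCheckin')
rows = [['region-a', 'site-1', 1420113600000, 'cloudy', 12.5]]
ts_obj = table.new(rows)   # wraps the rows in a TsObject via the method above
ts_obj.store()             # assumed client call to persist them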
def predict(self, u=0):
"""
Predict next state (prior) using the Kalman filter state propagation
equations.
Parameters
----------
u : np.array, optional
Optional control vector. If non-zero, it is multiplied by B
to create the control input into the system.
"""
# x = Fx + Bu
self.x = dot(self.F, self.x) + dot(self.B, u)
# P = FPF' + Q
_, P2 = qr(np.hstack([dot(self.F, self._P1_2), self._Q1_2]).T)
self._P1_2 = P2[:self.dim_x, :self.dim_x].T
# copy prior
self.x_prior = np.copy(self.x)
self._P1_2_prior = np.copy(self._P1_2)
| Predict next state (prior) using the Kalman filter state propagation
equations.
Parameters
----------
u : np.array, optional
Optional control vector. If non-zero, it is multiplied by B
to create the control input into the system.
| Below is the instruction that describes the task:
### Input:
Predict next state (prior) using the Kalman filter state propagation
equations.
Parameters
----------
u : np.array, optional
Optional control vector. If non-zero, it is multiplied by B
to create the control input into the system.
### Response:
def predict(self, u=0):
"""
Predict next state (prior) using the Kalman filter state propagation
equations.
Parameters
----------
u : np.array, optional
Optional control vector. If non-zero, it is multiplied by B
to create the control input into the system.
"""
# x = Fx + Bu
self.x = dot(self.F, self.x) + dot(self.B, u)
# P = FPF' + Q
_, P2 = qr(np.hstack([dot(self.F, self._P1_2), self._Q1_2]).T)
self._P1_2 = P2[:self.dim_x, :self.dim_x].T
# copy prior
self.x_prior = np.copy(self.x)
self._P1_2_prior = np.copy(self._P1_2) |
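A minimal sketch of driving the propagation step, assuming a square-root Kalman filter object `kf` whose state vector, covariance square root, and process-noise square root were initialized elsewhere; the constant-velocity model and dimensions are assumptions.
import numpy as np

# Illustrative only: 2-state constant-velocity model with unit time step.
kf.F = np.array([[1.0, 1.0],
                 [0.0, 1.0]])   # state transition matrix
kf.B = np.zeros((2, 1))         # no control input in this example
kf.predict(u=0)
# kf.x_prior now holds F @ x, and the covariance square root was propagated
# through a QR factorization of [F @ P^(1/2), Q^(1/2)] (transposed), as in the body above.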
def start(name):
'''
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
'''
cmd = '/usr/sbin/svcadm enable -s -t {0}'.format(name)
retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
if not retcode:
return True
if retcode == 3:
# Return code 3 means there was a problem with the service
# A common case is being in the 'maintenance' state
# Attempt a clear and try one more time
clear_cmd = '/usr/sbin/svcadm clear {0}'.format(name)
__salt__['cmd.retcode'](clear_cmd, python_shell=False)
return not __salt__['cmd.retcode'](cmd, python_shell=False)
return False
| Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
| Below is the instruction that describes the task:
### Input:
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
### Response:
def start(name):
'''
Start the specified service
CLI Example:
.. code-block:: bash
salt '*' service.start <service name>
'''
cmd = '/usr/sbin/svcadm enable -s -t {0}'.format(name)
retcode = __salt__['cmd.retcode'](cmd, python_shell=False)
if not retcode:
return True
if retcode == 3:
# Return code 3 means there was a problem with the service
# A common case is being in the 'maintenance' state
# Attempt a clear and try one more time
clear_cmd = '/usr/sbin/svcadm clear {0}'.format(name)
__salt__['cmd.retcode'](clear_cmd, python_shell=False)
return not __salt__['cmd.retcode'](cmd, python_shell=False)
return False |
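A hedged sketch of calling the execution module remotely from Python with Salt's LocalClient; the minion id and service name are placeholders, and a running master/minion pair is assumed.
# Illustrative only.
import salt.client

local = salt.client.LocalClient()
result = local.cmd('minion1', 'service.start', ['ssh'])
print(result)   # e.g. {'minion1': True} when the service started cleanly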
def settings(self, **kwargs):
'''
Pass a load of settings into the canvas
'''
for k, v in kwargs.items():
setattr(self, k, v)
| Pass a load of settings into the canvas
| Below is the instruction that describes the task:
### Input:
Pass a load of settings into the canvas
### Response:
def settings(self, **kwargs):
'''
Pass a load of settings into the canvas
'''
for k, v in kwargs.items():
setattr(self, k, v) |
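A one-line hypothetical usage; `canvas` stands in for any object exposing the method above.
# Illustrative only: equivalent to setting the attributes directly.
canvas.settings(width=800, height=600)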
async def stream_as_text(stream):
"""
Given a stream of bytes or text, if any of the items in the stream
are bytes convert them to text.
This function can be removed once we return text streams
instead of byte streams.
"""
async for data in stream:
if not isinstance(data, six.text_type):
data = data.decode('utf-8', 'replace')
yield data
| Given a stream of bytes or text, if any of the items in the stream
are bytes convert them to text.
This function can be removed once we return text streams
instead of byte streams.
| Below is the instruction that describes the task:
### Input:
Given a stream of bytes or text, if any of the items in the stream
are bytes convert them to text.
This function can be removed once we return text streams
instead of byte streams.
### Response:
async def stream_as_text(stream):
"""
Given a stream of bytes or text, if any of the items in the stream
are bytes convert them to text.
This function can be removed once we return text streams
instead of byte streams.
"""
async for data in stream:
if not isinstance(data, six.text_type):
data = data.decode('utf-8', 'replace')
yield data |
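A small usage sketch; the fake byte stream is purely illustrative, and the helper itself assumes `six` is importable, as in its body above.
import asyncio

async def fake_stream():
    # Mix of bytes and text, which the helper normalizes to text.
    yield b'hello '
    yield 'world'

async def main():
    async for chunk in stream_as_text(fake_stream()):
        print(chunk, end='')   # every chunk arrives as str

asyncio.run(main())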
def pcpool(name, cvals):
"""
This entry point provides toolkit programmers a method for
programmatically inserting character data into the
kernel pool.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pcpool_c.html
:param name: The kernel pool name to associate with cvals.
:type name: str
:param cvals: An array of strings to insert into the kernel pool.
:type cvals: Array of str
"""
name = stypes.stringToCharP(name)
lenvals = ctypes.c_int(len(max(cvals, key=len)) + 1)
n = ctypes.c_int(len(cvals))
cvals = stypes.listToCharArray(cvals, lenvals, n)
libspice.pcpool_c(name, n, lenvals, cvals)
| This entry point provides toolkit programmers a method for
programmatically inserting character data into the
kernel pool.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pcpool_c.html
:param name: The kernel pool name to associate with cvals.
:type name: str
:param cvals: An array of strings to insert into the kernel pool.
:type cvals: Array of str
| Below is the instruction that describes the task:
### Input:
This entry point provides toolkit programmers a method for
programmatically inserting character data into the
kernel pool.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pcpool_c.html
:param name: The kernel pool name to associate with cvals.
:type name: str
:param cvals: An array of strings to insert into the kernel pool.
:type cvals: Array of str
### Response:
def pcpool(name, cvals):
"""
This entry point provides toolkit programmers a method for
programmatically inserting character data into the
kernel pool.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/pcpool_c.html
:param name: The kernel pool name to associate with cvals.
:type name: str
:param cvals: An array of strings to insert into the kernel pool.
:type cvals: Array of str
"""
name = stypes.stringToCharP(name)
lenvals = ctypes.c_int(len(max(cvals, key=len)) + 1)
n = ctypes.c_int(len(cvals))
cvals = stypes.listToCharArray(cvals, lenvals, n)
libspice.pcpool_c(name, n, lenvals, cvals) |
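A hedged usage sketch with SpiceyPy, whose public pcpool wrapper corresponds to the code above; the pool variable name and values are arbitrary.
import spiceypy as spice

# Insert three strings into the kernel pool under an arbitrary name.
spice.pcpool('EDITOR_TEST_STRINGS', ['alpha', 'beta', 'gamma'])

# gcpool(name, start, room) fetches them back from the pool.
values = spice.gcpool('EDITOR_TEST_STRINGS', 0, 10)
print(values)   # ['alpha', 'beta', 'gamma']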