code | docstring | text |
---|---|---|
def next_unwrittable_on_row(view, coords):
"""Return position of the next (in row) letter that is unwrittable"""
x, y = coords
maxx = max(view.keys(), key=itemgetter(0))[0]
for offset in range(x + 1, maxx):
letter = view[offset, y]
if letter not in REWRITABLE_LETTERS:
return offset
return None | Return position of the next (in row) letter that is unwrittable | Below is the instruction that describes the task:
### Input:
Return position of the next (in row) letter that is unwrittable
### Response:
def next_unwrittable_on_row(view, coords):
"""Return position of the next (in row) letter that is unwrittable"""
x, y = coords
maxx = max(view.keys(), key=itemgetter(0))[0]
for offset in range(x + 1, maxx):
letter = view[offset, y]
if letter not in REWRITABLE_LETTERS:
return offset
return None |
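A minimal usage sketch with the function above in scope. The toy view mapping and the REWRITABLE_LETTERS value below are hypothetical stand-ins (the real constant lives in the surrounding module); itemgetter comes from the standard library.
from operator import itemgetter
REWRITABLE_LETTERS = {' ', '.'}  # hypothetical stand-in for the module constant
view = {(0, 0): 'a', (1, 0): ' ', (2, 0): '.', (3, 0): 'b', (4, 0): ' '}
print(next_unwrittable_on_row(view, (0, 0)))  # 3 -- 'b' is the first non-rewritable letter right of x=0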
def notify(fn, opts, task, *args, **kwargs):
""" notify(on_success=True, on_failure=True, **kwargs)
Wraps a bound method of a task and guards its execution. Information about the execution (task
name, duration, etc) is collected and dispatched to all notification transports registered on
the wrapped task by adding :py:class:`law.NotifyParameter` parameters. Example:
.. code-block:: python
class MyTask(law.Task):
notify_mail = law.NotifyMailParameter()
@notify
# or
@notify(sender="[email protected]", recipient="[email protected]")
def run(self):
...
When the *notify_mail* parameter is *True*, a notification is sent to the configured email
address. Also see :ref:`config-notifications`.
"""
_task = get_task(task)
# get notification transports
transports = []
for param_name, param in _task.get_params():
if isinstance(param, NotifyParameter) and getattr(_task, param_name):
try:
transport = param.get_transport()
if transport:
transports.append(transport)
except Exception as e:
logger.warning("get_transport() failed for '{}' parameter: {}".format(
param_name, e))
# nothing to do when there is no transport
if not transports:
return fn(task, *args, **kwargs)
# guard the fn call and gather infos
error = None
t0 = time.time()
try:
return fn(task, *args, **kwargs)
except (Exception, KeyboardInterrupt) as e:
error = e
raise
finally:
success = error is None
# do nothing on KeyboardInterrupt, or when on_success / on_failure do not match the status
if isinstance(error, KeyboardInterrupt):
return
elif success and not opts["on_success"]:
return
elif not success and not opts["on_failure"]:
return
duration = human_time_diff(seconds=round(time.time() - t0, 1))
status_string = "succeeded" if success else "failed"
title = "Task {} {}!".format(_task.get_task_family(), status_string)
parts = collections.OrderedDict([
("Host", socket.gethostname()),
("Duration", duration),
("Last message", "-" if not len(_task._message_cache) else _task._message_cache[-1]),
("Task", str(_task)),
])
if not success:
parts["Traceback"] = traceback.format_exc()
message = "\n".join("{}: {}".format(*tpl) for tpl in parts.items())
# dispatch via all transports
for transport in transports:
fn = transport["func"]
raw = transport.get("raw", False)
try:
fn(success, title, parts.copy() if raw else message, **opts)
except Exception as e:
t = traceback.format_exc()
logger.warning("notification failed via transport '{}': {}\n{}".format(fn, e, t)) | notify(on_success=True, on_failure=True, **kwargs)
Wraps a bound method of a task and guards its execution. Information about the execution (task
name, duration, etc) is collected and dispatched to all notification transports registered on
the wrapped task by adding :py:class:`law.NotifyParameter` parameters. Example:
.. code-block:: python
class MyTask(law.Task):
notify_mail = law.NotifyMailParameter()
@notify
# or
@notify(sender="[email protected]", recipient="[email protected]")
def run(self):
...
When the *notify_mail* parameter is *True*, a notification is sent to the configured email
address. Also see :ref:`config-notifications`. | Below is the instruction that describes the task:
### Input:
notify(on_success=True, on_failure=True, **kwargs)
Wraps a bound method of a task and guards its execution. Information about the execution (task
name, duration, etc) is collected and dispatched to all notification transports registered on
the wrapped task by adding :py:class:`law.NotifyParameter` parameters. Example:
.. code-block:: python
class MyTask(law.Task):
notify_mail = law.NotifyMailParameter()
@notify
# or
@notify(sender="[email protected]", recipient="[email protected]")
def run(self):
...
When the *notify_mail* parameter is *True*, a notification is sent to the configured email
address. Also see :ref:`config-notifications`.
### Response:
def notify(fn, opts, task, *args, **kwargs):
""" notify(on_success=True, on_failure=True, **kwargs)
Wraps a bound method of a task and guards its execution. Information about the execution (task
name, duration, etc) is collected and dispatched to all notification transports registered on
the wrapped task by adding :py:class:`law.NotifyParameter` parameters. Example:
.. code-block:: python
class MyTask(law.Task):
notify_mail = law.NotifyMailParameter()
@notify
# or
@notify(sender="[email protected]", recipient="[email protected]")
def run(self):
...
When the *notify_mail* parameter is *True*, a notification is sent to the configured email
address. Also see :ref:`config-notifications`.
"""
_task = get_task(task)
# get notification transports
transports = []
for param_name, param in _task.get_params():
if isinstance(param, NotifyParameter) and getattr(_task, param_name):
try:
transport = param.get_transport()
if transport:
transports.append(transport)
except Exception as e:
logger.warning("get_transport() failed for '{}' parameter: {}".format(
param_name, e))
# nothing to do when there is no transport
if not transports:
return fn(task, *args, **kwargs)
# guard the fn call and gather infos
error = None
t0 = time.time()
try:
return fn(task, *args, **kwargs)
except (Exception, KeyboardInterrupt) as e:
error = e
raise
finally:
success = error is None
# do nothing on KeyboardInterrupt, or when on_success / on_failure do not match the status
if isinstance(error, KeyboardInterrupt):
return
elif success and not opts["on_success"]:
return
elif not success and not opts["on_failure"]:
return
duration = human_time_diff(seconds=round(time.time() - t0, 1))
status_string = "succeeded" if success else "failed"
title = "Task {} {}!".format(_task.get_task_family(), status_string)
parts = collections.OrderedDict([
("Host", socket.gethostname()),
("Duration", duration),
("Last message", "-" if not len(_task._message_cache) else _task._message_cache[-1]),
("Task", str(_task)),
])
if not success:
parts["Traceback"] = traceback.format_exc()
message = "\n".join("{}: {}".format(*tpl) for tpl in parts.items())
# dispatch via all transports
for transport in transports:
fn = transport["func"]
raw = transport.get("raw", False)
try:
fn(success, title, parts.copy() if raw else message, **opts)
except Exception as e:
t = traceback.format_exc()
logger.warning("notification failed via transport '{}': {}\n{}".format(fn, e, t)) |
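Judging from the dispatch loop above, a transport appears to be a dict with a "func" callable and an optional "raw" flag; a minimal hypothetical transport, sketched only from that loop's usage, could look like this:
def print_transport(success, title, content, **opts):
    # content is the rendered message string, or the parts dict when raw=True
    print(title)
    print(content)
transport = {"func": print_transport, "raw": False}
transport["func"](True, "Task MyTask succeeded!", "Host: worker01\nDuration: 5 seconds")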
def _pos_nt(pr, pos, stranded=False):
"""
Given a pileup read and a position, return the base that is covered by the
read at the given position if the position is covered.
Parameters
----------
pr : pysam.calignmentfile.PileupRead
Region of type chrom:start-end, chrom:start-end:strand, or [chrom,
pos : int
Zero-based position of the nucleotide of interest in genomic
coordinates.
stranded : boolean
Boolean indicating whether data is stranded and stranded nucleotide
should be returned. Assumes R1 read on reverse strand implies + strand
coverage etc.
Returns
-------
nt : str or None
If None, then the read did not cover the position. If not None, returns
the nucleotide at that position (with + or - appended to indicate strand
if desired).
"""
nt = None
bases = dict(zip(pr.alignment.get_reference_positions(),
list(pr.alignment.seq.upper())))
if pos in bases.keys():
nt = bases[pos]
if nt and stranded:
strand = None
if pr.alignment.is_read1 and pr.alignment.is_reverse:
strand = '+'
if pr.alignment.is_read2 and not pr.alignment.is_reverse:
strand = '+'
if pr.alignment.is_read1 and not pr.alignment.is_reverse:
nt = str(Seq(nt).reverse_complement())
strand = '-'
if pr.alignment.is_read2 and pr.alignment.is_reverse:
nt = str(Seq(nt).reverse_complement())
strand = '-'
nt = '{}{}'.format(nt, strand)
return nt | Given a pileup read and a position, return the base that is covered by the
read at the given position if the position is covered.
Parameters
----------
pr : pysam.calignmentfile.PileupRead
Region of type chrom:start-end, chrom:start-end:strand, or [chrom,
pos : int
Zero-based position of the nucleotide of interest in genomic
coordinates.
stranded : boolean
Boolean indicating whether data is stranded and stranded nucleotide
should be returned. Assumes R1 read on reverse strand implies + strand
coverage etc.
Returns
-------
nt : str or None
If None, then the read did not cover the position. If not None, returns
the nucleotide at that position (with + or - appended to indicate strand
if desired). | Below is the instruction that describes the task:
### Input:
Given a pileup read and a position, return the base that is covered by the
read at the given position if the position is covered.
Parameters
----------
pr : pysam.calignmentfile.PileupRead
Region of type chrom:start-end, chrom:start-end:strand, or [chrom,
pos : int
Zero-based position of the nucleotide of interest in genomic
coordinates.
stranded : boolean
Boolean indicating whether data is stranded and stranded nucleotide
should be returned. Assumes R1 read on reverse strand implies + strand
coverage etc.
Returns
-------
nt : str or None
If None, then the read did not cover the position. If not None, returns
the nucleotide at that position (with + or - appended to indicate strand
if desired).
### Response:
def _pos_nt(pr, pos, stranded=False):
"""
Given a pileup read and a position, return the base that is covered by the
read at the given position if the position is covered.
Parameters
----------
pr : pysam.calignmentfile.PileupRead
Region of type chrom:start-end, chrom:start-end:strand, or [chrom,
pos : int
Zero-based position of the nucleotide of interest in genomic
coordinates.
stranded : boolean
Boolean indicating whether data is stranded and stranded nucleotide
should be returned. Assumes R1 read on reverse strand implies + strand
coverage etc.
Returns
-------
nt : str or None
If None, then the read did not cover the position. If not None, returns
the nucleotide at that position (with + or - appended to indicate strand
if desired).
"""
nt = None
bases = dict(zip(pr.alignment.get_reference_positions(),
list(pr.alignment.seq.upper())))
if pos in bases.keys():
nt = bases[pos]
if nt and stranded:
strand = None
if pr.alignment.is_read1 and pr.alignment.is_reverse:
strand = '+'
if pr.alignment.is_read2 and not pr.alignment.is_reverse:
strand = '+'
if pr.alignment.is_read1 and not pr.alignment.is_reverse:
nt = str(Seq(nt).reverse_complement())
strand = '-'
if pr.alignment.is_read2 and pr.alignment.is_reverse:
nt = str(Seq(nt).reverse_complement())
strand = '-'
nt = '{}{}'.format(nt, strand)
return nt |
def search_list(text_list, pattern, flags=0):
"""
CommandLine:
python -m utool.util_list --test-search_list
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> text_list = ['ham', 'jam', 'eggs', 'spam']
>>> pattern = '.am'
>>> flags = 0
>>> (valid_index_list, valid_match_list) = ut.search_list(text_list, pattern, flags)
>>> result = str(valid_index_list)
>>> print(result)
[0, 1, 3]
"""
import re
import utool as ut
match_list = [re.search(pattern, text, flags=flags) for text in text_list]
valid_index_list = [index for index, match in enumerate(match_list) if match is not None]
valid_match_list = ut.take(match_list, valid_index_list)
return valid_index_list, valid_match_list | CommandLine:
python -m utool.util_list --test-search_list
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> text_list = ['ham', 'jam', 'eggs', 'spam']
>>> pattern = '.am'
>>> flags = 0
>>> (valid_index_list, valid_match_list) = ut.search_list(text_list, pattern, flags)
>>> result = str(valid_index_list)
>>> print(result)
[0, 1, 3] | Below is the instruction that describes the task:
### Input:
CommandLine:
python -m utool.util_list --test-search_list
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> text_list = ['ham', 'jam', 'eggs', 'spam']
>>> pattern = '.am'
>>> flags = 0
>>> (valid_index_list, valid_match_list) = ut.search_list(text_list, pattern, flags)
>>> result = str(valid_index_list)
>>> print(result)
[0, 1, 3]
### Response:
def search_list(text_list, pattern, flags=0):
"""
CommandLine:
python -m utool.util_list --test-search_list
Example:
>>> # ENABLE_DOCTEST
>>> import utool as ut
>>> text_list = ['ham', 'jam', 'eggs', 'spam']
>>> pattern = '.am'
>>> flags = 0
>>> (valid_index_list, valid_match_list) = ut.search_list(text_list, pattern, flags)
>>> result = str(valid_index_list)
>>> print(result)
[0, 1, 3]
"""
import re
import utool as ut
match_list = [re.search(pattern, text, flags=flags) for text in text_list]
valid_index_list = [index for index, match in enumerate(match_list) if match is not None]
valid_match_list = ut.take(match_list, valid_index_list)
return valid_index_list, valid_match_list |
def dameraulevenshtein(seq1, seq2):
"""Calculate the Damerau-Levenshtein distance between sequences.
This distance is the number of additions, deletions, substitutions,
and transpositions needed to transform the first sequence into the
second. Although generally used with strings, any sequences of
comparable objects will work.
Transpositions are exchanges of *consecutive* characters; all other
operations are self-explanatory.
This implementation is O(N*M) time and O(M) space, for N and M the
lengths of the two sequences.
>>> dameraulevenshtein('ba', 'abc')
2
>>> dameraulevenshtein('fee', 'deed')
2
It works with arbitrary sequences too:
>>> dameraulevenshtein('abcd', ['b', 'a', 'c', 'd', 'e'])
2
"""
# codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F
# Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.
# However, only the current and two previous rows are needed at once,
# so we only store those.
oneago = None
thisrow = list(range_(1, len(seq2) + 1)) + [0]
for x in range_(len(seq1)):
# Python lists wrap around for negative indices, so put the
# leftmost column at the *end* of the list. This matches with
# the zero-indexed strings and saves extra calculation.
twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]
for y in range_(len(seq2)):
delcost = oneago[y] + 1
addcost = thisrow[y - 1] + 1
subcost = oneago[y - 1] + (seq1[x] != seq2[y])
thisrow[y] = min(delcost, addcost, subcost)
# This block deals with transpositions
if (x > 0 and y > 0 and seq1[x] == seq2[y - 1] and
seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):
thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)
return thisrow[len(seq2) - 1] | Calculate the Damerau-Levenshtein distance between sequences.
This distance is the number of additions, deletions, substitutions,
and transpositions needed to transform the first sequence into the
second. Although generally used with strings, any sequences of
comparable objects will work.
Transpositions are exchanges of *consecutive* characters; all other
operations are self-explanatory.
This implementation is O(N*M) time and O(M) space, for N and M the
lengths of the two sequences.
>>> dameraulevenshtein('ba', 'abc')
2
>>> dameraulevenshtein('fee', 'deed')
2
It works with arbitrary sequences too:
>>> dameraulevenshtein('abcd', ['b', 'a', 'c', 'd', 'e'])
2 | Below is the instruction that describes the task:
### Input:
Calculate the Damerau-Levenshtein distance between sequences.
This distance is the number of additions, deletions, substitutions,
and transpositions needed to transform the first sequence into the
second. Although generally used with strings, any sequences of
comparable objects will work.
Transpositions are exchanges of *consecutive* characters; all other
operations are self-explanatory.
This implementation is O(N*M) time and O(M) space, for N and M the
lengths of the two sequences.
>>> dameraulevenshtein('ba', 'abc')
2
>>> dameraulevenshtein('fee', 'deed')
2
It works with arbitrary sequences too:
>>> dameraulevenshtein('abcd', ['b', 'a', 'c', 'd', 'e'])
2
### Response:
def dameraulevenshtein(seq1, seq2):
"""Calculate the Damerau-Levenshtein distance between sequences.
This distance is the number of additions, deletions, substitutions,
and transpositions needed to transform the first sequence into the
second. Although generally used with strings, any sequences of
comparable objects will work.
Transpositions are exchanges of *consecutive* characters; all other
operations are self-explanatory.
This implementation is O(N*M) time and O(M) space, for N and M the
lengths of the two sequences.
>>> dameraulevenshtein('ba', 'abc')
2
>>> dameraulevenshtein('fee', 'deed')
2
It works with arbitrary sequences too:
>>> dameraulevenshtein('abcd', ['b', 'a', 'c', 'd', 'e'])
2
"""
# codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F
# Conceptually, this is based on a len(seq1) + 1 * len(seq2) + 1 matrix.
# However, only the current and two previous rows are needed at once,
# so we only store those.
oneago = None
thisrow = list(range_(1, len(seq2) + 1)) + [0]
for x in range_(len(seq1)):
# Python lists wrap around for negative indices, so put the
# leftmost column at the *end* of the list. This matches with
# the zero-indexed strings and saves extra calculation.
twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]
for y in range_(len(seq2)):
delcost = oneago[y] + 1
addcost = thisrow[y - 1] + 1
subcost = oneago[y - 1] + (seq1[x] != seq2[y])
thisrow[y] = min(delcost, addcost, subcost)
# This block deals with transpositions
if (x > 0 and y > 0 and seq1[x] == seq2[y - 1] and
seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]):
thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)
return thisrow[len(seq2) - 1] |
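A quick illustration of the transposition branch, the one place this distance differs from plain Levenshtein; range_ is assumed to be a Python 2/3 compatibility alias for range.
range_ = range  # assumed compatibility alias used by the implementation
print(dameraulevenshtein('ab', 'ba'))  # 1 -- one transposition of consecutive characters
print(dameraulevenshtein('ab', 'ab'))  # 0 -- identical sequences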
def init(main_dir: Path, logfile_path: Path, log_level: str):
"""
Initialize the _downloader. TODO.
:param main_dir: main directory
:type main_dir: ~pathlib.Path
:param logfile_path: logfile path
:type logfile_path: ~pathlib.Path
:param log_level: logging level
:type log_level: str
"""
dynamic_data.reset()
dynamic_data.init_dirs(main_dir, logfile_path)
dynamic_data.check_dirs()
tools.create_dir_rec(dynamic_data.MAIN_DIR)
tools.create_dir_rec(dynamic_data.TEMP_DIR)
tools.create_dir_rec(dynamic_data.DOWNLOAD_DIR)
tools.create_dir_rec(dynamic_data.SAVESTAT_DIR)
tools.create_dir_rec(Path.resolve(dynamic_data.LOGFILE_PATH).parent)
dynamic_data.LOG_LEVEL = log_level
logging.basicConfig(filename=dynamic_data.LOGFILE_PATH, filemode='a', level=dynamic_data.LOG_LEVEL,
format='%(asctime)s.%(msecs)03d | %(levelname)s - %(name)s | %(module)s.%(funcName)s: %('
'message)s',
datefmt='%Y.%m.%d %H:%M:%S')
logging.captureWarnings(True)
cores = multiprocessing.cpu_count()
dynamic_data.USING_CORES = min(4, max(1, cores - 1))
info = f"{static_data.NAME} {static_data.VERSION}\n\n" \
f"System: {platform.system()} - {platform.version()} - {platform.machine()} - {cores} cores\n" \
f"Python: {platform.python_version()} - {' - '.join(platform.python_build())}\n" \
f"Arguments: main={main_dir.resolve()} | logfile={logfile_path.resolve()} | loglevel={log_level}\n" \
f"Using cores: {dynamic_data.USING_CORES}\n\n"
with dynamic_data.LOGFILE_PATH.open(mode='w', encoding="utf8") as writer:
writer.write(info)
dynamic_data.AVAIL_PLUGINS = APlugin.get_plugins() | Initialize the _downloader. TODO.
:param main_dir: main directory
:type main_dir: ~pathlib.Path
:param logfile_path: logfile path
:type logfile_path: ~pathlib.Path
:param log_level: logging level
:type log_level: str | Below is the instruction that describes the task:
### Input:
Initialize the _downloader. TODO.
:param main_dir: main directory
:type main_dir: ~pathlib.Path
:param logfile_path: logfile path
:type logfile_path: ~pathlib.Path
:param log_level: logging level
:type log_level: str
### Response:
def init(main_dir: Path, logfile_path: Path, log_level: str):
"""
Initialize the _downloader. TODO.
:param main_dir: main directory
:type main_dir: ~pathlib.Path
:param logfile_path: logfile path
:type logfile_path: ~pathlib.Path
:param log_level: logging level
:type log_level: str
"""
dynamic_data.reset()
dynamic_data.init_dirs(main_dir, logfile_path)
dynamic_data.check_dirs()
tools.create_dir_rec(dynamic_data.MAIN_DIR)
tools.create_dir_rec(dynamic_data.TEMP_DIR)
tools.create_dir_rec(dynamic_data.DOWNLOAD_DIR)
tools.create_dir_rec(dynamic_data.SAVESTAT_DIR)
tools.create_dir_rec(Path.resolve(dynamic_data.LOGFILE_PATH).parent)
dynamic_data.LOG_LEVEL = log_level
logging.basicConfig(filename=dynamic_data.LOGFILE_PATH, filemode='a', level=dynamic_data.LOG_LEVEL,
format='%(asctime)s.%(msecs)03d | %(levelname)s - %(name)s | %(module)s.%(funcName)s: %('
'message)s',
datefmt='%Y.%m.%d %H:%M:%S')
logging.captureWarnings(True)
cores = multiprocessing.cpu_count()
dynamic_data.USING_CORES = min(4, max(1, cores - 1))
info = f"{static_data.NAME} {static_data.VERSION}\n\n" \
f"System: {platform.system()} - {platform.version()} - {platform.machine()} - {cores} cores\n" \
f"Python: {platform.python_version()} - {' - '.join(platform.python_build())}\n" \
f"Arguments: main={main_dir.resolve()} | logfile={logfile_path.resolve()} | loglevel={log_level}\n" \
f"Using cores: {dynamic_data.USING_CORES}\n\n"
with dynamic_data.LOGFILE_PATH.open(mode='w', encoding="utf8") as writer:
writer.write(info)
dynamic_data.AVAIL_PLUGINS = APlugin.get_plugins() |
def fwdl_status_output_fwdl_entries_blade_app(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
blade_app = ET.SubElement(fwdl_entries, "blade-app")
blade_app.text = kwargs.pop('blade_app')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def fwdl_status_output_fwdl_entries_blade_app(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fwdl_status = ET.Element("fwdl_status")
config = fwdl_status
output = ET.SubElement(fwdl_status, "output")
fwdl_entries = ET.SubElement(output, "fwdl-entries")
blade_app = ET.SubElement(fwdl_entries, "blade-app")
blade_app.text = kwargs.pop('blade_app')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def ashraeiam(aoi, b=0.05):
'''
Determine the incidence angle modifier using the ASHRAE transmission
model.
ashraeiam calculates the incidence angle modifier as developed in
[1], and adopted by ASHRAE (American Society of Heating,
Refrigeration, and Air Conditioning Engineers) [2]. The model has
been used by model programs such as PVSyst [3].
Note: For incident angles near 90 degrees, this model has a
discontinuity which has been addressed in this function.
Parameters
----------
aoi : numeric
The angle of incidence between the module normal vector and the
sun-beam vector in degrees. Angles of nan will result in nan.
b : float, default 0.05
A parameter to adjust the modifier as a function of angle of
incidence. Typical values are on the order of 0.05 [3].
Returns
-------
IAM : numeric
The incident angle modifier calculated as 1-b*(sec(aoi)-1) as
described in [2,3].
Returns zeros for all abs(aoi) >= 90 and for all IAM values that
would be less than 0.
References
----------
[1] Souka A.F., Safwat H.H., "Determination of the optimum
orientations for the double exposure flat-plate collector and its
reflections". Solar Energy vol. 10, pp 170-174. 1966.
[2] ASHRAE standard 93-77
[3] PVsyst Contextual Help.
http://files.pvsyst.com/help/index.html?iam_loss.htm retrieved on
September 10, 2012
See Also
--------
irradiance.aoi
physicaliam
'''
iam = 1 - b * ((1 / np.cos(np.radians(aoi)) - 1))
aoi_gte_90 = np.full_like(aoi, False, dtype='bool')
np.greater_equal(np.abs(aoi), 90, where=~np.isnan(aoi), out=aoi_gte_90)
iam = np.where(aoi_gte_90, 0, iam)
iam = np.maximum(0, iam)
if isinstance(iam, pd.Series):
iam = pd.Series(iam, index=aoi.index)
return iam | Determine the incidence angle modifier using the ASHRAE transmission
model.
ashraeiam calculates the incidence angle modifier as developed in
[1], and adopted by ASHRAE (American Society of Heating,
Refrigeration, and Air Conditioning Engineers) [2]. The model has
been used by model programs such as PVSyst [3].
Note: For incident angles near 90 degrees, this model has a
discontinuity which has been addressed in this function.
Parameters
----------
aoi : numeric
The angle of incidence between the module normal vector and the
sun-beam vector in degrees. Angles of nan will result in nan.
b : float, default 0.05
A parameter to adjust the modifier as a function of angle of
incidence. Typical values are on the order of 0.05 [3].
Returns
-------
IAM : numeric
The incident angle modifier calculated as 1-b*(sec(aoi)-1) as
described in [2,3].
Returns zeros for all abs(aoi) >= 90 and for all IAM values that
would be less than 0.
References
----------
[1] Souka A.F., Safwat H.H., "Determination of the optimum
orientations for the double exposure flat-plate collector and its
reflections". Solar Energy vol. 10, pp 170-174. 1966.
[2] ASHRAE standard 93-77
[3] PVsyst Contextual Help.
http://files.pvsyst.com/help/index.html?iam_loss.htm retrieved on
September 10, 2012
See Also
--------
irradiance.aoi
physicaliam | Below is the instruction that describes the task:
### Input:
Determine the incidence angle modifier using the ASHRAE transmission
model.
ashraeiam calculates the incidence angle modifier as developed in
[1], and adopted by ASHRAE (American Society of Heating,
Refrigeration, and Air Conditioning Engineers) [2]. The model has
been used by model programs such as PVSyst [3].
Note: For incident angles near 90 degrees, this model has a
discontinuity which has been addressed in this function.
Parameters
----------
aoi : numeric
The angle of incidence between the module normal vector and the
sun-beam vector in degrees. Angles of nan will result in nan.
b : float, default 0.05
A parameter to adjust the modifier as a function of angle of
incidence. Typical values are on the order of 0.05 [3].
Returns
-------
IAM : numeric
The incident angle modifier calculated as 1-b*(sec(aoi)-1) as
described in [2,3].
Returns zeros for all abs(aoi) >= 90 and for all IAM values that
would be less than 0.
References
----------
[1] Souka A.F., Safwat H.H., "Determination of the optimum
orientations for the double exposure flat-plate collector and its
reflections". Solar Energy vol. 10, pp 170-174. 1966.
[2] ASHRAE standard 93-77
[3] PVsyst Contextual Help.
http://files.pvsyst.com/help/index.html?iam_loss.htm retrieved on
September 10, 2012
See Also
--------
irradiance.aoi
physicaliam
### Response:
def ashraeiam(aoi, b=0.05):
'''
Determine the incidence angle modifier using the ASHRAE transmission
model.
ashraeiam calculates the incidence angle modifier as developed in
[1], and adopted by ASHRAE (American Society of Heating,
Refrigeration, and Air Conditioning Engineers) [2]. The model has
been used by model programs such as PVSyst [3].
Note: For incident angles near 90 degrees, this model has a
discontinuity which has been addressed in this function.
Parameters
----------
aoi : numeric
The angle of incidence between the module normal vector and the
sun-beam vector in degrees. Angles of nan will result in nan.
b : float, default 0.05
A parameter to adjust the modifier as a function of angle of
incidence. Typical values are on the order of 0.05 [3].
Returns
-------
IAM : numeric
The incident angle modifier calculated as 1-b*(sec(aoi)-1) as
described in [2,3].
Returns zeros for all abs(aoi) >= 90 and for all IAM values that
would be less than 0.
References
----------
[1] Souka A.F., Safwat H.H., "Determination of the optimum
orientations for the double exposure flat-plate collector and its
reflections". Solar Energy vol. 10, pp 170-174. 1966.
[2] ASHRAE standard 93-77
[3] PVsyst Contextual Help.
http://files.pvsyst.com/help/index.html?iam_loss.htm retrieved on
September 10, 2012
See Also
--------
irradiance.aoi
physicaliam
'''
iam = 1 - b * ((1 / np.cos(np.radians(aoi)) - 1))
aoi_gte_90 = np.full_like(aoi, False, dtype='bool')
np.greater_equal(np.abs(aoi), 90, where=~np.isnan(aoi), out=aoi_gte_90)
iam = np.where(aoi_gte_90, 0, iam)
iam = np.maximum(0, iam)
if isinstance(iam, pd.Series):
iam = pd.Series(iam, index=aoi.index)
return iam |
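A worked numeric check of the formula above, assuming the module-level numpy import from the source: at aoi = 60 degrees, sec(60) = 2, so IAM = 1 - 0.05 * (2 - 1) = 0.95.
import numpy as np
aoi = np.array([0.0, 60.0, 89.0, 95.0])
print(ashraeiam(aoi))  # approximately [1. 0.95 0. 0.] -- 89 deg clips to 0, 95 deg is >= 90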
def get_filters(self, filter_id=None, params=None):
"""
`<>`_
:arg filter_id: The ID of the filter to fetch
:arg from_: skips a number of filters
:arg size: specifies a max number of filters to get
"""
return self.transport.perform_request(
"GET", _make_path("_ml", "filters", filter_id), params=params
) | `<>`_
:arg filter_id: The ID of the filter to fetch
:arg from_: skips a number of filters
:arg size: specifies a max number of filters to get | Below is the instruction that describes the task:
### Input:
`<>`_
:arg filter_id: The ID of the filter to fetch
:arg from_: skips a number of filters
:arg size: specifies a max number of filters to get
### Response:
def get_filters(self, filter_id=None, params=None):
"""
`<>`_
:arg filter_id: The ID of the filter to fetch
:arg from_: skips a number of filters
:arg size: specifies a max number of filters to get
"""
return self.transport.perform_request(
"GET", _make_path("_ml", "filters", filter_id), params=params
) |
def check_sizes(size, width, height):
"""
Check that these arguments, if supplied, are consistent.
Return a (width, height) pair.
"""
if not size:
return width, height
if len(size) != 2:
raise ProtocolError(
"size argument should be a pair (width, height)")
if width is not None and width != size[0]:
raise ProtocolError(
"size[0] (%r) and width (%r) should match when both are used."
% (size[0], width))
if height is not None and height != size[1]:
raise ProtocolError(
"size[1] (%r) and height (%r) should match when both are used."
% (size[1], height))
return size | Check that these arguments, if supplied, are consistent.
Return a (width, height) pair. | Below is the instruction that describes the task:
### Input:
Check that these arguments, if supplied, are consistent.
Return a (width, height) pair.
### Response:
def check_sizes(size, width, height):
"""
Check that these arguments, if supplied, are consistent.
Return a (width, height) pair.
"""
if not size:
return width, height
if len(size) != 2:
raise ProtocolError(
"size argument should be a pair (width, height)")
if width is not None and width != size[0]:
raise ProtocolError(
"size[0] (%r) and width (%r) should match when both are used."
% (size[0], width))
if height is not None and height != size[1]:
raise ProtocolError(
"size[1] (%r) and height (%r) should match when both are used."
% (size[1], height))
return size |
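A short usage sketch; ProtocolError is assumed to be defined in the surrounding module and is only raised on the inconsistent paths.
print(check_sizes(None, 640, 480))          # (640, 480) -- no size given, pass-through
print(check_sizes((640, 480), None, None))  # (640, 480) -- size alone is fine
print(check_sizes((640, 480), 640, 480))    # (640, 480) -- redundant but consistent
# check_sizes((640, 480), 800, None) would raise ProtocolError for the mismatched width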
def prepare_axes(wave, flux, fig=None, ax_lower=(0.1, 0.1),
ax_dim=(0.85, 0.65)):
"""Create fig and axes if needed and lay out axes in fig."""
# Axes location in figure.
if not fig:
fig = plt.figure()
ax = fig.add_axes([ax_lower[0], ax_lower[1], ax_dim[0], ax_dim[1]])
ax.plot(wave, flux)
return fig, ax | Create fig and axes if needed and layout axes in fig. | Below is the the instruction that describes the task:
### Input:
Create fig and axes if needed and lay out axes in fig.
### Response:
def prepare_axes(wave, flux, fig=None, ax_lower=(0.1, 0.1),
ax_dim=(0.85, 0.65)):
"""Create fig and axes if needed and lay out axes in fig."""
# Axes location in figure.
if not fig:
fig = plt.figure()
ax = fig.add_axes([ax_lower[0], ax_lower[1], ax_dim[0], ax_dim[1]])
ax.plot(wave, flux)
return fig, ax |
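A hypothetical call, assuming the module-level matplotlib import from the source; the wavelength grid and Gaussian flux are made-up illustration data.
import numpy as np
import matplotlib.pyplot as plt
wave = np.linspace(4000.0, 7000.0, 200)         # hypothetical wavelength grid
flux = np.exp(-((wave - 5500.0) / 300.0) ** 2)  # hypothetical Gaussian line profile
fig, ax = prepare_axes(wave, flux)              # new figure, default axes placement
plt.show()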
def parallel_apply_transformations(x, transforms, black_border_size=0):
"""
Apply image transformations in parallel.
:param transforms: TODO
:param black_border_size: int, size of black border to apply
Returns:
Transformed images
"""
transforms = tf.convert_to_tensor(transforms, dtype=tf.float32)
x = _apply_black_border(x, black_border_size)
num_transforms = transforms.get_shape().as_list()[0]
im_shape = x.get_shape().as_list()[1:]
# Pass a copy of x and a transformation to each iteration of the map_fn
# callable
tiled_x = tf.reshape(
tf.tile(x, [num_transforms, 1, 1, 1]),
[num_transforms, -1] + im_shape)
elems = [tiled_x, transforms]
transformed_ims = tf.map_fn(
_apply_transformation,
elems,
dtype=tf.float32,
parallel_iterations=1, # Must be 1 to avoid keras race conditions
)
return transformed_ims | Apply image transformations in parallel.
:param transforms: TODO
:param black_border_size: int, size of black border to apply
Returns:
Transformed images | Below is the instruction that describes the task:
### Input:
Apply image transformations in parallel.
:param transforms: TODO
:param black_border_size: int, size of black border to apply
Returns:
Transformed images
### Response:
def parallel_apply_transformations(x, transforms, black_border_size=0):
"""
Apply image transformations in parallel.
:param transforms: TODO
:param black_border_size: int, size of black border to apply
Returns:
Transformed images
"""
transforms = tf.convert_to_tensor(transforms, dtype=tf.float32)
x = _apply_black_border(x, black_border_size)
num_transforms = transforms.get_shape().as_list()[0]
im_shape = x.get_shape().as_list()[1:]
# Pass a copy of x and a transformation to each iteration of the map_fn
# callable
tiled_x = tf.reshape(
tf.tile(x, [num_transforms, 1, 1, 1]),
[num_transforms, -1] + im_shape)
elems = [tiled_x, transforms]
transformed_ims = tf.map_fn(
_apply_transformation,
elems,
dtype=tf.float32,
parallel_iterations=1, # Must be 1 to avoid keras race conditions
)
return transformed_ims |
def __VersionIsSupported(desiredVersion, serviceVersionDescription):
"""
Private method that returns true if the service version description document
indicates that the desired version is supported
@param desiredVersion: The version we want to see if the server supports
(eg. vim.version.version2.
@type desiredVersion: string
@param serviceVersionDescription: A root ElementTree for vimServiceVersions.xml
or vimService.wsdl.
@type serviceVersionDescription: root ElementTree
"""
root = serviceVersionDescription
if root.tag == 'namespaces':
# serviceVersionDescription appears to be a vimServiceVersions.xml document
if root.get('version') != '1.0':
raise RuntimeError('vimServiceVersions.xml has version %s,' \
' which is not understood' % (root.get('version')))
desiredVersionId = versionIdMap[desiredVersion]
supportedVersion = None
for namespace in root.findall('namespace'):
versionId = namespace.findtext('version')
if versionId == desiredVersionId:
return True
else:
for versionId in namespace.findall('priorVersions/version'):
if versionId.text == desiredVersionId:
return True
else:
# serviceVersionDescription must be a vimService.wsdl document
wsdlNS = 'http://schemas.xmlsoap.org/wsdl/'
importElement = serviceVersionDescription.find('.//{%s}import' % wsdlNS)
supportedVersion = versionMap[importElement.get('namespace')[4:]]
if IsChildVersion(supportedVersion, desiredVersion):
return True
return False | Private method that returns true if the service version description document
indicates that the desired version is supported
@param desiredVersion: The version we want to see if the server supports
(eg. vim.version.version2.
@type desiredVersion: string
@param serviceVersionDescription: A root ElementTree for vimServiceVersions.xml
or vimService.wsdl.
@type serviceVersionDescription: root ElementTree | Below is the instruction that describes the task:
### Input:
Private method that returns true if the service version description document
indicates that the desired version is supported
@param desiredVersion: The version we want to see if the server supports
(eg. vim.version.version2.
@type desiredVersion: string
@param serviceVersionDescription: A root ElementTree for vimServiceVersions.xml
or vimService.wsdl.
@type serviceVersionDescription: root ElementTree
### Response:
def __VersionIsSupported(desiredVersion, serviceVersionDescription):
"""
Private method that returns true if the service version description document
indicates that the desired version is supported
@param desiredVersion: The version we want to see if the server supports
(eg. vim.version.version2.
@type desiredVersion: string
@param serviceVersionDescription: A root ElementTree for vimServiceVersions.xml
or vimService.wsdl.
@type serviceVersionDescription: root ElementTree
"""
root = serviceVersionDescription
if root.tag == 'namespaces':
# serviceVersionDescription appears to be a vimServiceVersions.xml document
if root.get('version') != '1.0':
raise RuntimeError('vimServiceVersions.xml has version %s,' \
' which is not understood' % (root.get('version')))
desiredVersionId = versionIdMap[desiredVersion]
supportedVersion = None
for namespace in root.findall('namespace'):
versionId = namespace.findtext('version')
if versionId == desiredVersionId:
return True
else:
for versionId in namespace.findall('priorVersions/version'):
if versionId.text == desiredVersionId:
return True
else:
# serviceVersionDescription must be a vimService.wsdl document
wsdlNS = 'http://schemas.xmlsoap.org/wsdl/'
importElement = serviceVersionDescription.find('.//{%s}import' % wsdlNS)
supportedVersion = versionMap[importElement.get('namespace')[4:]]
if IsChildVersion(supportedVersion, desiredVersion):
return True
return False |
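For reference, the first branch expects a vimServiceVersions.xml shaped roughly like the sketch below; the version-id strings are hypothetical, and versionIdMap/versionMap are assumed module-level lookup tables in the source.
import xml.etree.ElementTree as ET
root = ET.fromstring(
    '<namespaces version="1.0">'
    '<namespace><version>vim25/6.0</version>'
    '<priorVersions><version>vim25/5.5</version></priorVersions>'
    '</namespace></namespaces>')
print(root.findtext('namespace/version'))                                 # vim25/6.0
print([v.text for v in root.findall('namespace/priorVersions/version')])  # ['vim25/5.5']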
def host_events(self, host):
'''
Given a host name, this will return all task events executed on that host
'''
all_host_events = filter(lambda x: 'event_data' in x and 'host' in x['event_data'] and x['event_data']['host'] == host,
self.events)
return all_host_events | Given a host name, this will return all task events executed on that host | Below is the instruction that describes the task:
### Input:
Given a host name, this will return all task events executed on that host
### Response:
def host_events(self, host):
'''
Given a host name, this will return all task events executed on that host
'''
all_host_events = filter(lambda x: 'event_data' in x and 'host' in x['event_data'] and x['event_data']['host'] == host,
self.events)
return all_host_events |
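Note that on Python 3 the filter() above yields a lazy iterator; a self-contained sketch of the same predicate with hypothetical event dicts:
events = [
    {'event_data': {'host': 'web01', 'task': 'ping'}},
    {'event_data': {'host': 'db01', 'task': 'ping'}},
    {'uuid': 'header-event'},  # no event_data, filtered out safely
]
matching = filter(
    lambda x: 'event_data' in x and 'host' in x['event_data'] and x['event_data']['host'] == 'web01',
    events)
print(list(matching))  # [{'event_data': {'host': 'web01', 'task': 'ping'}}]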
def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a queue exists.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.exists myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.get_queue_url(QueueName=name)
except botocore.exceptions.ClientError as e:
missing_code = 'AWS.SimpleQueueService.NonExistentQueue'
if e.response.get('Error', {}).get('Code') == missing_code:
return {'result': False}
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True} | Check to see if a queue exists.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.exists myqueue region=us-east-1 | Below is the instruction that describes the task:
### Input:
Check to see if a queue exists.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.exists myqueue region=us-east-1
### Response:
def exists(name, region=None, key=None, keyid=None, profile=None):
'''
Check to see if a queue exists.
CLI Example:
.. code-block:: bash
salt myminion boto_sqs.exists myqueue region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
conn.get_queue_url(QueueName=name)
except botocore.exceptions.ClientError as e:
missing_code = 'AWS.SimpleQueueService.NonExistentQueue'
if e.response.get('Error', {}).get('Code') == missing_code:
return {'result': False}
return {'error': __utils__['boto3.get_error'](e)}
return {'result': True} |
def is_close_to_int(x):
"""
Check if value is close to an integer
Parameters
----------
x : float
Numeric value to check
Returns
-------
out : bool
"""
if not np.isfinite(x):
return False
return abs(x - nearest_int(x)) < 1e-10 | Check if value is close to an integer
Parameters
----------
x : float
Numeric value to check
Returns
-------
out : bool | Below is the instruction that describes the task:
### Input:
Check if value is close to an integer
Parameters
----------
x : float
Numeric value to check
Returns
-------
out : bool
### Response:
def is_close_to_int(x):
"""
Check if value is close to an integer
Parameters
----------
x : float
Numeric value to check
Returns
-------
out : bool
"""
if not np.isfinite(x):
return False
return abs(x - nearest_int(x)) < 1e-10 |
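A quick sketch of the tolerance behaviour; nearest_int is assumed here to be a simple round-to-nearest helper standing in for the module's own.
import numpy as np
def nearest_int(x):
    # assumed stand-in for the module's helper
    return int(round(x))
print(is_close_to_int(3.0 + 1e-12))   # True  -- within the 1e-10 tolerance
print(is_close_to_int(3.0001))        # False -- too far from an integer
print(is_close_to_int(float('nan')))  # False -- non-finite values are rejected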
def close_ns(symbol):
'''generates a closing names statement from a symbol'''
closing = ' '.join(['}' for x in symbol.module.name_parts])
name = '::'.join(symbol.module.name_parts)
return '{0} // namespace {1}'.format(closing, name) | generates a closing names statement from a symbol | Below is the instruction that describes the task:
### Input:
generates a closing names statement from a symbol
### Response:
def close_ns(symbol):
'''generates a closing names statement from a symbol'''
closing = ' '.join(['}' for x in symbol.module.name_parts])
name = '::'.join(symbol.module.name_parts)
return '{0} // namespace {1}'.format(closing, name) |
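A tiny demonstration with a hypothetical stand-in for the symbol object (the real one comes from the generator's module model):
from types import SimpleNamespace
symbol = SimpleNamespace(module=SimpleNamespace(name_parts=['com', 'example', 'api']))
print(close_ns(symbol))  # } } } // namespace com::example::api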
def open(self):
"""Open the working area
Returns
-------
None
"""
self.path = self._prepare_dir(self.topdir)
self._copy_executable(area_path=self.path)
self._save_logging_levels(area_path=self.path)
self._put_python_modules(modules=self.python_modules, area_path=self.path) | Open the working area
Returns
-------
None | Below is the instruction that describes the task:
### Input:
Open the working area
Returns
-------
None
### Response:
def open(self):
"""Open the working area
Returns
-------
None
"""
self.path = self._prepare_dir(self.topdir)
self._copy_executable(area_path=self.path)
self._save_logging_levels(area_path=self.path)
self._put_python_modules(modules=self.python_modules, area_path=self.path) |
def match(self, path):
'''Match a path and return ``None`` if no matching, otherwise
a dictionary of matched variables with values. If there is more
to be match in the path, the remaining string is placed in the
``__remaining__`` key of the dictionary.'''
match = self._regex.search(path)
if match is not None:
remaining = path[match.end():]
groups = match.groupdict()
result = {}
for name, value in groups.items():
try:
value = self._converters[name].to_python(value)
except Http404:
return
result[str(name)] = value
if remaining:
result['__remaining__'] = remaining
return result | Match a path and return ``None`` if no matching, otherwise
a dictionary of matched variables with values. If there is more
to be match in the path, the remaining string is placed in the
``__remaining__`` key of the dictionary. | Below is the instruction that describes the task:
### Input:
Match a path and return ``None`` if no matching, otherwise
a dictionary of matched variables with values. If there is more
to be match in the path, the remaining string is placed in the
``__remaining__`` key of the dictionary.
### Response:
def match(self, path):
'''Match a path and return ``None`` if no matching, otherwise
a dictionary of matched variables with values. If there is more
to be match in the path, the remaining string is placed in the
``__remaining__`` key of the dictionary.'''
match = self._regex.search(path)
if match is not None:
remaining = path[match.end():]
groups = match.groupdict()
result = {}
for name, value in groups.items():
try:
value = self._converters[name].to_python(value)
except Http404:
return
result[str(name)] = value
if remaining:
result['__remaining__'] = remaining
return result |
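A self-contained stand-in showing the same mechanics with a bare compiled regex; the converter step is omitted here, since it only post-processes the captured strings.
import re
route = re.compile(r'^/articles/(?P<year>\d{4})/')
path = '/articles/2023/my-slug'
m = route.search(path)
result = {str(name): value for name, value in m.groupdict().items()}
remaining = path[m.end():]
if remaining:
    result['__remaining__'] = remaining
print(result)  # {'year': '2023', '__remaining__': 'my-slug'}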
def beacon(config):
'''
Return status for requested information
'''
log.debug(config)
ctime = datetime.datetime.utcnow().isoformat()
if not config:
config = [{
'loadavg': ['all'],
'cpustats': ['all'],
'meminfo': ['all'],
'vmstats': ['all'],
'time': ['all'],
}]
if not isinstance(config, list):
# To support the old dictionary config format
config = [config]
ret = {}
for entry in config:
for func in entry:
ret[func] = {}
try:
data = __salt__['status.{0}'.format(func)]()
except salt.exceptions.CommandExecutionError as exc:
log.debug('Status beacon attempted to process function %s '
'but encountered error: %s', func, exc)
continue
if not isinstance(entry[func], list):
func_items = [entry[func]]
else:
func_items = entry[func]
for item in func_items:
if item == 'all':
ret[func] = data
else:
try:
try:
ret[func][item] = data[item]
except TypeError:
ret[func][item] = data[int(item)]
except KeyError as exc:
ret[func] = 'Status beacon is incorrectly configured: {0}'.format(exc)
return [{
'tag': ctime,
'data': ret,
}] | Return status for requested information | Below is the instruction that describes the task:
### Input:
Return status for requested information
### Response:
def beacon(config):
'''
Return status for requested information
'''
log.debug(config)
ctime = datetime.datetime.utcnow().isoformat()
if not config:
config = [{
'loadavg': ['all'],
'cpustats': ['all'],
'meminfo': ['all'],
'vmstats': ['all'],
'time': ['all'],
}]
if not isinstance(config, list):
# To support the old dictionary config format
config = [config]
ret = {}
for entry in config:
for func in entry:
ret[func] = {}
try:
data = __salt__['status.{0}'.format(func)]()
except salt.exceptions.CommandExecutionError as exc:
log.debug('Status beacon attempted to process function %s '
'but encountered error: %s', func, exc)
continue
if not isinstance(entry[func], list):
func_items = [entry[func]]
else:
func_items = entry[func]
for item in func_items:
if item == 'all':
ret[func] = data
else:
try:
try:
ret[func][item] = data[item]
except TypeError:
ret[func][item] = data[int(item)]
except KeyError as exc:
ret[func] = 'Status beacon is incorrectly configured: {0}'.format(exc)
return [{
'tag': ctime,
'data': ret,
}] |
def on_recording_change(self, enable):
"""Triggered when recording settings have changed.
in enable of type bool
TODO
"""
if not isinstance(enable, bool):
raise TypeError("enable can only be an instance of type bool")
self._call("onRecordingChange",
in_p=[enable]) | Triggered when recording settings have changed.
in enable of type bool
TODO | Below is the instruction that describes the task:
### Input:
Triggered when recording settings have changed.
in enable of type bool
TODO
### Response:
def on_recording_change(self, enable):
"""Triggered when recording settings have changed.
in enable of type bool
TODO
"""
if not isinstance(enable, bool):
raise TypeError("enable can only be an instance of type bool")
self._call("onRecordingChange",
in_p=[enable]) |
def _digits(self):
""" 0-9 """
self.number += self.key
try:
if self.compact is False:
self.top.body.focus_position = \
self.items.index(self.items_com[max(int(self.number) - 1, 0)])
else:
self.top.body.focus_position = \
self.items.index(self.items[max(int(self.number) - 1, 0)])
except IndexError:
self.number = self.number[:-1]
self.top.keypress(self.size, "") # Trick urwid into redisplaying the cursor
if self.number:
self._footer_start_thread("Selection: {}".format(self.number), 1) | 0-9 | Below is the instruction that describes the task:
### Input:
0-9
### Response:
def _digits(self):
""" 0-9 """
self.number += self.key
try:
if self.compact is False:
self.top.body.focus_position = \
self.items.index(self.items_com[max(int(self.number) - 1, 0)])
else:
self.top.body.focus_position = \
self.items.index(self.items[max(int(self.number) - 1, 0)])
except IndexError:
self.number = self.number[:-1]
self.top.keypress(self.size, "") # Trick urwid into redisplaying the cursor
if self.number:
self._footer_start_thread("Selection: {}".format(self.number), 1) |
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
) | Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None | Below is the instruction that describes the task:
### Input:
Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
### Response:
def add_value(self, value_type, value_min, value_max):
'''Add a tunable value to the ABC (fitness function must be
configured to handle it)
Args:
value_type (string): type of the value, 'int' or 'float'
value_min (int or float): minimum bound for the value
value_max (int or float): maximum bound for the value
Returns:
None
'''
if len(self._employers) > 0:
self._logger.log(
'warn',
'Adding a value after employers have been created'
)
value = (value_type, (value_min, value_max))
self._value_ranges.append(value)
self._limit = self._num_employers*len(self._value_ranges)
self._logger.log(
'debug',
'Limit set to {}'.format(self._limit)
) |
def createShadowHandlerWithName(self, shadowName, isPersistentSubscribe):
"""
**Description**
Create a device shadow handler using the specified shadow name and isPersistentSubscribe.
**Syntax**
.. code:: python
# Create a device shadow handler for shadow named "Bot1", using persistent subscription
Bot1Shadow = myAWSIoTMQTTShadowClient.createShadowHandlerWithName("Bot1", True)
# Create a device shadow handler for shadow named "Bot2", using non-persistent subscription
Bot2Shadow = myAWSIoTMQTTShadowClient.createShadowHandlerWithName("Bot2", False)
**Parameters**
*shadowName* - Name of the device shadow.
*isPersistentSubscribe* - Whether to unsubscribe from shadow response (accepted/rejected) topics
when there is a response. Will subscribe at the first time the shadow request is made and will
not unsubscribe if isPersistentSubscribe is set.
**Returns**
AWSIoTPythonSDK.core.shadow.deviceShadow.deviceShadow object, which exposes the device shadow interface.
"""
# Create and return a deviceShadow instance
return deviceShadow.deviceShadow(shadowName, isPersistentSubscribe, self._shadowManager) | **Description**
Create a device shadow handler using the specified shadow name and isPersistentSubscribe.
**Syntax**
.. code:: python
# Create a device shadow handler for shadow named "Bot1", using persistent subscription
Bot1Shadow = myAWSIoTMQTTShadowClient.createShadowHandlerWithName("Bot1", True)
# Create a device shadow handler for shadow named "Bot2", using non-persistent subscription
Bot2Shadow = myAWSIoTMQTTShadowClient.createShadowHandlerWithName("Bot2", False)
**Parameters**
*shadowName* - Name of the device shadow.
*isPersistentSubscribe* - Whether to unsubscribe from shadow response (accepted/rejected) topics
when there is a response. Will subscribe at the first time the shadow request is made and will
not unsubscribe if isPersistentSubscribe is set.
**Returns**
AWSIoTPythonSDK.core.shadow.deviceShadow.deviceShadow object, which exposes the device shadow interface. | Below is the instruction that describes the task:
### Input:
**Description**
Create a device shadow handler using the specified shadow name and isPersistentSubscribe.
**Syntax**
.. code:: python
# Create a device shadow handler for shadow named "Bot1", using persistent subscription
Bot1Shadow = myAWSIoTMQTTShadowClient.createShadowHandlerWithName("Bot1", True)
# Create a device shadow handler for shadow named "Bot2", using non-persistent subscription
Bot2Shadow = myAWSIoTMQTTShadowClient.createShadowHandlerWithName("Bot2", False)
**Parameters**
*shadowName* - Name of the device shadow.
*isPersistentSubscribe* - Whether to unsubscribe from shadow response (accepted/rejected) topics
when there is a response. Will subscribe at the first time the shadow request is made and will
not unsubscribe if isPersistentSubscribe is set.
**Returns**
AWSIoTPythonSDK.core.shadow.deviceShadow.deviceShadow object, which exposes the device shadow interface.
### Response:
def createShadowHandlerWithName(self, shadowName, isPersistentSubscribe):
"""
**Description**
Create a device shadow handler using the specified shadow name and isPersistentSubscribe.
**Syntax**
.. code:: python
# Create a device shadow handler for shadow named "Bot1", using persistent subscription
Bot1Shadow = myAWSIoTMQTTShadowClient.createShadowHandlerWithName("Bot1", True)
# Create a device shadow handler for shadow named "Bot2", using non-persistent subscription
Bot2Shadow = myAWSIoTMQTTShadowClient.createShadowHandlerWithName("Bot2", False)
**Parameters**
*shadowName* - Name of the device shadow.
*isPersistentSubscribe* - Whether to unsubscribe from shadow response (accepted/rejected) topics
when there is a response. Will subscribe at the first time the shadow request is made and will
not unsubscribe if isPersistentSubscribe is set.
**Returns**
AWSIoTPythonSDK.core.shadow.deviceShadow.deviceShadow object, which exposes the device shadow interface.
"""
# Create and return a deviceShadow instance
return deviceShadow.deviceShadow(shadowName, isPersistentSubscribe, self._shadowManager) |
async def browse(self, device):
"""
Launch file manager on the mount path of the specified device.
:param device: device object, block device path or mount path
:returns: whether the program was successfully launched.
"""
device = self._find_device(device)
if not device.is_mounted:
self._log.error(_("not browsing {0}: not mounted", device))
return False
if not self._browser:
self._log.error(_("not browsing {0}: no program", device))
return False
self._log.debug(_('opening {0} on {0.mount_paths[0]}', device))
self._browser(device.mount_paths[0])
self._log.info(_('opened {0} on {0.mount_paths[0]}', device))
return True | Launch file manager on the mount path of the specified device.
:param device: device object, block device path or mount path
:returns: whether the program was successfully launched. | Below is the instruction that describes the task:
### Input:
Launch file manager on the mount path of the specified device.
:param device: device object, block device path or mount path
:returns: whether the program was successfully launched.
### Response:
async def browse(self, device):
"""
Launch file manager on the mount path of the specified device.
:param device: device object, block device path or mount path
:returns: whether the program was successfully launched.
"""
device = self._find_device(device)
if not device.is_mounted:
self._log.error(_("not browsing {0}: not mounted", device))
return False
if not self._browser:
self._log.error(_("not browsing {0}: no program", device))
return False
self._log.debug(_('opening {0} on {0.mount_paths[0]}', device))
self._browser(device.mount_paths[0])
self._log.info(_('opened {0} on {0.mount_paths[0]}', device))
return True |
def _get_exec_binary(binary, kw):
"""
On win32, the subprocess module can only reliably resolve the
target binary if it's actually a binary; as for a Node.js script
it seems to only work iff shell=True was specified, presenting
a security risk. Resolve the target manually through which will
account for that.
The kw argument is the keyword arguments that will be passed into
whatever respective subprocess.Popen family of methods. The PATH
environment variable will be used if available.
"""
binary = which(binary, path=kw.get('env', {}).get('PATH'))
if binary is None:
raise_os_error(errno.ENOENT)
return binary | On win32, the subprocess module can only reliably resolve the
target binary if it's actually a binary; as for a Node.js script
it seems to only work iff shell=True was specified, presenting
a security risk. Resolve the target manually through which will
account for that.
The kw argument is the keyword arguments that will be passed into
whatever respective subprocess.Popen family of methods. The PATH
environment variable will be used if available. | Below is the instruction that describes the task:
### Input:
On win32, the subprocess module can only reliably resolve the
target binary if it's actually a binary; as for a Node.js script
it seems to only work iff shell=True was specified, presenting
a security risk. Resolve the target manually through which will
account for that.
The kw argument is the keyword arguments that will be passed into
whatever respective subprocess.Popen family of methods. The PATH
environment variable will be used if available.
### Response:
def _get_exec_binary(binary, kw):
"""
On win32, the subprocess module can only reliably resolve the
target binary if it's actually a binary; as for a Node.js script
it seems to only work iff shell=True was specified, presenting
    a security risk. Resolve the target manually through which(), which will
account for that.
The kw argument is the keyword arguments that will be passed into
whatever respective subprocess.Popen family of methods. The PATH
environment variable will be used if available.
"""
binary = which(binary, path=kw.get('env', {}).get('PATH'))
if binary is None:
raise_os_error(errno.ENOENT)
return binary |
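A sketch of the intended call pattern, assuming the same ``kw`` dict is later handed to ``subprocess.Popen``; the PATH value is a hypothetical example:

import subprocess

kw = {'env': {'PATH': r'C:\nodejs;C:\Windows\System32'}}  # hypothetical environment
binary = _get_exec_binary('npm', kw)  # e.g. resolves to C:\nodejs\npm.cmd on win32
proc = subprocess.Popen([binary, '--version'], stdout=subprocess.PIPE, **kw)
out, _ = proc.communicate()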
def verify(self):
"""
Verify that the information gathered from the on-the-wire
representation is of the right type.
This is supposed to be run before the info is deserialized.
:return: True/False
"""
for param in self.longs:
item = getattr(self, param)
if not item or isinstance(item, str):
continue
if isinstance(item, bytes):
item = item.decode('utf-8')
setattr(self, param, item)
try:
_ = base64url_to_long(item)
except Exception:
return False
else:
if [e for e in ['+', '/', '='] if e in item]:
return False
if self.kid:
if not isinstance(self.kid, str):
raise ValueError("kid of wrong value type")
return True | Verify that the information gathered from the on-the-wire
representation is of the right type.
This is supposed to be run before the info is deserialized.
        :return: True/False | Below is the instruction that describes the task:
### Input:
Verify that the information gathered from the on-the-wire
representation is of the right type.
This is supposed to be run before the info is deserialized.
:return: True/False
### Response:
def verify(self):
"""
Verify that the information gathered from the on-the-wire
representation is of the right type.
This is supposed to be run before the info is deserialized.
:return: True/False
"""
for param in self.longs:
item = getattr(self, param)
if not item or isinstance(item, str):
continue
if isinstance(item, bytes):
item = item.decode('utf-8')
setattr(self, param, item)
try:
_ = base64url_to_long(item)
except Exception:
return False
else:
if [e for e in ['+', '/', '='] if e in item]:
return False
if self.kid:
if not isinstance(self.kid, str):
raise ValueError("kid of wrong value type")
return True |
def present(name, brand, zonepath, properties=None, resources=None):
'''
Ensure a zone with certain properties and resources
name : string
name of the zone
brand : string
brand of the zone
zonepath : string
path of the zone
properties : list of key-value pairs
dict of properties
resources : list of key-value pairs
dict of resources
.. note::
If the zone does not exist it will not be installed.
You can use the ```zone.installed``` state for this.
.. note::
Default resource selectors:
- fs: dir
- net: mac-addr
- device: match
- rctl: name
- attr: name
- dataset: name
- admin: user
.. warning::
        Properties and resources will not be removed when they
        are absent from the state!
        For properties, simply set them to ```None```.
For resources, add the ```resource_prune``` property
and set it to ```True```. Also specify the
```resource_selector_property``` if the default is not
the one you want.
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': []}
## sanitize defaults
if not properties:
properties = []
if not resources:
resources = []
properties.append(OrderedDict({"brand": brand}))
properties.append(OrderedDict({"zonepath": zonepath}))
zones = __salt__['zoneadm.list'](installed=True, configured=True)
## test mode only has limited support
if __opts__['test']:
ret['result'] = None
        ret['comment'].append('Cannot determine if changes would happen to the zone {0}.'.format(name))
## create zone if needed
if name not in zones:
if __opts__['test']:
## we pretend we created the zone
res_create = {'status': True}
ret['comment'] = []
else:
## create and install
res_create = __salt__['zonecfg.create'](name, brand, zonepath)
if res_create['status']:
ret['result'] = True
ret['changes'][name] = 'created'
ret['comment'].append('The zone {0} was created.'.format(name))
if not __opts__['test']:
ret['result'] = True
if isinstance(properties, list):
for prop in properties:
if not isinstance(prop, OrderedDict) or len(prop) != 1:
log.warning('zone.present - failed to parse property: %s', prop)
continue
for key, value in prop.items():
res = None
if not value:
res = property_absent(name, key)
elif value:
res = property_present(name, key, value)
if res:
ret['result'] = ret['result'] if res['result'] else False
ret['comment'].append(res['comment'])
if res['changes']:
if 'property' not in ret['changes']:
ret['changes']['property'] = {}
ret['changes']['property'] = merge_dict(ret['changes']['property'], res['changes'])
if isinstance(resources, list):
for resource in resources:
            if not isinstance(resource, OrderedDict) or len(resource) != 1:
log.warning('zone.present - failed to parse resource: %s', resource)
continue
for key, value in resource.items():
zonecfg = __salt__['zonecfg.info'](name, show_all=True)
resource_cfg = {}
resource_cfg['resource_type'] = key
if isinstance(value, list):
for respv in value:
resource_cfg.update(dict(respv))
resource_prune = False
resource_selector_property = None
if 'resource_prune' in resource_cfg:
resource_prune = resource_cfg['resource_prune']
del resource_cfg['resource_prune']
if 'resource_selector_property' in resource_cfg:
resource_selector_property = resource_cfg['resource_selector_property']
del resource_cfg['resource_selector_property']
if not resource_selector_property and key in _zonecfg_resource_default_selectors:
resource_selector_property = _zonecfg_resource_default_selectors[key]
res = None
if resource_prune:
res = resource_absent(
name,
resource_cfg['resource_type'],
resource_selector_property=resource_selector_property,
resource_selector_value=resource_cfg[resource_selector_property] if resource_selector_property else None,
)
else:
resource_cfg['resource_selector_property'] = resource_selector_property
if resource_selector_property in resource_cfg:
resource_cfg['resource_selector_value'] = resource_cfg[resource_selector_property]
else:
resource_cfg['resource_selector_value'] = None
                    resource_cfg['name'] = name # we do this last because name can also be an attrib value
res = resource_present(**resource_cfg)
if res:
ret['result'] = ret['result'] if res['result'] else False
ret['comment'].append(res['comment'])
if res['changes']:
if 'resource' not in ret['changes']:
ret['changes']['resource'] = {}
ret['changes']['resource'] = merge_dict(ret['changes']['resource'], res['changes'])
if isinstance(ret['comment'], list):
ret['comment'] = "\n".join(ret['comment'])
return ret | Ensure a zone with certain properties and resources
name : string
name of the zone
brand : string
brand of the zone
zonepath : string
path of the zone
properties : list of key-value pairs
dict of properties
resources : list of key-value pairs
dict of resources
.. note::
If the zone does not exist it will not be installed.
You can use the ```zone.installed``` state for this.
.. note::
Default resource selectors:
- fs: dir
- net: mac-addr
- device: match
- rctl: name
- attr: name
- dataset: name
- admin: user
.. warning::
        Properties and resources will not be removed when they
        are absent from the state!
        For properties, simply set them to ```None```.
For resources, add the ```resource_prune``` property
and set it to ```True```. Also specify the
```resource_selector_property``` if the default is not
        the one you want. | Below is the instruction that describes the task:
### Input:
Ensure a zone with certain properties and resources
name : string
name of the zone
brand : string
brand of the zone
zonepath : string
path of the zone
properties : list of key-value pairs
dict of properties
resources : list of key-value pairs
dict of resources
.. note::
If the zone does not exist it will not be installed.
You can use the ```zone.installed``` state for this.
.. note::
Default resource selectors:
- fs: dir
- net: mac-addr
- device: match
- rctl: name
- attr: name
- dataset: name
- admin: user
.. warning::
        Properties and resources will not be removed when they
        are absent from the state!
        For properties, simply set them to ```None```.
For resources, add the ```resource_prune``` property
and set it to ```True```. Also specify the
```resource_selector_property``` if the default is not
the one you want.
### Response:
def present(name, brand, zonepath, properties=None, resources=None):
'''
Ensure a zone with certain properties and resources
name : string
name of the zone
brand : string
brand of the zone
zonepath : string
path of the zone
properties : list of key-value pairs
dict of properties
resources : list of key-value pairs
dict of resources
.. note::
If the zone does not exist it will not be installed.
You can use the ```zone.installed``` state for this.
.. note::
Default resource selectors:
- fs: dir
- net: mac-addr
- device: match
- rctl: name
- attr: name
- dataset: name
- admin: user
.. warning::
        Properties and resources will not be removed when they
        are absent from the state!
        For properties, simply set them to ```None```.
For resources, add the ```resource_prune``` property
and set it to ```True```. Also specify the
```resource_selector_property``` if the default is not
the one you want.
'''
ret = {'name': name,
'changes': {},
'result': None,
'comment': []}
## sanitize defaults
if not properties:
properties = []
if not resources:
resources = []
properties.append(OrderedDict({"brand": brand}))
properties.append(OrderedDict({"zonepath": zonepath}))
zones = __salt__['zoneadm.list'](installed=True, configured=True)
## test mode only has limited support
if __opts__['test']:
ret['result'] = None
        ret['comment'].append('Cannot determine if changes would happen to the zone {0}.'.format(name))
## create zone if needed
if name not in zones:
if __opts__['test']:
## we pretend we created the zone
res_create = {'status': True}
ret['comment'] = []
else:
## create and install
res_create = __salt__['zonecfg.create'](name, brand, zonepath)
if res_create['status']:
ret['result'] = True
ret['changes'][name] = 'created'
ret['comment'].append('The zone {0} was created.'.format(name))
if not __opts__['test']:
ret['result'] = True
if isinstance(properties, list):
for prop in properties:
if not isinstance(prop, OrderedDict) or len(prop) != 1:
log.warning('zone.present - failed to parse property: %s', prop)
continue
for key, value in prop.items():
res = None
if not value:
res = property_absent(name, key)
elif value:
res = property_present(name, key, value)
if res:
ret['result'] = ret['result'] if res['result'] else False
ret['comment'].append(res['comment'])
if res['changes']:
if 'property' not in ret['changes']:
ret['changes']['property'] = {}
ret['changes']['property'] = merge_dict(ret['changes']['property'], res['changes'])
if isinstance(resources, list):
for resource in resources:
            if not isinstance(resource, OrderedDict) or len(resource) != 1:
log.warning('zone.present - failed to parse resource: %s', resource)
continue
for key, value in resource.items():
zonecfg = __salt__['zonecfg.info'](name, show_all=True)
resource_cfg = {}
resource_cfg['resource_type'] = key
if isinstance(value, list):
for respv in value:
resource_cfg.update(dict(respv))
resource_prune = False
resource_selector_property = None
if 'resource_prune' in resource_cfg:
resource_prune = resource_cfg['resource_prune']
del resource_cfg['resource_prune']
if 'resource_selector_property' in resource_cfg:
resource_selector_property = resource_cfg['resource_selector_property']
del resource_cfg['resource_selector_property']
if not resource_selector_property and key in _zonecfg_resource_default_selectors:
resource_selector_property = _zonecfg_resource_default_selectors[key]
res = None
if resource_prune:
res = resource_absent(
name,
resource_cfg['resource_type'],
resource_selector_property=resource_selector_property,
resource_selector_value=resource_cfg[resource_selector_property] if resource_selector_property else None,
)
else:
resource_cfg['resource_selector_property'] = resource_selector_property
if resource_selector_property in resource_cfg:
resource_cfg['resource_selector_value'] = resource_cfg[resource_selector_property]
else:
resource_cfg['resource_selector_value'] = None
                    resource_cfg['name'] = name # we do this last because name can also be an attrib value
res = resource_present(**resource_cfg)
if res:
ret['result'] = ret['result'] if res['result'] else False
ret['comment'].append(res['comment'])
if res['changes']:
if 'resource' not in ret['changes']:
ret['changes']['resource'] = {}
ret['changes']['resource'] = merge_dict(ret['changes']['resource'], res['changes'])
if isinstance(ret['comment'], list):
ret['comment'] = "\n".join(ret['comment'])
return ret |
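A sketch of the data shapes this state consumes; the zone name, brand, and resource values are illustrative, and the Salt dunders (``__salt__``, ``__opts__``) must be injected by a real Salt run:

from collections import OrderedDict

properties = [
    OrderedDict({'autoboot': 'true'}),
    OrderedDict({'ip-type': 'exclusive'}),
]
resources = [
    # one single-key dict per resource; the value is a list of key/value pairs
    OrderedDict({'net': [
        OrderedDict({'physical': 'znic0'}),
        OrderedDict({'mac-addr': '02:08:20:a0:b0:c0'}),
    ]}),
]
ret = present('myzone', 'ipkg', '/zones/myzone', properties, resources)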
def global_position_int_cov_send(self, time_boot_ms, time_utc, estimator_type, lat, lon, alt, relative_alt, vx, vy, vz, covariance, force_mavlink1=False):
'''
The filtered global position (e.g. fused GPS and accelerometers). The
position is in GPS-frame (right-handed, Z-up). It is
designed as scaled integer message since the
resolution of float is not sufficient. NOTE: This
message is intended for onboard networks / companion
computers and higher-bandwidth links and optimized for
accuracy and completeness. Please use the
GLOBAL_POSITION_INT message for a minimal subset.
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
time_utc : Timestamp (microseconds since UNIX epoch) in UTC. 0 for unknown. Commonly filled by the precision time source of a GPS receiver. (uint64_t)
estimator_type : Class id of the estimator this estimate originated from. (uint8_t)
lat : Latitude, expressed as degrees * 1E7 (int32_t)
lon : Longitude, expressed as degrees * 1E7 (int32_t)
alt : Altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t)
relative_alt : Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
vx : Ground X Speed (Latitude), expressed as m/s (float)
vy : Ground Y Speed (Longitude), expressed as m/s (float)
vz : Ground Z Speed (Altitude), expressed as m/s (float)
covariance : Covariance matrix (first six entries are the first ROW, next six entries are the second row, etc.) (float)
'''
return self.send(self.global_position_int_cov_encode(time_boot_ms, time_utc, estimator_type, lat, lon, alt, relative_alt, vx, vy, vz, covariance), force_mavlink1=force_mavlink1) | The filtered global position (e.g. fused GPS and accelerometers). The
position is in GPS-frame (right-handed, Z-up). It is
designed as scaled integer message since the
resolution of float is not sufficient. NOTE: This
message is intended for onboard networks / companion
computers and higher-bandwidth links and optimized for
accuracy and completeness. Please use the
GLOBAL_POSITION_INT message for a minimal subset.
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
time_utc : Timestamp (microseconds since UNIX epoch) in UTC. 0 for unknown. Commonly filled by the precision time source of a GPS receiver. (uint64_t)
estimator_type : Class id of the estimator this estimate originated from. (uint8_t)
lat : Latitude, expressed as degrees * 1E7 (int32_t)
lon : Longitude, expressed as degrees * 1E7 (int32_t)
alt : Altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t)
relative_alt : Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
vx : Ground X Speed (Latitude), expressed as m/s (float)
vy : Ground Y Speed (Longitude), expressed as m/s (float)
vz : Ground Z Speed (Altitude), expressed as m/s (float)
                covariance                : Covariance matrix (first six entries are the first ROW, next six entries are the second row, etc.) (float) | Below is the instruction that describes the task:
### Input:
The filtered global position (e.g. fused GPS and accelerometers). The
position is in GPS-frame (right-handed, Z-up). It is
designed as scaled integer message since the
resolution of float is not sufficient. NOTE: This
message is intended for onboard networks / companion
computers and higher-bandwidth links and optimized for
accuracy and completeness. Please use the
GLOBAL_POSITION_INT message for a minimal subset.
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
time_utc : Timestamp (microseconds since UNIX epoch) in UTC. 0 for unknown. Commonly filled by the precision time source of a GPS receiver. (uint64_t)
estimator_type : Class id of the estimator this estimate originated from. (uint8_t)
lat : Latitude, expressed as degrees * 1E7 (int32_t)
lon : Longitude, expressed as degrees * 1E7 (int32_t)
alt : Altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t)
relative_alt : Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
vx : Ground X Speed (Latitude), expressed as m/s (float)
vy : Ground Y Speed (Longitude), expressed as m/s (float)
vz : Ground Z Speed (Altitude), expressed as m/s (float)
covariance : Covariance matrix (first six entries are the first ROW, next six entries are the second row, etc.) (float)
### Response:
def global_position_int_cov_send(self, time_boot_ms, time_utc, estimator_type, lat, lon, alt, relative_alt, vx, vy, vz, covariance, force_mavlink1=False):
'''
The filtered global position (e.g. fused GPS and accelerometers). The
position is in GPS-frame (right-handed, Z-up). It is
designed as scaled integer message since the
resolution of float is not sufficient. NOTE: This
message is intended for onboard networks / companion
computers and higher-bandwidth links and optimized for
accuracy and completeness. Please use the
GLOBAL_POSITION_INT message for a minimal subset.
time_boot_ms : Timestamp (milliseconds since system boot) (uint32_t)
time_utc : Timestamp (microseconds since UNIX epoch) in UTC. 0 for unknown. Commonly filled by the precision time source of a GPS receiver. (uint64_t)
estimator_type : Class id of the estimator this estimate originated from. (uint8_t)
lat : Latitude, expressed as degrees * 1E7 (int32_t)
lon : Longitude, expressed as degrees * 1E7 (int32_t)
alt : Altitude in meters, expressed as * 1000 (millimeters), above MSL (int32_t)
relative_alt : Altitude above ground in meters, expressed as * 1000 (millimeters) (int32_t)
vx : Ground X Speed (Latitude), expressed as m/s (float)
vy : Ground Y Speed (Longitude), expressed as m/s (float)
vz : Ground Z Speed (Altitude), expressed as m/s (float)
covariance : Covariance matrix (first six entries are the first ROW, next six entries are the second row, etc.) (float)
'''
return self.send(self.global_position_int_cov_encode(time_boot_ms, time_utc, estimator_type, lat, lon, alt, relative_alt, vx, vy, vz, covariance), force_mavlink1=force_mavlink1) |
def _parse_return(cls, result):
"""Extract the result, return value and context from a result object
"""
return_value = None
success = result['result']
context = result['context']
if 'return_value' in result:
return_value = result['return_value']
    return success, return_value, context | Extract the result, return value and context from a result object | Below is the instruction that describes the task:
### Input:
Extract the result, return value and context from a result object
### Response:
def _parse_return(cls, result):
"""Extract the result, return value and context from a result object
"""
return_value = None
success = result['result']
context = result['context']
if 'return_value' in result:
return_value = result['return_value']
return success, return_value, context |
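A worked example of the expected result shape; ``SomeRunner`` is a hypothetical class carrying this helper:

result = {'result': True, 'context': {'step': 3}, 'return_value': 42}
success, return_value, context = SomeRunner._parse_return(result)
assert (success, return_value, context) == (True, 42, {'step': 3})

# 'return_value' is optional and defaults to None when absent
success, return_value, context = SomeRunner._parse_return(
    {'result': False, 'context': {}})
assert return_value is None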
def _stage_local_files(local_dir, local_files={}):
"""
Either ``local_files`` and/or ``context`` should be supplied.
Will stage a ``local_files`` dictionary of path:filename pairs where path
is relative to ``local_dir`` into a local tmp staging directory.
Returns a path to the temporary local staging directory
"""
staging_dir = os.path.join(tempfile.mkdtemp(),os.path.basename(local_dir))
os.mkdir(staging_dir)
for root, dirs, files in os.walk(local_dir):
relative_tree = root.replace(local_dir,'')
if relative_tree:
relative_tree = relative_tree[1:]
if local_files:
files = local_files.get(relative_tree,[])
for file in files:
if relative_tree:
filepath = os.path.join(relative_tree,file)
if not os.path.exists(os.path.join(staging_dir,relative_tree)):
os.mkdir(os.path.join(staging_dir,relative_tree))
else: filepath = file
shutil.copy2(os.path.join(root,file),os.path.join(staging_dir,filepath))
return staging_dir | Either ``local_files`` and/or ``context`` should be supplied.
Will stage a ``local_files`` dictionary of path:filename pairs where path
is relative to ``local_dir`` into a local tmp staging directory.
    Returns a path to the temporary local staging directory | Below is the instruction that describes the task:
### Input:
Either ``local_files`` and/or ``context`` should be supplied.
Will stage a ``local_files`` dictionary of path:filename pairs where path
is relative to ``local_dir`` into a local tmp staging directory.
Returns a path to the temporary local staging directory
### Response:
def _stage_local_files(local_dir, local_files={}):
"""
Either ``local_files`` and/or ``context`` should be supplied.
Will stage a ``local_files`` dictionary of path:filename pairs where path
is relative to ``local_dir`` into a local tmp staging directory.
Returns a path to the temporary local staging directory
"""
staging_dir = os.path.join(tempfile.mkdtemp(),os.path.basename(local_dir))
os.mkdir(staging_dir)
for root, dirs, files in os.walk(local_dir):
relative_tree = root.replace(local_dir,'')
if relative_tree:
relative_tree = relative_tree[1:]
if local_files:
files = local_files.get(relative_tree,[])
for file in files:
if relative_tree:
filepath = os.path.join(relative_tree,file)
if not os.path.exists(os.path.join(staging_dir,relative_tree)):
os.mkdir(os.path.join(staging_dir,relative_tree))
else: filepath = file
shutil.copy2(os.path.join(root,file),os.path.join(staging_dir,filepath))
return staging_dir |
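A usage sketch, assuming a project layout with a top-level ``settings.py`` and a ``templates/`` subtree; the empty-string key selects files directly under ``local_dir``:

import os
import shutil

staging = _stage_local_files('/home/me/project', {
    '': ['settings.py'],           # files directly under local_dir
    'templates': ['base.html'],    # files under local_dir/templates
})
# ... copy/rsync from `staging` to the target, then drop the tmp tree
shutil.rmtree(os.path.dirname(staging))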
def load_from_string(self, content, container, **options):
"""
Load configuration data from given string 'content'.
:param content: Configuration string
        :param container: callable to make a container object
:param options: keyword options passed to '_load_from_string_fn'
:return: container object holding the configuration data
"""
return load_with_fn(self._load_from_string_fn, content, container,
allow_primitives=self.allow_primitives(),
**options) | Load configuration data from given string 'content'.
:param content: Configuration string
        :param container: callable to make a container object
:param options: keyword options passed to '_load_from_string_fn'
        :return: container object holding the configuration data | Below is the instruction that describes the task:
### Input:
Load configuration data from given string 'content'.
:param content: Configuration string
        :param container: callable to make a container object
:param options: keyword options passed to '_load_from_string_fn'
:return: container object holding the configuration data
### Response:
def load_from_string(self, content, container, **options):
"""
Load configuration data from given string 'content'.
:param content: Configuration string
        :param container: callable to make a container object
:param options: keyword options passed to '_load_from_string_fn'
:return: container object holding the configuration data
"""
return load_with_fn(self._load_from_string_fn, content, container,
allow_primitives=self.allow_primitives(),
**options) |
def notifications_get(input_params={}, always_retry=True, **kwargs):
"""
Invokes the /notifications/get API method.
"""
    return DXHTTPRequest('/notifications/get', input_params, always_retry=always_retry, **kwargs) | Invokes the /notifications/get API method. | Below is the instruction that describes the task:
### Input:
Invokes the /notifications/get API method.
### Response:
def notifications_get(input_params={}, always_retry=True, **kwargs):
"""
Invokes the /notifications/get API method.
"""
return DXHTTPRequest('/notifications/get', input_params, always_retry=always_retry, **kwargs) |
def slug_exists(self, request):
"""Check if given url slug exists.
Check if slug given in query parameter ``name`` exists. Return
``True`` if slug already exists and ``False`` otherwise.
"""
if not request.user.is_authenticated:
return Response(status=status.HTTP_401_UNAUTHORIZED)
if 'name' not in request.query_params:
return Response({'error': 'Query parameter `name` must be given.'},
status=status.HTTP_400_BAD_REQUEST)
queryset = self.get_queryset()
slug_name = request.query_params['name']
return Response(queryset.filter(slug__iexact=slug_name).exists()) | Check if given url slug exists.
Check if slug given in query parameter ``name`` exists. Return
        ``True`` if slug already exists and ``False`` otherwise. | Below is the instruction that describes the task:
### Input:
Check if given url slug exists.
Check if slug given in query parameter ``name`` exists. Return
``True`` if slug already exists and ``False`` otherwise.
### Response:
def slug_exists(self, request):
"""Check if given url slug exists.
Check if slug given in query parameter ``name`` exists. Return
``True`` if slug already exists and ``False`` otherwise.
"""
if not request.user.is_authenticated:
return Response(status=status.HTTP_401_UNAUTHORIZED)
if 'name' not in request.query_params:
return Response({'error': 'Query parameter `name` must be given.'},
status=status.HTTP_400_BAD_REQUEST)
queryset = self.get_queryset()
slug_name = request.query_params['name']
return Response(queryset.filter(slug__iexact=slug_name).exists()) |
def style(self):
"""Add summaries and convert to Pandas Styler"""
row_titles = [a.title for a in self._cleaned_summary_rows]
col_titles = [a.title for a in self._cleaned_summary_cols]
row_ix = pd.IndexSlice[row_titles, :]
col_ix = pd.IndexSlice[:, col_titles]
def handle_na(df):
df.loc[col_ix] = df.loc[col_ix].fillna('')
df.loc[row_ix] = df.loc[row_ix].fillna('')
return df
styler = (
self
.frame
.pipe(handle_na)
.style
.applymap(lambda r: 'font-weight: 900', subset=row_ix)
.applymap(lambda r: 'font-weight: 900', subset=col_ix)
)
for formatter in self.formatters:
styler = formatter.apply(styler)
        return styler | Add summaries and convert to Pandas Styler | Below is the instruction that describes the task:
### Input:
Add summaries and convert to Pandas Styler
### Response:
def style(self):
"""Add summaries and convert to Pandas Styler"""
row_titles = [a.title for a in self._cleaned_summary_rows]
col_titles = [a.title for a in self._cleaned_summary_cols]
row_ix = pd.IndexSlice[row_titles, :]
col_ix = pd.IndexSlice[:, col_titles]
def handle_na(df):
df.loc[col_ix] = df.loc[col_ix].fillna('')
df.loc[row_ix] = df.loc[row_ix].fillna('')
return df
styler = (
self
.frame
.pipe(handle_na)
.style
.applymap(lambda r: 'font-weight: 900', subset=row_ix)
.applymap(lambda r: 'font-weight: 900', subset=col_ix)
)
for formatter in self.formatters:
styler = formatter.apply(styler)
return styler |
def load(self, patterns, dirs, ignore=None):
"""Load objects from the filesystem into the ``paths`` dictionary
Also include an attribute on the object, ``relative_path`` which is the
    shortened, relative path to the package/module
"""
for dir_ in dirs:
dir_root = dir_
if os.path.exists(os.path.join(dir_, "__init__.py")):
dir_root = os.path.abspath(os.path.join(dir_, os.pardir))
for path in self.find_files(patterns=patterns, dirs=[dir_], ignore=ignore):
data = self.read_file(path=path)
if data:
data["relative_path"] = os.path.relpath(path, dir_root)
self.paths[path] = data | Load objects from the filesystem into the ``paths`` dictionary
Also include an attribute on the object, ``relative_path`` which is the
    shortened, relative path to the package/module | Below is the instruction that describes the task:
### Input:
Load objects from the filesystem into the ``paths`` dictionary
Also include an attribute on the object, ``relative_path`` which is the
    shortened, relative path to the package/module
### Response:
def load(self, patterns, dirs, ignore=None):
"""Load objects from the filesystem into the ``paths`` dictionary
Also include an attribute on the object, ``relative_path`` which is the
    shortened, relative path to the package/module
"""
for dir_ in dirs:
dir_root = dir_
if os.path.exists(os.path.join(dir_, "__init__.py")):
dir_root = os.path.abspath(os.path.join(dir_, os.pardir))
for path in self.find_files(patterns=patterns, dirs=[dir_], ignore=ignore):
data = self.read_file(path=path)
if data:
data["relative_path"] = os.path.relpath(path, dir_root)
self.paths[path] = data |
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.lingeling and self.status == True:
model = pysolvers.lingeling_model(self.lingeling)
            return model if model is not None else [] | Get a model if the formula was previously satisfied. | Below is the instruction that describes the task:
### Input:
Get a model if the formula was previously satisfied.
### Response:
def get_model(self):
"""
Get a model if the formula was previously satisfied.
"""
if self.lingeling and self.status == True:
model = pysolvers.lingeling_model(self.lingeling)
            return model if model is not None else []
def is_binary(filename):
"""
:param filename: File to check.
:returns: True if it's a binary file, otherwise False.
"""
logger.debug('is_binary: %(filename)r', locals())
# Check if the file extension is in a list of known binary types
binary_extensions = ['pyc', 'iso', 'zip', 'pdf']
for ext in binary_extensions:
if filename.endswith(ext):
return True
# Check if the starting chunk is a binary string
chunk = get_starting_chunk(filename)
return is_binary_string(chunk) | :param filename: File to check.
    :returns: True if it's a binary file, otherwise False. | Below is the instruction that describes the task:
### Input:
:param filename: File to check.
:returns: True if it's a binary file, otherwise False.
### Response:
def is_binary(filename):
"""
:param filename: File to check.
:returns: True if it's a binary file, otherwise False.
"""
logger.debug('is_binary: %(filename)r', locals())
# Check if the file extension is in a list of known binary types
binary_extensions = ['pyc', 'iso', 'zip', 'pdf']
for ext in binary_extensions:
if filename.endswith(ext):
return True
# Check if the starting chunk is a binary string
chunk = get_starting_chunk(filename)
return is_binary_string(chunk) |
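A small sketch of how the two checks interact, assuming ``get_starting_chunk`` and ``is_binary_string`` from the same module, and given two files that exist on disk:

for name in ('notes.txt', 'archive.zip'):
    print(name, is_binary(name))
# 'archive.zip' short-circuits to True on its extension alone;
# 'notes.txt' falls through to the content heuristic on its first chunk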
def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'):
''' run a command on the remote host '''
vvv("EXEC COMMAND %s" % cmd)
if self.runner.sudo and sudoable:
raise errors.AnsibleError("fireball does not use sudo, but runs as whoever it was initiated as. (That itself is where to use sudo).")
data = dict(
mode='command',
cmd=cmd,
tmp_path=tmp_path,
executable=executable,
)
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
self.socket.send(data)
response = self.socket.recv()
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
        return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr','')) | run a command on the remote host | Below is the instruction that describes the task:
### Input:
run a command on the remote host
### Response:
def exec_command(self, cmd, tmp_path, sudo_user, sudoable=False, executable='/bin/sh'):
''' run a command on the remote host '''
vvv("EXEC COMMAND %s" % cmd)
if self.runner.sudo and sudoable:
raise errors.AnsibleError("fireball does not use sudo, but runs as whoever it was initiated as. (That itself is where to use sudo).")
data = dict(
mode='command',
cmd=cmd,
tmp_path=tmp_path,
executable=executable,
)
data = utils.jsonify(data)
data = utils.encrypt(self.key, data)
self.socket.send(data)
response = self.socket.recv()
response = utils.decrypt(self.key, response)
response = utils.parse_json(response)
return (response.get('rc',None), '', response.get('stdout',''), response.get('stderr','')) |
def hw_pipette(self) -> Dict[str, Any]:
""" View the information returned by the hardware API directly.
:raises: a :py:class:`.types.PipetteNotAttachedError` if the pipette is
no longer attached (should not happen).
"""
pipette = self._hw_manager.hardware.attached_instruments[self._mount]
if pipette is None:
raise types.PipetteNotAttachedError
return pipette | View the information returned by the hardware API directly.
:raises: a :py:class:`.types.PipetteNotAttachedError` if the pipette is
        no longer attached (should not happen). | Below is the instruction that describes the task:
### Input:
View the information returned by the hardware API directly.
:raises: a :py:class:`.types.PipetteNotAttachedError` if the pipette is
no longer attached (should not happen).
### Response:
def hw_pipette(self) -> Dict[str, Any]:
""" View the information returned by the hardware API directly.
:raises: a :py:class:`.types.PipetteNotAttachedError` if the pipette is
no longer attached (should not happen).
"""
pipette = self._hw_manager.hardware.attached_instruments[self._mount]
if pipette is None:
raise types.PipetteNotAttachedError
return pipette |
def applyTransformOnGroup(self, transform, group):
"""Apply an SVG transformation to a RL Group shape.
The transformation is the value of an SVG transform attribute
like transform="scale(1, -1) translate(10, 30)".
rotate(<angle> [<cx> <cy>]) is equivalent to:
translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>)
"""
tr = self.attrConverter.convertTransform(transform)
for op, values in tr:
if op == "scale":
if not isinstance(values, tuple):
values = (values, values)
group.scale(*values)
elif op == "translate":
if isinstance(values, (int, float)):
# From the SVG spec: If <ty> is not provided, it is assumed to be zero.
values = values, 0
group.translate(*values)
elif op == "rotate":
if not isinstance(values, tuple) or len(values) == 1:
group.rotate(values)
elif len(values) == 3:
angle, cx, cy = values
group.translate(cx, cy)
group.rotate(angle)
group.translate(-cx, -cy)
elif op == "skewX":
group.skew(values, 0)
elif op == "skewY":
group.skew(0, values)
elif op == "matrix":
group.transform = values
else:
logger.debug("Ignoring transform: %s %s" % (op, values)) | Apply an SVG transformation to a RL Group shape.
The transformation is the value of an SVG transform attribute
like transform="scale(1, -1) translate(10, 30)".
rotate(<angle> [<cx> <cy>]) is equivalent to:
        translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>) | Below is the instruction that describes the task:
### Input:
Apply an SVG transformation to a RL Group shape.
The transformation is the value of an SVG transform attribute
like transform="scale(1, -1) translate(10, 30)".
rotate(<angle> [<cx> <cy>]) is equivalent to:
translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>)
### Response:
def applyTransformOnGroup(self, transform, group):
"""Apply an SVG transformation to a RL Group shape.
The transformation is the value of an SVG transform attribute
like transform="scale(1, -1) translate(10, 30)".
rotate(<angle> [<cx> <cy>]) is equivalent to:
translate(<cx> <cy>) rotate(<angle>) translate(-<cx> -<cy>)
"""
tr = self.attrConverter.convertTransform(transform)
for op, values in tr:
if op == "scale":
if not isinstance(values, tuple):
values = (values, values)
group.scale(*values)
elif op == "translate":
if isinstance(values, (int, float)):
# From the SVG spec: If <ty> is not provided, it is assumed to be zero.
values = values, 0
group.translate(*values)
elif op == "rotate":
if not isinstance(values, tuple) or len(values) == 1:
group.rotate(values)
elif len(values) == 3:
angle, cx, cy = values
group.translate(cx, cy)
group.rotate(angle)
group.translate(-cx, -cy)
elif op == "skewX":
group.skew(values, 0)
elif op == "skewY":
group.skew(0, values)
elif op == "matrix":
group.transform = values
else:
logger.debug("Ignoring transform: %s %s" % (op, values)) |
def int_to_string(number, alphabet, padding=None):
"""
Convert a number to a string, using the given alphabet.
The output has the most significant digit first.
"""
output = ""
alpha_len = len(alphabet)
while number:
number, digit = divmod(number, alpha_len)
output += alphabet[digit]
if padding:
remainder = max(padding - len(output), 0)
output = output + alphabet[0] * remainder
return output[::-1] | Convert a number to a string, using the given alphabet.
    The output has the most significant digit first. | Below is the instruction that describes the task:
### Input:
Convert a number to a string, using the given alphabet.
The output has the most significant digit first.
### Response:
def int_to_string(number, alphabet, padding=None):
"""
Convert a number to a string, using the given alphabet.
The output has the most significant digit first.
"""
output = ""
alpha_len = len(alphabet)
while number:
number, digit = divmod(number, alpha_len)
output += alphabet[digit]
if padding:
remainder = max(padding - len(output), 0)
output = output + alphabet[0] * remainder
return output[::-1] |
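A worked example plus an inverse sketch; note that ``number=0`` yields the empty string unless ``padding`` is given:

HEX = '0123456789abcdef'
assert int_to_string(255, HEX) == 'ff'
assert int_to_string(255, HEX, padding=4) == '00ff'

def string_to_int(s, alphabet):
    # inverse sketch: most significant digit first
    number = 0
    for char in s:
        number = number * len(alphabet) + alphabet.index(char)
    return number

assert string_to_int('00ff', HEX) == 255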
def load_config_from_setup(app):
"""
Replace values in app.config from package metadata
"""
# for now, assume project root is one level up
root = os.path.join(app.confdir, '..')
setup_script = os.path.join(root, 'setup.py')
fields = ['--name', '--version', '--url', '--author']
dist_info_cmd = [sys.executable, setup_script] + fields
output = subprocess.check_output(
dist_info_cmd,
cwd=root,
universal_newlines=True,
)
outputs = output.strip().split('\n')
project, version, url, author = outputs
app.config.project = project
app.config.version = app.config.release = version
app.config.package_url = url
app.config.author = app.config.copyright = author | Replace values in app.config from package metadata | Below is the the instruction that describes the task:
### Input:
Replace values in app.config from package metadata
### Response:
def load_config_from_setup(app):
"""
Replace values in app.config from package metadata
"""
# for now, assume project root is one level up
root = os.path.join(app.confdir, '..')
setup_script = os.path.join(root, 'setup.py')
fields = ['--name', '--version', '--url', '--author']
dist_info_cmd = [sys.executable, setup_script] + fields
output = subprocess.check_output(
dist_info_cmd,
cwd=root,
universal_newlines=True,
)
outputs = output.strip().split('\n')
project, version, url, author = outputs
app.config.project = project
app.config.version = app.config.release = version
app.config.package_url = url
app.config.author = app.config.copyright = author |
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
if tables:
# Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY
# So must use the much slower DELETE
from django.db import connections
cursor = connections[self.connection.alias].cursor()
            # Try to minimize the risks of the braindead inconsistency in
            # DBCC CHECKIDENT(table, RESEED, n) behavior.
seqs = []
for seq in sequences:
cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
rowcnt = cursor.fetchone()[0]
elem = {}
if rowcnt:
elem['start_id'] = 0
else:
elem['start_id'] = 1
elem.update(seq)
seqs.append(elem)
cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE not in ('PRIMARY KEY','UNIQUE')")
fks = cursor.fetchall()
sql_list = ['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks]
sql_list.extend(['%s %s %s;' % (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)) ) for table in tables])
if self.on_azure_sql_db:
import warnings
warnings.warn("The identity columns will never be reset " \
"on Windows Azure SQL Database.",
RuntimeWarning)
else:
# Then reset the counters on each table.
sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
style.SQL_KEYWORD('DBCC'),
style.SQL_KEYWORD('CHECKIDENT'),
style.SQL_FIELD(self.quote_name(seq["table"])),
style.SQL_KEYWORD('RESEED'),
style.SQL_FIELD('%d' % seq['start_id']),
style.SQL_KEYWORD('WITH'),
style.SQL_KEYWORD('NO_INFOMSGS'),
) for seq in seqs])
sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks])
return sql_list
else:
return [] | Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
        color_style() or no_style() in django.core.management.color. | Below is the instruction that describes the task:
### Input:
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
### Response:
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
"""
if tables:
# Cannot use TRUNCATE on tables that are referenced by a FOREIGN KEY
# So must use the much slower DELETE
from django.db import connections
cursor = connections[self.connection.alias].cursor()
            # Try to minimize the risks of the braindead inconsistency in
            # DBCC CHECKIDENT(table, RESEED, n) behavior.
seqs = []
for seq in sequences:
cursor.execute("SELECT COUNT(*) FROM %s" % self.quote_name(seq["table"]))
rowcnt = cursor.fetchone()[0]
elem = {}
if rowcnt:
elem['start_id'] = 0
else:
elem['start_id'] = 1
elem.update(seq)
seqs.append(elem)
cursor.execute("SELECT TABLE_NAME, CONSTRAINT_NAME FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS WHERE CONSTRAINT_TYPE not in ('PRIMARY KEY','UNIQUE')")
fks = cursor.fetchall()
sql_list = ['ALTER TABLE %s NOCHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks]
sql_list.extend(['%s %s %s;' % (style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'),
style.SQL_FIELD(self.quote_name(table)) ) for table in tables])
if self.on_azure_sql_db:
import warnings
warnings.warn("The identity columns will never be reset " \
"on Windows Azure SQL Database.",
RuntimeWarning)
else:
# Then reset the counters on each table.
sql_list.extend(['%s %s (%s, %s, %s) %s %s;' % (
style.SQL_KEYWORD('DBCC'),
style.SQL_KEYWORD('CHECKIDENT'),
style.SQL_FIELD(self.quote_name(seq["table"])),
style.SQL_KEYWORD('RESEED'),
style.SQL_FIELD('%d' % seq['start_id']),
style.SQL_KEYWORD('WITH'),
style.SQL_KEYWORD('NO_INFOMSGS'),
) for seq in seqs])
sql_list.extend(['ALTER TABLE %s CHECK CONSTRAINT %s;' % \
(self.quote_name(fk[0]), self.quote_name(fk[1])) for fk in fks])
return sql_list
else:
return [] |
def get_model_fields(model, add_reserver_flag=True):
"""
    Create fields suitable for model_config; id will be skipped.
"""
import uliweb.orm as orm
fields = []
m = {'type':'type_name', 'hint':'hint',
'default':'default', 'required':'required'}
m1 = {'index':'index', 'unique':'unique'}
for name, prop in model.properties.items():
if name == 'id':
continue
d = {}
for k, v in m.items():
d[k] = getattr(prop, v)
for k, v in m1.items():
d[k] = bool(prop.kwargs.get(v))
d['name'] = prop.fieldname or name
d['verbose_name'] = unicode(prop.verbose_name)
d['nullable'] = bool(prop.kwargs.get('nullable', orm.__nullable__))
if d['type'] in ('VARCHAR', 'CHAR', 'BINARY', 'VARBINARY'):
d['max_length'] = prop.max_length
if d['type'] in ('Reference', 'OneToOne', 'ManyToMany'):
d['reference_class'] = prop.reference_class
            #collection_name will be _collection_name, i.e. the original value
d['collection_name'] = prop._collection_name
d['server_default'] = prop.kwargs.get('server_default')
d['_reserved'] = True
fields.append(d)
    return fields | Create fields suitable for model_config; id will be skipped. | Below is the instruction that describes the task:
### Input:
    Create fields suitable for model_config; id will be skipped.
### Response:
def get_model_fields(model, add_reserver_flag=True):
"""
    Create fields suitable for model_config; id will be skipped.
"""
import uliweb.orm as orm
fields = []
m = {'type':'type_name', 'hint':'hint',
'default':'default', 'required':'required'}
m1 = {'index':'index', 'unique':'unique'}
for name, prop in model.properties.items():
if name == 'id':
continue
d = {}
for k, v in m.items():
d[k] = getattr(prop, v)
for k, v in m1.items():
d[k] = bool(prop.kwargs.get(v))
d['name'] = prop.fieldname or name
d['verbose_name'] = unicode(prop.verbose_name)
d['nullable'] = bool(prop.kwargs.get('nullable', orm.__nullable__))
if d['type'] in ('VARCHAR', 'CHAR', 'BINARY', 'VARBINARY'):
d['max_length'] = prop.max_length
if d['type'] in ('Reference', 'OneToOne', 'ManyToMany'):
d['reference_class'] = prop.reference_class
            #collection_name will be _collection_name, i.e. the original value
d['collection_name'] = prop._collection_name
d['server_default'] = prop.kwargs.get('server_default')
d['_reserved'] = True
fields.append(d)
return fields |
def get_formset_class(self, **kwargs):
"""
Returns the formset for the queryset,
if a form class is available.
"""
form_class = self.get_formset_form_class()
if form_class:
kwargs['formfield_callback'] = self.formfield_for_dbfield
return model_forms.modelformset_factory(self.model,
form_class, fields=self.change_fields, extra=0,
**kwargs) | Returns the formset for the queryset,
        if a form class is available. | Below is the instruction that describes the task:
### Input:
Returns the formset for the queryset,
if a form class is available.
### Response:
def get_formset_class(self, **kwargs):
"""
Returns the formset for the queryset,
if a form class is available.
"""
form_class = self.get_formset_form_class()
if form_class:
kwargs['formfield_callback'] = self.formfield_for_dbfield
return model_forms.modelformset_factory(self.model,
form_class, fields=self.change_fields, extra=0,
**kwargs) |
def get_items_from_response(self, response):
"""Yield :class:`.taskqueue.task.Task` items from response.
:type response: dict
:param response: The JSON API response for a page of tasks.
"""
for item in response.get('items', []):
id = item.get('id')
task = Task(id, taskqueue=self.taskqueue)
task._set_properties(item)
yield task | Yield :class:`.taskqueue.task.Task` items from response.
:type response: dict
        :param response: The JSON API response for a page of tasks. | Below is the instruction that describes the task:
### Input:
Yield :class:`.taskqueue.task.Task` items from response.
:type response: dict
:param response: The JSON API response for a page of tasks.
### Response:
def get_items_from_response(self, response):
"""Yield :class:`.taskqueue.task.Task` items from response.
:type response: dict
:param response: The JSON API response for a page of tasks.
"""
for item in response.get('items', []):
id = item.get('id')
task = Task(id, taskqueue=self.taskqueue)
task._set_properties(item)
yield task |
def send_until(self,
send,
regexps,
not_there=False,
shutit_pexpect_child=None,
cadence=5,
retries=100,
echo=None,
note=None,
debug_command=None,
pause_point_on_fail=True,
nonewline=False,
loglevel=logging.INFO):
"""Send string on a regular cadence until a string is either seen, or the timeout is triggered.
@param send: See send()
@param regexps: List of regexps to wait for.
        @param not_there: If True, wait until this regexp is not seen in the output. If False
wait until a regexp is seen in the output (default)
@param shutit_pexpect_child: See send()
@param echo: See send()
@param note: See send()
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.send_until(send,
regexps,
not_there=not_there,
cadence=cadence,
retries=retries,
echo=echo,
note=note,
loglevel=loglevel,
debug_command=debug_command,
nonewline=nonewline,
pause_point_on_fail=pause_point_on_fail) | Send string on a regular cadence until a string is either seen, or the timeout is triggered.
@param send: See send()
@param regexps: List of regexps to wait for.
        @param not_there: If True, wait until this regexp is not seen in the output. If False
wait until a regexp is seen in the output (default)
@param shutit_pexpect_child: See send()
@param echo: See send()
        @param note: See send() | Below is the instruction that describes the task:
### Input:
Send string on a regular cadence until a string is either seen, or the timeout is triggered.
@param send: See send()
@param regexps: List of regexps to wait for.
        @param not_there: If True, wait until this regexp is not seen in the output. If False
wait until a regexp is seen in the output (default)
@param shutit_pexpect_child: See send()
@param echo: See send()
@param note: See send()
### Response:
def send_until(self,
send,
regexps,
not_there=False,
shutit_pexpect_child=None,
cadence=5,
retries=100,
echo=None,
note=None,
debug_command=None,
pause_point_on_fail=True,
nonewline=False,
loglevel=logging.INFO):
"""Send string on a regular cadence until a string is either seen, or the timeout is triggered.
@param send: See send()
@param regexps: List of regexps to wait for.
        @param not_there: If True, wait until this regexp is not seen in the output. If False
wait until a regexp is seen in the output (default)
@param shutit_pexpect_child: See send()
@param echo: See send()
@param note: See send()
"""
shutit_global.shutit_global_object.yield_to_draw()
shutit_pexpect_child = shutit_pexpect_child or self.get_current_shutit_pexpect_session().pexpect_child
shutit_pexpect_session = self.get_shutit_pexpect_session_from_child(shutit_pexpect_child)
return shutit_pexpect_session.send_until(send,
regexps,
not_there=not_there,
cadence=cadence,
retries=retries,
echo=echo,
note=note,
loglevel=loglevel,
debug_command=debug_command,
nonewline=nonewline,
pause_point_on_fail=pause_point_on_fail) |
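A hedged usage sketch: poll until a package shows up on the target; the command, regexp, and ``shutit`` object are illustrative:

shutit.send_until(
    'dpkg -s curl 2>/dev/null | grep Status',
    ['install ok installed'],
    cadence=10,   # re-send every 10 seconds
    retries=30,   # give up after ~5 minutes
    note='waiting for curl to be installed on the target',
)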
def fullqualname_py3(obj):
"""Fully qualified name for objects in Python 3."""
if type(obj).__name__ == 'builtin_function_or_method':
return _fullqualname_builtin_py3(obj)
elif type(obj).__name__ == 'function':
return _fullqualname_function_py3(obj)
elif type(obj).__name__ in ['member_descriptor', 'method_descriptor',
'wrapper_descriptor']:
return obj.__objclass__.__module__ + '.' + obj.__qualname__
elif type(obj).__name__ == 'method':
return _fullqualname_method_py3(obj)
elif type(obj).__name__ == 'method-wrapper':
return fullqualname_py3(obj.__self__) + '.' + obj.__name__
elif type(obj).__name__ == 'module':
return obj.__name__
elif type(obj).__name__ == 'property':
return obj.fget.__module__ + '.' + obj.fget.__qualname__
elif inspect.isclass(obj):
return obj.__module__ + '.' + obj.__qualname__
return obj.__class__.__module__ + '.' + obj.__class__.__qualname__ | Fully qualified name for objects in Python 3. | Below is the the instruction that describes the task:
### Input:
Fully qualified name for objects in Python 3.
### Response:
def fullqualname_py3(obj):
"""Fully qualified name for objects in Python 3."""
if type(obj).__name__ == 'builtin_function_or_method':
return _fullqualname_builtin_py3(obj)
elif type(obj).__name__ == 'function':
return _fullqualname_function_py3(obj)
elif type(obj).__name__ in ['member_descriptor', 'method_descriptor',
'wrapper_descriptor']:
return obj.__objclass__.__module__ + '.' + obj.__qualname__
elif type(obj).__name__ == 'method':
return _fullqualname_method_py3(obj)
elif type(obj).__name__ == 'method-wrapper':
return fullqualname_py3(obj.__self__) + '.' + obj.__name__
elif type(obj).__name__ == 'module':
return obj.__name__
elif type(obj).__name__ == 'property':
return obj.fget.__module__ + '.' + obj.fget.__qualname__
elif inspect.isclass(obj):
return obj.__module__ + '.' + obj.__qualname__
return obj.__class__.__module__ + '.' + obj.__class__.__qualname__ |
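A few illustrative calls; the exact strings for plain functions depend on the ``_fullqualname_*`` helpers, which are not shown here:

import json

print(fullqualname_py3(json))      # 'json' (module branch)
print(fullqualname_py3(dict.get))  # 'builtins.dict.get' (method_descriptor branch)
print(fullqualname_py3(42))        # 'builtins.int' (fallback branch)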
def stage(x, staging):
"""
Stage an object to the `staging` directory.
If the object is a Track and is one of the types that needs an index file
(bam, vcfTabix), then the index file will be staged as well.
Returns a list of the linknames created.
"""
linknames = []
# Objects that don't represent a file shouldn't be staged
non_file_objects = (
track.ViewTrack,
track.CompositeTrack,
track.AggregateTrack,
track.SuperTrack,
genome.Genome,
)
if isinstance(x, non_file_objects):
return linknames
# If it's an object representing a file, then render it.
#
# Track objects don't represent files, but their documentation does
linknames.append(x.render(staging))
if hasattr(x, 'source') and hasattr(x, 'filename'):
def _stg(x, ext=''):
# A remote track hosted elsewhere does not need staging. This is
# defined by a track with a url, but no source or filename.
if (
x.source is None
and x.filename is None
and getattr(x, 'url', None) is not None
):
return
linknames.append(
local_link(x.source + ext, x.filename + ext, staging)
)
_stg(x)
if isinstance(x, track.Track):
if x.tracktype == 'bam':
_stg(x, ext='.bai')
if x.tracktype == 'vcfTabix':
_stg(x, ext='.tbi')
if isinstance(x, track.CompositeTrack):
if x._html:
_stg(x._html)
return linknames | Stage an object to the `staging` directory.
If the object is a Track and is one of the types that needs an index file
(bam, vcfTabix), then the index file will be staged as well.
    Returns a list of the linknames created. | Below is the instruction that describes the task:
### Input:
Stage an object to the `staging` directory.
If the object is a Track and is one of the types that needs an index file
(bam, vcfTabix), then the index file will be staged as well.
Returns a list of the linknames created.
### Response:
def stage(x, staging):
"""
Stage an object to the `staging` directory.
If the object is a Track and is one of the types that needs an index file
(bam, vcfTabix), then the index file will be staged as well.
Returns a list of the linknames created.
"""
linknames = []
# Objects that don't represent a file shouldn't be staged
non_file_objects = (
track.ViewTrack,
track.CompositeTrack,
track.AggregateTrack,
track.SuperTrack,
genome.Genome,
)
if isinstance(x, non_file_objects):
return linknames
# If it's an object representing a file, then render it.
#
# Track objects don't represent files, but their documentation does
linknames.append(x.render(staging))
if hasattr(x, 'source') and hasattr(x, 'filename'):
def _stg(x, ext=''):
# A remote track hosted elsewhere does not need staging. This is
# defined by a track with a url, but no source or filename.
if (
x.source is None
and x.filename is None
and getattr(x, 'url', None) is not None
):
return
linknames.append(
local_link(x.source + ext, x.filename + ext, staging)
)
_stg(x)
if isinstance(x, track.Track):
if x.tracktype == 'bam':
_stg(x, ext='.bai')
if x.tracktype == 'vcfTabix':
_stg(x, ext='.tbi')
if isinstance(x, track.CompositeTrack):
if x._html:
_stg(x._html)
return linknames |
def arrange_plots_on_one_canvas(FigureAxTupleArray, title='', SubtitleArray = [], show_fig=True):
"""
Arranges plots, given in an array of tuples consisting of fig and axs,
    onto a subplot-figure consisting of 2 horizontal times the length of the
passed (fig,axs)-array divided by 2 vertical subplots
Parameters
----------
FigureAxTupleArray : array-like
        array of Tuples(fig, axs) outputted from the other plotting functions
inside optoanalysis
title : string, optional
string for the global title of the overall combined figure
SubtitleArray : array-like, optional
array of titles for each figure-set to be plotted, i.e. subplots
show_fig : bool, optional
If True runs plt.show() before returning figure
if False it just returns the figure object.
(the default is True, it shows the figure)
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created
axs : list of matplotlib.axes.Axes objects
        The list of axes objects created
"""
if SubtitleArray == []:
SubtitleArray = ["Plot {}".format(i)
for i in _np.arange(0, len(FigureAxTupleArray), 1)]
SingleFigSize = FigureAxTupleArray[0][0].get_size_inches()
combinedFig=_plt.figure(figsize=(2*SingleFigSize[0],_np.ceil(len(FigureAxTupleArray)/2)*SingleFigSize[1]))
for index in range(len(FigureAxTupleArray)):
individualPlot = FigureAxTupleArray[index]
individualPlot[0].set_size_inches((2*SingleFigSize[0],_np.ceil(len(FigureAxTupleArray)/2)*SingleFigSize[1]))
ax = individualPlot[1]
ax.set_title(SubtitleArray[index])
ax.remove()
ax.figure = combinedFig
ax.change_geometry(int(_np.ceil(len(FigureAxTupleArray)/2)),2,1+index)
combinedFig.axes.append(ax)
combinedFig.add_axes(ax)
#_plt.close(individualPlot[0])
combinedFig.subplots_adjust(hspace=.4)
combinedFig.suptitle(title)
if show_fig == True:
_plt.show()
return combinedFig | Arranges plots, given in an array of tuples consisting of fig and axs,
onto a subplot-figure consisting of 2 horizontal times the length of the
passed (fig,axs)-array divided by 2 vertical subplots
Parameters
----------
FigureAxTupleArray : array-like
array of Tuples(fig, axs) outputted from the other plotting functions
inside optoanalysis
title : string, optional
string for the global title of the overall combined figure
SubtitleArray : array-like, optional
array of titles for each figure-set to be plotted, i.e. subplots
show_fig : bool, optional
If True runs plt.show() before returning figure
if False it just returns the figure object.
(the default is True, it shows the figure)
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created
axs : list of matplotlib.axes.Axes objects
The list of axes objects created | Below is the instruction that describes the task:
### Input:
Arranges plots, given in an array of tuples consisting of fig and axs,
onto a subplot-figure consisting of 2 horizontal times the length of the
passed (fig,axs)-array divided by 2 vertical subplots
Parameters
----------
FigureAxTupleArray : array-like
array of Tuples(fig, axs) outputted from the other plotting functions
inside optoanalysis
title : string, optional
string for the global title of the overall combined figure
SubtitleArray : array-like, optional
array of titles for each figure-set to be plotted, i.e. subplots
show_fig : bool, optional
If True runs plt.show() before returning figure
if False it just returns the figure object.
(the default is True, it shows the figure)
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created
axs : list of matplotlib.axes.Axes objects
The list of axes objects created
### Response:
def arrange_plots_on_one_canvas(FigureAxTupleArray, title='', SubtitleArray = [], show_fig=True):
"""
Arranges plots, given in an array of tuples consisting of fig and axs,
onto a subplot-figure consisting of 2 horizontal times the length of the
passed (fig,axs)-array divided by 2 vertical subplots
Parameters
----------
FigureAxTupleArray : array-like
array of Tuples(fig, axs) outputted from the other plotting functions
inside optoanalysis
title : string, optional
string for the global title of the overall combined figure
SubtitleArray : array-like, optional
array of titles for each figure-set to be plotted, i.e. subplots
show_fig : bool, optional
If True runs plt.show() before returning figure
if False it just returns the figure object.
(the default is True, it shows the figure)
Returns
-------
fig : matplotlib.figure.Figure object
The figure object created
axs : list of matplotlib.axes.Axes objects
The list of axes objects created
"""
if SubtitleArray == []:
SubtitleArray = ["Plot {}".format(i)
for i in _np.arange(0, len(FigureAxTupleArray), 1)]
SingleFigSize = FigureAxTupleArray[0][0].get_size_inches()
combinedFig=_plt.figure(figsize=(2*SingleFigSize[0],_np.ceil(len(FigureAxTupleArray)/2)*SingleFigSize[1]))
for index in range(len(FigureAxTupleArray)):
individualPlot = FigureAxTupleArray[index]
individualPlot[0].set_size_inches((2*SingleFigSize[0],_np.ceil(len(FigureAxTupleArray)/2)*SingleFigSize[1]))
ax = individualPlot[1]
ax.set_title(SubtitleArray[index])
ax.remove()
ax.figure = combinedFig
ax.change_geometry(int(_np.ceil(len(FigureAxTupleArray)/2)),2,1+index)
combinedFig.axes.append(ax)
combinedFig.add_axes(ax)
#_plt.close(individualPlot[0])
combinedFig.subplots_adjust(hspace=.4)
combinedFig.suptitle(title)
if show_fig == True:
_plt.show()
return combinedFig |
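# A minimal usage sketch for arrange_plots_on_one_canvas, assuming the module's
# _np/_plt aliases and a matplotlib version where Axes.change_geometry (used
# above) is still available; _demo_plot is a hypothetical helper.
import numpy as _np
import matplotlib.pyplot as _plt

def _demo_plot(freq):
    # Each call returns the (fig, ax) tuple the combiner expects.
    fig, ax = _plt.subplots()
    t = _np.linspace(0, 1, 200)
    ax.plot(t, _np.sin(2 * _np.pi * freq * t))
    return fig, ax

pairs = [_demo_plot(f) for f in (1, 2, 3, 4)]
combined = arrange_plots_on_one_canvas(
    pairs, title='Sine sweep',
    SubtitleArray=['1 Hz', '2 Hz', '3 Hz', '4 Hz'], show_fig=False)
combined.savefig('sine_grid.png')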
def floating_ip_create(kwargs, call=None):
'''
Allocate a floating IP
.. versionadded:: 2016.3.0
'''
if call != 'function':
raise SaltCloudSystemExit(
'The floating_ip_create action must be called with -f or --function'
)
if 'pool' not in kwargs:
log.error('pool is required')
return False
conn = get_conn()
return conn.floating_ip_create(kwargs['pool']) | Allocate a floating IP
.. versionadded:: 2016.3.0 | Below is the instruction that describes the task:
### Input:
Allocate a floating IP
.. versionadded:: 2016.3.0
### Response:
def floating_ip_create(kwargs, call=None):
'''
Allocate a floating IP
.. versionadded:: 2016.3.0
'''
if call != 'function':
raise SaltCloudSystemExit(
'The floating_ip_create action must be called with -f or --function'
)
if 'pool' not in kwargs:
log.error('pool is required')
return False
conn = get_conn()
return conn.floating_ip_create(kwargs['pool']) |
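# A minimal usage sketch: the call-guard above means this function is reached
# through salt-cloud's -f/--function switch; 'my-openstack' is a hypothetical
# provider alias from the cloud provider configuration.
#
#     salt-cloud -f floating_ip_create my-openstack pool=public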
def set_formatter(name, func):
"""Replace the formatter function used by the trace decorator to
handle formatting a specific kind of argument. There are several
kinds of arguments that trace discriminates between:
* instance argument - the object bound to an instance method.
* class argument - the class object bound to a class method.
* positional arguments (named) - values bound to distinct names.
* positional arguments (default) - named positional arguments with
default values specified in the function declaration.
* positional arguments (anonymous) - an arbitrary number of values
that are all bound to the '*' variable.
* keyword arguments - zero or more name-value pairs that are
placed in a dictionary and bound to the double-star variable.
\var{name} - specifies the name of the formatter to be modified.
* instance argument - "self", "instance" or "this"
* class argument - "class"
* named argument - "named", "param" or "parameter"
* default argument - "default", "optional"
* anonymous argument - "anonymous", "arbitrary" or "unnamed"
* keyword argument - "keyword", "pair" or "pairs"
\var{func} - a function to format an argument.
* For all but anonymous formatters this function must accept two
arguments: the variable name and the value to which it is bound.
* The anonymous formatter function is passed only one argument
corresponding to an anonymous value.
* if \var{func} is "None" then the default formatter will be used.
"""
if name in ('self', 'instance', 'this'):
global af_self
af_self = _formatter_self if func is None else func
elif name == 'class':
global af_class
af_class = _formatter_class if func is None else func
elif name in ('named', 'param', 'parameter'):
global af_named
af_named = _formatter_named if func is None else func
elif name in ('default', 'optional'):
global af_default
af_default = _formatter_defaults if func is None else func
elif name in ('anonymous', 'arbitrary', 'unnamed'):
global af_anonymous
af_anonymous = chop if func is None else func
elif name in ('keyword', 'pair', 'pairs'):
global af_keyword
af_keyword = _formatter_named if func is None else func
else:
raise ValueError('unknown trace formatter %r' % name) | Replace the formatter function used by the trace decorator to
handle formatting a specific kind of argument. There are several
kinds of arguments that trace discriminates between:
* instance argument - the object bound to an instance method.
* class argument - the class object bound to a class method.
* positional arguments (named) - values bound to distinct names.
* positional arguments (default) - named positional arguments with
default values specified in the function declaration.
* positional arguments (anonymous) - an arbitrary number of values
that are all bound to the '*' variable.
* keyword arguments - zero or more name-value pairs that are
placed in a dictionary and bound to the double-star variable.
\var{name} - specifies the name of the formatter to be modified.
* instance argument - "self", "instance" or "this"
* class argument - "class"
* named argument - "named", "param" or "parameter"
* default argument - "default", "optional"
* anonymous argument - "anonymous", "arbitrary" or "unnamed"
* keyword argument - "keyword", "pair" or "pairs"
\var{func} - a function to format an argument.
* For all but anonymous formatters this function must accept two
arguments: the variable name and the value to which it is bound.
* The anonymous formatter function is passed only one argument
corresponding to an anonymous value.
* if \var{func} is "None" then the default formatter will be used. | Below is the instruction that describes the task:
### Input:
Replace the formatter function used by the trace decorator to
handle formatting a specific kind of argument. There are several
kinds of arguments that trace discriminates between:
* instance argument - the object bound to an instance method.
* class argument - the class object bound to a class method.
* positional arguments (named) - values bound to distinct names.
* positional arguments (default) - named positional arguments with
default values specified in the function declaration.
* positional arguments (anonymous) - an arbitrary number of values
that are all bound to the '*' variable.
* keyword arguments - zero or more name-value pairs that are
placed in a dictionary and bound to the double-star variable.
\var{name} - specifies the name of the formatter to be modified.
* instance argument - "self", "instance" or "this"
* class argument - "class"
* named argument - "named", "param" or "parameter"
* default argument - "default", "optional"
* anonymous argument - "anonymous", "arbitrary" or "unnamed"
* keyword argument - "keyword", "pair" or "pairs"
\var{func} - a function to format an argument.
* For all but anonymous formatters this function must accept two
arguments: the variable name and the value to which it is bound.
* The anonymous formatter function is passed only one argument
corresponding to an anonymous value.
* if \var{func} is "None" then the default formatter will be used.
### Response:
def set_formatter(name, func):
"""Replace the formatter function used by the trace decorator to
handle formatting a specific kind of argument. There are several
kinds of arguments that trace discriminates between:
* instance argument - the object bound to an instance method.
* class argument - the class object bound to a class method.
* positional arguments (named) - values bound to distinct names.
* positional arguments (default) - named positional arguments with
default values specified in the function declaration.
* positional arguments (anonymous) - an arbitrary number of values
that are all bound to the '*' variable.
* keyword arguments - zero or more name-value pairs that are
placed in a dictionary and bound to the double-star variable.
\var{name} - specifies the name of the formatter to be modified.
* instance argument - "self", "instance" or "this"
* class argument - "class"
* named argument - "named", "param" or "parameter"
* default argument - "default", "optional"
* anonymous argument - "anonymous", "arbitrary" or "unnamed"
* keyword argument - "keyword", "pair" or "pairs"
\var{func} - a function to format an argument.
* For all but anonymous formatters this function must accept two
arguments: the variable name and the value to which it is bound.
* The anonymous formatter function is passed only one argument
corresponding to an anonymous value.
* if \var{func} is "None" then the default formatter will be used.
"""
if name in ('self', 'instance', 'this'):
global af_self
af_self = _formatter_self if func is None else func
elif name == 'class':
global af_class
af_class = _formatter_class if func is None else func
elif name in ('named', 'param', 'parameter'):
global af_named
af_named = _formatter_named if func is None else func
elif name in ('default', 'optional'):
global af_default
af_default = _formatter_defaults if func is None else func
elif name in ('anonymous', 'arbitrary', 'unnamed'):
global af_anonymous
af_anonymous = chop if func is None else func
elif name in ('keyword', 'pair', 'pairs'):
global af_keyword
af_keyword = _formatter_named if func is None else func
else:
raise ValueError('unknown trace formatter %r' % name) |
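# A minimal usage sketch for set_formatter; terse_named is a hypothetical
# formatter for named positional arguments.
def terse_named(name, value):
    # Render each named argument as name=<truncated repr>.
    return '%s=%s' % (name, repr(value)[:32])

set_formatter('named', terse_named)   # install the custom formatter
set_formatter('named', None)          # None reinstates the module default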
def do_alarm_definition_list(mc, args):
'''List alarm definitions for this tenant.'''
fields = {}
if args.name:
fields['name'] = args.name
if args.dimensions:
fields['dimensions'] = utils.format_dimensions_query(args.dimensions)
if args.severity:
if not _validate_severity(args.severity):
return
fields['severity'] = args.severity
if args.sort_by:
sort_by = args.sort_by.split(',')
for field in sort_by:
field_values = field.split()
if len(field_values) > 2:
print("Invalid sort_by value {}".format(field))
if field_values[0] not in allowed_definition_sort_by:
print("Sort-by field name {} is not in [{}]".format(field_values[0],
allowed_definition_sort_by))
return
if len(field_values) > 1 and field_values[1] not in ['asc', 'desc']:
print("Invalid value {}, must be asc or desc".format(field_values[1]))
fields['sort_by'] = args.sort_by
if args.limit:
fields['limit'] = args.limit
if args.offset:
fields['offset'] = args.offset
try:
alarm = mc.alarm_definitions.list(**fields)
except (osc_exc.ClientException, k_exc.HttpError) as he:
raise osc_exc.CommandError('%s\n%s' % (he.message, he.details))
else:
if args.json:
print(utils.json_formatter(alarm))
return
cols = ['name', 'id', 'expression', 'match_by', 'actions_enabled']
formatters = {
'name': lambda x: x['name'],
'id': lambda x: x['id'],
'expression': lambda x: x['expression'],
'match_by': lambda x: utils.format_list(x['match_by']),
'actions_enabled': lambda x: x['actions_enabled'],
}
if isinstance(alarm, list):
# print the list
utils.print_list(alarm, cols, formatters=formatters)
else:
# add the dictionary to a list, so print_list works
alarm_list = list()
alarm_list.append(alarm)
utils.print_list(alarm_list, cols, formatters=formatters) | List alarm definitions for this tenant. | Below is the instruction that describes the task:
### Input:
List alarm definitions for this tenant.
### Response:
def do_alarm_definition_list(mc, args):
'''List alarm definitions for this tenant.'''
fields = {}
if args.name:
fields['name'] = args.name
if args.dimensions:
fields['dimensions'] = utils.format_dimensions_query(args.dimensions)
if args.severity:
if not _validate_severity(args.severity):
return
fields['severity'] = args.severity
if args.sort_by:
sort_by = args.sort_by.split(',')
for field in sort_by:
field_values = field.split()
if len(field_values) > 2:
print("Invalid sort_by value {}".format(field))
if field_values[0] not in allowed_definition_sort_by:
print("Sort-by field name {} is not in [{}]".format(field_values[0],
allowed_definition_sort_by))
return
if len(field_values) > 1 and field_values[1] not in ['asc', 'desc']:
print("Invalid value {}, must be asc or desc".format(field_values[1]))
fields['sort_by'] = args.sort_by
if args.limit:
fields['limit'] = args.limit
if args.offset:
fields['offset'] = args.offset
try:
alarm = mc.alarm_definitions.list(**fields)
except (osc_exc.ClientException, k_exc.HttpError) as he:
raise osc_exc.CommandError('%s\n%s' % (he.message, he.details))
else:
if args.json:
print(utils.json_formatter(alarm))
return
cols = ['name', 'id', 'expression', 'match_by', 'actions_enabled']
formatters = {
'name': lambda x: x['name'],
'id': lambda x: x['id'],
'expression': lambda x: x['expression'],
'match_by': lambda x: utils.format_list(x['match_by']),
'actions_enabled': lambda x: x['actions_enabled'],
}
if isinstance(alarm, list):
# print the list
utils.print_list(alarm, cols, formatters=formatters)
else:
# add the dictionary to a list, so print_list works
alarm_list = list()
alarm_list.append(alarm)
utils.print_list(alarm_list, cols, formatters=formatters) |
def frame2string(frame):
"""Return info about frame.
Keyword arg:
frame
Return string in format:
File {file name}, line {line number}, in
{name of parent of code object} {newline}
Line from file at line number
"""
lineno = frame.f_lineno # or f_lasti
co = frame.f_code
filename = co.co_filename
name = co.co_name
s = '\tFile "{0}", line {1}, in {2}'.format(filename, lineno, name)
line = linecache.getline(filename, lineno, frame.f_globals).lstrip()
return s + '\n\t\t' + line | Return info about frame.
Keyword arg:
frame
Return string in format:
File {file name}, line {line number}, in
{name of parent of code object} {newline}
Line from file at line number | Below is the instruction that describes the task:
### Input:
Return info about frame.
Keyword arg:
frame
Return string in format:
File {file name}, line {line number}, in
{name of parent of code object} {newline}
Line from file at line number
### Response:
def frame2string(frame):
"""Return info about frame.
Keyword arg:
frame
Return string in format:
File {file name}, line {line number}, in
{name of parent of code object} {newline}
Line from file at line number
"""
lineno = frame.f_lineno # or f_lasti
co = frame.f_code
filename = co.co_filename
name = co.co_name
s = '\tFile "{0}", line {1}, in {2}'.format(filename, lineno, name)
line = linecache.getline(filename, lineno, frame.f_globals).lstrip()
return s + '\n\t\t' + line |
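# A minimal usage sketch: format the caller's frame, obtained through the
# standard inspect module (frame2string's own linecache import is assumed
# to be in scope).
import inspect

def where_am_i():
    frame = inspect.currentframe().f_back   # the caller's frame
    return frame2string(frame)

print(where_am_i())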
def views_preview(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/views#previewing-views"
api_path = "/api/v2/views/preview.json"
return self.call(api_path, method="POST", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/views#previewing-views | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/views#previewing-views
### Response:
def views_preview(self, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/views#previewing-views"
api_path = "/api/v2/views/preview.json"
return self.call(api_path, method="POST", data=data, **kwargs) |
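# A minimal usage sketch, assuming `client` is an authenticated instance of the
# class defining views_preview; the payload shape is an assumption based on the
# Zendesk "previewing views" docs linked above.
payload = {
    'view': {
        'all': [{'field': 'status', 'operator': 'is', 'value': 'open'}],
        'output': {'columns': ['subject']},
    }
}
preview = client.views_preview(payload)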
def parse(self, configManager, config):
"""
Parse configuration options out of a YAML configuration file.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object.
"""
configFile = self._getConfigFile(config)
if not configFile:
return dict()
yamlConfigs = yaml.load(configFile)
if isinstance(yamlConfigs, dict):
return yamlConfigs
raise self.subparserException("YAML config parsed did not result in a dictionary, but instead a: %s"
% type(yamlConfigs)) | Parse configuration options out of a YAML configuration file.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object. | Below is the instruction that describes the task:
### Input:
Parse configuration options out of a YAML configuration file.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object.
### Response:
def parse(self, configManager, config):
"""
Parse configuration options out of a YAML configuration file.
Inputs: configManager - Our parent ConfigManager instance which is constructing the Config object.
config - The _Config object containing configuration options populated thus far.
Outputs: A dictionary of new configuration options to add to the Config object.
"""
configFile = self._getConfigFile(config)
if not configFile:
return dict()
yamlConfigs = yaml.load(configFile)
if isinstance(yamlConfigs, dict):
return yamlConfigs
raise self.subparserException("YAML config parsed did not result in a dictionary, but instead a: %s"
% type(yamlConfigs)) |
def list_user_participants(self, id, registration_status=None):
"""
List user participants.
List users that are (or may be) participating in this appointment group.
Refer to the Users API for the response fields. Returns no results for
appointment groups with the "Group" participant_type.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - registration_status
"""Limits results to the a given participation status, defaults to "all""""
if registration_status is not None:
self._validate_enum(registration_status, ["all", "registered", "registered"])
params["registration_status"] = registration_status
self.logger.debug("GET /api/v1/appointment_groups/{id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/appointment_groups/{id}/users".format(**path), data=data, params=params, no_data=True) | List user participants.
List users that are (or may be) participating in this appointment group.
Refer to the Users API for the response fields. Returns no results for
appointment groups with the "Group" participant_type. | Below is the instruction that describes the task:
### Input:
List user participants.
List users that are (or may be) participating in this appointment group.
Refer to the Users API for the response fields. Returns no results for
appointment groups with the "Group" participant_type.
### Response:
def list_user_participants(self, id, registration_status=None):
"""
List user participants.
List users that are (or may be) participating in this appointment group.
Refer to the Users API for the response fields. Returns no results for
appointment groups with the "Group" participant_type.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - id
"""ID"""
path["id"] = id
# OPTIONAL - registration_status
"""Limits results to the a given participation status, defaults to "all""""
if registration_status is not None:
self._validate_enum(registration_status, ["all", "registered", "registered"])
params["registration_status"] = registration_status
self.logger.debug("GET /api/v1/appointment_groups/{id}/users with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/appointment_groups/{id}/users".format(**path), data=data, params=params, no_data=True) |
def compose(self, other, qargs=None, front=False):
"""Return the composition channel self∘other.
Args:
other (QuantumChannel): a quantum channel.
qargs (list): a list of subsystem positions to compose other on.
front (bool): If False compose in standard order other(self(input))
otherwise compose in reverse order self(other(input))
[default: False]
Returns:
Chi: The composition channel as a Chi object.
Raises:
QiskitError: if other is not a QuantumChannel subclass, or
has incompatible dimensions.
"""
if qargs is not None:
return Chi(
SuperOp(self).compose(other, qargs=qargs, front=front))
# Convert other to Choi since we convert via Choi
if not isinstance(other, Choi):
other = Choi(other)
# Check dimensions match up
if front and self._input_dim != other._output_dim:
raise QiskitError(
'input_dim of self must match output_dim of other')
if not front and self._output_dim != other._input_dim:
raise QiskitError(
'input_dim of other must match output_dim of self')
# Since we cannot directly add two channels in the Chi
# representation we convert to the Choi representation
return Chi(Choi(self).compose(other, front=front)) | Return the composition channel self∘other.
Args:
other (QuantumChannel): a quantum channel.
qargs (list): a list of subsystem positions to compose other on.
front (bool): If False compose in standard order other(self(input))
otherwise compose in reverse order self(other(input))
[default: False]
Returns:
Chi: The composition channel as a Chi object.
Raises:
QiskitError: if other is not a QuantumChannel subclass, or
has incompatible dimensions. | Below is the instruction that describes the task:
### Input:
Return the composition channel self∘other.
Args:
other (QuantumChannel): a quantum channel.
qargs (list): a list of subsystem positions to compose other on.
front (bool): If False compose in standard order other(self(input))
otherwise compose in reverse order self(other(input))
[default: False]
Returns:
Chi: The composition channel as a Chi object.
Raises:
QiskitError: if other is not a QuantumChannel subclass, or
has incompatible dimensions.
### Response:
def compose(self, other, qargs=None, front=False):
"""Return the composition channel self∘other.
Args:
other (QuantumChannel): a quantum channel.
qargs (list): a list of subsystem positions to compose other on.
front (bool): If False compose in standard order other(self(input))
otherwise compose in reverse order self(other(input))
[default: False]
Returns:
Chi: The composition channel as a Chi object.
Raises:
QiskitError: if other is not a QuantumChannel subclass, or
has incompatible dimensions.
"""
if qargs is not None:
return Chi(
SuperOp(self).compose(other, qargs=qargs, front=front))
# Convert other to Choi since we convert via Choi
if not isinstance(other, Choi):
other = Choi(other)
# Check dimensions match up
if front and self._input_dim != other._output_dim:
raise QiskitError(
'input_dim of self must match output_dim of other')
if not front and self._output_dim != other._input_dim:
raise QiskitError(
'input_dim of other must match output_dim of self')
# Since we cannot directly add two channels in the Chi
# representation we convert to the Choi representation
return Chi(Choi(self).compose(other, front=front)) |
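# A minimal usage sketch, assuming the qiskit.quantum_info interface: compose
# two single-qubit unitary channels built from Pauli operators.
from qiskit.quantum_info import Chi, Operator

x_chan = Chi(Operator.from_label('X'))
z_chan = Chi(Operator.from_label('Z'))
zx = x_chan.compose(z_chan)               # X applied first, then Z
xz = x_chan.compose(z_chan, front=True)   # Z applied first, then X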
def list_types(pdb_list, sleep_time=.1):
'''Given a list of PDB IDs, look up their associated structure type
Parameters
----------
pdb_list : list of str
List of PDB IDs
sleep_time : float
Time (in seconds) to wait between requests. If this number is too small
the API will stop working, but it appears to vary among different systems
Returns
-------
infotypes : list of str
A list of the structure types associated with each PDB
in the list. For many entries in the RCSB PDB, this defaults
to 'protein'
Examples
--------
>>> crispr_query = make_query('crispr')
>>> crispr_results = do_search(crispr_query)
>>> print(list_types(crispr_results[:5]))
['protein', 'protein', 'protein', 'protein', 'protein']
'''
if len(pdb_list)*sleep_time > 30:
warnings.warn("Because of API limitations, this function\
will take at least " + str(len(pdb_list)*sleep_time) + " seconds to return results.\
If you need greater speed, try modifying the optional argument sleep_time=.1, (although \
this may cause the search to time out)" )
infotypes = []
for pdb_id in pdb_list:
all_info = get_all_info(pdb_id)
type_results = walk_nested_dict(all_info, '@type', maxdepth=25,outputs=[])
if type_results:
infotypes.append(type_results[-1])
else:
infotypes.append('Unknown')
time.sleep(sleep_time)
return infotypes | Given a list of PDB IDs, look up their associated structure type
Parameters
----------
pdb_list : list of str
List of PDB IDs
sleep_time : float
Time (in seconds) to wait between requests. If this number is too small
the API will stop working, but it appears to vary among different systems
Returns
-------
infotypes : list of str
A list of the structure types associated with each PDB
in the list. For many entries in the RCSB PDB, this defaults
to 'protein'
Examples
--------
>>> crispr_query = make_query('crispr')
>>> crispr_results = do_search(crispr_query)
>>> print(list_types(crispr_results[:5]))
['protein', 'protein', 'protein', 'protein', 'protein'] | Below is the instruction that describes the task:
### Input:
Given a list of PDB IDs, look up their associated structure type
Parameters
----------
pdb_list : list of str
List of PDB IDs
sleep_time : float
Time (in seconds) to wait between requests. If this number is too small
the API will stop working, but it appears to vary among different systems
Returns
-------
infotypes : list of str
A list of the structure types associated with each PDB
in the list. For many entries in the RCSB PDB, this defaults
to 'protein'
Examples
--------
>>> crispr_query = make_query('crispr')
>>> crispr_results = do_search(crispr_query)
>>> print(list_types(crispr_results[:5]))
['protein', 'protein', 'protein', 'protein', 'protein']
### Response:
def list_types(pdb_list, sleep_time=.1):
'''Given a list of PDB IDs, look up their associated structure type
Parameters
----------
pdb_list : list of str
List of PDB IDs
sleep_time : float
Time (in seconds) to wait between requests. If this number is too small
the API will stop working, but it appears to vary among different systems
Returns
-------
infotypes : list of str
A list of the structure types associated with each PDB
in the list. For many entries in the RCSB PDB, this defaults
to 'protein'
Examples
--------
>>> crispr_query = make_query('crispr')
>>> crispr_results = do_search(crispr_query)
>>> print(list_types(crispr_results[:5]))
['protein', 'protein', 'protein', 'protein', 'protein']
'''
if len(pdb_list)*sleep_time > 30:
warnings.warn("Because of API limitations, this function\
will take at least " + str(len(pdb_list)*sleep_time) + " seconds to return results.\
If you need greater speed, try modifying the optional argument sleep_time=.1, (although \
this may cause the search to time out)" )
infotypes = []
for pdb_id in pdb_list:
all_info = get_all_info(pdb_id)
type_results = walk_nested_dict(all_info, '@type', maxdepth=25,outputs=[])
if type_results:
infotypes.append(type_results[-1])
else:
infotypes.append('Unknown')
time.sleep(sleep_time)
return infotypes |
def load_plugins(builtin=True, others=True):
"""Load plugins, either builtin, others, or both.
"""
for entry_point in pkg_resources.iter_entry_points('yolk.plugins'):
#LOG.debug("load plugin %s" % entry_point)
try:
plugin = entry_point.load()
except KeyboardInterrupt:
raise
except Exception as err_msg:
# never want a plugin load to exit yolk
# but we can't log here because the logger is not yet
# configured
warn("Unable to load plugin %s: %s" % \
(entry_point, err_msg), RuntimeWarning)
continue
if plugin.__module__.startswith('yolk.plugins'):
if builtin:
yield plugin
elif others:
yield plugin | Load plugins, either builtin, others, or both. | Below is the the instruction that describes the task:
### Input:
Load plugins, either builtin, others, or both.
### Response:
def load_plugins(builtin=True, others=True):
"""Load plugins, either builtin, others, or both.
"""
for entry_point in pkg_resources.iter_entry_points('yolk.plugins'):
#LOG.debug("load plugin %s" % entry_point)
try:
plugin = entry_point.load()
except KeyboardInterrupt:
raise
except Exception as err_msg:
# never want a plugin load to exit yolk
# but we can't log here because the logger is not yet
# configured
warn("Unable to load plugin %s: %s" % \
(entry_point, err_msg), RuntimeWarning)
continue
if plugin.__module__.startswith('yolk.plugins'):
if builtin:
yield plugin
elif others:
yield plugin |
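# A minimal usage sketch: iterate only yolk's builtin plugins and report where
# each one was loaded from.
for plugin in load_plugins(builtin=True, others=False):
    print('loaded plugin from %s' % plugin.__module__)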
def save_plot(self, filename, img_format="eps", ylim=None,
zero_to_efermi=True, smooth=False):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(ylim=ylim, zero_to_efermi=zero_to_efermi,
smooth=smooth)
plt.savefig(filename, format=img_format)
plt.close() | Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits. | Below is the instruction that describes the task:
### Input:
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
### Response:
def save_plot(self, filename, img_format="eps", ylim=None,
zero_to_efermi=True, smooth=False):
"""
Save matplotlib plot to a file.
Args:
filename: Filename to write to.
img_format: Image format to use. Defaults to EPS.
ylim: Specifies the y-axis limits.
"""
plt = self.get_plot(ylim=ylim, zero_to_efermi=zero_to_efermi,
smooth=smooth)
plt.savefig(filename, format=img_format)
plt.close() |
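# A minimal usage sketch, assuming this save_plot belongs to a pymatgen
# BSPlotter-style class; 'vasprun.xml' is a placeholder path.
from pymatgen.io.vasp import Vasprun
from pymatgen.electronic_structure.plotter import BSPlotter

bs = Vasprun('vasprun.xml').get_band_structure(line_mode=True)
BSPlotter(bs).save_plot('bands.eps', img_format='eps', ylim=(-4, 4))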
def execute(self):
""" Executes all main windows by starting the Qt main application
"""
logger.info("Starting Argos event loop...")
exitCode = self.qApplication.exec_()
logger.info("Argos event loop finished with exit code: {}".format(exitCode))
return exitCode | Executes all main windows by starting the Qt main application | Below is the instruction that describes the task:
### Input:
Executes all main windows by starting the Qt main application
### Response:
def execute(self):
""" Executes all main windows by starting the Qt main application
"""
logger.info("Starting Argos event loop...")
exitCode = self.qApplication.exec_()
logger.info("Argos event loop finished with exit code: {}".format(exitCode))
return exitCode |
def proxy_set(self, value):
"""
A helper to easily call the proxy_setter of the field
"""
setter = getattr(self, self.proxy_setter)
if isinstance(value, (list, tuple, set)):
result = setter(*value)
elif isinstance(value, dict):
result = setter(**value)
else:
result = setter(value)
return result | A helper to easily call the proxy_setter of the field | Below is the instruction that describes the task:
### Input:
A helper to easily call the proxy_setter of the field
### Response:
def proxy_set(self, value):
"""
A helper to easily call the proxy_setter of the field
"""
setter = getattr(self, self.proxy_setter)
if isinstance(value, (list, tuple, set)):
result = setter(*value)
elif isinstance(value, dict):
result = setter(**value)
else:
result = setter(value)
return result |
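# A minimal illustration of the dispatch, assuming proxy_set above is in scope
# as a plain function; _DemoField is a hypothetical stand-in for a real field.
class _DemoField(object):
    proxy_setter = 'set'
    proxy_set = proxy_set        # reuse the function defined above as a method
    def set(self, *args, **kwargs):
        return (args, kwargs)

demo = _DemoField()
print(demo.proxy_set([1, 2, 3]))      # list/tuple/set -> setter(*value)
print(demo.proxy_set({'score': 2}))   # dict           -> setter(**value)
print(demo.proxy_set('x'))            # anything else  -> setter(value)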
def do_handle_log(self, workunit, level, *msg_elements):
"""Implementation of Reporter callback."""
entry_info = {
'level': self._log_level_str[level],
'messages': self._render_messages(*msg_elements),
}
root_id = str(workunit.root().id)
current_stack = self._root_id_to_workunit_stack[root_id]
if current_stack:
current_stack[-1]['log_entries'].append(entry_info)
else:
self.results[root_id]['log_entries'].append(entry_info) | Implementation of Reporter callback. | Below is the instruction that describes the task:
### Input:
Implementation of Reporter callback.
### Response:
def do_handle_log(self, workunit, level, *msg_elements):
"""Implementation of Reporter callback."""
entry_info = {
'level': self._log_level_str[level],
'messages': self._render_messages(*msg_elements),
}
root_id = str(workunit.root().id)
current_stack = self._root_id_to_workunit_stack[root_id]
if current_stack:
current_stack[-1]['log_entries'].append(entry_info)
else:
self.results[root_id]['log_entries'].append(entry_info) |
def _list_clouds(self):
"""
Request a list of all added clouds.
Populates self._clouds dict with mist.client.model.Cloud instances
"""
req = self.request(self.uri + '/clouds')
clouds = req.get().json()
if clouds:
for cloud in clouds:
self._clouds[cloud['id']] = Cloud(cloud, self)
else:
self._clouds = {} | Request a list of all added clouds.
Populates self._clouds dict with mist.client.model.Cloud instances | Below is the instruction that describes the task:
### Input:
Request a list of all added clouds.
Populates self._clouds dict with mist.client.model.Cloud instances
### Response:
def _list_clouds(self):
"""
Request a list of all added clouds.
Populates self._clouds dict with mist.client.model.Cloud instances
"""
req = self.request(self.uri + '/clouds')
clouds = req.get().json()
if clouds:
for cloud in clouds:
self._clouds[cloud['id']] = Cloud(cloud, self)
else:
self._clouds = {} |
def NoExclusions(self):
"""Determine that there are no exclusion criterion in play
:return: True if there is no real boundary specification of any kind.
Simple method allowing parsers to short circuit the determination of
missingness, which can be moderately compute intensive.
"""
if len(self.start_bounds) + len(self.target_rs) + len(self.ignored_rs) == 0:
return BoundaryCheck.chrom == -1
return False | Determine that there are no exclusion criteria in play
:return: True if there is no real boundary specification of any kind.
Simple method allowing parsers to short circuit the determination of
missingness, which can be moderately compute intensive. | Below is the instruction that describes the task:
### Input:
Determine that there are no exclusion criteria in play
:return: True if there is no real boundary specification of any kind.
Simple method allowing parsers to short circuit the determination of
missingness, which can be moderately compute intensive.
### Response:
def NoExclusions(self):
"""Determine that there are no exclusion criterion in play
:return: True if there is no real boundary specification of any kind.
Simple method allowing parsers to short circuit the determination of
missingness, which can be moderately compute intensive.
"""
if len(self.start_bounds) + len(self.target_rs) + len(self.ignored_rs) == 0:
return BoundaryCheck.chrom == -1
return False |
def overlay_type_data(graph: BELGraph,
data: Mapping[str, float],
func: str,
namespace: str,
label: Optional[str] = None,
overwrite: bool = False,
impute: Optional[float] = None,
) -> None:
"""Overlay tabular data on the network for data that comes from an data set with identifiers that lack
namespaces.
For example, if you want to overlay differential gene expression data from a table, that table
probably has HGNC identifiers, but no specific annotations that they are in the HGNC namespace or
that the entities to which they refer are RNA.
:param graph: A BEL Graph
:param dict data: A dictionary of {name: data}
:param func: The function of the keys in the data dictionary
:param namespace: The namespace of the keys in the data dictionary
:param label: The annotation label to put in the node dictionary
:param overwrite: Should old annotations be overwritten?
:param impute: The value to use for missing data
"""
new_data = {
node: data.get(node[NAME], impute)
for node in filter_nodes(graph, function_namespace_inclusion_builder(func, namespace))
}
overlay_data(graph, new_data, label=label, overwrite=overwrite) | Overlay tabular data on the network for data that comes from a data set with identifiers that lack
namespaces.
For example, if you want to overlay differential gene expression data from a table, that table
probably has HGNC identifiers, but no specific annotations that they are in the HGNC namespace or
that the entities to which they refer are RNA.
:param graph: A BEL Graph
:param dict data: A dictionary of {name: data}
:param func: The function of the keys in the data dictionary
:param namespace: The namespace of the keys in the data dictionary
:param label: The annotation label to put in the node dictionary
:param overwrite: Should old annotations be overwritten?
:param impute: The value to use for missing data | Below is the the instruction that describes the task:
### Input:
Overlay tabular data on the network for data that comes from a data set with identifiers that lack
namespaces.
For example, if you want to overlay differential gene expression data from a table, that table
probably has HGNC identifiers, but no specific annotations that they are in the HGNC namespace or
that the entities to which they refer are RNA.
:param graph: A BEL Graph
:param dict data: A dictionary of {name: data}
:param func: The function of the keys in the data dictionary
:param namespace: The namespace of the keys in the data dictionary
:param label: The annotation label to put in the node dictionary
:param overwrite: Should old annotations be overwritten?
:param impute: The value to use for missing data
### Response:
def overlay_type_data(graph: BELGraph,
data: Mapping[str, float],
func: str,
namespace: str,
label: Optional[str] = None,
overwrite: bool = False,
impute: Optional[float] = None,
) -> None:
"""Overlay tabular data on the network for data that comes from an data set with identifiers that lack
namespaces.
For example, if you want to overlay differential gene expression data from a table, that table
probably has HGNC identifiers, but no specific annotations that they are in the HGNC namespace or
that the entities to which they refer are RNA.
:param graph: A BEL Graph
:param dict data: A dictionary of {name: data}
:param func: The function of the keys in the data dictionary
:param namespace: The namespace of the keys in the data dictionary
:param label: The annotation label to put in the node dictionary
:param overwrite: Should old annotations be overwritten?
:param impute: The value to use for missing data
"""
new_data = {
node: data.get(node[NAME], impute)
for node in filter_nodes(graph, function_namespace_inclusion_builder(func, namespace))
}
overlay_data(graph, new_data, label=label, overwrite=overwrite) |
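# A minimal usage sketch, assuming a populated BELGraph named `graph`; PROTEIN
# is the function constant from pybel.constants, and the fold-change values are
# made up for illustration.
from pybel.constants import PROTEIN

fold_changes = {'MAPT': 2.3, 'APP': -1.1}
overlay_type_data(graph, fold_changes, PROTEIN, 'HGNC',
                  label='fold_change', impute=0.0)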
def run_script_on_server(
script_name,
server_name,
inputs=None,
timeout_s=10,
output=sys.stdout
):
"""
Runs a RightScript and polls for status.
Sample usage::
from rightscale import run_script_on_server
run_script_on_server(
'my cool bob lol script',
'some server',
inputs={'BOB': 'blah blah', 'LOL': 'fubar'},
)
Sample output::
status: Querying tags
status: Querying tags
status: Preparing execution
status: RightScript: 'my cool bob lol script'
status: completed: my cool bob lol script
Defaults to printing status message to stdout, but will accept any object
that implements ``write()`` passed in to :attr:`output`.
"""
api = get_api()
script = find_by_name(api.right_scripts, script_name)
server = find_by_name(api.servers, server_name)
path = server.links['current_instance'] + '/run_executable'
data = {
'right_script_href': script.href,
}
if inputs:
for k, v in inputs.items():
data['inputs[%s]' % k] = 'text:' + v
response = api.client.post(path, data=data)
status_path = response.headers['location']
for i in range(timeout_s):
status = api.client.get(status_path).json()
summary = status.get('summary', '')
output.write('status: %s\n' % summary)
if summary.startswith('completed'):
return
time.sleep(1)
output.write('Done waiting. Poll %s for status.\n' % status_path) | Runs a RightScript and polls for status.
Sample usage::
from rightscale import run_script_on_server
run_script_on_server(
'my cool bob lol script',
'some server',
inputs={'BOB': 'blah blah', 'LOL': 'fubar'},
)
Sample output::
status: Querying tags
status: Querying tags
status: Preparing execution
status: RightScript: 'my cool bob lol script'
status: completed: my cool bob lol script
Defaults to printing status message to stdout, but will accept any object
that implements ``write()`` passed in to :attr:`output`. | Below is the instruction that describes the task:
### Input:
Runs a RightScript and polls for status.
Sample usage::
from rightscale import run_script_on_server
run_script_on_server(
'my cool bob lol script',
'some server',
inputs={'BOB': 'blah blah', 'LOL': 'fubar'},
)
Sample output::
status: Querying tags
status: Querying tags
status: Preparing execution
status: RightScript: 'my cool bob lol script'
status: completed: my cool bob lol script
Defaults to printing status message to stdout, but will accept any object
that implements ``write()`` passed in to :attr:`output`.
### Response:
def run_script_on_server(
script_name,
server_name,
inputs=None,
timeout_s=10,
output=sys.stdout
):
"""
Runs a RightScript and polls for status.
Sample usage::
from rightscale import run_script_on_server
run_script_on_server(
'my cool bob lol script',
'some server',
inputs={'BOB': 'blah blah', 'LOL': 'fubar'},
)
Sample output::
status: Querying tags
status: Querying tags
status: Preparing execution
status: RightScript: 'my cool bob lol script'
status: completed: my cool bob lol script
Defaults to printing status message to stdout, but will accept any object
that implements ``write()`` passed in to :attr:`output`.
"""
api = get_api()
script = find_by_name(api.right_scripts, script_name)
server = find_by_name(api.servers, server_name)
path = server.links['current_instance'] + '/run_executable'
data = {
'right_script_href': script.href,
}
if inputs:
for k, v in inputs.items():
data['inputs[%s]' % k] = 'text:' + v
response = api.client.post(path, data=data)
status_path = response.headers['location']
for i in range(timeout_s):
status = api.client.get(status_path).json()
summary = status.get('summary', '')
output.write('status: %s\n' % summary)
if summary.startswith('completed'):
return
time.sleep(1)
output.write('Done waiting. Poll %s for status.\n' % status_path) |
async def set_endpoint_for_did(wallet_handle: int,
did: str,
address: str,
transport_key: str) -> None:
"""
Sets/replaces endpoint information for the given DID.
:param wallet_handle: Wallet handle (created by open_wallet).
:param did: The DID to resolve endpoint.
:param address: The DIDs endpoint address.
:param transport_key: The DIDs transport key (ver key, key id).
:return: Error code
"""
logger = logging.getLogger(__name__)
logger.debug("set_endpoint_for_did: >>> wallet_handle: %r, did: %r, address: %r, transport_key: %r",
wallet_handle,
did,
address,
transport_key)
if not hasattr(set_endpoint_for_did, "cb"):
logger.debug("set_endpoint_for_did: Creating callback")
set_endpoint_for_did.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
c_wallet_handle = c_int32(wallet_handle)
c_did = c_char_p(did.encode('utf-8'))
c_address = c_char_p(address.encode('utf-8'))
c_transport_key = c_char_p(transport_key.encode('utf-8'))
await do_call('indy_set_endpoint_for_did',
c_wallet_handle,
c_did,
c_address,
c_transport_key,
set_endpoint_for_did.cb)
logger.debug("set_endpoint_for_did: <<<") | Set/replaces endpoint information for the given DID.
:param wallet_handle: Wallet handle (created by open_wallet).
:param did: The DID to resolve endpoint.
:param address: The DIDs endpoint address.
:param transport_key: The DIDs transport key (ver key, key id).
:return: Error code | Below is the instruction that describes the task:
### Input:
Sets/replaces endpoint information for the given DID.
:param wallet_handle: Wallet handle (created by open_wallet).
:param did: The DID to resolve endpoint.
:param address: The DIDs endpoint address.
:param transport_key: The DIDs transport key (ver key, key id).
:return: Error code
### Response:
async def set_endpoint_for_did(wallet_handle: int,
did: str,
address: str,
transport_key: str) -> None:
"""
Sets/replaces endpoint information for the given DID.
:param wallet_handle: Wallet handle (created by open_wallet).
:param did: The DID to resolve endpoint.
:param address: The DIDs endpoint address.
:param transport_key: The DIDs transport key (ver key, key id).
:return: Error code
"""
logger = logging.getLogger(__name__)
logger.debug("set_endpoint_for_did: >>> wallet_handle: %r, did: %r, address: %r, transport_key: %r",
wallet_handle,
did,
address,
transport_key)
if not hasattr(set_endpoint_for_did, "cb"):
logger.debug("set_endpoint_for_did: Creating callback")
set_endpoint_for_did.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32))
c_wallet_handle = c_int32(wallet_handle)
c_did = c_char_p(did.encode('utf-8'))
c_address = c_char_p(address.encode('utf-8'))
c_transport_key = c_char_p(transport_key.encode('utf-8'))
await do_call('indy_set_endpoint_for_did',
c_wallet_handle,
c_did,
c_address,
c_transport_key,
set_endpoint_for_did.cb)
logger.debug("set_endpoint_for_did: <<<") |
def EvalGeneric(self, hashers=None):
"""Causes the entire file to be hashed by the given hash functions.
This sets up a 'finger' for fingerprinting, where the entire file
is passed through a pre-defined (or user defined) set of hash functions.
Args:
hashers: An iterable of hash classes (e.g. out of hashlib) which will
be instantiated for use. If hashers is not provided, or is
provided as 'None', the default hashers will get used. To
invoke this without hashers, provide an empty list.
Returns:
Always True, as all files are 'generic' files.
"""
if hashers is None:
hashers = Fingerprinter.GENERIC_HASH_CLASSES
hashfuncs = [x() for x in hashers]
finger = Finger(hashfuncs, [Range(0, self.filelength)], {'name': 'generic'})
self.fingers.append(finger)
return True | Causes the entire file to be hashed by the given hash functions.
This sets up a 'finger' for fingerprinting, where the entire file
is passed through a pre-defined (or user defined) set of hash functions.
Args:
hashers: An iterable of hash classes (e.g. out of hashlib) which will
be instantiated for use. If hashers is not provided, or is
provided as 'None', the default hashers will get used. To
invoke this without hashers, provide an empty list.
Returns:
Always True, as all files are 'generic' files. | Below is the instruction that describes the task:
### Input:
Causes the entire file to be hashed by the given hash functions.
This sets up a 'finger' for fingerprinting, where the entire file
is passed through a pre-defined (or user defined) set of hash functions.
Args:
hashers: An iterable of hash classes (e.g. out of hashlib) which will
be instantiated for use. If hashers is not provided, or is
provided as 'None', the default hashers will get used. To
invoke this without hashers, provide an empty list.
Returns:
Always True, as all files are 'generic' files.
### Response:
def EvalGeneric(self, hashers=None):
"""Causes the entire file to be hashed by the given hash functions.
This sets up a 'finger' for fingerprinting, where the entire file
is passed through a pre-defined (or user defined) set of hash functions.
Args:
hashers: An iterable of hash classes (e.g. out of hashlib) which will
be instantiated for use. If hashers is not provided, or is
provided as 'None', the default hashers will get used. To
invoke this without hashers, provide an empty list.
Returns:
Always True, as all files are 'generic' files.
"""
if hashers is None:
hashers = Fingerprinter.GENERIC_HASH_CLASSES
hashfuncs = [x() for x in hashers]
finger = Finger(hashfuncs, [Range(0, self.filelength)], {'name': 'generic'})
self.fingers.append(finger)
return True |
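# A minimal usage sketch, assuming `fp` is a Fingerprinter instance built over
# an open file (its construction is outside this excerpt).
import hashlib

fp.EvalGeneric(hashers=[hashlib.sha256, hashlib.sha1])  # custom hasher set
fp.EvalGeneric(hashers=[])                              # empty list: no hashers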
def create_signature(public_key, private_key, data, scheme='ecdsa-sha2-nistp256'):
"""
<Purpose>
Return a (signature, scheme) tuple.
>>> requested_scheme = 'ecdsa-sha2-nistp256'
>>> public, private = generate_public_and_private(requested_scheme)
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> signature, scheme = create_signature(public, private, data, requested_scheme)
>>> securesystemslib.formats.ECDSASIGNATURE_SCHEMA.matches(signature)
True
>>> requested_scheme == scheme
True
<Arguments>
public:
The ECDSA public key in PEM format.
private:
The ECDSA private key in PEM format.
data:
Byte data used by create_signature() to generate the signature returned.
scheme:
The signature scheme used to generate the signature. For example:
'ecdsa-sha2-nistp256'.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if a signature cannot be created.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is not
one of the supported signature schemes.
<Side Effects>
None.
<Returns>
A signature dictionary conformant to
'securesystemslib.format.SIGNATURE_SCHEMA'. ECDSA signatures are XX bytes,
however, the hexlified signature is stored in the dictionary returned.
"""
# Do 'public_key' and 'private_key' have the correct format?
# This check will ensure that the arguments conform to
# 'securesystemslib.formats.PEMECDSA_SCHEMA'. Raise
# 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.PEMECDSA_SCHEMA.check_match(public_key)
# Is 'private_key' properly formatted?
securesystemslib.formats.PEMECDSA_SCHEMA.check_match(private_key)
# Is 'scheme' properly formatted?
securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)
# 'ecdsa-sha2-nistp256' is the only currently supported ECDSA scheme, so this
# if-clause isn't strictly needed. Nevertheless, the conditional statement
# is included to accommodate multiple schemes that can potentially be added
# in the future.
if scheme == 'ecdsa-sha2-nistp256':
try:
private_key = load_pem_private_key(private_key.encode('utf-8'),
password=None, backend=default_backend())
signature = private_key.sign(data, ec.ECDSA(hashes.SHA256()))
except TypeError as e:
raise securesystemslib.exceptions.CryptoError('Could not create'
' signature: ' + str(e))
# A defensive check for an invalid 'scheme'. The
# ECDSA_SCHEME_SCHEMA.check_match() above should have already validated it.
else: #pragma: no cover
raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
' signature scheme is specified: ' + repr(scheme))
return signature, scheme | <Purpose>
Return a (signature, scheme) tuple.
>>> requested_scheme = 'ecdsa-sha2-nistp256'
>>> public, private = generate_public_and_private(requested_scheme)
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> signature, scheme = create_signature(public, private, data, requested_scheme)
>>> securesystemslib.formats.ECDSASIGNATURE_SCHEMA.matches(signature)
True
>>> requested_scheme == scheme
True
<Arguments>
public:
The ECDSA public key in PEM format.
private:
The ECDSA private key in PEM format.
data:
Byte data used by create_signature() to generate the signature returned.
scheme:
The signature scheme used to generate the signature. For example:
'ecdsa-sha2-nistp256'.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if a signature cannot be created.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is not
one of the supported signature schemes.
<Side Effects>
None.
<Returns>
A signature dictionary conformant to
'securesystemslib.format.SIGNATURE_SCHEMA'. ECDSA signatures are XX bytes,
however, the hexlified signature is stored in the dictionary returned. | Below is the instruction that describes the task:
### Input:
<Purpose>
Return a (signature, scheme) tuple.
>>> requested_scheme = 'ecdsa-sha2-nistp256'
>>> public, private = generate_public_and_private(requested_scheme)
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> signature, scheme = create_signature(public, private, data, requested_scheme)
>>> securesystemslib.formats.ECDSASIGNATURE_SCHEMA.matches(signature)
True
>>> requested_scheme == scheme
True
<Arguments>
public:
The ECDSA public key in PEM format.
private:
The ECDSA private key in PEM format.
data:
Byte data used by create_signature() to generate the signature returned.
scheme:
The signature scheme used to generate the signature. For example:
'ecdsa-sha2-nistp256'.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if a signature cannot be created.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is not
one of the supported signature schemes.
<Side Effects>
None.
<Returns>
A signature dictionary conformant to
'securesystemslib.format.SIGNATURE_SCHEMA'. ECDSA signatures are XX bytes,
however, the hexlified signature is stored in the dictionary returned.
### Response:
def create_signature(public_key, private_key, data, scheme='ecdsa-sha2-nistp256'):
"""
<Purpose>
Return a (signature, scheme) tuple.
>>> requested_scheme = 'ecdsa-sha2-nistp256'
>>> public, private = generate_public_and_private(requested_scheme)
>>> data = b'The quick brown fox jumps over the lazy dog'
>>> signature, scheme = create_signature(public, private, data, requested_scheme)
>>> securesystemslib.formats.ECDSASIGNATURE_SCHEMA.matches(signature)
True
>>> requested_scheme == scheme
True
<Arguments>
public:
The ECDSA public key in PEM format.
private:
The ECDSA private key in PEM format.
data:
Byte data used by create_signature() to generate the signature returned.
scheme:
The signature scheme used to generate the signature. For example:
'ecdsa-sha2-nistp256'.
<Exceptions>
securesystemslib.exceptions.FormatError, if the arguments are improperly
formatted.
securesystemslib.exceptions.CryptoError, if a signature cannot be created.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'scheme' is not
one of the supported signature schemes.
<Side Effects>
None.
<Returns>
A signature dictionary conformant to
'securesystemslib.format.SIGNATURE_SCHEMA'. ECDSA signatures are XX bytes,
however, the hexlified signature is stored in the dictionary returned.
"""
# Do 'public_key' and 'private_key' have the correct format?
# This check will ensure that the arguments conform to
# 'securesystemslib.formats.PEMECDSA_SCHEMA'. Raise
# 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.PEMECDSA_SCHEMA.check_match(public_key)
# Is 'private_key' properly formatted?
securesystemslib.formats.PEMECDSA_SCHEMA.check_match(private_key)
# Is 'scheme' properly formatted?
securesystemslib.formats.ECDSA_SCHEME_SCHEMA.check_match(scheme)
# 'ecdsa-sha2-nistp256' is the only currently supported ECDSA scheme, so this
# if-clause isn't strictly needed. Nevertheless, the conditional statement
# is included to accommodate multiple schemes that can potentially be added
# in the future.
if scheme == 'ecdsa-sha2-nistp256':
try:
private_key = load_pem_private_key(private_key.encode('utf-8'),
password=None, backend=default_backend())
signature = private_key.sign(data, ec.ECDSA(hashes.SHA256()))
except TypeError as e:
raise securesystemslib.exceptions.CryptoError('Could not create'
' signature: ' + str(e))
# A defensive check for an invalid 'scheme'. The
# ECDSA_SCHEME_SCHEMA.check_match() above should have already validated it.
else: #pragma: no cover
raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
' signature scheme is specified: ' + repr(scheme))
return signature, scheme |
def _supports(self, data):
""" Simply checks if data is supported """
if isinstance(data, Quantity):
return True
elif super(Brian2Result, self)._supports(data):
return True
return False | Simply checks if data is supported | Below is the instruction that describes the task:
### Input:
Simply checks if data is supported
### Response:
def _supports(self, data):
""" Simply checks if data is supported """
if isinstance(data, Quantity):
return True
elif super(Brian2Result, self)._supports(data):
return True
return False |
def encap(self, pkt):
"""encapsulate a frame using this Secure Association"""
if pkt.name != Ether().name:
raise TypeError('cannot encapsulate packet in MACsec, must be Ethernet') # noqa: E501
hdr = copy.deepcopy(pkt)
payload = hdr.payload
del hdr.payload
tag = MACsec(sci=self.sci, an=self.an,
SC=self.send_sci,
E=self.e_bit(), C=self.c_bit(),
shortlen=MACsecSA.shortlen(pkt),
pn=(self.pn & 0xFFFFFFFF), type=pkt.type)
hdr.type = ETH_P_MACSEC
return hdr / tag / payload | encapsulate a frame using this Secure Association | Below is the instruction that describes the task:
### Input:
encapsulate a frame using this Secure Association
### Response:
def encap(self, pkt):
"""encapsulate a frame using this Secure Association"""
if pkt.name != Ether().name:
raise TypeError('cannot encapsulate packet in MACsec, must be Ethernet') # noqa: E501
hdr = copy.deepcopy(pkt)
payload = hdr.payload
del hdr.payload
tag = MACsec(sci=self.sci, an=self.an,
SC=self.send_sci,
E=self.e_bit(), C=self.c_bit(),
shortlen=MACsecSA.shortlen(pkt),
pn=(self.pn & 0xFFFFFFFF), type=pkt.type)
hdr.type = ETH_P_MACSEC
return hdr / tag / payload |
def import_namespaced_class(path):
"""Pass in a string in the format of foo.Bar, foo.bar.Baz, foo.bar.baz.Qux
and it will return a handle to the class
:param str path: The object path
:rtype: class
"""
parts = path.split('.')
return getattr(importlib.import_module('.'.join(parts[0:-1])), parts[-1]) | Pass in a string in the format of foo.Bar, foo.bar.Baz, foo.bar.baz.Qux
and it will return a handle to the class
:param str path: The object path
:rtype: class | Below is the instruction that describes the task:
### Input:
Pass in a string in the format of foo.Bar, foo.bar.Baz, foo.bar.baz.Qux
and it will return a handle to the class
:param str path: The object path
:rtype: class
### Response:
def import_namespaced_class(path):
"""Pass in a string in the format of foo.Bar, foo.bar.Baz, foo.bar.baz.Qux
and it will return a handle to the class
:param str path: The object path
:rtype: class
"""
parts = path.split('.')
return getattr(importlib.import_module('.'.join(parts[0:-1])), parts[-1]) |
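A minimal, runnable usage sketch for import_namespaced_class above, assuming only the Python standard library; the collections.OrderedDict target is purely illustrative:
import importlib
from collections import OrderedDict

def import_namespaced_class(path):
    # Split the dotted path, import the module part, then fetch the attribute.
    parts = path.split('.')
    return getattr(importlib.import_module('.'.join(parts[0:-1])), parts[-1])

# Resolve a class handle from its dotted path and confirm it is the real class.
handle = import_namespaced_class('collections.OrderedDict')
assert handle is OrderedDict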
def _len_frame(obj):
'''Length of a frame object.
'''
c = getattr(obj, 'f_code', None)
if c:
n = _len_code(c)
else:
n = 0
return n | Length of a frame object. | Below is the instruction that describes the task:
### Input:
Length of a frame object.
### Response:
def _len_frame(obj):
'''Length of a frame object.
'''
c = getattr(obj, 'f_code', None)
if c:
n = _len_code(c)
else:
n = 0
return n |
def _prepare_experiment(self):
"""Called by the environment to make some initial configurations before performing the
individual runs.
Checks if all parameters marked for presetting were preset. If not, raises a
DefaultReplacementError.
Locks all parameters.
Removal of potential results of previous runs in case the trajectory was expanded to avoid
mixing up undesired shortcuts in natural naming.
"""
if len(self._changed_default_parameters):
raise pex.PresettingError(
'The following parameters were supposed to replace a '
'default value, but it was never tried to '
'add default values with these names: %s' %
str(self._changed_default_parameters))
self.f_lock_parameters()
self.f_lock_derived_parameters() | Called by the environment to make some initial configurations before performing the
individual runs.
Checks if all parameters marked for presetting were preset. If not, raises a
DefaultReplacementError.
Locks all parameters.
Removal of potential results of previous runs in case the trajectory was expanded to avoid
mixing up undesired shortcuts in natural naming. | Below is the instruction that describes the task:
### Input:
Called by the environment to make some initial configurations before performing the
individual runs.
Checks if all parameters marked for presetting were preset. If not, raises a
DefaultReplacementError.
Locks all parameters.
Removal of potential results of previous runs in case the trajectory was expanded to avoid
mixing up undesired shortcuts in natural naming.
### Response:
def _prepare_experiment(self):
"""Called by the environment to make some initial configurations before performing the
individual runs.
Checks if all parameters marked for presetting were preset. If not, raises a
DefaultReplacementError.
Locks all parameters.
Removal of potential results of previous runs in case the trajectory was expanded to avoid
mixing up undesired shortcuts in natural naming.
"""
if len(self._changed_default_parameters):
raise pex.PresettingError(
'The following parameters were supposed to replace a '
'default value, but it was never tried to '
'add default values with these names: %s' %
str(self._changed_default_parameters))
self.f_lock_parameters()
self.f_lock_derived_parameters() |
def line_spacing_rule(self):
"""
A member of the :ref:`WdLineSpacing` enumeration indicating how the
value of :attr:`line_spacing` should be interpreted. Assigning any of
the :ref:`WdLineSpacing` members :attr:`SINGLE`, :attr:`DOUBLE`, or
:attr:`ONE_POINT_FIVE` will cause the value of :attr:`line_spacing`
to be updated to produce the corresponding line spacing.
"""
pPr = self._element.pPr
if pPr is None:
return None
return self._line_spacing_rule(
pPr.spacing_line, pPr.spacing_lineRule
) | A member of the :ref:`WdLineSpacing` enumeration indicating how the
value of :attr:`line_spacing` should be interpreted. Assigning any of
the :ref:`WdLineSpacing` members :attr:`SINGLE`, :attr:`DOUBLE`, or
:attr:`ONE_POINT_FIVE` will cause the value of :attr:`line_spacing`
to be updated to produce the corresponding line spacing. | Below is the instruction that describes the task:
### Input:
A member of the :ref:`WdLineSpacing` enumeration indicating how the
value of :attr:`line_spacing` should be interpreted. Assigning any of
the :ref:`WdLineSpacing` members :attr:`SINGLE`, :attr:`DOUBLE`, or
:attr:`ONE_POINT_FIVE` will cause the value of :attr:`line_spacing`
to be updated to produce the corresponding line spacing.
### Response:
def line_spacing_rule(self):
"""
A member of the :ref:`WdLineSpacing` enumeration indicating how the
value of :attr:`line_spacing` should be interpreted. Assigning any of
the :ref:`WdLineSpacing` members :attr:`SINGLE`, :attr:`DOUBLE`, or
:attr:`ONE_POINT_FIVE` will cause the value of :attr:`line_spacing`
to be updated to produce the corresponding line spacing.
"""
pPr = self._element.pPr
if pPr is None:
return None
return self._line_spacing_rule(
pPr.spacing_line, pPr.spacing_lineRule
) |
def color_array_by_hue_mix(value, palette):
"""
Figure out the appropriate color for a binary string value by averaging
the colors corresponding to the indices of each one that it contains. Makes
for visualizations that intuitively show patch overlap.
"""
if int(value, 2) > 0:
# Convert bits to list and reverse order to avoid issues with
# differing lengths
int_list = [int(i) for i in list(value[2:])]
int_list.reverse()
# since this is a 1D array, we need the zeroth elements
# of np.nonzero.
locs = np.nonzero(int_list)[0]
# print(locs)
# print(palette)
rgb_vals = [palette[i] for i in locs]
rgb = [0]*len(rgb_vals[0]) # We don't know if it's rgb or rgba
for val in rgb_vals:
for index in range(len(val)):
rgb[index] += val[index]
for i in range(len(rgb)):
rgb[i] /= len(locs)
return tuple(rgb)
if int(value, 2) == 0:
return (1, 1, 1) if len(palette[0]) == 3 else (1, 1, 1, 1)
return -1 | Figure out the appropriate color for a binary string value by averaging
the colors corresponding to the indices of each one that it contains. Makes
for visualizations that intuitively show patch overlap. | Below is the instruction that describes the task:
### Input:
Figure out the appropriate color for a binary string value by averaging
the colors corresponding to the indices of each one that it contains. Makes
for visualizations that intuitively show patch overlap.
### Response:
def color_array_by_hue_mix(value, palette):
"""
Figure out the appropriate color for a binary string value by averaging
the colors corresponding to the indices of each one that it contains. Makes
for visualizations that intuitively show patch overlap.
"""
if int(value, 2) > 0:
# Convert bits to list and reverse order to avoid issues with
# differing lengths
int_list = [int(i) for i in list(value[2:])]
int_list.reverse()
# since this is a 1D array, we need the zeroth elements
# of np.nonzero.
locs = np.nonzero(int_list)[0]
# print(locs)
# print(palette)
rgb_vals = [palette[i] for i in locs]
rgb = [0]*len(rgb_vals[0]) # We don't know if it's rgb or rgba
for val in rgb_vals:
for index in range(len(val)):
rgb[index] += val[index]
for i in range(len(rgb)):
rgb[i] /= len(locs)
return tuple(rgb)
if int(value, 2) == 0:
return (1, 1, 1) if len(palette[0]) == 3 else (1, 1, 1, 1)
return -1 |
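A short, hedged illustration of the hue-mixing behaviour above, assuming color_array_by_hue_mix is in scope and numpy is imported as np; the two-colour palette is invented:
palette = [(1.0, 0.0, 0.0), (0.0, 0.0, 1.0)]  # hypothetical: index 0 red, index 1 blue
# '0b11' has ones at bit indices 0 and 1, so red and blue are averaged.
print(color_array_by_hue_mix('0b11', palette))  # -> (0.5, 0.0, 0.5)
# An all-zero value maps to white for a 3-component (RGB) palette.
print(color_array_by_hue_mix('0b0', palette))   # -> (1, 1, 1)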
def paginator(self):
"""
The paginator instance associated with the view, or `None`.
"""
if not hasattr(self, '_paginator'):
if self.pagination_class is None:
self._paginator = None
else:
self._paginator = self.pagination_class()
return self._paginator | The paginator instance associated with the view, or `None`. | Below is the the instruction that describes the task:
### Input:
The paginator instance associated with the view, or `None`.
### Response:
def paginator(self):
"""
The paginator instance associated with the view, or `None`.
"""
if not hasattr(self, '_paginator'):
if self.pagination_class is None:
self._paginator = None
else:
self._paginator = self.pagination_class()
return self._paginator |
def wl_uris(self):
"""\
Returns cable IRIs to WikiLeaks (mirrors).
"""
def year_month(d):
date, time = d.split()
return date.split('-')[:2]
if not self.created:
raise ValueError('The "created" property must be provided')
year, month = year_month(self.created)
l = u'%s/%s/%s' % (year, month, self.reference_id)
html = l + u'.html'
wl_uris = []
append = wl_uris.append
for wl in _WL_CABLE_BASE_URIS:
append(wl + l)
append(wl + html)
return wl_uris | \
Returns cable IRIs to WikiLeaks (mirrors). | Below is the instruction that describes the task:
### Input:
\
Returns cable IRIs to WikiLeaks (mirrors).
### Response:
def wl_uris(self):
"""\
Returns cable IRIs to WikiLeaks (mirrors).
"""
def year_month(d):
date, time = d.split()
return date.split('-')[:2]
if not self.created:
raise ValueError('The "created" property must be provided')
year, month = year_month(self.created)
l = u'%s/%s/%s' % (year, month, self.reference_id)
html = l + u'.html'
wl_uris = []
append = wl_uris.append
for wl in _WL_CABLE_BASE_URIS:
append(wl + l)
append(wl + html)
return wl_uris |
def num_rows(self):
"""
Returns the number of rows.
Returns
-------
out : int
Number of rows in the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.summary()['num_vertices']
elif self._is_edge_frame():
return self.__graph__.summary()['num_edges'] | Returns the number of rows.
Returns
-------
out : int
Number of rows in the SFrame. | Below is the instruction that describes the task:
### Input:
Returns the number of rows.
Returns
-------
out : int
Number of rows in the SFrame.
### Response:
def num_rows(self):
"""
Returns the number of rows.
Returns
-------
out : int
Number of rows in the SFrame.
"""
if self._is_vertex_frame():
return self.__graph__.summary()['num_vertices']
elif self._is_edge_frame():
return self.__graph__.summary()['num_edges'] |
def read_as_text(self):
'''Read and return the dataset contents as text.'''
return self.workspace._rest.read_intermediate_dataset_contents_text(
self.workspace.workspace_id,
self.experiment.experiment_id,
self.node_id,
self.port_name
) | Read and return the dataset contents as text. | Below is the instruction that describes the task:
### Input:
Read and return the dataset contents as text.
### Response:
def read_as_text(self):
'''Read and return the dataset contents as text.'''
return self.workspace._rest.read_intermediate_dataset_contents_text(
self.workspace.workspace_id,
self.experiment.experiment_id,
self.node_id,
self.port_name
) |
async def send(self, msg: Message):
"""
Sends a message.
Args:
msg (spade.message.Message): the message to be sent.
"""
if not msg.sender:
msg.sender = str(self.agent.jid)
logger.debug(f"Adding agent's jid as sender to message: {msg}")
await self.agent.container.send(msg, self)
msg.sent = True
self.agent.traces.append(msg, category=str(self)) | Sends a message.
Args:
msg (spade.message.Message): the message to be sent. | Below is the instruction that describes the task:
### Input:
Sends a message.
Args:
msg (spade.message.Message): the message to be sent.
### Response:
async def send(self, msg: Message):
"""
Sends a message.
Args:
msg (spade.message.Message): the message to be sent.
"""
if not msg.sender:
msg.sender = str(self.agent.jid)
logger.debug(f"Adding agent's jid as sender to message: {msg}")
await self.agent.container.send(msg, self)
msg.sent = True
self.agent.traces.append(msg, category=str(self)) |
def create(self, stylename, **kwargs):
""" Creates a new style which inherits from the default style,
or any other style whose name is supplied to the optional template parameter.
"""
if stylename == "default":
self[stylename] = style(stylename, self._ctx, **kwargs)
return self[stylename]
k = kwargs.get("template", "default")
s = self[stylename] = self[k].copy(stylename)
for attr in kwargs:
if s.__dict__.has_key(attr):
s.__dict__[attr] = kwargs[attr]
return s | Creates a new style which inherits from the default style,
or any other style whose name is supplied to the optional template parameter. | Below is the instruction that describes the task:
### Input:
Creates a new style which inherits from the default style,
or any other style whose name is supplied to the optional template parameter.
### Response:
def create(self, stylename, **kwargs):
""" Creates a new style which inherits from the default style,
or any other style whose name is supplied to the optional template parameter.
"""
if stylename == "default":
self[stylename] = style(stylename, self._ctx, **kwargs)
return self[stylename]
k = kwargs.get("template", "default")
s = self[stylename] = self[k].copy(stylename)
for attr in kwargs:
if s.__dict__.has_key(attr):
s.__dict__[attr] = kwargs[attr]
return s |
def emit(self, record):
"""
Mostly copy-paste from :obj:`logging.StreamHandler`.
"""
try:
msg = self.format(record)
if record.levelno < self.splitLevel:
stream = self.outStream
else:
stream = self.errStream
fs = "%s\n"
try:
if (isinstance(msg, unicode) and # noqa: F405
getattr(stream, 'encoding', None)):
ufs = fs.decode(stream.encoding)
try:
stream.write(ufs % msg)
except UnicodeEncodeError:
stream.write((ufs % msg).encode(stream.encoding))
else:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode("UTF-8"))
stream.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record) | Mostly copy-paste from :obj:`logging.StreamHandler`. | Below is the instruction that describes the task:
### Input:
Mostly copy-paste from :obj:`logging.StreamHandler`.
### Response:
def emit(self, record):
"""
Mostly copy-paste from :obj:`logging.StreamHandler`.
"""
try:
msg = self.format(record)
if record.levelno < self.splitLevel:
stream = self.outStream
else:
stream = self.errStream
fs = "%s\n"
try:
if (isinstance(msg, unicode) and # noqa: F405
getattr(stream, 'encoding', None)):
ufs = fs.decode(stream.encoding)
try:
stream.write(ufs % msg)
except UnicodeEncodeError:
stream.write((ufs % msg).encode(stream.encoding))
else:
stream.write(fs % msg)
except UnicodeError:
stream.write(fs % msg.encode("UTF-8"))
stream.flush()
except (KeyboardInterrupt, SystemExit):
raise
except:
self.handleError(record) |
def alias_objective(self, objective_id, alias_id):
"""Adds an ``Id`` to an ``Objective`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Objective`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another objective, it is
reassigned to the given objective ``Id``.
arg: objective_id (osid.id.Id): the ``Id`` of an
``Objective``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``objective_id`` not found
raise: NullArgument - ``objective_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.alias_resources_template
self._alias_id(primary_id=objective_id, equivalent_id=alias_id) | Adds an ``Id`` to an ``Objective`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Objective`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another objective, it is
reassigned to the given objective ``Id``.
arg: objective_id (osid.id.Id): the ``Id`` of an
``Objective``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``objective_id`` not found
raise: NullArgument - ``objective_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Adds an ``Id`` to an ``Objective`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Objective`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another objective, it is
reassigned to the given objective ``Id``.
arg: objective_id (osid.id.Id): the ``Id`` of an
``Objective``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``objective_id`` not found
raise: NullArgument - ``objective_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def alias_objective(self, objective_id, alias_id):
"""Adds an ``Id`` to an ``Objective`` for the purpose of creating compatibility.
The primary ``Id`` of the ``Objective`` is determined by the
provider. The new ``Id`` performs as an alias to the primary
``Id``. If the alias is a pointer to another objective, it is
reassigned to the given objective ``Id``.
arg: objective_id (osid.id.Id): the ``Id`` of an
``Objective``
arg: alias_id (osid.id.Id): the alias ``Id``
raise: AlreadyExists - ``alias_id`` is already assigned
raise: NotFound - ``objective_id`` not found
raise: NullArgument - ``objective_id`` or ``alias_id`` is
``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceAdminSession.alias_resources_template
self._alias_id(primary_id=objective_id, equivalent_id=alias_id) |
def ensure_rng(random_state=None):
"""
Creates a random number generator based on an optional seed. This can be
an integer or another random state for a seeded rng, or None for an
unseeded rng.
"""
if random_state is None:
random_state = np.random.RandomState()
elif isinstance(random_state, int):
random_state = np.random.RandomState(random_state)
else:
assert isinstance(random_state, np.random.RandomState)
return random_state | Creates a random number generator based on an optional seed. This can be
an integer or another random state for a seeded rng, or None for an
unseeded rng. | Below is the instruction that describes the task:
### Input:
Creates a random number generator based on an optional seed. This can be
an integer or another random state for a seeded rng, or None for an
unseeded rng.
### Response:
def ensure_rng(random_state=None):
"""
Creates a random number generator based on an optional seed. This can be
an integer or another random state for a seeded rng, or None for an
unseeded rng.
"""
if random_state is None:
random_state = np.random.RandomState()
elif isinstance(random_state, int):
random_state = np.random.RandomState(random_state)
else:
assert isinstance(random_state, np.random.RandomState)
return random_state |
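A brief sketch of the three accepted inputs, assuming ensure_rng above is in scope and numpy is imported as np:
rng_a = ensure_rng()                           # None -> fresh unseeded RandomState
rng_b = ensure_rng(42)                         # int -> RandomState seeded with 42
rng_c = ensure_rng(np.random.RandomState(42))  # existing rng -> passed through unchanged
assert rng_b.randint(100) == rng_c.randint(100)  # identical seeds, identical first draw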
def print_table(document, *columns):
""" Print json document as table """
headers = []
for _, header in columns:
headers.append(header)
table = []
for element in document:
row = []
for item, _ in columns:
if item in element:
row.append(element[item])
else:
row.append(None)
table.append(row)
print(tabulate.tabulate(table, headers)) | Print json document as table | Below is the instruction that describes the task:
### Input:
Print json document as table
### Response:
def print_table(document, *columns):
""" Print json document as table """
headers = []
for _, header in columns:
headers.append(header)
table = []
for element in document:
row = []
for item, _ in columns:
if item in element:
row.append(element[item])
else:
row.append(None)
table.append(row)
print(tabulate.tabulate(table, headers)) |
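A hedged usage sketch for print_table above, assuming the function is in scope and the tabulate package is installed; the document contents are made up:
doc = [
    {'name': 'alpha', 'size': 3},
    {'name': 'beta'},  # missing 'size' -> rendered as an empty cell (None)
]
# Each column is an (item_key, header) pair, passed as a positional argument.
print_table(doc, ('name', 'Name'), ('size', 'Size'))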
def remove_team_repo(repo_name, team_name, profile="github"):
'''
Removes a repository from a team with team_name.
repo_name
The name of the repository to remove.
team_name
The name of the team of which to remove the repository.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.remove_team_repo 'my_repo' 'team_name'
.. versionadded:: 2016.11.0
'''
team = get_team(team_name, profile=profile)
if not team:
log.error('Team %s does not exist', team_name)
return False
try:
client = _get_client(profile)
organization = client.get_organization(
_get_config_value(profile, 'org_name')
)
team = organization.get_team(team['id'])
repo = organization.get_repo(repo_name)
except UnknownObjectException:
log.exception('Resource not found: %s', team['id'])
return False
team.remove_from_repos(repo)
return repo_name not in list_team_repos(team_name, profile=profile, ignore_cache=True) | Removes a repository from a team with team_name.
repo_name
The name of the repository to remove.
team_name
The name of the team from which to remove the repository.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.remove_team_repo 'my_repo' 'team_name'
.. versionadded:: 2016.11.0 | Below is the instruction that describes the task:
### Input:
Removes a repository from a team with team_name.
repo_name
The name of the repository to remove.
team_name
The name of the team from which to remove the repository.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.remove_team_repo 'my_repo' 'team_name'
.. versionadded:: 2016.11.0
### Response:
def remove_team_repo(repo_name, team_name, profile="github"):
'''
Removes a repository from a team with team_name.
repo_name
The name of the repository to remove.
team_name
The name of the team from which to remove the repository.
profile
The name of the profile configuration to use. Defaults to ``github``.
CLI Example:
.. code-block:: bash
salt myminion github.remove_team_repo 'my_repo' 'team_name'
.. versionadded:: 2016.11.0
'''
team = get_team(team_name, profile=profile)
if not team:
log.error('Team %s does not exist', team_name)
return False
try:
client = _get_client(profile)
organization = client.get_organization(
_get_config_value(profile, 'org_name')
)
team = organization.get_team(team['id'])
repo = organization.get_repo(repo_name)
except UnknownObjectException:
log.exception('Resource not found: %s', team['id'])
return False
team.remove_from_repos(repo)
return repo_name not in list_team_repos(team_name, profile=profile, ignore_cache=True) |
def _trim_tree(state):
"""Trim empty leaf nodes from the tree.
- To simplify the tree conversion, empty nodes are added before it is known if they
will contain items that connect back to the authenticated subject. If there are
no connections, the nodes remain empty, which causes them to be removed here.
- Removing a leaf node may cause the parent to become a new empty leaf node, so the
function is repeated until there are no more empty leaf nodes.
"""
for n in list(state.tree.leaf_node_gen):
if n.type_str == TYPE_NODE_TAG:
n.parent.child_list.remove(n)
return _trim_tree(state) | Trim empty leaf nodes from the tree.
- To simplify the tree conversion, empty nodes are added before it is known if they
will contain items that connect back to the authenticated subject. If there are
no connections, the nodes remain empty, which causes them to be removed here.
- Removing a leaf node may cause the parent to become a new empty leaf node, so the
function is repeated until there are no more empty leaf nodes. | Below is the instruction that describes the task:
### Input:
Trim empty leaf nodes from the tree.
- To simplify the tree conversion, empty nodes are added before it is known if they
will contain items that connect back to the authenticated subject. If there are
no connections, the nodes remain empty, which causes them to be removed here.
- Removing a leaf node may cause the parent to become a new empty leaf node, so the
function is repeated until there are no more empty leaf nodes.
### Response:
def _trim_tree(state):
"""Trim empty leaf nodes from the tree.
- To simplify the tree conversion, empty nodes are added before it is known if they
will contain items that connect back to the authenticated subject. If there are
no connections, the nodes remain empty, which causes them to be removed here.
- Removing a leaf node may cause the parent to become a new empty leaf node, so the
function is repeated until there are no more empty leaf nodes.
"""
for n in list(state.tree.leaf_node_gen):
if n.type_str == TYPE_NODE_TAG:
n.parent.child_list.remove(n)
return _trim_tree(state) |
def start(self):
"""
Initiate the session by starting to execute the command with the peer.
:return: The :attr:`~.xso.Command.first_payload` of the response
This sends an empty command IQ request with the
:attr:`~.ActionType.EXECUTE` action.
The :attr:`status`, :attr:`response` and related attributes get updated
with the newly received values.
"""
if self._response is not None:
raise RuntimeError("command execution already started")
request = aioxmpp.IQ(
type_=aioxmpp.IQType.SET,
to=self._peer_jid,
payload=adhoc_xso.Command(self._command_name),
)
self._response = yield from self._stream.send_iq_and_wait_for_reply(
request,
)
return self._response.first_payload | Initiate the session by starting to execute the command with the peer.
:return: The :attr:`~.xso.Command.first_payload` of the response
This sends an empty command IQ request with the
:attr:`~.ActionType.EXECUTE` action.
The :attr:`status`, :attr:`response` and related attributes get updated
with the newly received values. | Below is the instruction that describes the task:
### Input:
Initiate the session by starting to execute the command with the peer.
:return: The :attr:`~.xso.Command.first_payload` of the response
This sends an empty command IQ request with the
:attr:`~.ActionType.EXECUTE` action.
The :attr:`status`, :attr:`response` and related attributes get updated
with the newly received values.
### Response:
def start(self):
"""
Initiate the session by starting to execute the command with the peer.
:return: The :attr:`~.xso.Command.first_payload` of the response
This sends an empty command IQ request with the
:attr:`~.ActionType.EXECUTE` action.
The :attr:`status`, :attr:`response` and related attributes get updated
with the newly received values.
"""
if self._response is not None:
raise RuntimeError("command execution already started")
request = aioxmpp.IQ(
type_=aioxmpp.IQType.SET,
to=self._peer_jid,
payload=adhoc_xso.Command(self._command_name),
)
self._response = yield from self._stream.send_iq_and_wait_for_reply(
request,
)
return self._response.first_payload |
def absolute_git_root_dir(fpath=""):
"""Absolute path to the git root directory containing a given file or
directory.
"""
if len(fpath) == 0:
dirname_str = os.getcwd()
else:
dirname_str = os.path.dirname(fpath)
dirname_str = os.path.abspath(dirname_str)
dirnames = dirname_str.split(os.sep)
n = len(dirnames)
for i in xrange(n):
# is there a .git directory at this level?
# FIXME hack
basedir = "/" + os.path.join(*dirnames[0:n - i])
gitdir = os.path.join(basedir, ".git")
if os.path.exists(gitdir):
return basedir | Absolute path to the git root directory containing a given file or
directory. | Below is the instruction that describes the task:
### Input:
Absolute path to the git root directory containing a given file or
directory.
### Response:
def absolute_git_root_dir(fpath=""):
"""Absolute path to the git root directory containing a given file or
directory.
"""
if len(fpath) == 0:
dirname_str = os.getcwd()
else:
dirname_str = os.path.dirname(fpath)
dirname_str = os.path.abspath(dirname_str)
dirnames = dirname_str.split(os.sep)
n = len(dirnames)
for i in xrange(n):
# is there a .git directory at this level?
# FIXME hack
basedir = "/" + os.path.join(*dirnames[0:n - i])
gitdir = os.path.join(basedir, ".git")
if os.path.exists(gitdir):
return basedir |
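A hedged sketch for absolute_git_root_dir above; the record is Python 2 code (xrange), so when trying it on Python 3 in the same module the name must be shimmed first:
try:
    xrange
except NameError:
    xrange = range  # Python 3 shim for the Python 2 record above

root = absolute_git_root_dir(__file__)  # walks upward until a .git directory appears
print(root)  # e.g. the repository root path, or None when no enclosing repo exists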
def adjust(self, date, mode):
"""
Adjust the date to the closest work date.
Args:
date (date, datetime or str): Date to be adjusted.
mode (integer): FOLLOWING, PREVIOUS or MODIFIEDFOLLOWING.
Note:
If date is already a business date then it is returned unchanged.
How to use the adjustment constants:
**FOLLOWING**:
Adjust to the next business date.
**PREVIOUS**:
Adjust to the previous business date.
**MODIFIEDFOLLOWING**:
Adjust to the next business date unless it falls on a
different month, in which case adjust to the previous business
date.
Returns:
datetime: Adjusted date.
"""
date = parsefun(date)
if self.isbusday(date):
return date
if mode == FOLLOWING:
dateadj = self.addbusdays(date, 1)
elif mode == PREVIOUS:
dateadj = self.addbusdays(date, -1)
elif mode == MODIFIEDFOLLOWING:
dateadj = self.addbusdays(date, 1)
if dateadj.month != date.month:
dateadj = self.addbusdays(dateadj, -1)
else:
raise ValueError('Invalid mode %s' % mode)
return dateadj | Adjust the date to the closest work date.
Args:
date (date, datetime or str): Date to be adjusted.
mode (integer): FOLLOWING, PREVIOUS or MODIFIEDFOLLOWING.
Note:
If date is already a business date then it is returned unchanged.
How to use the adjustment constants:
**FOLLOWING**:
Adjust to the next business date.
**PREVIOUS**:
Adjust to the previous business date.
**MODIFIEDFOLLOWING**:
Adjust to the next business date unless it falls on a
different month, in which case adjust to the previous business
date.
Returns:
datetime: Adjusted date. | Below is the instruction that describes the task:
### Input:
Adjust the date to the closest work date.
Args:
date (date, datetime or str): Date to be adjusted.
mode (integer): FOLLOWING, PREVIOUS or MODIFIEDFOLLOWING.
Note:
If date is already a business date then it is returned unchanged.
How to use the adjustment constants:
**FOLLOWING**:
Adjust to the next business date.
**PREVIOUS**:
Adjust to the previous business date.
**MODIFIEDFOLLOWING**:
Adjust to the next business date unless it falls on a
different month, in which case adjust to the previous business
date.
Returns:
datetime: Adjusted date.
### Response:
def adjust(self, date, mode):
"""
Adjust the date to the closest work date.
Args:
date (date, datetime or str): Date to be adjusted.
mode (integer): FOLLOWING, PREVIOUS or MODIFIEDFOLLOWING.
Note:
If date is already a business date then it is returned unchanged.
How to use the adjustment constants:
**FOLLOWING**:
Adjust to the next business date.
**PREVIOUS**:
Adjust to the previous business date.
**MODIFIEDFOLLOWING**:
Adjust to the next business date unless it falls on a
different month, in which case adjust to the previous business
date.
Returns:
datetime: Adjusted date.
"""
date = parsefun(date)
if self.isbusday(date):
return date
if mode == FOLLOWING:
dateadj = self.addbusdays(date, 1)
elif mode == PREVIOUS:
dateadj = self.addbusdays(date, -1)
elif mode == MODIFIEDFOLLOWING:
dateadj = self.addbusdays(date, 1)
if dateadj.month != date.month:
dateadj = self.addbusdays(dateadj, -1)
else:
raise ValueError('Invalid mode %s' % mode)
return dateadj |
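A hedged usage sketch for adjust above; cal is a hypothetical calendar instance exposing this method, and FOLLOWING, PREVIOUS and MODIFIEDFOLLOWING are the module-level constants the docstring names:
cal.adjust('2017-01-01', FOLLOWING)          # roll forward to the next business day
cal.adjust('2017-01-01', PREVIOUS)           # roll back to the previous business day
cal.adjust('2017-12-31', MODIFIEDFOLLOWING)  # forward, unless the month changes,
                                             # in which case roll backward instead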
def getWorkitem(self, workitem_id, returned_properties=None):
"""Get :class:`rtcclient.workitem.Workitem` object by its id/number
:param workitem_id: the workitem id/number
(integer or equivalent string)
:param returned_properties: the returned properties that you want.
Refer to :class:`rtcclient.client.RTCClient` for more explanations
:return: the :class:`rtcclient.workitem.Workitem` object
:rtype: rtcclient.workitem.Workitem
"""
try:
if isinstance(workitem_id, bool):
raise ValueError("Invalid Workitem id")
if isinstance(workitem_id, six.string_types):
workitem_id = int(workitem_id)
if not isinstance(workitem_id, int):
raise ValueError("Invalid Workitem id")
workitem_url = "/".join([self.url,
"oslc/workitems/%s" % workitem_id])
rp = self._validate_returned_properties(returned_properties)
if rp is not None:
req_url = "".join([workitem_url,
"?oslc_cm.properties=",
urlquote(rp)])
else:
req_url = workitem_url
resp = self.get(req_url,
verify=False,
proxies=self.proxies,
headers=self.headers)
raw_data = xmltodict.parse(resp.content)
workitem_raw = raw_data["oslc_cm:ChangeRequest"]
return Workitem(workitem_url,
self,
workitem_id=workitem_id,
raw_data=workitem_raw)
except ValueError:
excp_msg = "Please input a valid workitem id"
self.log.error(excp_msg)
raise exception.BadValue(excp_msg)
except Exception as excp:
self.log.error(excp)
raise exception.NotFound("Not found <Workitem %s>" % workitem_id) | Get :class:`rtcclient.workitem.Workitem` object by its id/number
:param workitem_id: the workitem id/number
(integer or equivalent string)
:param returned_properties: the returned properties that you want.
Refer to :class:`rtcclient.client.RTCClient` for more explanations
:return: the :class:`rtcclient.workitem.Workitem` object
:rtype: rtcclient.workitem.Workitem | Below is the instruction that describes the task:
### Input:
Get :class:`rtcclient.workitem.Workitem` object by its id/number
:param workitem_id: the workitem id/number
(integer or equivalent string)
:param returned_properties: the returned properties that you want.
Refer to :class:`rtcclient.client.RTCClient` for more explanations
:return: the :class:`rtcclient.workitem.Workitem` object
:rtype: rtcclient.workitem.Workitem
### Response:
def getWorkitem(self, workitem_id, returned_properties=None):
"""Get :class:`rtcclient.workitem.Workitem` object by its id/number
:param workitem_id: the workitem id/number
(integer or equivalent string)
:param returned_properties: the returned properties that you want.
Refer to :class:`rtcclient.client.RTCClient` for more explanations
:return: the :class:`rtcclient.workitem.Workitem` object
:rtype: rtcclient.workitem.Workitem
"""
try:
if isinstance(workitem_id, bool):
raise ValueError("Invalid Workitem id")
if isinstance(workitem_id, six.string_types):
workitem_id = int(workitem_id)
if not isinstance(workitem_id, int):
raise ValueError("Invalid Workitem id")
workitem_url = "/".join([self.url,
"oslc/workitems/%s" % workitem_id])
rp = self._validate_returned_properties(returned_properties)
if rp is not None:
req_url = "".join([workitem_url,
"?oslc_cm.properties=",
urlquote(rp)])
else:
req_url = workitem_url
resp = self.get(req_url,
verify=False,
proxies=self.proxies,
headers=self.headers)
raw_data = xmltodict.parse(resp.content)
workitem_raw = raw_data["oslc_cm:ChangeRequest"]
return Workitem(workitem_url,
self,
workitem_id=workitem_id,
raw_data=workitem_raw)
except ValueError:
excp_msg = "Please input a valid workitem id"
self.log.error(excp_msg)
raise exception.BadValue(excp_msg)
except Exception as excp:
self.log.error(excp)
raise exception.NotFound("Not found <Workitem %s>" % workitem_id) |
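A hedged usage sketch for getWorkitem above; client is a hypothetical, already-authenticated RTCClient, and the id and property list are made up:
wk = client.getWorkitem(123456)    # integer id
wk = client.getWorkitem('123456',  # equivalent string id also accepted
                        returned_properties='dc:title,dc:identifier')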
def summarize(self):
"""Some summary information."""
# force shrinkage to be calculated
self.covariance_
return """time-structure based Independent Components Analysis (tICA)
-----------------------------------------------------------
n_components : {n_components}
shrinkage : {shrinkage}
lag_time : {lag_time}
kinetic_mapping : {kinetic_mapping}
Top 5 timescales :
{timescales}
Top 5 eigenvalues :
{eigenvalues}
""".format(n_components=self.n_components, lag_time=self.lag_time,
shrinkage=self.shrinkage_, kinetic_mapping=self.kinetic_mapping,
timescales=self.timescales_[:5], eigenvalues=self.eigenvalues_[:5]) | Some summary information. | Below is the instruction that describes the task:
### Input:
Some summary information.
### Response:
def summarize(self):
"""Some summary information."""
# force shrinkage to be calculated
self.covariance_
return """time-structure based Independent Components Analysis (tICA)
-----------------------------------------------------------
n_components : {n_components}
shrinkage : {shrinkage}
lag_time : {lag_time}
kinetic_mapping : {kinetic_mapping}
Top 5 timescales :
{timescales}
Top 5 eigenvalues :
{eigenvalues}
""".format(n_components=self.n_components, lag_time=self.lag_time,
shrinkage=self.shrinkage_, kinetic_mapping=self.kinetic_mapping,
timescales=self.timescales_[:5], eigenvalues=self.eigenvalues_[:5]) |
def init(self, app):
"""
Initialise from flask app
This gets configuration values from a flask application.
:param app: flask.Flask
:return: boiler.user.user_servce.UserService
"""
cfg = app.config
self.welcome_message = cfg.get('USER_SEND_WELCOME_MESSAGE')
self.require_confirmation = cfg.get(
'USER_ACCOUNTS_REQUIRE_CONFIRMATION'
)
subjects = cfg.get('USER_EMAIL_SUBJECTS')
self.email_subjects = subjects if subjects else dict()
self.jwt_secret = cfg.get('USER_JWT_SECRET')
self.jwt_algo = cfg.get('USER_JWT_ALGO')
self.jwt_lifetime = cfg.get('USER_JWT_LIFETIME_SECONDS')
self.jwt_implementation = cfg.get('USER_JWT_IMPLEMENTATION')
self.jwt_loader_implementation = cfg.get(
'USER_JWT_LOADER_IMPLEMENTATION'
) | Initialise from flask app
This gets configuration values from a flask application.
:param app: flask.Flask
:return: boiler.user.user_servce.UserService | Below is the instruction that describes the task:
### Input:
Initialise from flask app
This gets configuration values from a flask application.
:param app: flask.Flask
:return: boiler.user.user_servce.UserService
### Response:
def init(self, app):
"""
Initialise from flask app
This gets configuration values from a flask application.
:param app: flask.Flask
:return: boiler.user.user_servce.UserService
"""
cfg = app.config
self.welcome_message = cfg.get('USER_SEND_WELCOME_MESSAGE')
self.require_confirmation = cfg.get(
'USER_ACCOUNTS_REQUIRE_CONFIRMATION'
)
subjects = cfg.get('USER_EMAIL_SUBJECTS')
self.email_subjects = subjects if subjects else dict()
self.jwt_secret = cfg.get('USER_JWT_SECRET')
self.jwt_algo = cfg.get('USER_JWT_ALGO')
self.jwt_lifetime = cfg.get('USER_JWT_LIFETIME_SECONDS')
self.jwt_implementation = cfg.get('USER_JWT_IMPLEMENTATION')
self.jwt_loader_implementation = cfg.get(
'USER_JWT_LOADER_IMPLEMENTATION'
) |
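A hedged illustration of the Flask configuration keys the method above reads; every value is a placeholder, and service is a hypothetical UserService instance:
from flask import Flask

app = Flask(__name__)
app.config.update(
    USER_SEND_WELCOME_MESSAGE=True,
    USER_ACCOUNTS_REQUIRE_CONFIRMATION=True,
    USER_EMAIL_SUBJECTS={'welcome': 'Welcome aboard'},
    USER_JWT_SECRET='change-me',
    USER_JWT_ALGO='HS256',
    USER_JWT_LIFETIME_SECONDS=86400,
    USER_JWT_IMPLEMENTATION=None,
    USER_JWT_LOADER_IMPLEMENTATION=None,
)
# service.init(app)  # hypothetical UserService instance picks these values up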
def get_renderer_from_definition(config):
"""Returns a renderer object based on the configuration (as a dictionary)"""
options = config.get('options', {})
try:
renderer_type = config['type']
renderer_colors = [(float(x[0]), hex_to_color(x[1])) for x in config['colors']]
fill_value = options.get('fill_value')
if fill_value is not None:
fill_value = float(fill_value)
except KeyError:
raise ValueError("Missing required keys from renderer configuration")
renderer_kwargs = {
'colormap': renderer_colors,
'fill_value': fill_value,
}
if renderer_type == "stretched":
color_space = options.get('color_space', 'hsv').lower().strip()
if not color_space in ('rgb', 'hsv'):
raise ValueError("Invalid color space: {}".format(color_space))
renderer = StretchedRenderer(colorspace=color_space, **renderer_kwargs)
elif renderer_type == "classified":
renderer = ClassifiedRenderer(**renderer_kwargs)
elif renderer_type == "unique":
try:
labels = [six.text_type(x) for x in options.get('labels', [])]
except TypeError:
raise ValueError("Labels option must be an array")
renderer = UniqueValuesRenderer(labels=labels, **renderer_kwargs)
return renderer | Returns a renderer object based on the configuration (as a dictionary) | Below is the instruction that describes the task:
### Input:
Returns a renderer object based on the configuration (as a dictionary)
### Response:
def get_renderer_from_definition(config):
"""Returns a renderer object based on the configuration (as a dictionary)"""
options = config.get('options', {})
try:
renderer_type = config['type']
renderer_colors = [(float(x[0]), hex_to_color(x[1])) for x in config['colors']]
fill_value = options.get('fill_value')
if fill_value is not None:
fill_value = float(fill_value)
except KeyError:
raise ValueError("Missing required keys from renderer configuration")
renderer_kwargs = {
'colormap': renderer_colors,
'fill_value': fill_value,
}
if renderer_type == "stretched":
color_space = options.get('color_space', 'hsv').lower().strip()
if not color_space in ('rgb', 'hsv'):
raise ValueError("Invalid color space: {}".format(color_space))
renderer = StretchedRenderer(colorspace=color_space, **renderer_kwargs)
elif renderer_type == "classified":
renderer = ClassifiedRenderer(**renderer_kwargs)
elif renderer_type == "unique":
try:
labels = [six.text_type(x) for x in options.get('labels', [])]
except TypeError:
raise ValueError("Labels option must be an array")
renderer = UniqueValuesRenderer(labels=labels, **renderer_kwargs)
return renderer |
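A hedged sketch of the configuration shape the function above expects; the renderer classes and hex_to_color come from the surrounding module, and the colour ramp is invented:
config = {
    'type': 'stretched',
    'colors': [(0.0, '#000000'), (0.5, '#ff0000'), (1.0, '#ffffff')],
    'options': {'color_space': 'rgb', 'fill_value': '-9999'},
}
renderer = get_renderer_from_definition(config)  # -> a StretchedRenderer instance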
def _process_cities_file(self, file, city_country_mapping):
""" Iterate over cities info and extract useful data """
data = {'all_regions': list(), 'regions': list(), 'cities': list(), 'city_region_mapping': dict()}
allowed_countries = settings.IPGEOBASE_ALLOWED_COUNTRIES
for geo_info in self._line_to_dict(file, field_names=settings.IPGEOBASE_CITIES_FIELDS):
country_code = self._get_country_code_for_city(geo_info['city_id'], city_country_mapping, data['all_regions'])
new_region = {'name': geo_info['region_name'],
'country__code': country_code}
if new_region not in data['all_regions']:
data['all_regions'].append(new_region)
if allowed_countries and country_code not in allowed_countries:
continue
if new_region not in data['regions']:
data['regions'].append(new_region)
data['cities'].append({'region__name': geo_info['region_name'],
'name': geo_info['city_name'],
'id': geo_info['city_id'],
'latitude': Decimal(geo_info['latitude']),
'longitude': Decimal(geo_info['longitude'])})
return data | Iterate over cities info and extract useful data | Below is the instruction that describes the task:
### Input:
Iterate over cities info and extract useful data
### Response:
def _process_cities_file(self, file, city_country_mapping):
""" Iterate over cities info and extract useful data """
data = {'all_regions': list(), 'regions': list(), 'cities': list(), 'city_region_mapping': dict()}
allowed_countries = settings.IPGEOBASE_ALLOWED_COUNTRIES
for geo_info in self._line_to_dict(file, field_names=settings.IPGEOBASE_CITIES_FIELDS):
country_code = self._get_country_code_for_city(geo_info['city_id'], city_country_mapping, data['all_regions'])
new_region = {'name': geo_info['region_name'],
'country__code': country_code}
if new_region not in data['all_regions']:
data['all_regions'].append(new_region)
if allowed_countries and country_code not in allowed_countries:
continue
if new_region not in data['regions']:
data['regions'].append(new_region)
data['cities'].append({'region__name': geo_info['region_name'],
'name': geo_info['city_name'],
'id': geo_info['city_id'],
'latitude': Decimal(geo_info['latitude']),
'longitude': Decimal(geo_info['longitude'])})
return data |
def remove_model(self, propname=None, mode=['model', 'data']):
r"""
Removes model and data from object.
Parameters
----------
propname : string or list of strings
The property or list of properties to remove
mode : list of strings
Controls what is removed. Options are:
*'model'* : Removes the model but not any numerical data that may
already exist.
*'data'* : Removes the data but leaves the model.
The default is both.
"""
if type(propname) is str:
propname = [propname]
for item in propname:
if 'model' in mode:
if item in self.models.keys():
del self.models[item]
if 'data' in mode:
if item in self.keys():
del self[item] | r"""
Removes model and data from object.
Parameters
----------
propname : string or list of strings
The property or list of properties to remove
mode : list of strings
Controls what is removed. Options are:
*'model'* : Removes the model but not any numerical data that may
already exist.
*'data'* : Removes the data but leaves the model.
The default is both. | Below is the instruction that describes the task:
### Input:
r"""
Removes model and data from object.
Parameters
----------
propname : string or list of strings
The property or list of properties to remove
mode : list of strings
Controls what is removed. Options are:
*'model'* : Removes the model but not any numerical data that may
already exist.
*'data'* : Removes the data but leaves the model.
The default is both.
### Response:
def remove_model(self, propname=None, mode=['model', 'data']):
r"""
Removes model and data from object.
Parameters
----------
propname : string or list of strings
The property or list of properties to remove
mode : list of strings
Controls what is removed. Options are:
*'model'* : Removes the model but not any numerical data that may
already exist.
*'data'* : Removes the data but leaves the model.
The default is both.
"""
if type(propname) is str:
propname = [propname]
for item in propname:
if 'model' in mode:
if item in self.models.keys():
del self.models[item]
if 'data' in mode:
if item in self.keys():
del self[item] |
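A hedged usage sketch for remove_model above; geo is a hypothetical OpenPNM-style object that carries both models and data arrays:
geo.remove_model('pore.diameter')                # drop both the model and its data
geo.remove_model('pore.volume', mode=['model'])  # keep the data, drop only the model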