code | docstring | text
---|---|---
stringlengths 75-104k | stringlengths 1-46.9k | stringlengths 164-112k
def info(self):
"""
Property for accessing the :class:`InfoManager` instance, which is used to get general server info.
:rtype: yagocd.resources.info.InfoManager
"""
if self._info_manager is None:
self._info_manager = InfoManager(session=self._session)
return self._info_manager | Property for accessing the :class:`InfoManager` instance, which is used to get general server info.
:rtype: yagocd.resources.info.InfoManager | Below is the instruction that describes the task:
### Input:
Property for accessing the :class:`InfoManager` instance, which is used to get general server info.
:rtype: yagocd.resources.info.InfoManager
### Response:
def info(self):
"""
Property for accessing the :class:`InfoManager` instance, which is used to get general server info.
:rtype: yagocd.resources.info.InfoManager
"""
if self._info_manager is None:
self._info_manager = InfoManager(session=self._session)
return self._info_manager |
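The accessor above is a lazy (memoized) property: the manager is built once on first access and cached. A minimal self-contained sketch of the same idiom, with illustrative names rather than the actual yagocd classes:

class InfoManager:
    def __init__(self, session):
        self.session = session

class Client:
    def __init__(self, session):
        self._session = session
        self._info_manager = None  # filled in lazily on first access

    @property
    def info(self):
        # build once, then return the cached instance on every later access
        if self._info_manager is None:
            self._info_manager = InfoManager(session=self._session)
        return self._info_manager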
def wildcard_allowed_actions(self, pattern=None):
"""
Find statements which allow wildcard actions.
A pattern can be specified for the wildcard action
"""
wildcard_allowed = []
for statement in self.statements:
if statement.wildcard_actions(pattern) and statement.effect == "Allow":
wildcard_allowed.append(statement)
return wildcard_allowed | Find statements which allow wildcard actions.
A pattern can be specified for the wildcard action | Below is the instruction that describes the task:
### Input:
Find statements which allow wildcard actions.
A pattern can be specified for the wildcard action
### Response:
def wildcard_allowed_actions(self, pattern=None):
"""
Find statements which allow wildcard actions.
A pattern can be specified for the wildcard action
"""
wildcard_allowed = []
for statement in self.statements:
if statement.wildcard_actions(pattern) and statement.effect == "Allow":
wildcard_allowed.append(statement)
return wildcard_allowed |
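For context, a rough sketch of the statement objects this method iterates over; the real class in the source project is not shown here, so the shape below is an assumption:

import fnmatch

class Statement:
    def __init__(self, effect, actions):
        self.effect = effect    # "Allow" or "Deny"
        self.actions = actions  # e.g. ["s3:*", "ec2:DescribeInstances"]

    def wildcard_actions(self, pattern=None):
        # actions containing a wildcard, optionally filtered by a glob pattern
        wildcards = [a for a in self.actions if "*" in a]
        if pattern is not None:
            wildcards = [a for a in wildcards if fnmatch.fnmatch(a, pattern)]
        return wildcards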
def omnigraffle(self):
""" tries to open an export directly in omnigraffle """
temp = self.rdf_source("dot")
try: # try to put in the user/tmp folder
from os.path import expanduser
home = expanduser("~")
filename = home + "/tmp/turtle_sketch.dot"
f = open(filename, "w")
except (IOError, OSError):  # ~/tmp may not exist; fall back to the current directory
filename = "turtle_sketch.dot"
f = open(filename, "w")
f.write(temp)
f.close()
try:
os.system("open " + filename)
except OSError:
os.system("start " + filename) | tries to open an export directly in omnigraffle | Below is the instruction that describes the task:
### Input:
tries to open an export directly in omnigraffle
### Response:
def omnigraffle(self):
""" tries to open an export directly in omnigraffle """
temp = self.rdf_source("dot")
try: # try to put in the user/tmp folder
from os.path import expanduser
home = expanduser("~")
filename = home + "/tmp/turtle_sketch.dot"
f = open(filename, "w")
except (IOError, OSError):  # ~/tmp may not exist; fall back to the current directory
filename = "turtle_sketch.dot"
f = open(filename, "w")
f.write(temp)
f.close()
try:
os.system("open " + filename)
except OSError:
os.system("start " + filename) |
def Nu_plate_Kumar(Re, Pr, chevron_angle, mu=None, mu_wall=None):
r'''Calculates Nusselt number for single-phase flow in a
**well-designed** Chevron-style plate heat exchanger according to [1]_.
The data is believed to have been developed by APV International Limited,
since acquired by SPX Corporation. This uses a curve fit of that data
published in [2]_.
.. math::
Nu = C_1 Re^m Pr^{0.33}\left(\frac{\mu}{\mu_{wall}}\right)^{0.17}
`C1` and `m` are coefficients looked up in a table, with varying ranges
of Re validity and chevron angle validity. See the source for their
exact values. The wall fluid property correction is included only if the
viscosity values are provided.
Parameters
----------
Re : float
Reynolds number with respect to the hydraulic diameter of the channels,
[-]
Pr : float
Prandtl number calculated with bulk fluid properties, [-]
chevron_angle : float
Angle of the plate corrugations with respect to the vertical axis
(the direction of flow if the plates were straight), between 0 and
90. Many plate exchangers use two alternating patterns; use their
average angle for that situation [degrees]
mu : float, optional
Viscosity of the fluid at the bulk (inlet and outlet average)
temperature, [Pa*s]
mu_wall : float, optional
Viscosity of fluid at wall temperature, [Pa*s]
Returns
-------
Nu : float
Nusselt number with respect to `Dh`, [-]
Notes
-----
Data on graph from Re=0.1 to Re=10000, with chevron angles 30 to 65 degrees.
See `PlateExchanger` for further clarification on the definitions.
It is believed the constants used in this correlation were curve-fit to
the actual graph in [1]_ by the author of [2]_, as there is no
table of the coefficients in [1]_.
As the coefficients change, there are numerous small discontinuities,
although the data on the graphs is continuous with sharp transitions
of the slope.
The author of [1]_ states clearly this correlation is "applicable only to
well designed Chevron PHEs".
Examples
--------
>>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30)
47.757818892853955
With the wall-correction factor included:
>>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30, mu=1E-3, mu_wall=8E-4)
49.604284135097544
References
----------
.. [1] Kumar, H. "The plate heat exchanger: construction and design." In
First U.K. National Conference on Heat Transfer: Held at the University
of Leeds, 3-5 July 1984, Institute of Chemical Engineering Symposium
Series, vol. 86, pp. 1275-1288. 1984.
.. [2] Ayub, Zahid H. "Plate Heat Exchanger Literature Survey and New Heat
Transfer and Pressure Drop Correlations for Refrigerant Evaporators."
Heat Transfer Engineering 24, no. 5 (September 1, 2003): 3-16.
doi:10.1080/01457630304056.
'''
# Uses the standard diameter as characteristic diameter
beta_list_len = len(Kumar_beta_list)
for i in range(beta_list_len):
if chevron_angle <= Kumar_beta_list[i]:
C1_options, m_options, Re_ranges = Kumar_C1s[i], Kumar_ms[i], Kumar_Nu_Res[i]
break
elif i == beta_list_len-1:
C1_options, m_options, Re_ranges = Kumar_C1s[-1], Kumar_ms[-1], Kumar_Nu_Res[-1]
Re_len = len(Re_ranges)
for j in range(Re_len):
if Re <= Re_ranges[j]:
C1, m = C1_options[j], m_options[j]
break
elif j == Re_len-1:
C1, m = C1_options[-1], m_options[-1]
Nu = C1*Re**m*Pr**0.33
if mu_wall is not None and mu is not None:
Nu *= (mu/mu_wall)**0.17
return Nu | r'''Calculates Nusselt number for single-phase flow in a
**well-designed** Chevron-style plate heat exchanger according to [1]_.
The data is believed to have been developed by APV International Limited,
since acquired by SPX Corporation. This uses a curve fit of that data
published in [2]_.
.. math::
Nu = C_1 Re^m Pr^{0.33}\left(\frac{\mu}{\mu_{wall}}\right)^{0.17}
`C1` and `m` are coefficients looked up in a table, with varying ranges
of Re validity and chevron angle validity. See the source for their
exact values. The wall fluid property correction is included only if the
viscosity values are provided.
Parameters
----------
Re : float
Reynolds number with respect to the hydraulic diameter of the channels,
[-]
Pr : float
Prandtl number calculated with bulk fluid properties, [-]
chevron_angle : float
Angle of the plate corrugations with respect to the vertical axis
(the direction of flow if the plates were straight), between 0 and
90. Many plate exchangers use two alternating patterns; use their
average angle for that situation [degrees]
mu : float, optional
Viscosity of the fluid at the bulk (inlet and outlet average)
temperature, [Pa*s]
mu_wall : float, optional
Viscosity of fluid at wall temperature, [Pa*s]
Returns
-------
Nu : float
Nusselt number with respect to `Dh`, [-]
Notes
-----
Data on graph from Re=0.1 to Re=10000, with chevron angles 30 to 65 degrees.
See `PlateExchanger` for further clarification on the definitions.
It is believed the constants used in this correlation were curve-fit to
the actual graph in [1]_ by the author of [2]_, as there is no
table of the coefficients in [1]_.
As the coefficients change, there are numerous small discontinuities,
although the data on the graphs is continuous with sharp transitions
of the slope.
The author of [1]_ states clearly this correlation is "applicable only to
well designed Chevron PHEs".
Examples
--------
>>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30)
47.757818892853955
With the wall-correction factor included:
>>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30, mu=1E-3, mu_wall=8E-4)
49.604284135097544
References
----------
.. [1] Kumar, H. "The plate heat exchanger: construction and design." In
First U.K. National Conference on Heat Transfer: Held at the University
of Leeds, 3-5 July 1984, Institute of Chemical Engineering Symposium
Series, vol. 86, pp. 1275-1288. 1984.
.. [2] Ayub, Zahid H. "Plate Heat Exchanger Literature Survey and New Heat
Transfer and Pressure Drop Correlations for Refrigerant Evaporators."
Heat Transfer Engineering 24, no. 5 (September 1, 2003): 3-16.
doi:10.1080/01457630304056. | Below is the instruction that describes the task:
### Input:
r'''Calculates Nusselt number for single-phase flow in a
**well-designed** Chevron-style plate heat exchanger according to [1]_.
The data is believed to have been developed by APV International Limited,
since acquired by SPX Corporation. This uses a curve fit of that data
published in [2]_.
.. math::
Nu = C_1 Re^m Pr^{0.33}\left(\frac{\mu}{\mu_{wall}}\right)^{0.17}
`C1` and `m` are coefficients looked up in a table, with varying ranges
of Re validity and chevron angle validity. See the source for their
exact values. The wall fluid property correction is included only if the
viscosity values are provided.
Parameters
----------
Re : float
Reynolds number with respect to the hydraulic diameter of the channels,
[-]
Pr : float
Prandtl number calculated with bulk fluid properties, [-]
chevron_angle : float
Angle of the plate corrugations with respect to the vertical axis
(the direction of flow if the plates were straight), between 0 and
90. Many plate exchangers use two alternating patterns; use their
average angle for that situation [degrees]
mu : float, optional
Viscosity of the fluid at the bulk (inlet and outlet average)
temperature, [Pa*s]
mu_wall : float, optional
Viscosity of fluid at wall temperature, [Pa*s]
Returns
-------
Nu : float
Nusselt number with respect to `Dh`, [-]
Notes
-----
Data on graph from Re=0.1 to Re=10000, with chevron angles 30 to 65 degrees.
See `PlateExchanger` for further clarification on the definitions.
It is believed the constants used in this correlation were curve-fit to
the actual graph in [1]_ by the author of [2]_, as there is no
table of the coefficients in [1]_.
As the coefficients change, there are numerous small discontinuities,
although the data on the graphs is continuous with sharp transitions
of the slope.
The author of [1]_ states clearly this correlation is "applicable only to
well designed Chevron PHEs".
Examples
--------
>>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30)
47.757818892853955
With the wall-correction factor included:
>>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30, mu=1E-3, mu_wall=8E-4)
49.604284135097544
References
----------
.. [1] Kumar, H. "The plate heat exchanger: construction and design." In
First U.K. National Conference on Heat Transfer: Held at the University
of Leeds, 3-5 July 1984, Institute of Chemical Engineering Symposium
Series, vol. 86, pp. 1275-1288. 1984.
.. [2] Ayub, Zahid H. "Plate Heat Exchanger Literature Survey and New Heat
Transfer and Pressure Drop Correlations for Refrigerant Evaporators."
Heat Transfer Engineering 24, no. 5 (September 1, 2003): 3-16.
doi:10.1080/01457630304056.
### Response:
def Nu_plate_Kumar(Re, Pr, chevron_angle, mu=None, mu_wall=None):
r'''Calculates Nusselt number for single-phase flow in a
**well-designed** Chevron-style plate heat exchanger according to [1]_.
The data is believed to have been developed by APV International Limited,
since acquired by SPX Corporation. This uses a curve fit of that data
published in [2]_.
.. math::
Nu = C_1 Re^m Pr^{0.33}\left(\frac{\mu}{\mu_{wall}}\right)^{0.17}
`C1` and `m` are coefficients looked up in a table, with varying ranges
of Re validity and chevron angle validity. See the source for their
exact values. The wall fluid property correction is included only if the
viscosity values are provided.
Parameters
----------
Re : float
Reynolds number with respect to the hydraulic diameter of the channels,
[-]
Pr : float
Prandtl number calculated with bulk fluid properties, [-]
chevron_angle : float
Angle of the plate corrugations with respect to the vertical axis
(the direction of flow if the plates were straight), between 0 and
90. Many plate exchangers use two alternating patterns; use their
average angle for that situation [degrees]
mu : float, optional
Viscosity of the fluid at the bulk (inlet and outlet average)
temperature, [Pa*s]
mu_wall : float, optional
Viscosity of fluid at wall temperature, [Pa*s]
Returns
-------
Nu : float
Nusselt number with respect to `Dh`, [-]
Notes
-----
Data on graph from Re=0.1 to Re=10000, with chevron angles 30 to 65 degrees.
See `PlateExchanger` for further clarification on the definitions.
It is believed the constants used in this correlation were curve-fit to
the actual graph in [1]_ by the author of [2]_, as there is no
table of the coefficients in [1]_.
As the coefficients change, there are numerous small discontinuities,
although the data on the graphs is continuous with sharp transitions
of the slope.
The author of [1]_ states clearly this correlation is "applicable only to
well designed Chevron PHEs".
Examples
--------
>>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30)
47.757818892853955
With the wall-correction factor included:
>>> Nu_plate_Kumar(Re=2000, Pr=0.7, chevron_angle=30, mu=1E-3, mu_wall=8E-4)
49.604284135097544
References
----------
.. [1] Kumar, H. "The plate heat exchanger: construction and design." In
First U.K. National Conference on Heat Transfer: Held at the University
of Leeds, 3-5 July 1984, Institute of Chemical Engineering Symposium
Series, vol. 86, pp. 1275-1288. 1984.
.. [2] Ayub, Zahid H. "Plate Heat Exchanger Literature Survey and New Heat
Transfer and Pressure Drop Correlations for Refrigerant Evaporators."
Heat Transfer Engineering 24, no. 5 (September 1, 2003): 3-16.
doi:10.1080/01457630304056.
'''
# Uses the standard diameter as characteristic diameter
beta_list_len = len(Kumar_beta_list)
for i in range(beta_list_len):
if chevron_angle <= Kumar_beta_list[i]:
C1_options, m_options, Re_ranges = Kumar_C1s[i], Kumar_ms[i], Kumar_Nu_Res[i]
break
elif i == beta_list_len-1:
C1_options, m_options, Re_ranges = Kumar_C1s[-1], Kumar_ms[-1], Kumar_Nu_Res[-1]
Re_len = len(Re_ranges)
for j in range(Re_len):
if Re <= Re_ranges[j]:
C1, m = C1_options[j], m_options[j]
break
elif j == Re_len-1:
C1, m = C1_options[-1], m_options[-1]
Nu = C1*Re**m*Pr**0.33
if mu_wall is not None and mu is not None:
Nu *= (mu/mu_wall)**0.17
return Nu |
def step_it_should_fail_with(context):
'''
EXAMPLE:
...
when I run "behave ..."
then it should fail with:
"""
TEXT
"""
'''
assert context.text is not None, "ENSURE: multiline text is provided."
step_command_output_should_contain(context)
assert_that(context.command_result.returncode, is_not(equal_to(0))) | EXAMPLE:
...
when I run "behave ..."
then it should fail with:
"""
TEXT
""" | Below is the the instruction that describes the task:
### Input:
EXAMPLE:
...
when I run "behave ..."
then it should fail with:
"""
TEXT
"""
### Response:
def step_it_should_fail_with(context):
'''
EXAMPLE:
...
when I run "behave ..."
then it should fail with:
"""
TEXT
"""
'''
assert context.text is not None, "ENSURE: multiline text is provided."
step_command_output_should_contain(context)
assert_that(context.command_result.returncode, is_not(equal_to(0))) |
def _dot_product(self, imgs_to_decode):
""" Decoding using the dot product.
"""
return np.dot(imgs_to_decode.T, self.feature_images).T | Decoding using the dot product. | Below is the instruction that describes the task:
### Input:
Decoding using the dot product.
### Response:
def _dot_product(self, imgs_to_decode):
""" Decoding using the dot product.
"""
return np.dot(imgs_to_decode.T, self.feature_images).T |
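A quick shape check of what the dot product computes, with made-up dimensions (both arrays are voxels-by-columns, so the transposes line the voxel axes up):

import numpy as np

imgs_to_decode = np.random.rand(1000, 5)   # 1000 voxels, 5 images to decode
feature_images = np.random.rand(1000, 20)  # 1000 voxels, 20 feature maps

result = np.dot(imgs_to_decode.T, feature_images).T
print(result.shape)  # (20, 5): one score per feature map per decoded image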
def decode_sent_msg(pref, message, pretty=False):
"""decode_sent_msg: Return a string of the decoded message
"""
newline = "\n" if pretty else " "
indent = " " if pretty else ""
start = newline + indent
out = []
out.append("%s%s{%sSEQNUM: %d," % (pref, newline, start, message[Const.W_SEQ]))
out.append("%sCOMPRESSION: %d," % (start, message[Const.W_COMPRESSION]))
out.append("%sHASH: %s...," % (start, str(binascii.b2a_hex(message[Const.W_HASH]).decode('ascii'))[:10]))
out.append("%sMESSAGE:%s{%sCLIENTREF: %s," % (start, start, start + indent,
message[Const.W_MESSAGE][Const.M_CLIENTREF]))
out.append("%sRESOURCE: %s," % (start + indent, R_TYPES[message[Const.W_MESSAGE][Const.M_RESOURCE]]))
out.append("%sTYPE: %s," % (start + indent, C_TYPES[message[Const.W_MESSAGE][Const.M_TYPE]]))
out.append("%sACTION: %s," % (start + indent, message[Const.W_MESSAGE][Const.M_ACTION]))
if Const.M_RANGE in message[Const.W_MESSAGE]:
out.append("%sRANGE: %s," % (start + indent, message[Const.W_MESSAGE][Const.M_RANGE]))
out.append("%sPAYLOAD: %s%s}%s}" % (start + indent, message[Const.W_MESSAGE][Const.M_PAYLOAD], start, newline))
return ''.join(out) | decode_sent_msg: Return a string of the decoded message | Below is the instruction that describes the task:
### Input:
decode_sent_msg: Return a string of the decoded message
### Response:
def decode_sent_msg(pref, message, pretty=False):
"""decode_sent_msg: Return a string of the decoded message
"""
newline = "\n" if pretty else " "
indent = " " if pretty else ""
start = newline + indent
out = []
out.append("%s%s{%sSEQNUM: %d," % (pref, newline, start, message[Const.W_SEQ]))
out.append("%sCOMPRESSION: %d," % (start, message[Const.W_COMPRESSION]))
out.append("%sHASH: %s...," % (start, str(binascii.b2a_hex(message[Const.W_HASH]).decode('ascii'))[:10]))
out.append("%sMESSAGE:%s{%sCLIENTREF: %s," % (start, start, start + indent,
message[Const.W_MESSAGE][Const.M_CLIENTREF]))
out.append("%sRESOURCE: %s," % (start + indent, R_TYPES[message[Const.W_MESSAGE][Const.M_RESOURCE]]))
out.append("%sTYPE: %s," % (start + indent, C_TYPES[message[Const.W_MESSAGE][Const.M_TYPE]]))
out.append("%sACTION: %s," % (start + indent, message[Const.W_MESSAGE][Const.M_ACTION]))
if Const.M_RANGE in message[Const.W_MESSAGE]:
out.append("%sRANGE: %s," % (start + indent, message[Const.W_MESSAGE][Const.M_RANGE]))
out.append("%sPAYLOAD: %s%s}%s}" % (start + indent, message[Const.W_MESSAGE][Const.M_PAYLOAD], start, newline))
return ''.join(out) |
def privateparts(self, domain):
""" Return tuple of labels and the private suffix. """
s = self.privatesuffix(domain)
if s is None:
return None
else:
# I know the domain is valid and ends with private suffix
pre = domain[0:-(len(s)+1)]
if pre == "":
return (s,)
else:
return tuple(pre.split(".") + [s]) | Return tuple of labels and the private suffix. | Below is the instruction that describes the task:
### Input:
Return tuple of labels and the private suffix.
### Response:
def privateparts(self, domain):
""" Return tuple of labels and the private suffix. """
s = self.privatesuffix(domain)
if s is None:
return None
else:
# I know the domain is valid and ends with private suffix
pre = domain[0:-(len(s)+1)]
if pre == "":
return (s,)
else:
return tuple(pre.split(".") + [s]) |
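Tracing the slicing by hand, and assuming a public-suffix list under which the private suffix of the domain is "example.co.uk", the method would behave like this (illustrative, not captured output):

>>> psl.privateparts("www.example.co.uk")
('www', 'example.co.uk')
>>> psl.privateparts("example.co.uk")   # no labels before the private suffix
('example.co.uk',)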
def download(input, filename, representation, overwrite=False, resolvers=None, get3d=False, **kwargs):
"""Convenience function to save a CIR response as a file.
This is just a simple wrapper around the resolve function.
:param string input: Chemical identifier to resolve
:param string filename: File path to save to
:param string representation: Desired output representation
:param bool overwrite: (Optional) Whether to allow overwriting of an existing file
:param list(string) resolvers: (Optional) Ordered list of resolvers to use
:param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
:raises HTTPError: if CIR returns an error code
:raises ParseError: if CIR response is uninterpretable
:raises IOError: if overwrite is False and file already exists
"""
result = resolve(input, representation, resolvers, get3d, **kwargs)
# Just log and return if nothing resolved
if not result:
log.debug('No file to download.')
return
# Only overwrite an existing file if explicitly instructed to.
if not overwrite and os.path.isfile(filename):
raise IOError("%s already exists. Use 'overwrite=True' to overwrite it." % filename)
# Ensure file ends with a newline
if not result.endswith('\n'):
result += '\n'
with open(filename, 'w') as f:
f.write(result) | Convenience function to save a CIR response as a file.
This is just a simple wrapper around the resolve function.
:param string input: Chemical identifier to resolve
:param string filename: File path to save to
:param string representation: Desired output representation
:param bool overwrite: (Optional) Whether to allow overwriting of an existing file
:param list(string) resolvers: (Optional) Ordered list of resolvers to use
:param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
:raises HTTPError: if CIR returns an error code
:raises ParseError: if CIR response is uninterpretable
:raises IOError: if overwrite is False and file already exists | Below is the instruction that describes the task:
### Input:
Convenience function to save a CIR response as a file.
This is just a simple wrapper around the resolve function.
:param string input: Chemical identifier to resolve
:param string filename: File path to save to
:param string representation: Desired output representation
:param bool overwrite: (Optional) Whether to allow overwriting of an existing file
:param list(string) resolvers: (Optional) Ordered list of resolvers to use
:param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
:raises HTTPError: if CIR returns an error code
:raises ParseError: if CIR response is uninterpretable
:raises IOError: if overwrite is False and file already exists
### Response:
def download(input, filename, representation, overwrite=False, resolvers=None, get3d=False, **kwargs):
"""Convenience function to save a CIR response as a file.
This is just a simple wrapper around the resolve function.
:param string input: Chemical identifier to resolve
:param string filename: File path to save to
:param string representation: Desired output representation
:param bool overwrite: (Optional) Whether to allow overwriting of an existing file
:param list(string) resolvers: (Optional) Ordered list of resolvers to use
:param bool get3d: (Optional) Whether to return 3D coordinates (where applicable)
:raises HTTPError: if CIR returns an error code
:raises ParseError: if CIR response is uninterpretable
:raises IOError: if overwrite is False and file already exists
"""
result = resolve(input, representation, resolvers, get3d, **kwargs)
# Just log and return if nothing resolved
if not result:
log.debug('No file to download.')
return
# Only overwrite an existing file if explicitly instructed to.
if not overwrite and os.path.isfile(filename):
raise IOError("%s already exists. Use 'overwrite=True' to overwrite it." % filename)
# Ensure file ends with a newline
if not result.endswith('\n'):
result += '\n'
with open(filename, 'w') as f:
f.write(result) |
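A hedged usage sketch; the identifier, representation, and resolver behaviour all depend on the CIR service backing the resolve call:

# fetch an SDF rendering of a name-resolved structure, replacing any old file
download('aspirin', 'aspirin.sdf', 'sdf', overwrite=True)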
def cwd_decorator(func):
"""
decorator to change cwd to directory containing rst for this function
"""
def wrapper(*args, **kw):
cur_dir = os.getcwd()
found = False
for arg in sys.argv:
if arg.endswith(".rst"):
found = arg
break
if found:
directory = os.path.dirname(found)
if directory:
os.chdir(directory)
data = func(*args, **kw)
os.chdir(cur_dir)
return data
return wrapper | decorator to change cwd to directory containing rst for this function | Below is the instruction that describes the task:
### Input:
decorator to change cwd to directory containing rst for this function
### Response:
def cwd_decorator(func):
"""
decorator to change cwd to directory containing rst for this function
"""
def wrapper(*args, **kw):
cur_dir = os.getcwd()
found = False
for arg in sys.argv:
if arg.endswith(".rst"):
found = arg
break
if found:
directory = os.path.dirname(found)
if directory:
os.chdir(directory)
data = func(*args, **kw)
os.chdir(cur_dir)
return data
return wrapper |
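Usage is plain decoration; the wrapper only switches directory when an .rst path is found in sys.argv, and restores the original cwd afterwards. A sketch with a hypothetical helper:

@cwd_decorator
def load_fixture(name):
    # relative paths now resolve against the directory of the .rst file
    with open(name) as f:
        return f.read()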
def create_top_schema(self):
"""
(Category --->) Item <---> Module <---> LogEntry
<---> is a many-to-many relationship
---> is a foreign key relationship
(- Category: represents a group of Items which form a top list)
- Item: something that can be played multiple times and is grouped by to build a top list
- Module: an instance of a module on the queue
- LogEntry: an act performed on the queue and logged
"""
self.execute("""CREATE TABLE IF NOT EXISTS top_module (
uuid TEXT,
add_timestamp DATETIME
);""")
self.execute("""CREATE TABLE IF NOT EXISTS top_category (
pk INTEGER PRIMARY KEY,
slug TEXT,
description TEXT
);""")
self.execute("""CREATE TABLE IF NOT EXISTS top_item (
pk INTEGER PRIMARY KEY,
canonical_id TEXT,
category_pk INTEGER,
requeue_command TEXT,
url TEXT,
description TEXT
);""")
self.execute("""CREATE TABLE IF NOT EXISTS top_item_module (
pk INTEGER PRIMARY KEY,
item_pk INTEGER,
module_uuid TEXT
);""")
self.execute("""CREATE TABLE IF NOT EXISTS top_log_entry (
pk INTEGER,
uid TEXT,
namespace TEXT,
timestamp DATETIME,
input_json TEXT,
output_json TEXT
);""")
self.execute("""CREATE TABLE IF NOT EXISTS top_module_log_entry (
pk INTEGER PRIMARY KEY,
log_pk INTEGER,
module_uuid TEXT,
log_type TEXT
);""")
self.commit() | (Category --->) Item <---> Module <---> LogEntry
<---> is a many-to-many relationship
---> is a foreign key relationship
(- Category: represents a group of Items which form a top list)
- Item: something that can be played multiple times and is grouped by to build a top list
- Module: an instance of a module on the queue
- LogEntry: an act performed on the queue and logged | Below is the instruction that describes the task:
### Input:
(Category --->) Item <---> Module <---> LogEntry
<---> is a many-to-many relationship
---> is a foreign key relationship
(- Category: represents a group of Items which form a top list)
- Item: something that can be played multiple times and is grouped by to build a top list
- Module: an instance of a module on the queue
- LogEntry: an act performed on the queue and logged
### Response:
def create_top_schema(self):
"""
(Category --->) Item <---> Module <---> LogEntry
<---> is a many-to-many relationship
---> is a foreign key relationship
(- Category: represents a group of Items which form a top list)
- Item: something that can be played multiple times and is grouped by to build a top list
- Module: an instance of a module on the queue
- LogEntry: an act performed on the queue and logged
"""
self.execute("""CREATE TABLE IF NOT EXISTS top_module (
uuid TEXT,
add_timestamp DATETIME
);""")
self.execute("""CREATE TABLE IF NOT EXISTS top_category (
pk INTEGER PRIMARY KEY,
slug TEXT,
description TEXT
);""")
self.execute("""CREATE TABLE IF NOT EXISTS top_item (
pk INTEGER PRIMARY KEY,
canonical_id TEXT,
category_pk INTEGER,
requeue_command TEXT,
url TEXT,
description TEXT
);""")
self.execute("""CREATE TABLE IF NOT EXISTS top_item_module (
pk INTEGER PRIMARY KEY,
item_pk INTEGER,
module_uuid TEXT
);""")
self.execute("""CREATE TABLE IF NOT EXISTS top_log_entry (
pk INTEGER,
uid TEXT,
namespace TEXT,
timestamp DATETIME,
input_json TEXT,
output_json TEXT
);""")
self.execute("""CREATE TABLE IF NOT EXISTS top_module_log_entry (
pk INTEGER PRIMARY KEY,
log_pk INTEGER,
module_uuid TEXT,
log_type TEXT
);""")
self.commit() |
def __groupchat_message(self,stanza):
"""Process a groupchat message from a MUC room.
:Parameters:
- `stanza`: the stanza received.
:Types:
- `stanza`: `Message`
:return: `True` if the message was properly recognized as directed to
one of the managed rooms, `False` otherwise.
:returntype: `bool`"""
fr=stanza.get_from()
key=fr.bare().as_unicode()
rs=self.rooms.get(key)
if not rs:
self.__logger.debug("groupchat message from unknown source")
return False
rs.process_groupchat_message(stanza)
return True | Process a groupchat message from a MUC room.
:Parameters:
- `stanza`: the stanza received.
:Types:
- `stanza`: `Message`
:return: `True` if the message was properly recognized as directed to
one of the managed rooms, `False` otherwise.
:returntype: `bool` | Below is the instruction that describes the task:
### Input:
Process a groupchat message from a MUC room.
:Parameters:
- `stanza`: the stanza received.
:Types:
- `stanza`: `Message`
:return: `True` if the message was properly recognized as directed to
one of the managed rooms, `False` otherwise.
:returntype: `bool`
### Response:
def __groupchat_message(self,stanza):
"""Process a groupchat message from a MUC room.
:Parameters:
- `stanza`: the stanza received.
:Types:
- `stanza`: `Message`
:return: `True` if the message was properly recognized as directed to
one of the managed rooms, `False` otherwise.
:returntype: `bool`"""
fr=stanza.get_from()
key=fr.bare().as_unicode()
rs=self.rooms.get(key)
if not rs:
self.__logger.debug("groupchat message from unknown source")
return False
rs.process_groupchat_message(stanza)
return True |
def put(self, key, value):
'''Stores the object in all underlying datastores.'''
for store in self._stores:
store.put(key, value) | Stores the object in all underlying datastores. | Below is the instruction that describes the task:
### Input:
Stores the object in all underlying datastores.
### Response:
def put(self, key, value):
'''Stores the object in all underlying datastores.'''
for store in self._stores:
store.put(key, value) |
def send_rpc(self, service, routing_id, method, args=None, kwargs=None,
broadcast=False):
'''Send out an RPC request
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args: the positional arguments to send along with the request
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to all peers with matching subscriptions
:type broadcast: bool
:returns:
a :class:`RPC <junction.futures.RPC>` object representing the
RPC and its future response.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if the client
doesn't have a connection to a hub
'''
if not self._peer.up:
raise errors.Unroutable()
return self._dispatcher.send_proxied_rpc(service, routing_id, method,
args or (), kwargs or {}, not broadcast) | Send out an RPC request
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args: the positional arguments to send along with the request
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to all peers with matching subscriptions
:type broadcast: bool
:returns:
a :class:`RPC <junction.futures.RPC>` object representing the
RPC and its future response.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if the client
doesn't have a connection to a hub | Below is the instruction that describes the task:
### Input:
Send out an RPC request
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args: the positional arguments to send along with the request
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to all peers with matching subscriptions
:type broadcast: bool
:returns:
a :class:`RPC <junction.futures.RPC>` object representing the
RPC and its future response.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if the client
doesn't have a connection to a hub
### Response:
def send_rpc(self, service, routing_id, method, args=None, kwargs=None,
broadcast=False):
'''Send out an RPC request
:param service: the service name (the routing top level)
:type service: anything hash-able
:param routing_id:
The id used for routing within the registered handlers of the
service.
:type routing_id: int
:param method: the method name to call
:type method: string
:param args: the positional arguments to send along with the request
:type args: tuple
:param kwargs: keyword arguments to send along with the request
:type kwargs: dict
:param broadcast:
if ``True``, send to all peers with matching subscriptions
:type broadcast: bool
:returns:
a :class:`RPC <junction.futures.RPC>` object representing the
RPC and its future response.
:raises:
:class:`Unroutable <junction.errors.Unroutable>` if the client
doesn't have a connection to a hub
'''
if not self._peer.up:
raise errors.Unroutable()
return self._dispatcher.send_proxied_rpc(service, routing_id, method,
args or (), kwargs or {}, not broadcast) |
def to_cfn_resource_name(name):
# type: (str) -> str
"""Transform a name to a valid cfn name.
This will convert the provided name to a CamelCase name.
It's possible that the conversion to a CFN resource name
can result in name collisions. It's up to the caller
to handle name collisions appropriately.
"""
if not name:
raise ValueError("Invalid name: %r" % name)
word_separators = ['-', '_']
for word_separator in word_separators:
word_parts = [p for p in name.split(word_separator) if p]
name = ''.join([w[0].upper() + w[1:] for w in word_parts])
return re.sub(r'[^A-Za-z0-9]+', '', name) | Transform a name to a valid cfn name.
This will convert the provided name to a CamelCase name.
It's possible that the conversion to a CFN resource name
can result in name collisions. It's up to the caller
to handle name collisions appropriately. | Below is the instruction that describes the task:
### Input:
Transform a name to a valid cfn name.
This will convert the provided name to a CamelCase name.
It's possible that the conversion to a CFN resource name
can result in name collisions. It's up to the caller
to handle name collisions appropriately.
### Response:
def to_cfn_resource_name(name):
# type: (str) -> str
"""Transform a name to a valid cfn name.
This will convert the provided name to a CamelCase name.
It's possible that the conversion to a CFN resource name
can result in name collisions. It's up to the caller
to handle name collisions appropriately.
"""
if not name:
raise ValueError("Invalid name: %r" % name)
word_separators = ['-', '_']
for word_separator in word_separators:
word_parts = [p for p in name.split(word_separator) if p]
name = ''.join([w[0].upper() + w[1:] for w in word_parts])
return re.sub(r'[^A-Za-z0-9]+', '', name) |
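Worked examples, traced by hand from the code above (split on '-' and '_', capitalize each part, then strip any remaining non-alphanumerics):

>>> to_cfn_resource_name('my-app_handler')
'MyAppHandler'
>>> to_cfn_resource_name('myApp.v2')   # '.' survives the splits, then is stripped
'MyAppv2'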
def process(self, filename, encoding, **kwargs):
"""Process ``filename`` and encode byte-string with ``encoding``. This
method is called by :func:`textract.parsers.process` and wraps
the :meth:`.BaseParser.extract` method in `a delicious unicode
sandwich <http://nedbatchelder.com/text/unipain.html>`_.
"""
# make a "unicode sandwich" to handle dealing with unknown
# input byte strings and converting them to a predictable
# output encoding
# http://nedbatchelder.com/text/unipain/unipain.html#35
byte_string = self.extract(filename, **kwargs)
unicode_string = self.decode(byte_string)
return self.encode(unicode_string, encoding) | Process ``filename`` and encode byte-string with ``encoding``. This
method is called by :func:`textract.parsers.process` and wraps
the :meth:`.BaseParser.extract` method in `a delicious unicode
sandwich <http://nedbatchelder.com/text/unipain.html>`_. | Below is the instruction that describes the task:
### Input:
Process ``filename`` and encode byte-string with ``encoding``. This
method is called by :func:`textract.parsers.process` and wraps
the :meth:`.BaseParser.extract` method in `a delicious unicode
sandwich <http://nedbatchelder.com/text/unipain.html>`_.
### Response:
def process(self, filename, encoding, **kwargs):
"""Process ``filename`` and encode byte-string with ``encoding``. This
method is called by :func:`textract.parsers.process` and wraps
the :meth:`.BaseParser.extract` method in `a delicious unicode
sandwich <http://nedbatchelder.com/text/unipain.html>`_.
"""
# make a "unicode sandwich" to handle dealing with unknown
# input byte strings and converting them to a predictable
# output encoding
# http://nedbatchelder.com/text/unipain/unipain.html#35
byte_string = self.extract(filename, **kwargs)
unicode_string = self.decode(byte_string)
return self.encode(unicode_string, encoding) |
def get_slot_value(payload, slot_name):
""" Return the parsed value of a slot. An intent has the form:
{
"text": "brew me a cappuccino with 3 sugars tomorrow",
"slots": [
{"value": {"slotName": "coffee_type", "value": "cappuccino"}},
...
]
}
This function extracts a slot value given its slot name, and parses
it into a Python object if applicable (e.g. for dates).
Slots can be of various forms, the simplest being just:
{"slotName": "coffee_sugar_amout", "value": "3"}
More complex examples are date times, where we distinguish between
instant times, or intervals. Thus, a slot:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "InstantTime",
"value": {
"value": "2017-07-14 00:00:00 +00:00",
"grain": "Day",
"precision": "Exact"
}
}
}
will be extracted as an `InstantTime` object, with datetime parsed
and granularity set.
Another example is a time interval:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "TimeInterval",
"value": {
"from": "2017-07-14 12:00:00 +00:00",
"to": "2017-07-14 19:00:00 +00:00"
}
},
}
which will be extracted as a TimeInterval object.
:param payload: the intent, in JSON format.
:return: the parsed value, as described above.
"""
if not 'slots' in payload:
return []
slots = []
for candidate in payload['slots']:
if 'slotName' in candidate and candidate['slotName'] == slot_name:
slots.append(candidate)
result = []
for slot in slots:
kind = IntentParser.get_dict_value(slot, ['value', 'kind'])
if kind == "InstantTime":
result.append(IntentParser.parse_instant_time(slot))
elif kind == "TimeInterval":
result.append(IntentParser.parse_time_interval(slot))
else:
result.append(IntentParser.get_dict_value(slot, ['value', 'value', 'value']) \
or IntentParser.get_dict_value(slot, ['value', 'value']))
return result | Return the parsed value of a slot. An intent has the form:
{
"text": "brew me a cappuccino with 3 sugars tomorrow",
"slots": [
{"value": {"slotName": "coffee_type", "value": "cappuccino"}},
...
]
}
This function extracts a slot value given its slot name, and parses
it into a Python object if applicable (e.g. for dates).
Slots can be of various forms, the simplest being just:
{"slotName": "coffee_sugar_amout", "value": "3"}
More complex examples are date times, where we distinguish between
instant times, or intervals. Thus, a slot:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "InstantTime",
"value": {
"value": "2017-07-14 00:00:00 +00:00",
"grain": "Day",
"precision": "Exact"
}
}
}
will be extracted as an `InstantTime` object, with datetime parsed
and granularity set.
Another example is a time interval:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "TimeInterval",
"value": {
"from": "2017-07-14 12:00:00 +00:00",
"to": "2017-07-14 19:00:00 +00:00"
}
},
}
which will be extracted as a TimeInterval object.
:param payload: the intent, in JSON format.
:return: the parsed value, as described above. | Below is the instruction that describes the task:
### Input:
Return the parsed value of a slot. An intent has the form:
{
"text": "brew me a cappuccino with 3 sugars tomorrow",
"slots": [
{"value": {"slotName": "coffee_type", "value": "cappuccino"}},
...
]
}
This function extracts a slot value given its slot name, and parses
it into a Python object if applicable (e.g. for dates).
Slots can be of various forms, the simplest being just:
{"slotName": "coffee_sugar_amout", "value": "3"}
More complex examples are date times, where we distinguish between
instant times, or intervals. Thus, a slot:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "InstantTime",
"value": {
"value": "2017-07-14 00:00:00 +00:00",
"grain": "Day",
"precision": "Exact"
}
}
}
will be extracted as an `InstantTime` object, with datetime parsed
and granularity set.
Another example is a time interval:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "TimeInterval",
"value": {
"from": "2017-07-14 12:00:00 +00:00",
"to": "2017-07-14 19:00:00 +00:00"
}
},
}
which will be extracted as a TimeInterval object.
:param payload: the intent, in JSON format.
:return: the parsed value, as described above.
### Response:
def get_slot_value(payload, slot_name):
""" Return the parsed value of a slot. An intent has the form:
{
"text": "brew me a cappuccino with 3 sugars tomorrow",
"slots": [
{"value": {"slotName": "coffee_type", "value": "cappuccino"}},
...
]
}
This function extracts a slot value given its slot name, and parses
it into a Python object if applicable (e.g. for dates).
Slots can be of various forms, the simplest being just:
{"slotName": "coffee_sugar_amout", "value": "3"}
More complex examples are date times, where we distinguish between
instant times, or intervals. Thus, a slot:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "InstantTime",
"value": {
"value": "2017-07-14 00:00:00 +00:00",
"grain": "Day",
"precision": "Exact"
}
}
}
will be extracted as an `InstantTime` object, with datetime parsed
and granularity set.
Another example is a time interval:
{
"slotName": "weatherForecastStartDatetime",
"value": {
"kind": "TimeInterval",
"value": {
"from": "2017-07-14 12:00:00 +00:00",
"to": "2017-07-14 19:00:00 +00:00"
}
},
}
which will be extracted as a TimeInterval object.
:param payload: the intent, in JSON format.
:return: the parsed value, as described above.
"""
if not 'slots' in payload:
return []
slots = []
for candidate in payload['slots']:
if 'slotName' in candidate and candidate['slotName'] == slot_name:
slots.append(candidate)
result = []
for slot in slots:
kind = IntentParser.get_dict_value(slot, ['value', 'kind'])
if kind == "InstantTime":
result.append(IntentParser.parse_instant_time(slot))
elif kind == "TimeInterval":
result.append(IntentParser.parse_time_interval(slot))
else:
result.append(IntentParser.get_dict_value(slot, ['value', 'value', 'value']) \
or IntentParser.get_dict_value(slot, ['value', 'value']))
return result |
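A minimal call for the simple (non-datetime) case, assuming get_slot_value is exposed as a static or class method of IntentParser as the body suggests; note the code expects slotName at the top level of each slot dict:

payload = {
    "text": "brew me a cappuccino with 3 sugars",
    "slots": [
        {"slotName": "coffee_type", "value": {"value": "cappuccino"}},
        {"slotName": "coffee_sugar_amount", "value": {"value": "3"}},
    ],
}
IntentParser.get_slot_value(payload, "coffee_type")  # -> ["cappuccino"]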
def send_event(self, action, properties, event_severity=EVENT_SEVERITY):
"""
send css_event and, if that fails, send custom_event instead
Args:
action (ACTIONS): the action causing the event
properties (dict): the action additional properties
event_severity (string): the event severity
Raises:
XCLIError: if the xcli.cmd.custom_event failed
KeyError: if action wasn't predefined
TypeError: if properties is neither None nor a dict
"""
# verify properties
event_properties = dict() if (properties is None) else properties
if type(event_properties) is not dict:
raise TypeError('properties is not dict')
# prepare event
event_bunch = Bunch(
Product=self.product_name,
Version=self.product_version,
Server=self.server_name,
Platform=self.platform,
Action=action,
Properties=event_properties)
event_description = self._get_description_prefix() + \
json.dumps(event_bunch)
use_custom_event = True
if CSS_PRODUCT_EVENT in dir(self.xcli.cmd):
try:
# send css product event
log.debug("sending css_product_event "
"description=%s severity=%s",
event_description, event_severity)
self.xcli.cmd.css_product_event(severity=event_severity,
product=self.product_name,
version=self.product_version,
server=self.server_name,
platform=self.platform,
action=action,
properties=event_properties)
use_custom_event = False
except (UnrecognizedCommandError,
OperationForbiddenForUserCategoryError):
log.warning("failed css_product_event "
"description=%s severity=%s",
event_description, event_severity)
if use_custom_event:
# send custom event
log.debug("sending custom_event description=%s severity=%s",
event_description, event_severity)
self.xcli.cmd.custom_event(
description=event_description, severity=event_severity) | send css_event and, if that fails, send custom_event instead
Args:
action (ACTIONS): the action causing the event
properties (dict): the action additional properties
event_severity (string): the event severity
Raises:
XCLIError: if the xcli.cmd.custom_event failed
KeyError: if action wasn't predefined
TypeError: if properties is neither None nor a dict | Below is the instruction that describes the task:
### Input:
send css_event and, if that fails, send custom_event instead
Args:
action (ACTIONS): the action causing the event
properties (dict): the action additional properties
event_severity (string): the event severity
Raises:
XCLIError: if the xcli.cmd.custom_event failed
KeyError: if action wasn't predefined
TypeError: if properties is neither None nor a dict
### Response:
def send_event(self, action, properties, event_severity=EVENT_SEVERITY):
"""
send css_event and, if that fails, send custom_event instead
Args:
action (ACTIONS): the action causing the event
properties (dict): the action additional properties
event_severity (string): the event severity
Raises:
XCLIError: if the xcli.cmd.custom_event failed
KeyError: if action wasn't predefined
TypeError: if properties is neither None nor a dict
"""
# verify properties
event_properties = dict() if (properties is None) else properties
if type(event_properties) is not dict:
raise TypeError('properties is not dict')
# prepare event
event_bunch = Bunch(
Product=self.product_name,
Version=self.product_version,
Server=self.server_name,
Platform=self.platform,
Action=action,
Properties=event_properties)
event_description = self._get_description_prefix() + \
json.dumps(event_bunch)
use_custom_event = True
if CSS_PRODUCT_EVENT in dir(self.xcli.cmd):
try:
# send css product event
log.debug("sending css_product_event "
"description=%s severity=%s",
event_description, event_severity)
self.xcli.cmd.css_product_event(severity=event_severity,
product=self.product_name,
version=self.product_version,
server=self.server_name,
platform=self.platform,
action=action,
properties=event_properties)
use_custom_event = False
except (UnrecognizedCommandError,
OperationForbiddenForUserCategoryError):
log.warning("failed css_product_event "
"description=%s severity=%s",
event_description, event_severity)
if use_custom_event:
# send custom event
log.debug("sending custom_event description=%s severity=%s",
event_description, event_severity)
self.xcli.cmd.custom_event(
description=event_description, severity=event_severity) |
def values(self):
"""
in order
"""
tmp = self
while tmp is not None:
yield tmp.data
tmp = tmp.next | in order | Below is the instruction that describes the task:
### Input:
in order
### Response:
def values(self):
"""
in order
"""
tmp = self
while tmp is not None:
yield tmp.data
tmp = tmp.next |
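A tiny node class exercising the generator (illustrative; the real class defines data and next elsewhere):

class Node:
    def __init__(self, data, next=None):
        self.data = data
        self.next = next

    def values(self):
        # yield each node's data in list order
        tmp = self
        while tmp is not None:
            yield tmp.data
            tmp = tmp.next

head = Node(1, Node(2, Node(3)))
print(list(head.values()))  # [1, 2, 3]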
def _get_url(cls, name, message_model, dispatch_model):
"""Returns a common pattern sitemessage URL.
:param str name: URL name
:param Message message_model:
:param Dispatch|None dispatch_model:
:return:
"""
global APP_URLS_ATTACHED
url = ''
if dispatch_model is None:
return url
if APP_URLS_ATTACHED != False: # sic!
hashed = cls.get_dispatch_hash(dispatch_model.id, message_model.id)
try:
url = reverse(name, args=[message_model.id, dispatch_model.id, hashed])
url = '%s%s' % (get_site_url(), url)
except NoReverseMatch:
if APP_URLS_ATTACHED is None:
APP_URLS_ATTACHED = False
return url | Returns a common pattern sitemessage URL.
:param str name: URL name
:param Message message_model:
:param Dispatch|None dispatch_model:
:return: | Below is the instruction that describes the task:
### Input:
Returns a common pattern sitemessage URL.
:param str name: URL name
:param Message message_model:
:param Dispatch|None dispatch_model:
:return:
### Response:
def _get_url(cls, name, message_model, dispatch_model):
"""Returns a common pattern sitemessage URL.
:param str name: URL name
:param Message message_model:
:param Dispatch|None dispatch_model:
:return:
"""
global APP_URLS_ATTACHED
url = ''
if dispatch_model is None:
return url
if APP_URLS_ATTACHED != False: # sic!
hashed = cls.get_dispatch_hash(dispatch_model.id, message_model.id)
try:
url = reverse(name, args=[message_model.id, dispatch_model.id, hashed])
url = '%s%s' % (get_site_url(), url)
except NoReverseMatch:
if APP_URLS_ATTACHED is None:
APP_URLS_ATTACHED = False
return url |
def mark_running(self):
"""Moves the service to the Running state.
Raises if the service is not currently in the Paused state.
"""
with self._lock:
self._set_state(self._RUNNING, self._PAUSED) | Moves the service to the Running state.
Raises if the service is not currently in the Paused state. | Below is the instruction that describes the task:
### Input:
Moves the service to the Running state.
Raises if the service is not currently in the Paused state.
### Response:
def mark_running(self):
"""Moves the service to the Running state.
Raises if the service is not currently in the Paused state.
"""
with self._lock:
self._set_state(self._RUNNING, self._PAUSED) |
def _persist(self):
"""
Run the command inside a thread so that we can catch output for each
line as it comes in and display it.
"""
# run the block/command
for command in self.commands:
try:
process = Popen(
[command],
stdout=PIPE,
stderr=PIPE,
universal_newlines=True,
env=self.env,
shell=True,
)
except Exception as e:
# if Popen itself fails there is no process object to poll; log the
# error and move on to the next command instead of raising NameError
msg = "Command '{cmd}' failed to start: {error}"
self.py3.log(msg.format(cmd=command, error=e))
continue
# persistent blocklet output can be of two forms. Either each row
# of the output is on a new line this is much easier to deal with)
# or else the output can be continuous and just flushed when ready.
# The second form is more tricky, if we find newlines then we
# switch to easy parsing of the output.
# When we have output we store in self.persistent_output and then
# trigger the module to update.
fd = process.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
has_newlines = False
while True:
line = process.stdout.read(1)
# switch to a non-blocking read as we do not know the output
# length
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
line += process.stdout.read(1024)
# switch back to blocking so we can wait for the next output
fcntl.fcntl(fd, fcntl.F_SETFL, fl)
if process.poll():
break
if self.py3.is_python_2():
line = line.decode("utf-8")
self.persistent_output = line
self.py3.update()
if line[-1] == "\n":
has_newlines = True
break
if line == "":
break
if has_newlines:
msg = "Switch to newline persist method {cmd}"
self.py3.log(msg.format(cmd=command))
# just read the output in a sane manner
for line in iter(process.stdout.readline, b""):
if process.poll():
break
if self.py3.is_python_2():
line = line.decode("utf-8")
self.persistent_output = line
self.py3.update()
self.py3.log("command exited {cmd}".format(cmd=command))
self.persistent_output = "Error\nError\n{}".format(
self.py3.COLOR_ERROR or self.py3.COLOR_BAD
)
self.py3.update() | Run the command inside a thread so that we can catch output for each
line as it comes in and display it. | Below is the instruction that describes the task:
### Input:
Run the command inside a thread so that we can catch output for each
line as it comes in and display it.
### Response:
def _persist(self):
"""
Run the command inside a thread so that we can catch output for each
line as it comes in and display it.
"""
# run the block/command
for command in self.commands:
try:
process = Popen(
[command],
stdout=PIPE,
stderr=PIPE,
universal_newlines=True,
env=self.env,
shell=True,
)
except Exception as e:
# if Popen itself fails there is no process object to poll; log the
# error and move on to the next command instead of raising NameError
msg = "Command '{cmd}' failed to start: {error}"
self.py3.log(msg.format(cmd=command, error=e))
continue
# persistent blocklet output can be of two forms. Either each row
# of the output is on a new line this is much easier to deal with)
# or else the output can be continuous and just flushed when ready.
# The second form is more tricky, if we find newlines then we
# switch to easy parsing of the output.
# When we have output we store in self.persistent_output and then
# trigger the module to update.
fd = process.stdout.fileno()
fl = fcntl.fcntl(fd, fcntl.F_GETFL)
has_newlines = False
while True:
line = process.stdout.read(1)
# switch to a non-blocking read as we do not know the output
# length
fcntl.fcntl(fd, fcntl.F_SETFL, fl | os.O_NONBLOCK)
line += process.stdout.read(1024)
# switch back to blocking so we can wait for the next output
fcntl.fcntl(fd, fcntl.F_SETFL, fl)
if process.poll():
break
if self.py3.is_python_2():
line = line.decode("utf-8")
self.persistent_output = line
self.py3.update()
if line[-1] == "\n":
has_newlines = True
break
if line == "":
break
if has_newlines:
msg = "Switch to newline persist method {cmd}"
self.py3.log(msg.format(cmd=command))
# just read the output in a sane manner
for line in iter(process.stdout.readline, b""):
if process.poll():
break
if self.py3.is_python_2():
line = line.decode("utf-8")
self.persistent_output = line
self.py3.update()
self.py3.log("command exited {cmd}".format(cmd=command))
self.persistent_output = "Error\nError\n{}".format(
self.py3.COLOR_ERROR or self.py3.COLOR_BAD
)
self.py3.update() |
def create_article(tree, template, title, language, slug=None, description=None,
page_title=None, menu_title=None, meta_description=None,
created_by=None, image=None, publication_date=None, publication_end_date=None,
published=False, login_required=False, creation_date=None, categories=[]):
"""
Create a CMS Article and it's title for the given language
"""
# validate tree
tree = tree.get_public_object()
assert tree.application_urls == 'CMSArticlesApp'
# validate template
assert template in [tpl[0] for tpl in settings.CMS_ARTICLES_TEMPLATES]
get_template(template)
# validate language:
assert language in get_language_list(tree.node.site_id), settings.CMS_LANGUAGES.get(tree.node.site_id)
# validate publication date
if publication_date:
assert isinstance(publication_date, datetime.date)
# validate publication end date
if publication_end_date:
assert isinstance(publication_end_date, datetime.date)
# validate creation date
if not creation_date:
creation_date = publication_date
if creation_date:
assert isinstance(creation_date, datetime.date)
# get username
if created_by:
try:
username = created_by.get_username()
except Exception:
username = force_text(created_by)
else:
username = 'script'
with current_user(username):
# create article
article = Article.objects.create(
tree=tree,
template=template,
login_required=login_required,
creation_date=creation_date,
publication_date=publication_date,
publication_end_date=publication_end_date,
languages=language,
)
for category in categories:
article.categories.add(category)
# create title
create_title(
article=article,
language=language,
title=title,
slug=slug,
description=description,
page_title=page_title,
menu_title=menu_title,
meta_description=meta_description,
creation_date=creation_date,
image=image,
)
# publish article
if published:
article.publish(language)
return article.reload() | Create a CMS Article and its title for the given language | Below is the instruction that describes the task:
### Input:
Create a CMS Article and its title for the given language
### Response:
def create_article(tree, template, title, language, slug=None, description=None,
page_title=None, menu_title=None, meta_description=None,
created_by=None, image=None, publication_date=None, publication_end_date=None,
published=False, login_required=False, creation_date=None, categories=[]):
"""
Create a CMS Article and its title for the given language
"""
# validate tree
tree = tree.get_public_object()
assert tree.application_urls == 'CMSArticlesApp'
# validate template
assert template in [tpl[0] for tpl in settings.CMS_ARTICLES_TEMPLATES]
get_template(template)
# validate language:
assert language in get_language_list(tree.node.site_id), settings.CMS_LANGUAGES.get(tree.node.site_id)
# validate publication date
if publication_date:
assert isinstance(publication_date, datetime.date)
# validate publication end date
if publication_end_date:
assert isinstance(publication_end_date, datetime.date)
# validate creation date
if not creation_date:
creation_date = publication_date
if creation_date:
assert isinstance(creation_date, datetime.date)
# get username
if created_by:
try:
username = created_by.get_username()
except Exception:
username = force_text(created_by)
else:
username = 'script'
with current_user(username):
# create article
article = Article.objects.create(
tree=tree,
template=template,
login_required=login_required,
creation_date=creation_date,
publication_date=publication_date,
publication_end_date=publication_end_date,
languages=language,
)
for category in categories:
article.categories.add(category)
# create title
create_title(
article=article,
language=language,
title=title,
slug=slug,
description=description,
page_title=page_title,
menu_title=menu_title,
meta_description=meta_description,
creation_date=creation_date,
image=image,
)
# publish article
if published:
article.publish(language)
return article.reload() |
def from_chords(self, chords, duration=1):
"""Add chords to the Track.
The given chords should be a list of shorthand strings or list of
list of shorthand strings, etc.
Each sublist divides the value by 2.
If a tuning is set, chords will be expanded so they have a proper
fingering.
Example:
>>> t = Track().from_chords(['C', ['Am', 'Dm'], 'G7', 'C#'], 1)
"""
tun = self.get_tuning()
def add_chord(chord, duration):
if type(chord) == list:
for c in chord:
add_chord(c, duration * 2)
else:
chord = NoteContainer().from_chord(chord)
if tun:
chord = tun.find_chord_fingering(chord,
return_best_as_NoteContainer=True)
if not self.add_notes(chord, duration):
# This should be the standard behaviour of add_notes
dur = self.bars[-1].value_left()
self.add_notes(chord, dur)
# warning should hold note
self.add_notes(chord, value.subtract(duration, dur))
for c in chords:
if c is not None:
add_chord(c, duration)
else:
self.add_notes(None, duration)
return self | Add chords to the Track.
The given chords should be a list of shorthand strings or list of
list of shorthand strings, etc.
Each sublist divides the value by 2.
If a tuning is set, chords will be expanded so they have a proper
fingering.
Example:
    >>> t = Track().from_chords(['C', ['Am', 'Dm'], 'G7', 'C#'], 1) | Below is the instruction that describes the task:
### Input:
Add chords to the Track.
The given chords should be a list of shorthand strings or list of
list of shorthand strings, etc.
Each sublist divides the value by 2.
If a tuning is set, chords will be expanded so they have a proper
fingering.
Example:
>>> t = Track().from_chords(['C', ['Am', 'Dm'], 'G7', 'C#'], 1)
### Response:
def from_chords(self, chords, duration=1):
"""Add chords to the Track.
The given chords should be a list of shorthand strings or list of
list of shorthand strings, etc.
Each sublist divides the value by 2.
If a tuning is set, chords will be expanded so they have a proper
fingering.
Example:
>>> t = Track().from_chords(['C', ['Am', 'Dm'], 'G7', 'C#'], 1)
"""
tun = self.get_tuning()
def add_chord(chord, duration):
if type(chord) == list:
for c in chord:
add_chord(c, duration * 2)
else:
chord = NoteContainer().from_chord(chord)
if tun:
chord = tun.find_chord_fingering(chord,
return_best_as_NoteContainer=True)
if not self.add_notes(chord, duration):
# This should be the standard behaviour of add_notes
dur = self.bars[-1].value_left()
self.add_notes(chord, dur)
# warning should hold note
self.add_notes(chord, value.subtract(duration, dur))
for c in chords:
if c is not None:
add_chord(c, duration)
else:
self.add_notes(None, duration)
return self |
def cree_widgets(self):
"""Create widgets and store them in self.widgets"""
for t in self.FIELDS:
if type(t) is str:
attr, kwargs = t, {}
else:
attr, kwargs = t[0], t[1].copy()
self.champs.append(attr)
is_editable = kwargs.pop("is_editable", self.is_editable)
args = [self.acces[attr], is_editable]
with_base = kwargs.pop("with_base", False)
if with_base:
args.append(self.acces.base)
if 'with_label' in kwargs:
label = kwargs.pop('with_label')
else:
label = ASSOCIATION[attr][0]
if kwargs:
w = ASSOCIATION[attr][3](*args, **kwargs)
else:
w = ASSOCIATION[attr][3](*args)
        self.widgets[attr] = (w, label) | Create widgets and store them in self.widgets | Below is the instruction that describes the task:
### Input:
Create widgets and store them in self.widgets
### Response:
def cree_widgets(self):
"""Create widgets and store them in self.widgets"""
for t in self.FIELDS:
if type(t) is str:
attr, kwargs = t, {}
else:
attr, kwargs = t[0], t[1].copy()
self.champs.append(attr)
is_editable = kwargs.pop("is_editable", self.is_editable)
args = [self.acces[attr], is_editable]
with_base = kwargs.pop("with_base", False)
if with_base:
args.append(self.acces.base)
if 'with_label' in kwargs:
label = kwargs.pop('with_label')
else:
label = ASSOCIATION[attr][0]
if kwargs:
w = ASSOCIATION[attr][3](*args, **kwargs)
else:
w = ASSOCIATION[attr][3](*args)
self.widgets[attr] = (w, label) |
def init_app(self, app):
"""Initializes your mail settings from the application settings.
You can use this if you want to set up your Mail instance
at configuration time.
:param app: Flask application instance
"""
state = self.init_mail(app.config, app.debug, app.testing)
# register extension with app
app.extensions = getattr(app, 'extensions', {})
app.extensions['mail'] = state
return state | Initializes your mail settings from the application settings.
You can use this if you want to set up your Mail instance
at configuration time.
    :param app: Flask application instance | Below is the instruction that describes the task:
### Input:
Initializes your mail settings from the application settings.
You can use this if you want to set up your Mail instance
at configuration time.
:param app: Flask application instance
### Response:
def init_app(self, app):
"""Initializes your mail settings from the application settings.
You can use this if you want to set up your Mail instance
at configuration time.
:param app: Flask application instance
"""
state = self.init_mail(app.config, app.debug, app.testing)
# register extension with app
app.extensions = getattr(app, 'extensions', {})
app.extensions['mail'] = state
return state |
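A minimal usage sketch of the factory pattern this method enables; Flask is real, but the `Mail` name is assumed to be whatever class defines the `init_app` above.

# Hedged usage sketch (assumes the surrounding class is exported as Mail):
from flask import Flask

app = Flask(__name__)
app.config['MAIL_SERVER'] = 'localhost'   # settings read from app.config

mail = Mail()                # extension created without an app
state = mail.init_app(app)   # bound to the app later, per application
assert app.extensions['mail'] is state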
def _textOutput(self, gaObjects):
"""
Prints out the specified Variant objects in a VCF-like form.
"""
for variant in gaObjects:
print(
variant.id, variant.variant_set_id, variant.names,
variant.reference_name, variant.start, variant.end,
variant.reference_bases, variant.alternate_bases,
sep="\t", end="\t")
for key, value in variant.attributes.attr.items():
val = value.values[0].string_value
print(key, val, sep="=", end=";")
print("\t", end="")
for c in variant.calls:
print(
c.call_set_id,
                    str(c.genotype).replace('\n', ''),
c.genotype_likelihood, c.attributes,
c.phaseset, sep=":", end="\t")
        print() | Prints out the specified Variant objects in a VCF-like form. | Below is the instruction that describes the task:
### Input:
Prints out the specified Variant objects in a VCF-like form.
### Response:
def _textOutput(self, gaObjects):
"""
Prints out the specified Variant objects in a VCF-like form.
"""
for variant in gaObjects:
print(
variant.id, variant.variant_set_id, variant.names,
variant.reference_name, variant.start, variant.end,
variant.reference_bases, variant.alternate_bases,
sep="\t", end="\t")
for key, value in variant.attributes.attr.items():
val = value.values[0].string_value
print(key, val, sep="=", end=";")
print("\t", end="")
for c in variant.calls:
print(
c.call_set_id,
                    str(c.genotype).replace('\n', ''),
c.genotype_likelihood, c.attributes,
c.phaseset, sep=":", end="\t")
print() |
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
        return value | Additional object or objects required by this object. | Below is the instruction that describes the task:
### Input:
Additional object or objects required by this object.
### Response:
def requires(self):
"""Additional object or objects required by this object."""
# NOTE: spec says this can also be a list of strings
value = self._schema.get("requires", {})
if not isinstance(value, (basestring, dict)):
raise SchemaError(
"requires value {0!r} is neither a string nor an"
" object".format(value))
return value |
def Deserialize(self, reader):
"""
Read serialized data from byte stream
Args:
reader (neocore.IO.BinaryReader): reader to read byte data from
"""
self.name = reader.ReadVarString().decode('utf-8')
self.symbol = reader.ReadVarString().decode('utf-8')
self.decimals = reader.ReadUInt8() | Read serialized data from byte stream
Args:
            reader (neocore.IO.BinaryReader): reader to read byte data from | Below is the instruction that describes the task:
### Input:
Read serialized data from byte stream
Args:
reader (neocore.IO.BinaryReader): reader to read byte data from
### Response:
def Deserialize(self, reader):
"""
Read serialized data from byte stream
Args:
reader (neocore.IO.BinaryReader): reader to read byte data from
"""
self.name = reader.ReadVarString().decode('utf-8')
self.symbol = reader.ReadVarString().decode('utf-8')
self.decimals = reader.ReadUInt8() |
def _extract_error():
"""
Extracts the last OS error message into a python unicode string
:return:
A unicode string error message
"""
error_num = errno()
try:
error_string = os.strerror(error_num)
    except ValueError:
return str_cls(error_num)
if isinstance(error_string, str_cls):
return error_string
return _try_decode(error_string) | Extracts the last OS error message into a python unicode string
:return:
    A unicode string error message | Below is the instruction that describes the task:
### Input:
Extracts the last OS error message into a python unicode string
:return:
A unicode string error message
### Response:
def _extract_error():
"""
Extracts the last OS error message into a python unicode string
:return:
A unicode string error message
"""
error_num = errno()
try:
error_string = os.strerror(error_num)
    except ValueError:
return str_cls(error_num)
if isinstance(error_string, str_cls):
return error_string
return _try_decode(error_string) |
def getColData(self, attri, fname, numtype='cycNum'):
"""
In this method a column of data for the associated column
attribute is returned.
Parameters
----------
attri : string
The name of the attribute we are looking for.
fname : string
The name of the file we are getting the data from or the
cycle number found in the filename.
numtype : string, optional
Determines whether fname is the name of a file or, the
cycle number. If it is 'file' it will then interpret it as
a file, if it is 'cycNum' it will then interpret it as a
cycle number. The default is "cycNum".
"""
fname=self.findFile(fname,numtype)
f=open(fname,'r')
for i in range(self.index+1):
f.readline()
lines=f.readlines()
for i in range(len(lines)):
lines[i]=lines[i].strip()
lines[i]=lines[i].split()
index=0
data=[]
    while index < len(self.dcols):
if attri== self.dcols[index]:
break
index+=1
for i in range(len(lines)):
if index==5 and len(lines[i])==7:
data.append(str(lines[i][index].capitalize())+'-'\
+str(lines[i][index+1]))
elif index==5 and len(lines[i])!=7:
tmp=str(lines[i][index])
if tmp[len(tmp)-1].isdigit():
tmp1=tmp[0]+tmp[1]
tmp1=tmp1.capitalize()
tmp2=''
for j in range(len(tmp)):
if j == 0 or j == 1:
continue
tmp2+=tmp[j]
data.append(tmp1+'-'+tmp2)
elif tmp=='PROT':
data.append('H-1')
            elif tmp in ('NEUT', 'NEUTR', 'nn', 'N 1', 'N-1'):  # neutron aliases
data.append('N-1')
else:
data.append(tmp)
elif index==0:
data.append(int(lines[i][index]))
else:
data.append(float(lines[i][index]))
return array(data) | In this method a column of data for the associated column
attribute is returned.
Parameters
----------
attri : string
The name of the attribute we are looking for.
fname : string
The name of the file we are getting the data from or the
cycle number found in the filename.
numtype : string, optional
Determines whether fname is the name of a file or, the
cycle number. If it is 'file' it will then interpret it as
a file, if it is 'cycNum' it will then interpret it as a
    cycle number. The default is "cycNum". | Below is the instruction that describes the task:
### Input:
In this method a column of data for the associated column
attribute is returned.
Parameters
----------
attri : string
The name of the attribute we are looking for.
fname : string
The name of the file we are getting the data from or the
cycle number found in the filename.
numtype : string, optional
Determines whether fname is the name of a file or, the
cycle number. If it is 'file' it will then interpret it as
a file, if it is 'cycNum' it will then interpret it as a
cycle number. The default is "cycNum".
### Response:
def getColData(self, attri, fname, numtype='cycNum'):
"""
In this method a column of data for the associated column
attribute is returned.
Parameters
----------
attri : string
The name of the attribute we are looking for.
fname : string
The name of the file we are getting the data from or the
cycle number found in the filename.
numtype : string, optional
        Determines whether fname is the name of a file or the
cycle number. If it is 'file' it will then interpret it as
a file, if it is 'cycNum' it will then interpret it as a
cycle number. The default is "cycNum".
"""
fname=self.findFile(fname,numtype)
f=open(fname,'r')
for i in range(self.index+1):
f.readline()
lines=f.readlines()
for i in range(len(lines)):
lines[i]=lines[i].strip()
lines[i]=lines[i].split()
index=0
data=[]
    while index < len(self.dcols):
if attri== self.dcols[index]:
break
index+=1
for i in range(len(lines)):
if index==5 and len(lines[i])==7:
data.append(str(lines[i][index].capitalize())+'-'\
+str(lines[i][index+1]))
elif index==5 and len(lines[i])!=7:
tmp=str(lines[i][index])
if tmp[len(tmp)-1].isdigit():
tmp1=tmp[0]+tmp[1]
tmp1=tmp1.capitalize()
tmp2=''
for j in range(len(tmp)):
if j == 0 or j == 1:
continue
tmp2+=tmp[j]
data.append(tmp1+'-'+tmp2)
elif tmp=='PROT':
data.append('H-1')
            elif tmp in ('NEUT', 'NEUTR', 'nn', 'N 1', 'N-1'):  # neutron aliases
data.append('N-1')
else:
data.append(tmp)
elif index==0:
data.append(int(lines[i][index]))
else:
data.append(float(lines[i][index]))
return array(data) |
def _handle_get_cfn_template_response(self, response, application_id, template_id):
"""
Handles the response from the SAR service call
:param dict response: the response dictionary from the app repo
:param string application_id: the ApplicationId
:param string template_id: the unique TemplateId for this application
"""
status = response['Status']
if status != "ACTIVE":
# Other options are PREPARING and EXPIRED.
if status == 'EXPIRED':
message = ("Template for {} with id {} returned status: {}. Cannot access an expired "
"template.".format(application_id, template_id, status))
raise InvalidResourceException(application_id, message)
self._in_progress_templates.append((application_id, template_id)) | Handles the response from the SAR service call
:param dict response: the response dictionary from the app repo
:param string application_id: the ApplicationId
        :param string template_id: the unique TemplateId for this application | Below is the instruction that describes the task:
### Input:
Handles the response from the SAR service call
:param dict response: the response dictionary from the app repo
:param string application_id: the ApplicationId
:param string template_id: the unique TemplateId for this application
### Response:
def _handle_get_cfn_template_response(self, response, application_id, template_id):
"""
Handles the response from the SAR service call
:param dict response: the response dictionary from the app repo
:param string application_id: the ApplicationId
:param string template_id: the unique TemplateId for this application
"""
status = response['Status']
if status != "ACTIVE":
# Other options are PREPARING and EXPIRED.
if status == 'EXPIRED':
message = ("Template for {} with id {} returned status: {}. Cannot access an expired "
"template.".format(application_id, template_id, status))
raise InvalidResourceException(application_id, message)
self._in_progress_templates.append((application_id, template_id)) |
def digit(uni_char, default_value=None):
"""Returns the digit value assigned to the Unicode character uni_char as
integer. If no such value is defined, default is returned, or, if not
given, ValueError is raised."""
uni_char = unicod(uni_char) # Force to Unicode.
if default_value is not None:
return unicodedata.digit(uni_char, default_value)
else:
return unicodedata.digit(uni_char) | Returns the digit value assigned to the Unicode character uni_char as
integer. If no such value is defined, default is returned, or, if not
    given, ValueError is raised. | Below is the instruction that describes the task:
### Input:
Returns the digit value assigned to the Unicode character uni_char as
integer. If no such value is defined, default is returned, or, if not
given, ValueError is raised.
### Response:
def digit(uni_char, default_value=None):
"""Returns the digit value assigned to the Unicode character uni_char as
integer. If no such value is defined, default is returned, or, if not
given, ValueError is raised."""
uni_char = unicod(uni_char) # Force to Unicode.
if default_value is not None:
return unicodedata.digit(uni_char, default_value)
else:
return unicodedata.digit(uni_char) |
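A short usage sketch of the wrapper above; unicodedata.digit is the standard-library call it delegates to.

# Usage sketch (standard library only):
import unicodedata

print(unicodedata.digit(u'7'))       # -> 7
print(unicodedata.digit(u'\u0663'))  # ARABIC-INDIC DIGIT THREE -> 3
print(unicodedata.digit(u'a', -1))   # no digit value, default returned -> -1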
def setRecord( self, record ):
"""
Sets the record that is linked with this widget.
:param record | <orb.Table>
"""
super(XBasicCardWidget, self).setRecord(record)
browser = self.browserWidget()
if ( not browser ):
return
factory = browser.factory()
if ( not factory ):
return
self._thumbnailButton.setIcon(factory.thumbnail(record))
self._titleLabel.setText(factory.thumbnailText(record)) | Sets the record that is linked with this widget.
        :param record | <orb.Table> | Below is the instruction that describes the task:
### Input:
Sets the record that is linked with this widget.
:param record | <orb.Table>
### Response:
def setRecord( self, record ):
"""
Sets the record that is linked with this widget.
:param record | <orb.Table>
"""
super(XBasicCardWidget, self).setRecord(record)
browser = self.browserWidget()
if ( not browser ):
return
factory = browser.factory()
if ( not factory ):
return
self._thumbnailButton.setIcon(factory.thumbnail(record))
self._titleLabel.setText(factory.thumbnailText(record)) |
def log(self,phrase):
"""log something that happened. The first time phrase is passed the
start time is saved. The second time the phrase is logged, the
elapsed time is written
Parameters
----------
phrase : str
the thing that happened
"""
t = datetime.now()
if phrase in self.items.keys():
s = str(t) + ' finished: ' + str(phrase) + " took: " + \
str(t - self.items[phrase]) + '\n'
if self.echo:
print(s,end='')
if self.filename:
self.f.write(s)
self.f.flush()
self.items.pop(phrase)
else:
s = str(t) + ' starting: ' + str(phrase) + '\n'
if self.echo:
print(s,end='')
if self.filename:
self.f.write(s)
self.f.flush()
self.items[phrase] = copy.deepcopy(t) | log something that happened. The first time phrase is passed the
start time is saved. The second time the phrase is logged, the
elapsed time is written
Parameters
----------
phrase : str
            the thing that happened | Below is the instruction that describes the task:
### Input:
log something that happened. The first time phrase is passed the
start time is saved. The second time the phrase is logged, the
elapsed time is written
Parameters
----------
phrase : str
the thing that happened
### Response:
def log(self,phrase):
"""log something that happened. The first time phrase is passed the
start time is saved. The second time the phrase is logged, the
elapsed time is written
Parameters
----------
phrase : str
the thing that happened
"""
t = datetime.now()
if phrase in self.items.keys():
s = str(t) + ' finished: ' + str(phrase) + " took: " + \
str(t - self.items[phrase]) + '\n'
if self.echo:
print(s,end='')
if self.filename:
self.f.write(s)
self.f.flush()
self.items.pop(phrase)
else:
s = str(t) + ' starting: ' + str(phrase) + '\n'
if self.echo:
print(s,end='')
if self.filename:
self.f.write(s)
self.f.flush()
self.items[phrase] = copy.deepcopy(t) |
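A usage sketch of the start/finish pairing described in the docstring; `logger` is an assumed instance of the surrounding class with echo enabled.

# Hedged usage sketch: the same phrase first starts, then stops, a timer.
logger.log('loading data')   # prints '... starting: loading data'
total = sum(range(10**6))    # the work being timed
logger.log('loading data')   # prints '... finished: loading data took: ...'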
def _expand_pattern_lists(pattern, **mappings):
'''
Expands the pattern for any list-valued mappings, such that for any list of
length N in the mappings present in the pattern, N copies of the pattern are
returned, each with an element of the list substituted.
pattern:
A pattern to expand, for example ``by-role/{grains[roles]}``
mappings:
A dictionary of variables that can be expanded into the pattern.
Example: Given the pattern `` by-role/{grains[roles]}`` and the below grains
.. code-block:: yaml
grains:
roles:
- web
- database
This function will expand into two patterns,
``[by-role/web, by-role/database]``.
Note that this method does not expand any non-list patterns.
'''
expanded_patterns = []
f = string.Formatter()
'''
This function uses a string.Formatter to get all the formatting tokens from
the pattern, then recursively replaces tokens whose expanded value is a
list. For a list with N items, it will create N new pattern strings and
then continue with the next token. In practice this is expected to not be
very expensive, since patterns will typically involve a handful of lists at
most.
''' # pylint: disable=W0105
for (_, field_name, _, _) in f.parse(pattern):
if field_name is None:
continue
(value, _) = f.get_field(field_name, None, mappings)
if isinstance(value, list):
token = '{{{0}}}'.format(field_name)
expanded = [pattern.replace(token, six.text_type(elem)) for elem in value]
for expanded_item in expanded:
result = _expand_pattern_lists(expanded_item, **mappings)
expanded_patterns += result
return expanded_patterns
return [pattern] | Expands the pattern for any list-valued mappings, such that for any list of
length N in the mappings present in the pattern, N copies of the pattern are
returned, each with an element of the list substituted.
pattern:
A pattern to expand, for example ``by-role/{grains[roles]}``
mappings:
A dictionary of variables that can be expanded into the pattern.
Example: Given the pattern `` by-role/{grains[roles]}`` and the below grains
.. code-block:: yaml
grains:
roles:
- web
- database
This function will expand into two patterns,
``[by-role/web, by-role/database]``.
    Note that this method does not expand any non-list patterns. | Below is the instruction that describes the task:
### Input:
Expands the pattern for any list-valued mappings, such that for any list of
length N in the mappings present in the pattern, N copies of the pattern are
returned, each with an element of the list substituted.
pattern:
A pattern to expand, for example ``by-role/{grains[roles]}``
mappings:
A dictionary of variables that can be expanded into the pattern.
Example: Given the pattern `` by-role/{grains[roles]}`` and the below grains
.. code-block:: yaml
grains:
roles:
- web
- database
This function will expand into two patterns,
``[by-role/web, by-role/database]``.
Note that this method does not expand any non-list patterns.
### Response:
def _expand_pattern_lists(pattern, **mappings):
'''
Expands the pattern for any list-valued mappings, such that for any list of
length N in the mappings present in the pattern, N copies of the pattern are
returned, each with an element of the list substituted.
pattern:
A pattern to expand, for example ``by-role/{grains[roles]}``
mappings:
A dictionary of variables that can be expanded into the pattern.
Example: Given the pattern `` by-role/{grains[roles]}`` and the below grains
.. code-block:: yaml
grains:
roles:
- web
- database
This function will expand into two patterns,
``[by-role/web, by-role/database]``.
Note that this method does not expand any non-list patterns.
'''
expanded_patterns = []
f = string.Formatter()
'''
This function uses a string.Formatter to get all the formatting tokens from
the pattern, then recursively replaces tokens whose expanded value is a
list. For a list with N items, it will create N new pattern strings and
then continue with the next token. In practice this is expected to not be
very expensive, since patterns will typically involve a handful of lists at
most.
''' # pylint: disable=W0105
for (_, field_name, _, _) in f.parse(pattern):
if field_name is None:
continue
(value, _) = f.get_field(field_name, None, mappings)
if isinstance(value, list):
token = '{{{0}}}'.format(field_name)
expanded = [pattern.replace(token, six.text_type(elem)) for elem in value]
for expanded_item in expanded:
result = _expand_pattern_lists(expanded_item, **mappings)
expanded_patterns += result
return expanded_patterns
return [pattern] |
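A usage sketch mirroring the grains example in the docstring above.

# Usage sketch, mirroring the docstring example:
grains = {'roles': ['web', 'database']}
print(_expand_pattern_lists('by-role/{grains[roles]}', grains=grains))
# -> ['by-role/web', 'by-role/database']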
def add_file(self, path, yaml):
"""
Adds given file to the file index
"""
if is_job_config(yaml):
name = self.get_job_name(yaml)
file_data = FileData(path=path, yaml=yaml)
self.files[path] = file_data
self.jobs[name] = file_data
else:
self.files[path] = FileData(path=path, yaml=yaml) | Adds given file to the file index | Below is the the instruction that describes the task:
### Input:
Adds given file to the file index
### Response:
def add_file(self, path, yaml):
"""
Adds given file to the file index
"""
if is_job_config(yaml):
name = self.get_job_name(yaml)
file_data = FileData(path=path, yaml=yaml)
self.files[path] = file_data
self.jobs[name] = file_data
else:
self.files[path] = FileData(path=path, yaml=yaml) |
def read_nonblocking(self, size=1, timeout=None):
"""This reads data from the file descriptor.
This is a simple implementation suitable for a regular file. Subclasses using ptys or pipes should override it.
The timeout parameter is ignored.
"""
try:
s = os.read(self.child_fd, size)
except OSError as err:
if err.args[0] == errno.EIO:
# Linux-style EOF
self.flag_eof = True
raise EOF('End Of File (EOF). Exception style platform.')
raise
if s == b'':
# BSD-style EOF
self.flag_eof = True
raise EOF('End Of File (EOF). Empty string style platform.')
s = self._decoder.decode(s, final=False)
self._log(s, 'read')
return s | This reads data from the file descriptor.
This is a simple implementation suitable for a regular file. Subclasses using ptys or pipes should override it.
        The timeout parameter is ignored. | Below is the instruction that describes the task:
### Input:
This reads data from the file descriptor.
This is a simple implementation suitable for a regular file. Subclasses using ptys or pipes should override it.
The timeout parameter is ignored.
### Response:
def read_nonblocking(self, size=1, timeout=None):
"""This reads data from the file descriptor.
This is a simple implementation suitable for a regular file. Subclasses using ptys or pipes should override it.
The timeout parameter is ignored.
"""
try:
s = os.read(self.child_fd, size)
except OSError as err:
if err.args[0] == errno.EIO:
# Linux-style EOF
self.flag_eof = True
raise EOF('End Of File (EOF). Exception style platform.')
raise
if s == b'':
# BSD-style EOF
self.flag_eof = True
raise EOF('End Of File (EOF). Empty string style platform.')
s = self._decoder.decode(s, final=False)
self._log(s, 'read')
return s |
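A hedged usage sketch of the EOF contract above; `fd_spawn` is an assumed instance wrapping an open file descriptor.

# Hedged usage sketch: drain a descriptor until the EOF exception fires.
chunks = []
try:
    while True:
        chunks.append(fd_spawn.read_nonblocking(size=1024))
except EOF:
    pass
print(''.join(chunks))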
def get_dependency_walker():
"""
Checks if `depends.exe` is in the system PATH. If not, it will be downloaded
and extracted to a temporary directory. Note that the file will not be
deleted afterwards.
Returns the path to the Dependency Walker executable.
"""
for dirname in os.getenv('PATH', '').split(os.pathsep):
filename = os.path.join(dirname, 'depends.exe')
if os.path.isfile(filename):
logger.info('Dependency Walker found at "{}"'.format(filename))
return filename
temp_exe = os.path.join(tempfile.gettempdir(), 'depends.exe')
temp_dll = os.path.join(tempfile.gettempdir(), 'depends.dll')
if os.path.isfile(temp_exe):
logger.info('Dependency Walker found at "{}"'.format(temp_exe))
return temp_exe
logger.info('Dependency Walker not found. Downloading ...')
with urlopen('http://dependencywalker.com/depends22_x64.zip') as fp:
data = fp.read()
logger.info('Extracting Dependency Walker to "{}"'.format(temp_exe))
with zipfile.ZipFile(io.BytesIO(data)) as fp:
with fp.open('depends.exe') as src:
with open(temp_exe, 'wb') as dst:
shutil.copyfileobj(src, dst)
with fp.open('depends.dll') as src:
with open(temp_dll, 'wb') as dst:
shutil.copyfileobj(src, dst)
return temp_exe | Checks if `depends.exe` is in the system PATH. If not, it will be downloaded
and extracted to a temporary directory. Note that the file will not be
deleted afterwards.
    Returns the path to the Dependency Walker executable. | Below is the instruction that describes the task:
### Input:
Checks if `depends.exe` is in the system PATH. If not, it will be downloaded
and extracted to a temporary directory. Note that the file will not be
deleted afterwards.
Returns the path to the Dependency Walker executable.
### Response:
def get_dependency_walker():
"""
Checks if `depends.exe` is in the system PATH. If not, it will be downloaded
and extracted to a temporary directory. Note that the file will not be
deleted afterwards.
Returns the path to the Dependency Walker executable.
"""
for dirname in os.getenv('PATH', '').split(os.pathsep):
filename = os.path.join(dirname, 'depends.exe')
if os.path.isfile(filename):
logger.info('Dependency Walker found at "{}"'.format(filename))
return filename
temp_exe = os.path.join(tempfile.gettempdir(), 'depends.exe')
temp_dll = os.path.join(tempfile.gettempdir(), 'depends.dll')
if os.path.isfile(temp_exe):
logger.info('Dependency Walker found at "{}"'.format(temp_exe))
return temp_exe
logger.info('Dependency Walker not found. Downloading ...')
with urlopen('http://dependencywalker.com/depends22_x64.zip') as fp:
data = fp.read()
logger.info('Extracting Dependency Walker to "{}"'.format(temp_exe))
with zipfile.ZipFile(io.BytesIO(data)) as fp:
with fp.open('depends.exe') as src:
with open(temp_exe, 'wb') as dst:
shutil.copyfileobj(src, dst)
with fp.open('depends.dll') as src:
with open(temp_dll, 'wb') as dst:
shutil.copyfileobj(src, dst)
return temp_exe |
def list_tags(self, pattern=None):
"""
List all tags made on this project.
:param pattern: filters the starting letters of the return value
:return:
"""
request_url = "{}tags".format(self.create_basic_url())
params = None
if pattern:
params = {'pattern': pattern}
return_value = self._call_api(request_url, params=params)
return return_value['tags'] | List all tags made on this project.
:param pattern: filters the starting letters of the return value
        :return: | Below is the instruction that describes the task:
### Input:
List all tags made on this project.
:param pattern: filters the starting letters of the return value
:return:
### Response:
def list_tags(self, pattern=None):
"""
List all tags made on this project.
:param pattern: filters the starting letters of the return value
:return:
"""
request_url = "{}tags".format(self.create_basic_url())
params = None
if pattern:
params = {'pattern': pattern}
return_value = self._call_api(request_url, params=params)
return return_value['tags'] |
def get_labs(format):
"""Gets Techshop data from techshop.ws."""
techshops_soup = data_from_techshop_ws(techshop_us_url)
techshops = {}
# Load all the TechShops
# By first parsing the html
data = techshops_soup.findAll('div', attrs={'id': 'main-content'})
for element in data:
links = element.findAll('a')
hrefs = {}
for k, a in enumerate(links):
if "contact" not in a['href']:
hrefs[k] = a['href']
for k, v in hrefs.iteritems():
if "http://techshop.ws/" not in v:
hrefs[k] = "http://techshop.ws/" + v
else:
hrefs[k] = v
for k, v in hrefs.iteritems():
if "http://techshop.com/" in v:
hrefs[k] = v.replace("http://techshop.com/", "")
# Remove duplicate pages
hr = []
for key, value in hrefs.iteritems():
if value not in hr:
hr.append(value)
hrefs = hr
# Check all pages
for page in hrefs:
data = data_from_techshop_ws(page)
current_lab = Techshop()
name = data.title.contents[0].split('-- ')[1].encode('utf-8')
if "TechShop" not in name:
name = "TechShop " + name
current_lab.name = name
current_lab.slug = name
current_lab.url = page
# Find Facebook and Twitter links
current_lab.links = {"facebook": "", "twitter": ""}
page_links = data.findAll('a')
for link in page_links:
if link.has_attr("href"):
if "facebook" in link.attrs["href"]:
current_lab.links["facebook"] = link.attrs["href"]
if "twitter" in link.attrs["href"]:
current_lab.links["twitter"] = link.attrs["href"]
# Find the coordinates by analysing the embedded google map
iframes = data.findAll('iframe')
if len(iframes) != 0:
for iframe in iframes:
embed_url = iframe.attrs["src"]
if "google" in embed_url:
two_d = embed_url.find("2d")
three_d = embed_url.find("3d")
longitude = embed_url[two_d:].split('!')[0]
latitude = embed_url[three_d:].split('!')[0]
longitude = longitude[2:]
latitude = latitude[2:]
# ... or the link to google map
else:
page_links = data.findAll('a')
for link in page_links:
# one case...
if "maps.google.com/" in link.attrs["href"]:
embed_url = link.attrs["href"]
if "ll=" in embed_url:
first_string = embed_url.split('&sspn')[0]
coordinates = first_string.split('ll=')[1]
latitude = coordinates.split(',')[0]
longitude = coordinates.split(',')[1]
# ... another case
elif "www.google.com/maps" in link.attrs["href"]:
embed_url = link.attrs["href"]
if "1d" in embed_url:
one_d = embed_url.find("1d")
two_d = embed_url.find("2d")
longitude = embed_url[one_d:].split('!')[0]
latitude = embed_url[two_d:].split('!')[0]
longitude = longitude[2:]
latitude = latitude[2:]
current_lab.latitude = latitude
current_lab.longitude = longitude
current_lab.continent = "North America"
current_lab.country_code = "USA"
current_lab.country = "United States of America"
location = geolocator.reverse((latitude, longitude))
if "city" in location.raw["address"]:
current_lab.county = location.raw["address"]["city"].encode(
'utf-8')
if "county" in location.raw["address"]:
current_lab.county = location.raw["address"]["county"].encode(
'utf-8')
if "state" in location.raw["address"]:
current_lab.state = location.raw["address"]["state"].encode(
'utf-8')
if "postcode" in location.raw["address"]:
current_lab.postal_code = location.raw["address"][
"postcode"].encode('utf-8')
current_lab.address_1 = location.address.encode('utf-8')
# Add the lab to the list
techshops[current_lab.slug] = current_lab
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in techshops:
output[j] = techshops[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in techshops:
single = techshops[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in techshops:
output[j] = techshops[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = techshops
    # Default: return an object
else:
output = techshops
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
    return output | Gets Techshop data from techshop.ws. | Below is the instruction that describes the task:
### Input:
Gets Techshop data from techshop.ws.
### Response:
def get_labs(format):
"""Gets Techshop data from techshop.ws."""
techshops_soup = data_from_techshop_ws(techshop_us_url)
techshops = {}
# Load all the TechShops
# By first parsing the html
data = techshops_soup.findAll('div', attrs={'id': 'main-content'})
for element in data:
links = element.findAll('a')
hrefs = {}
for k, a in enumerate(links):
if "contact" not in a['href']:
hrefs[k] = a['href']
for k, v in hrefs.iteritems():
if "http://techshop.ws/" not in v:
hrefs[k] = "http://techshop.ws/" + v
else:
hrefs[k] = v
for k, v in hrefs.iteritems():
if "http://techshop.com/" in v:
hrefs[k] = v.replace("http://techshop.com/", "")
# Remove duplicate pages
hr = []
for key, value in hrefs.iteritems():
if value not in hr:
hr.append(value)
hrefs = hr
# Check all pages
for page in hrefs:
data = data_from_techshop_ws(page)
current_lab = Techshop()
name = data.title.contents[0].split('-- ')[1].encode('utf-8')
if "TechShop" not in name:
name = "TechShop " + name
current_lab.name = name
current_lab.slug = name
current_lab.url = page
# Find Facebook and Twitter links
current_lab.links = {"facebook": "", "twitter": ""}
page_links = data.findAll('a')
for link in page_links:
if link.has_attr("href"):
if "facebook" in link.attrs["href"]:
current_lab.links["facebook"] = link.attrs["href"]
if "twitter" in link.attrs["href"]:
current_lab.links["twitter"] = link.attrs["href"]
# Find the coordinates by analysing the embedded google map
iframes = data.findAll('iframe')
if len(iframes) != 0:
for iframe in iframes:
embed_url = iframe.attrs["src"]
if "google" in embed_url:
two_d = embed_url.find("2d")
three_d = embed_url.find("3d")
longitude = embed_url[two_d:].split('!')[0]
latitude = embed_url[three_d:].split('!')[0]
longitude = longitude[2:]
latitude = latitude[2:]
# ... or the link to google map
else:
page_links = data.findAll('a')
for link in page_links:
# one case...
if "maps.google.com/" in link.attrs["href"]:
embed_url = link.attrs["href"]
if "ll=" in embed_url:
first_string = embed_url.split('&sspn')[0]
coordinates = first_string.split('ll=')[1]
latitude = coordinates.split(',')[0]
longitude = coordinates.split(',')[1]
# ... another case
elif "www.google.com/maps" in link.attrs["href"]:
embed_url = link.attrs["href"]
if "1d" in embed_url:
one_d = embed_url.find("1d")
two_d = embed_url.find("2d")
longitude = embed_url[one_d:].split('!')[0]
latitude = embed_url[two_d:].split('!')[0]
longitude = longitude[2:]
latitude = latitude[2:]
current_lab.latitude = latitude
current_lab.longitude = longitude
current_lab.continent = "North America"
current_lab.country_code = "USA"
current_lab.country = "United States of America"
location = geolocator.reverse((latitude, longitude))
if "city" in location.raw["address"]:
current_lab.county = location.raw["address"]["city"].encode(
'utf-8')
if "county" in location.raw["address"]:
current_lab.county = location.raw["address"]["county"].encode(
'utf-8')
if "state" in location.raw["address"]:
current_lab.state = location.raw["address"]["state"].encode(
'utf-8')
if "postcode" in location.raw["address"]:
current_lab.postal_code = location.raw["address"][
"postcode"].encode('utf-8')
current_lab.address_1 = location.address.encode('utf-8')
# Add the lab to the list
techshops[current_lab.slug] = current_lab
# Return a dictiornary / json
if format.lower() == "dict" or format.lower() == "json":
output = {}
for j in techshops:
output[j] = techshops[j].__dict__
# Return a geojson
elif format.lower() == "geojson" or format.lower() == "geo":
labs_list = []
for l in techshops:
single = techshops[l].__dict__
single_lab = Feature(
type="Feature",
geometry=Point((single["latitude"], single["longitude"])),
properties=single)
labs_list.append(single_lab)
output = dumps(FeatureCollection(labs_list))
# Return a Pandas DataFrame
elif format.lower() == "pandas" or format.lower() == "dataframe":
output = {}
for j in techshops:
output[j] = techshops[j].__dict__
# Transform the dict into a Pandas DataFrame
output = pd.DataFrame.from_dict(output)
output = output.transpose()
# Return an object
elif format.lower() == "object" or format.lower() == "obj":
output = techshops
    # Default: return an object
else:
output = techshops
# Return a proper json
if format.lower() == "json":
output = json.dumps(output)
return output |
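A hedged usage sketch of the format switch above; network access and the record's dependencies (BeautifulSoup, geopy, pandas, geojson) are assumed.

# Hedged usage sketch (requires network access plus the listed deps):
labs_json = get_labs(format='json')    # JSON string
labs_df = get_labs(format='pandas')    # DataFrame, one row per TechShop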
def to_affine(aff, dims=None):
'''
to_affine(None) yields None.
to_affine(data) yields an affine transformation matrix equivalent to that given in data. Such a
matrix may be specified either as (matrix, offset_vector), as an (n+1)x(n+1) matrix, or, as an
n x (n+1) matrix.
to_affine(data, dims) additionally requires that the dimensionality of the data be dims; meaning
that the returned matrix will be of size (dims+1) x (dims+1).
'''
if aff is None: return None
if isinstance(aff, _tuple_type):
# allowed to be (mtx, offset)
if (len(aff) != 2 or
not pimms.is_matrix(aff[0], 'real') or
not pimms.is_vector(aff[1], 'real')):
raise ValueError('affine transforms must be matrices or (mtx,offset) tuples')
mtx = np.asarray(aff[0])
off = np.asarray(aff[1])
if dims is not None:
if mtx.shape[0] != dims or mtx.shape[1] != dims:
raise ValueError('%dD affine matrix must be %d x %d' % (dims,dims,dims))
if off.shape[0] != dims:
raise ValueError('%dD affine offset must have length %d' % (dims,dims))
else:
dims = off.shape[0]
if mtx.shape[0] != dims or mtx.shape[1] != dims:
raise ValueError('with offset size=%d, matrix must be %d x %d' % (dims,dims,dims))
        aff = np.zeros((dims+1,dims+1), dtype=float)
aff[dims,dims] = 1
aff[0:dims,0:dims] = mtx
aff[0:dims,dims] = off
return pimms.imm_array(aff)
if not pimms.is_matrix(aff, 'real'):
raise ValueError('affine transforms must be matrices or (mtx, offset) tuples')
aff = np.asarray(aff)
if dims is None:
dims = aff.shape[1] - 1
if aff.shape[0] == dims:
lastrow = np.zeros((1,dims+1))
lastrow[0,-1] = 1
aff = np.concatenate((aff, lastrow))
if aff.shape[1] != dims+1 or aff.shape[0] != dims+1:
arg = (dims, dims,dims+1, dims+1,dims+1)
raise ValueError('%dD affine matrix must be %dx%d or %dx%d' % arg)
return aff | to_affine(None) yields None.
to_affine(data) yields an affine transformation matrix equivalent to that given in data. Such a
matrix may be specified either as (matrix, offset_vector), as an (n+1)x(n+1) matrix, or, as an
n x (n+1) matrix.
to_affine(data, dims) additionally requires that the dimensionality of the data be dims; meaning
    that the returned matrix will be of size (dims+1) x (dims+1). | Below is the instruction that describes the task:
### Input:
to_affine(None) yields None.
to_affine(data) yields an affine transformation matrix equivalent to that given in data. Such a
matrix may be specified either as (matrix, offset_vector), as an (n+1)x(n+1) matrix, or, as an
n x (n+1) matrix.
to_affine(data, dims) additionally requires that the dimensionality of the data be dims; meaning
that the returned matrix will be of size (dims+1) x (dims+1).
### Response:
def to_affine(aff, dims=None):
'''
to_affine(None) yields None.
to_affine(data) yields an affine transformation matrix equivalent to that given in data. Such a
matrix may be specified either as (matrix, offset_vector), as an (n+1)x(n+1) matrix, or, as an
n x (n+1) matrix.
to_affine(data, dims) additionally requires that the dimensionality of the data be dims; meaning
that the returned matrix will be of size (dims+1) x (dims+1).
'''
if aff is None: return None
if isinstance(aff, _tuple_type):
# allowed to be (mtx, offset)
if (len(aff) != 2 or
not pimms.is_matrix(aff[0], 'real') or
not pimms.is_vector(aff[1], 'real')):
raise ValueError('affine transforms must be matrices or (mtx,offset) tuples')
mtx = np.asarray(aff[0])
off = np.asarray(aff[1])
if dims is not None:
if mtx.shape[0] != dims or mtx.shape[1] != dims:
raise ValueError('%dD affine matrix must be %d x %d' % (dims,dims,dims))
if off.shape[0] != dims:
raise ValueError('%dD affine offset must have length %d' % (dims,dims))
else:
dims = off.shape[0]
if mtx.shape[0] != dims or mtx.shape[1] != dims:
raise ValueError('with offset size=%d, matrix must be %d x %d' % (dims,dims,dims))
        aff = np.zeros((dims+1,dims+1), dtype=float)
aff[dims,dims] = 1
aff[0:dims,0:dims] = mtx
aff[0:dims,dims] = off
return pimms.imm_array(aff)
if not pimms.is_matrix(aff, 'real'):
raise ValueError('affine transforms must be matrices or (mtx, offset) tuples')
aff = np.asarray(aff)
if dims is None:
dims = aff.shape[1] - 1
if aff.shape[0] == dims:
lastrow = np.zeros((1,dims+1))
lastrow[0,-1] = 1
aff = np.concatenate((aff, lastrow))
if aff.shape[1] != dims+1 or aff.shape[0] != dims+1:
arg = (dims, dims,dims+1, dims+1,dims+1)
raise ValueError('%dD affine matrix must be %dx%d or %dx%d' % arg)
return aff |
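A usage sketch of the (matrix, offset) form the docstring describes; numpy is real, and the record's pimms dependency is assumed importable.

# Usage sketch: a 3x3 linear part plus offset becomes a 4x4 matrix.
import numpy as np

mtx = np.eye(3)
off = np.array([1.0, 2.0, 3.0])
aff = to_affine((mtx, off))
print(aff.shape)     # (4, 4); last row is [0, 0, 0, 1]
print(aff[0:3, 3])   # [1. 2. 3.] -- the offset column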
def find_ge(self, item):
    'Return first item with a key >= item. Raise ValueError if not found'
k = self._key(item)
i = bisect_left(self._keys, k)
if i != len(self):
return self._items[i]
    raise ValueError('No item found with key at or above: %r' % (k,)) | Return first item with a key >= item. Raise ValueError if not found | Below is the instruction that describes the task:
### Input:
Return first item with a key >= item. Raise ValueError if not found
### Response:
def find_ge(self, item):
    'Return first item with a key >= item. Raise ValueError if not found'
k = self._key(item)
i = bisect_left(self._keys, k)
if i != len(self):
return self._items[i]
raise ValueError('No item found with key at or above: %r' % (k,)) |
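A standalone sketch of the same lookup with bisect directly, mirroring the method's parallel _keys/_items layout.

# Standalone sketch of find_ge's logic (standard library only):
from bisect import bisect_left

keys = [1, 4, 4, 9]            # sorted key list
items = ['a', 'b', 'c', 'd']   # items aligned with keys
i = bisect_left(keys, 4)
print(items[i] if i != len(keys) else None)   # -> 'b', first key >= 4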
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the Archive response payload to a stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is not defined.
"""
local_stream = utils.BytearrayStream()
if self._unique_identifier:
self._unique_identifier.write(
local_stream,
kmip_version=kmip_version
)
self.length = local_stream.length()
super(ArchiveResponsePayload, self).write(
output_stream,
kmip_version=kmip_version
)
output_stream.write(local_stream.buffer) | Write the data encoding the Archive response payload to a stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
        ValueError: Raised if the data attribute is not defined. | Below is the instruction that describes the task:
### Input:
Write the data encoding the Archive response payload to a stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is not defined.
### Response:
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_1_0):
"""
Write the data encoding the Archive response payload to a stream.
Args:
output_stream (stream): A data stream in which to encode object
data, supporting a write method; usually a BytearrayStream
object.
kmip_version (KMIPVersion): An enumeration defining the KMIP
            version with which the object will be encoded. Optional,
defaults to KMIP 1.0.
Raises:
ValueError: Raised if the data attribute is not defined.
"""
local_stream = utils.BytearrayStream()
if self._unique_identifier:
self._unique_identifier.write(
local_stream,
kmip_version=kmip_version
)
self.length = local_stream.length()
super(ArchiveResponsePayload, self).write(
output_stream,
kmip_version=kmip_version
)
output_stream.write(local_stream.buffer) |
def _Cobject(cls, ctype):
"""(INTERNAL) New instance from ctypes.
"""
o = object.__new__(cls)
o._as_parameter_ = ctype
    return o | (INTERNAL) New instance from ctypes. | Below is the instruction that describes the task:
### Input:
(INTERNAL) New instance from ctypes.
### Response:
def _Cobject(cls, ctype):
"""(INTERNAL) New instance from ctypes.
"""
o = object.__new__(cls)
o._as_parameter_ = ctype
return o |
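A sketch of why storing the ctypes value in `_as_parameter_` works: ctypes consults that attribute when an object is passed to a foreign function. The libc call is a Unix-only illustration, not part of the record.

# Hedged sketch of the _as_parameter_ protocol (Unix-only illustration):
import ctypes

class Handle(object):
    def __init__(self, value):
        self._as_parameter_ = ctypes.c_int(value)

libc = ctypes.CDLL(None)        # the running process / libc on Unix
libc.abs.restype = ctypes.c_int
print(libc.abs(Handle(-7)))     # -> 7; ctypes unwraps _as_parameter_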
def radiation_values(self, location, timestep=1):
"""Lists of driect normal, diffuse horiz, and global horiz rad at each timestep.
"""
# create sunpath and get altitude at every timestep of the design day
sp = Sunpath.from_location(location)
altitudes = []
dates = self._get_datetimes(timestep)
for t_date in dates:
sun = sp.calculate_sun_from_date_time(t_date)
altitudes.append(sun.altitude)
dir_norm, diff_horiz = ashrae_clear_sky(
altitudes, self._month, self._clearness)
glob_horiz = [dhr + dnr * math.sin(math.radians(alt)) for
alt, dnr, dhr in zip(altitudes, dir_norm, diff_horiz)]
        return dir_norm, diff_horiz, glob_horiz | Lists of direct normal, diffuse horiz, and global horiz rad at each timestep. | Below is the instruction that describes the task:
### Input:
Lists of direct normal, diffuse horiz, and global horiz rad at each timestep.
### Response:
def radiation_values(self, location, timestep=1):
"""Lists of driect normal, diffuse horiz, and global horiz rad at each timestep.
"""
# create sunpath and get altitude at every timestep of the design day
sp = Sunpath.from_location(location)
altitudes = []
dates = self._get_datetimes(timestep)
for t_date in dates:
sun = sp.calculate_sun_from_date_time(t_date)
altitudes.append(sun.altitude)
dir_norm, diff_horiz = ashrae_clear_sky(
altitudes, self._month, self._clearness)
glob_horiz = [dhr + dnr * math.sin(math.radians(alt)) for
alt, dnr, dhr in zip(altitudes, dir_norm, diff_horiz)]
return dir_norm, diff_horiz, glob_horiz |
def node_indent(elt_name, node_id, fact_term, attribute, highlight_node=None):
"""
This tag uses a table structure to display indentation
of fact terms based on the information contained in the
node identifier.
This tag and the closing 'node_indent_end' tag must
enclose the value to be displayed after the display
of the fact term.
"""
# Some colors to chose from:
color_dict = {0: {0: '#004C80', # blueish
1: '#005C99',
2: '#006BB2',
3: '#007ACC',
4: '#008AE6',
5: '#0099FF',
6: '#19A3FF',
7: '#33ADFF',
8: '#4DB8FF',
9: '#66C2FF',
10: '#80CCFF',
11: '#99D6FF',
12: '#B2E0FF',
13: '#CCEBFF',
14: '#E6F5FF'},
2: {0: '#008000', # greenish
1: '#009900',
2: '#00B200',
3: '#00CC00',
4: '#00E600',
5: '#00FF00',
6: '#19FF19',
7: '#33FF33',
8: '#4DFF4D',
9: '#66FF66',
10: '#80FF80',
11: '#99FF99',
12: '#B2FFB2',
13: '#CCFFCC',
14: '#E6FFE6'},
3: {0: '#804C80', # pinkish
1: '#995C99',
2: '#B26BB2',
3: '#CC7ACC',
4: '#E68AE6',
5: '#FF99FF',
6: '#FFA3FF',
7: '#FFADFF',
8: '#FFB8FF',
9: '#FFC2FF',
10: '#FFCCFF',
11: '#FFD6FF',
12: '#FFE0FF',
13: '#FFEBFF',
14: '#FFF5FF', },
1: {0: "#5C3D99", # violetish
1: "#6B47B2",
2: "#7A52CC",
3: "#8A5CE6",
4: "#9966FF",
5: "#A375FF",
6: "#AD85FF",
7: "#B894FF",
8: "#C2A3FF",
9: "#CCB2FF",
10: "#D6C2FF",
11: "#E0D1FF",
12: "#EBE0FF",
13: "#F5F0FF",
14: "#FFFFFF"}
}
indents = 100
node_ids = node_id.split(':')
fact_components = fact_term.split('/')
if len(fact_components) == 1 and fact_components[0] == '':
fact_components = []
if attribute:
fact_components.append("@%s" % attribute)
fact_components = dict([(x, fact_components[x]) for x in range(0, len(fact_components))])
#node_ids.reverse()
result = []
counter = 0
for node in node_ids:
is_attr = False
if len(node) >= 1:
if node[0] == 'A':
is_attr = True
node = node[1:]
if len(node) > 0:
node_nr = int(node)
else:
node_nr = 0
if is_attr:
node_mod = 2
else:
node_mod = node_nr % 2
if is_attr:
result.append("<%(elt_name)s style='background: %(color)s'>%(fact_term_component)s</%(elt_name)s>" % {
'elt_name': elt_name,
'fact_term_component': fact_components.get(counter, ''),
'color': color_dict[2][max(14 - counter,4)]})
else:
result.append(
"<%(elt_name)s style='width:1px; margin: 0px ; background : %(color)s'>%(fact_term_component)s</%(elt_name)s>" % {
'elt_name': elt_name,
'color': color_dict[node_mod][max(14 - counter,4)],
'fact_term_component': fact_components.get(counter, '')})
counter += 1
    highlight = "style='background: #FF0000;'" if highlight_node == node_id else ''
result.append("<%(elt_name)s colspan='%(colspan)s' %(highlight)s>" % {'elt_name': elt_name, 'colspan': (indents - counter), 'highlight' : highlight})
return "".join(result) | This tag uses a table structure to display indentation
of fact terms based on the information contained in the
node identifier.
This tag and the closing 'node_indent_end' tag must
enclose the value to be displayed after the display
    of the fact term. | Below is the instruction that describes the task:
### Input:
This tag uses a table structure to display indentation
of fact terms based on the information contained in the
node identifier.
This tag and the closing 'node_indent_end' tag must
enclose the value to be displayed after the display
of the fact term.
### Response:
def node_indent(elt_name, node_id, fact_term, attribute, highlight_node=None):
"""
This tag uses a table structure to display indentation
of fact terms based on the information contained in the
node identifier.
This tag and the closing 'node_indent_end' tag must
enclose the value to be displayed after the display
of the fact term.
"""
# Some colors to chose from:
color_dict = {0: {0: '#004C80', # blueish
1: '#005C99',
2: '#006BB2',
3: '#007ACC',
4: '#008AE6',
5: '#0099FF',
6: '#19A3FF',
7: '#33ADFF',
8: '#4DB8FF',
9: '#66C2FF',
10: '#80CCFF',
11: '#99D6FF',
12: '#B2E0FF',
13: '#CCEBFF',
14: '#E6F5FF'},
2: {0: '#008000', # greenish
1: '#009900',
2: '#00B200',
3: '#00CC00',
4: '#00E600',
5: '#00FF00',
6: '#19FF19',
7: '#33FF33',
8: '#4DFF4D',
9: '#66FF66',
10: '#80FF80',
11: '#99FF99',
12: '#B2FFB2',
13: '#CCFFCC',
14: '#E6FFE6'},
3: {0: '#804C80', # pinkish
1: '#995C99',
2: '#B26BB2',
3: '#CC7ACC',
4: '#E68AE6',
5: '#FF99FF',
6: '#FFA3FF',
7: '#FFADFF',
8: '#FFB8FF',
9: '#FFC2FF',
10: '#FFCCFF',
11: '#FFD6FF',
12: '#FFE0FF',
13: '#FFEBFF',
14: '#FFF5FF', },
1: {0: "#5C3D99", # violetish
1: "#6B47B2",
2: "#7A52CC",
3: "#8A5CE6",
4: "#9966FF",
5: "#A375FF",
6: "#AD85FF",
7: "#B894FF",
8: "#C2A3FF",
9: "#CCB2FF",
10: "#D6C2FF",
11: "#E0D1FF",
12: "#EBE0FF",
13: "#F5F0FF",
14: "#FFFFFF"}
}
indents = 100
node_ids = node_id.split(':')
fact_components = fact_term.split('/')
if len(fact_components) == 1 and fact_components[0] == '':
fact_components = []
if attribute:
fact_components.append("@%s" % attribute)
fact_components = dict([(x, fact_components[x]) for x in range(0, len(fact_components))])
#node_ids.reverse()
result = []
counter = 0
for node in node_ids:
is_attr = False
if len(node) >= 1:
if node[0] == 'A':
is_attr = True
node = node[1:]
if len(node) > 0:
node_nr = int(node)
else:
node_nr = 0
if is_attr:
node_mod = 2
else:
node_mod = node_nr % 2
if is_attr:
result.append("<%(elt_name)s style='background: %(color)s'>%(fact_term_component)s</%(elt_name)s>" % {
'elt_name': elt_name,
'fact_term_component': fact_components.get(counter, ''),
'color': color_dict[2][max(14 - counter,4)]})
else:
result.append(
"<%(elt_name)s style='width:1px; margin: 0px ; background : %(color)s'>%(fact_term_component)s</%(elt_name)s>" % {
'elt_name': elt_name,
'color': color_dict[node_mod][max(14 - counter,4)],
'fact_term_component': fact_components.get(counter, '')})
counter += 1
    highlight = "style='background: #FF0000;'" if highlight_node == node_id else ''
result.append("<%(elt_name)s colspan='%(colspan)s' %(highlight)s>" % {'elt_name': elt_name, 'colspan': (indents - counter), 'highlight' : highlight})
return "".join(result) |
def jsonHook(encoded):
"""Custom JSON decoder that allows construction of a new ``Ci`` instance
from a decoded JSON object.
:param encoded: a JSON decoded object literal (a dict)
:returns: "encoded" or one of the these objects: :class:`Ci`,
:class:`MzmlProduct`, :class:`MzmlPrecursor`
"""
if '__Ci__' in encoded:
return Ci._fromJSON(encoded['__Ci__'])
elif '__MzmlProduct__' in encoded:
return MzmlProduct._fromJSON(encoded['__MzmlProduct__'])
elif '__MzmlPrecursor__' in encoded:
return MzmlPrecursor._fromJSON(encoded['__MzmlPrecursor__'])
else:
return encoded | Custom JSON decoder that allows construction of a new ``Ci`` instance
from a decoded JSON object.
:param encoded: a JSON decoded object literal (a dict)
:returns: "encoded" or one of the these objects: :class:`Ci`,
    :class:`MzmlProduct`, :class:`MzmlPrecursor` | Below is the instruction that describes the task:
### Input:
Custom JSON decoder that allows construction of a new ``Ci`` instance
from a decoded JSON object.
:param encoded: a JSON decoded object literal (a dict)
:returns: "encoded" or one of the these objects: :class:`Ci`,
:class:`MzmlProduct`, :class:`MzmlPrecursor`
### Response:
def jsonHook(encoded):
"""Custom JSON decoder that allows construction of a new ``Ci`` instance
from a decoded JSON object.
:param encoded: a JSON decoded object literal (a dict)
:returns: "encoded" or one of the these objects: :class:`Ci`,
:class:`MzmlProduct`, :class:`MzmlPrecursor`
"""
if '__Ci__' in encoded:
return Ci._fromJSON(encoded['__Ci__'])
elif '__MzmlProduct__' in encoded:
return MzmlProduct._fromJSON(encoded['__MzmlProduct__'])
elif '__MzmlPrecursor__' in encoded:
return MzmlPrecursor._fromJSON(encoded['__MzmlPrecursor__'])
else:
return encoded |
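A wiring sketch for the decoder hook above using the standard json module; plain objects fall through the else branch unchanged.

# Usage sketch: plug the hook into json.loads via object_hook.
import json

print(json.loads('{"a": 1}', object_hook=jsonHook))   # -> {'a': 1}
# An encoded instance is rebuilt through the matching branch, e.g.:
# json.loads('{"__Ci__": {...}}', object_hook=jsonHook)  # -> Ci instance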
def _initialize_context(self, trace_header):
"""
Create a facade segment based on environment variables
set by AWS Lambda and initialize storage for subsegments.
"""
sampled = None
if not global_sdk_config.sdk_enabled():
# Force subsequent subsegments to be disabled and turned into DummySegments.
sampled = False
elif trace_header.sampled == 0:
sampled = False
elif trace_header.sampled == 1:
sampled = True
segment = FacadeSegment(
name='facade',
traceid=trace_header.root,
entityid=trace_header.parent,
sampled=sampled,
)
setattr(self._local, 'segment', segment)
setattr(self._local, 'entities', []) | Create a facade segment based on environment variables
set by AWS Lambda and initialize storage for subsegments. | Below is the instruction that describes the task:
### Input:
Create a facade segment based on environment variables
set by AWS Lambda and initialize storage for subsegments.
### Response:
def _initialize_context(self, trace_header):
"""
Create a facade segment based on environment variables
set by AWS Lambda and initialize storage for subsegments.
"""
sampled = None
if not global_sdk_config.sdk_enabled():
# Force subsequent subsegments to be disabled and turned into DummySegments.
sampled = False
elif trace_header.sampled == 0:
sampled = False
elif trace_header.sampled == 1:
sampled = True
segment = FacadeSegment(
name='facade',
traceid=trace_header.root,
entityid=trace_header.parent,
sampled=sampled,
)
setattr(self._local, 'segment', segment)
setattr(self._local, 'entities', []) |
def revealjs(basedir=None, title=None, subtitle=None, description=None,
github_user=None, github_repo=None):
    '''Set up or update a reveal.js presentation with slides written in markdown.
Several reveal.js plugins will be set up, too.
More info:
Demo: https://theno.github.io/revealjs_template
http://lab.hakim.se/reveal-js/
https://github.com/hakimel/reveal.js
plugins:
https://github.com/hakimel/reveal.js/wiki/Plugins,-Tools-and-Hardware
https://github.com/rajgoel/reveal.js-plugins/
https://github.com/e-gor/Reveal.js-TOC-Progress
https://github.com/e-gor/Reveal.js-Title-Footer
'''
basedir = basedir or query_input('Base dir of the presentation?',
default='~/repos/my_presi')
revealjs_repo_name = 'reveal.js'
revealjs_dir = flo('{basedir}/{revealjs_repo_name}')
_lazy_dict['presi_title'] = title
_lazy_dict['presi_subtitle'] = subtitle
_lazy_dict['presi_description'] = description
_lazy_dict['github_user'] = github_user
_lazy_dict['github_repo'] = github_repo
question = flo("Base dir already contains a sub dir '{revealjs_repo_name}'."
' Reset (and re-download) reveal.js codebase?')
if not exists(revealjs_dir) or query_yes_no(question, default='no'):
run(flo('mkdir -p {basedir}'))
set_up_revealjs_codebase(basedir, revealjs_repo_name)
install_plugins(revealjs_dir)
apply_customizations(repo_dir=revealjs_dir)
if exists(revealjs_dir):
install_files_in_basedir(basedir, repo_dir=revealjs_dir)
init_git_repo(basedir)
create_github_remote_repo(basedir)
setup_npm(revealjs_dir)
else:
        print('abort') | Set up or update a reveal.js presentation with slides written in markdown.
Several reveal.js plugins will be set up, too.
More info:
Demo: https://theno.github.io/revealjs_template
http://lab.hakim.se/reveal-js/
https://github.com/hakimel/reveal.js
plugins:
https://github.com/hakimel/reveal.js/wiki/Plugins,-Tools-and-Hardware
https://github.com/rajgoel/reveal.js-plugins/
https://github.com/e-gor/Reveal.js-TOC-Progress
https://github.com/e-gor/Reveal.js-Title-Footer | Below is the instruction that describes the task:
### Input:
Set up or update a reveal.js presentation with slides written in markdown.
Several reveal.js plugins will be set up, too.
More info:
Demo: https://theno.github.io/revealjs_template
http://lab.hakim.se/reveal-js/
https://github.com/hakimel/reveal.js
plugins:
https://github.com/hakimel/reveal.js/wiki/Plugins,-Tools-and-Hardware
https://github.com/rajgoel/reveal.js-plugins/
https://github.com/e-gor/Reveal.js-TOC-Progress
https://github.com/e-gor/Reveal.js-Title-Footer
### Response:
def revealjs(basedir=None, title=None, subtitle=None, description=None,
github_user=None, github_repo=None):
    '''Set up or update a reveal.js presentation with slides written in markdown.
Several reveal.js plugins will be set up, too.
More info:
Demo: https://theno.github.io/revealjs_template
http://lab.hakim.se/reveal-js/
https://github.com/hakimel/reveal.js
plugins:
https://github.com/hakimel/reveal.js/wiki/Plugins,-Tools-and-Hardware
https://github.com/rajgoel/reveal.js-plugins/
https://github.com/e-gor/Reveal.js-TOC-Progress
https://github.com/e-gor/Reveal.js-Title-Footer
'''
basedir = basedir or query_input('Base dir of the presentation?',
default='~/repos/my_presi')
revealjs_repo_name = 'reveal.js'
revealjs_dir = flo('{basedir}/{revealjs_repo_name}')
_lazy_dict['presi_title'] = title
_lazy_dict['presi_subtitle'] = subtitle
_lazy_dict['presi_description'] = description
_lazy_dict['github_user'] = github_user
_lazy_dict['github_repo'] = github_repo
question = flo("Base dir already contains a sub dir '{revealjs_repo_name}'."
' Reset (and re-download) reveal.js codebase?')
if not exists(revealjs_dir) or query_yes_no(question, default='no'):
run(flo('mkdir -p {basedir}'))
set_up_revealjs_codebase(basedir, revealjs_repo_name)
install_plugins(revealjs_dir)
apply_customizations(repo_dir=revealjs_dir)
if exists(revealjs_dir):
install_files_in_basedir(basedir, repo_dir=revealjs_dir)
init_git_repo(basedir)
create_github_remote_repo(basedir)
setup_npm(revealjs_dir)
else:
print('abort') |
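Called directly (or through Fabric's task runner), a typical invocation might look like this sketch; all argument values are illustrative:
# any value left as None is prompted for interactively (see query_input above)
revealjs(basedir='~/repos/my_presi',
         title='My Talk',
         github_user='theno',
         github_repo='my_presi')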
def reverse(self):
"""
Returns a reversed copy of the list.
"""
colors = ColorList.copy(self)
_list.reverse(colors)
    return colors | Returns a reversed copy of the list. | Below is the instruction that describes the task:
### Input:
Returns a reversed copy of the list.
### Response:
def reverse(self):
"""
Returns a reversed copy of the list.
"""
colors = ColorList.copy(self)
_list.reverse(colors)
return colors |
def dc(result, reference):
r"""
Dice coefficient
Computes the Dice coefficient (also known as Sorensen index) between the binary
objects in two images.
The metric is defined as
.. math::
DC=\frac{2|A\cap B|}{|A|+|B|}
, where :math:`A` is the first and :math:`B` the second set of samples (here: binary objects).
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
dc : float
The Dice coefficient between the object(s) in ```result``` and the
object(s) in ```reference```. It ranges from 0 (no overlap) to 1 (perfect overlap).
Notes
-----
This is a real metric. The binary images can therefore be supplied in any order.
"""
    result = numpy.atleast_1d(result.astype(bool))
    reference = numpy.atleast_1d(reference.astype(bool))
intersection = numpy.count_nonzero(result & reference)
size_i1 = numpy.count_nonzero(result)
size_i2 = numpy.count_nonzero(reference)
try:
dc = 2. * intersection / float(size_i1 + size_i2)
except ZeroDivisionError:
dc = 0.0
return dc | r"""
Dice coefficient
Computes the Dice coefficient (also known as Sorensen index) between the binary
objects in two images.
The metric is defined as
.. math::
DC=\frac{2|A\cap B|}{|A|+|B|}
, where :math:`A` is the first and :math:`B` the second set of samples (here: binary objects).
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
dc : float
The Dice coefficient between the object(s) in ```result``` and the
object(s) in ```reference```. It ranges from 0 (no overlap) to 1 (perfect overlap).
Notes
-----
This is a real metric. The binary images can therefore be supplied in any order. | Below is the instruction that describes the task:
### Input:
r"""
Dice coefficient
Computes the Dice coefficient (also known as Sorensen index) between the binary
objects in two images.
The metric is defined as
.. math::
DC=\frac{2|A\cap B|}{|A|+|B|}
, where :math:`A` is the first and :math:`B` the second set of samples (here: binary objects).
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
dc : float
The Dice coefficient between the object(s) in ```result``` and the
object(s) in ```reference```. It ranges from 0 (no overlap) to 1 (perfect overlap).
Notes
-----
This is a real metric. The binary images can therefore be supplied in any order.
### Response:
def dc(result, reference):
r"""
Dice coefficient
Computes the Dice coefficient (also known as Sorensen index) between the binary
objects in two images.
The metric is defined as
.. math::
DC=\frac{2|A\cap B|}{|A|+|B|}
, where :math:`A` is the first and :math:`B` the second set of samples (here: binary objects).
Parameters
----------
result : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
reference : array_like
Input data containing objects. Can be any type but will be converted
into binary: background where 0, object everywhere else.
Returns
-------
dc : float
The Dice coefficient between the object(s) in ```result``` and the
object(s) in ```reference```. It ranges from 0 (no overlap) to 1 (perfect overlap).
Notes
-----
This is a real metric. The binary images can therefore be supplied in any order.
"""
    result = numpy.atleast_1d(result.astype(bool))
    reference = numpy.atleast_1d(reference.astype(bool))
intersection = numpy.count_nonzero(result & reference)
size_i1 = numpy.count_nonzero(result)
size_i2 = numpy.count_nonzero(reference)
try:
dc = 2. * intersection / float(size_i1 + size_i2)
except ZeroDivisionError:
dc = 0.0
return dc |
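A quick sanity check of the formula on two tiny binary masks (plain numpy, nothing assumed beyond the function above):
import numpy
a = numpy.array([0, 1, 1, 0])  # 2 foreground elements
b = numpy.array([0, 1, 0, 1])  # 2 foreground elements, 1 shared with a
dc(a, b)  # 2 * 1 / (2 + 2) = 0.5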
def stop(name, kill=False, path=None, use_vt=None):
'''
Stop the named container
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
kill: False
Do not wait for the container to stop, kill all tasks in the container.
Older LXC versions will stop containers like this irrespective of this
argument.
.. versionchanged:: 2015.5.0
Default value changed to ``False``
use_vt
run the command through VT
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion lxc.stop name
'''
_ensure_exists(name, path=path)
orig_state = state(name, path=path)
if orig_state == 'frozen' and not kill:
# Gracefully stopping a frozen container is slower than unfreezing and
# then stopping it (at least in my testing), so if we're not
# force-stopping the container, unfreeze it first.
unfreeze(name, path=path)
cmd = 'lxc-stop'
if kill:
cmd += ' -k'
ret = _change_state(cmd, name, 'stopped', use_vt=use_vt, path=path)
ret['state']['old'] = orig_state
return ret | Stop the named container
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
kill: False
Do not wait for the container to stop, kill all tasks in the container.
Older LXC versions will stop containers like this irrespective of this
argument.
.. versionchanged:: 2015.5.0
Default value changed to ``False``
use_vt
run the command through VT
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion lxc.stop name | Below is the instruction that describes the task:
### Input:
Stop the named container
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
kill: False
Do not wait for the container to stop, kill all tasks in the container.
Older LXC versions will stop containers like this irrespective of this
argument.
.. versionchanged:: 2015.5.0
Default value changed to ``False``
use_vt
run the command through VT
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion lxc.stop name
### Response:
def stop(name, kill=False, path=None, use_vt=None):
'''
Stop the named container
path
path to the container parent directory
default: /var/lib/lxc (system)
.. versionadded:: 2015.8.0
kill: False
Do not wait for the container to stop, kill all tasks in the container.
Older LXC versions will stop containers like this irrespective of this
argument.
.. versionchanged:: 2015.5.0
Default value changed to ``False``
use_vt
run the command through VT
.. versionadded:: 2015.8.0
CLI Example:
.. code-block:: bash
salt myminion lxc.stop name
'''
_ensure_exists(name, path=path)
orig_state = state(name, path=path)
if orig_state == 'frozen' and not kill:
# Gracefully stopping a frozen container is slower than unfreezing and
# then stopping it (at least in my testing), so if we're not
# force-stopping the container, unfreeze it first.
unfreeze(name, path=path)
cmd = 'lxc-stop'
if kill:
cmd += ' -k'
ret = _change_state(cmd, name, 'stopped', use_vt=use_vt, path=path)
ret['state']['old'] = orig_state
return ret |
def destroy(self, blocking=False):
"""
Destroy all data and metadata related to this broadcast variable.
Use this with caution; once a broadcast variable has been destroyed,
it cannot be used again.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
"""
if self._jbroadcast is None:
raise Exception("Broadcast can only be destroyed in driver")
self._jbroadcast.destroy(blocking)
os.unlink(self._path) | Destroy all data and metadata related to this broadcast variable.
Use this with caution; once a broadcast variable has been destroyed,
it cannot be used again.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted. | Below is the instruction that describes the task:
### Input:
Destroy all data and metadata related to this broadcast variable.
Use this with caution; once a broadcast variable has been destroyed,
it cannot be used again.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
### Response:
def destroy(self, blocking=False):
"""
Destroy all data and metadata related to this broadcast variable.
Use this with caution; once a broadcast variable has been destroyed,
it cannot be used again.
.. versionchanged:: 3.0.0
Added optional argument `blocking` to specify whether to block until all
blocks are deleted.
"""
if self._jbroadcast is None:
raise Exception("Broadcast can only be destroyed in driver")
self._jbroadcast.destroy(blocking)
os.unlink(self._path) |
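In driver code the lifecycle looks roughly like this (a sketch against the public PySpark API; `sc` is assumed to be an active SparkContext):
b = sc.broadcast([1, 2, 3])
b.value          # [1, 2, 3]
b.destroy()      # returns immediately; pass blocking=True to wait for block removal
# any further use of b (in the driver or inside a task) now raises an error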
def get_gafvals(self, line):
"""Convert fields from string to preferred format for GAF ver 2.1 and 2.0."""
flds = line.split('\t')
flds[3] = self._get_qualifier(flds[3]) # 3 Qualifier
flds[5] = self._get_set(flds[5]) # 5 DB_Reference
flds[7] = self._get_set(flds[7]) # 7 With_From
flds[8] = self.aspect2ns[flds[8]] # 8 GAF Aspect field converted to BP, MF, or CC
flds[9] = self._get_set(flds[9]) # 9 DB_Name
flds[10] = self._get_set(flds[10]) # 10 DB_Synonym
flds[12] = self._do_taxons(flds[12]) # 12 Taxon
flds[13] = GET_DATE_YYYYMMDD(flds[13]) # self.strptime(flds[13], '%Y%m%d').date(), # 13 Date 20190406
# Version 2.x has these additional fields not found in v1.0
if self.is_long:
flds[15] = get_extensions(flds[15]) # Extensions (or Annotation_Extension)
flds[16] = self._get_set(flds[16].rstrip())
else:
flds[14] = self._get_set(flds[14].rstrip())
    return flds | Convert fields from string to preferred format for GAF ver 2.1 and 2.0. | Below is the instruction that describes the task:
### Input:
Convert fields from string to preferred format for GAF ver 2.1 and 2.0.
### Response:
def get_gafvals(self, line):
"""Convert fields from string to preferred format for GAF ver 2.1 and 2.0."""
flds = line.split('\t')
flds[3] = self._get_qualifier(flds[3]) # 3 Qualifier
flds[5] = self._get_set(flds[5]) # 5 DB_Reference
flds[7] = self._get_set(flds[7]) # 7 With_From
flds[8] = self.aspect2ns[flds[8]] # 8 GAF Aspect field converted to BP, MF, or CC
flds[9] = self._get_set(flds[9]) # 9 DB_Name
flds[10] = self._get_set(flds[10]) # 10 DB_Synonym
flds[12] = self._do_taxons(flds[12]) # 12 Taxon
flds[13] = GET_DATE_YYYYMMDD(flds[13]) # self.strptime(flds[13], '%Y%m%d').date(), # 13 Date 20190406
# Version 2.x has these additional fields not found in v1.0
if self.is_long:
flds[15] = get_extensions(flds[15]) # Extensions (or Annotation_Extension)
flds[16] = self._get_set(flds[16].rstrip())
else:
flds[14] = self._get_set(flds[14].rstrip())
return flds |
def get_or_create_exh_obj(full_cname=False, exclude=None, callables_fname=None):
r"""
Return global exception handler if set, otherwise create a new one and return it.
:param full_cname: Flag that indicates whether fully qualified
function/method/class property names are obtained for
functions/methods/class properties that use the
exception manager (True) or not (False).
There is a performance penalty if the flag is True as
the call stack needs to be traced. This argument is
only relevant if the global exception handler is not
set and a new one is created
:type full_cname: boolean
:param exclude: Module exclusion list. A particular callable in an
otherwise fully qualified name is omitted if it belongs
to a module in this list. If None all callables are
included
:type exclude: list of strings or None
:param callables_fname: File name that contains traced modules information.
File can be produced by either the
:py:meth:`pexdoc.pinspect.Callables.save` or
:py:meth:`pexdoc.ExHandle.save_callables`
methods
:type callables_fname: :ref:`FileNameExists` or None
:rtype: :py:class:`pexdoc.ExHandle`
:raises:
        * OSError (File *[callables_fname]* could not be found)
* RuntimeError (Argument \\`exclude\\` is not valid)
* RuntimeError (Argument \\`callables_fname\\` is not valid)
* RuntimeError (Argument \\`full_cname\\` is not valid)
"""
if not hasattr(__builtin__, "_EXH"):
set_exh_obj(
ExHandle(
full_cname=full_cname, exclude=exclude, callables_fname=callables_fname
)
)
return get_exh_obj() | r"""
Return global exception handler if set, otherwise create a new one and return it.
:param full_cname: Flag that indicates whether fully qualified
function/method/class property names are obtained for
functions/methods/class properties that use the
exception manager (True) or not (False).
There is a performance penalty if the flag is True as
the call stack needs to be traced. This argument is
only relevant if the global exception handler is not
set and a new one is created
:type full_cname: boolean
:param exclude: Module exclusion list. A particular callable in an
otherwise fully qualified name is omitted if it belongs
to a module in this list. If None all callables are
included
:type exclude: list of strings or None
:param callables_fname: File name that contains traced modules information.
File can be produced by either the
:py:meth:`pexdoc.pinspect.Callables.save` or
:py:meth:`pexdoc.ExHandle.save_callables`
methods
:type callables_fname: :ref:`FileNameExists` or None
:rtype: :py:class:`pexdoc.ExHandle`
:raises:
    * OSError (File *[callables_fname]* could not be found)
* RuntimeError (Argument \\`exclude\\` is not valid)
* RuntimeError (Argument \\`callables_fname\\` is not valid)
    * RuntimeError (Argument \\`full_cname\\` is not valid) | Below is the instruction that describes the task:
### Input:
r"""
Return global exception handler if set, otherwise create a new one and return it.
:param full_cname: Flag that indicates whether fully qualified
function/method/class property names are obtained for
functions/methods/class properties that use the
exception manager (True) or not (False).
There is a performance penalty if the flag is True as
the call stack needs to be traced. This argument is
only relevant if the global exception handler is not
set and a new one is created
:type full_cname: boolean
:param exclude: Module exclusion list. A particular callable in an
otherwise fully qualified name is omitted if it belongs
to a module in this list. If None all callables are
included
:type exclude: list of strings or None
:param callables_fname: File name that contains traced modules information.
File can be produced by either the
:py:meth:`pexdoc.pinspect.Callables.save` or
:py:meth:`pexdoc.ExHandle.save_callables`
methods
:type callables_fname: :ref:`FileNameExists` or None
:rtype: :py:class:`pexdoc.ExHandle`
:raises:
    * OSError (File *[callables_fname]* could not be found)
* RuntimeError (Argument \\`exclude\\` is not valid)
* RuntimeError (Argument \\`callables_fname\\` is not valid)
* RuntimeError (Argument \\`full_cname\\` is not valid)
### Response:
def get_or_create_exh_obj(full_cname=False, exclude=None, callables_fname=None):
r"""
Return global exception handler if set, otherwise create a new one and return it.
:param full_cname: Flag that indicates whether fully qualified
function/method/class property names are obtained for
functions/methods/class properties that use the
exception manager (True) or not (False).
There is a performance penalty if the flag is True as
the call stack needs to be traced. This argument is
only relevant if the global exception handler is not
set and a new one is created
:type full_cname: boolean
:param exclude: Module exclusion list. A particular callable in an
otherwise fully qualified name is omitted if it belongs
to a module in this list. If None all callables are
included
:type exclude: list of strings or None
:param callables_fname: File name that contains traced modules information.
File can be produced by either the
:py:meth:`pexdoc.pinspect.Callables.save` or
:py:meth:`pexdoc.ExHandle.save_callables`
methods
:type callables_fname: :ref:`FileNameExists` or None
:rtype: :py:class:`pexdoc.ExHandle`
:raises:
        * OSError (File *[callables_fname]* could not be found)
* RuntimeError (Argument \\`exclude\\` is not valid)
* RuntimeError (Argument \\`callables_fname\\` is not valid)
* RuntimeError (Argument \\`full_cname\\` is not valid)
"""
if not hasattr(__builtin__, "_EXH"):
set_exh_obj(
ExHandle(
full_cname=full_cname, exclude=exclude, callables_fname=callables_fname
)
)
return get_exh_obj() |
def _init_template(self, cls, base_init_template):
'''This would be better as an override for Gtk.Widget'''
# TODO: could disallow using a metaclass.. but this is good enough
# .. if you disagree, feel free to fix it and issue a PR :)
if self.__class__ is not cls:
raise TypeError("Inheritance from classes with @GtkTemplate decorators "
"is not allowed at this time")
connected_signals = set()
self.__connected_template_signals__ = connected_signals
base_init_template(self)
for name in self.__gtemplate_widgets__:
widget = self.get_template_child(cls, name)
self.__dict__[name] = widget
if widget is None:
# Bug: if you bind a template child, and one of them was
# not present, then the whole template is broken (and
# it's not currently possible for us to know which
# one is broken either -- but the stderr should show
# something useful with a Gtk-CRITICAL message)
raise AttributeError("A missing child widget was set using "
"GtkTemplate.Child and the entire "
"template is now broken (widgets: %s)" %
', '.join(self.__gtemplate_widgets__))
for name in self.__gtemplate_methods__.difference(connected_signals):
errmsg = ("Signal '%s' was declared with @GtkTemplate.Callback " +
"but was not present in template") % name
        warnings.warn(errmsg, GtkTemplateWarning) | This would be better as an override for Gtk.Widget | Below is the instruction that describes the task:
### Input:
This would be better as an override for Gtk.Widget
### Response:
def _init_template(self, cls, base_init_template):
'''This would be better as an override for Gtk.Widget'''
# TODO: could disallow using a metaclass.. but this is good enough
# .. if you disagree, feel free to fix it and issue a PR :)
if self.__class__ is not cls:
raise TypeError("Inheritance from classes with @GtkTemplate decorators "
"is not allowed at this time")
connected_signals = set()
self.__connected_template_signals__ = connected_signals
base_init_template(self)
for name in self.__gtemplate_widgets__:
widget = self.get_template_child(cls, name)
self.__dict__[name] = widget
if widget is None:
# Bug: if you bind a template child, and one of them was
# not present, then the whole template is broken (and
# it's not currently possible for us to know which
# one is broken either -- but the stderr should show
# something useful with a Gtk-CRITICAL message)
raise AttributeError("A missing child widget was set using "
"GtkTemplate.Child and the entire "
"template is now broken (widgets: %s)" %
', '.join(self.__gtemplate_widgets__))
for name in self.__gtemplate_methods__.difference(connected_signals):
errmsg = ("Signal '%s' was declared with @GtkTemplate.Callback " +
"but was not present in template") % name
warnings.warn(errmsg, GtkTemplateWarning) |
def to_bytes(s, encoding="utf-8"):
"""
Converts the string to a bytes type, if not already.
:s: the string to convert to bytes
:returns: `str` on Python2 and `bytes` on Python3.
"""
if isinstance(s, six.binary_type):
return s
else:
return six.text_type(s).encode(encoding) | Converts the string to a bytes type, if not already.
:s: the string to convert to bytes
:returns: `str` on Python2 and `bytes` on Python3. | Below is the instruction that describes the task:
### Input:
Converts the string to a bytes type, if not already.
:s: the string to convert to bytes
:returns: `str` on Python2 and `bytes` on Python3.
### Response:
def to_bytes(s, encoding="utf-8"):
"""
Converts the string to a bytes type, if not already.
:s: the string to convert to bytes
:returns: `str` on Python2 and `bytes` on Python3.
"""
if isinstance(s, six.binary_type):
return s
else:
return six.text_type(s).encode(encoding) |
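For example (the behavior is identical on Python 2 and 3 thanks to six):
to_bytes(u"héllo")          # b'h\xc3\xa9llo' -- text is UTF-8 encoded
to_bytes(b"already bytes")  # returned unchanged
to_bytes(42)                # b'42' -- non-strings are stringified first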
def get_vmpolicy_macaddr_input_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
input = ET.SubElement(get_vmpolicy_macaddr, "input")
datacenter = ET.SubElement(input, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
    return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_vmpolicy_macaddr_input_datacenter(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vmpolicy_macaddr = ET.Element("get_vmpolicy_macaddr")
config = get_vmpolicy_macaddr
input = ET.SubElement(get_vmpolicy_macaddr, "input")
datacenter = ET.SubElement(input, "datacenter")
datacenter.text = kwargs.pop('datacenter')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def find(default='', whole_words=0, case_sensitive=0, parent=None):
"Shows a find text dialog"
result = dialogs.findDialog(parent, default, whole_words, case_sensitive)
return {'text': result.searchText, 'whole_words': result.wholeWordsOnly,
        'case_sensitive': result.caseSensitive} | Shows a find text dialog | Below is the instruction that describes the task:
### Input:
Shows a find text dialog
### Response:
def find(default='', whole_words=0, case_sensitive=0, parent=None):
"Shows a find text dialog"
result = dialogs.findDialog(parent, default, whole_words, case_sensitive)
return {'text': result.searchText, 'whole_words': result.wholeWordsOnly,
'case_sensitive': result.caseSensitive} |
def offset_overlays(self, text, offset=0, **kw):
"""
Generate overlays after offset.
:param text: The text to be searched.
    :param offset: Match starting at that index. If none, just search.
:returns: An overlay or None
"""
# This may be a bit slower but overlayedtext takes care of
# unicode issues.
if not isinstance(text, OverlayedText):
text = OverlayedText(text)
for m in self.regex.finditer(unicode(text)[offset:]):
yield Overlay(text, (offset + m.start(), offset + m.end()),
props=self.props,
value=self.value(rxmatch=m)) | Generate overlays after offset.
:param text: The text to be searched.
:param offset: Match starting at that index. If none, just search.
:returns: An overlay or None | Below is the instruction that describes the task:
### Input:
Generate overlays after offset.
:param text: The text to be searched.
:param offset: Match starting at that index. If none, just search.
:returns: An overlay or None
### Response:
def offset_overlays(self, text, offset=0, **kw):
"""
Generate overlays after offset.
:param text: The text to be searched.
    :param offset: Match starting at that index. If none, just search.
:returns: An overlay or None
"""
# This may be a bit slower but overlayedtext takes care of
# unicode issues.
if not isinstance(text, OverlayedText):
text = OverlayedText(text)
for m in self.regex.finditer(unicode(text)[offset:]):
yield Overlay(text, (offset + m.start(), offset + m.end()),
props=self.props,
value=self.value(rxmatch=m)) |
def random_boggle(n=4):
"""Return a random Boggle board of size n x n.
We represent a board as a linear list of letters."""
cubes = [cubes16[i % 16] for i in range(n*n)]
random.shuffle(cubes)
return map(random.choice, cubes) | Return a random Boggle board of size n x n.
We represent a board as a linear list of letters. | Below is the instruction that describes the task:
### Input:
Return a random Boggle board of size n x n.
We represent a board as a linear list of letters.
### Response:
def random_boggle(n=4):
"""Return a random Boggle board of size n x n.
We represent a board as a linear list of letters."""
cubes = [cubes16[i % 16] for i in range(n*n)]
random.shuffle(cubes)
return map(random.choice, cubes) |
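Note that on Python 3 `map` returns a lazy iterator, so callers that index into the board should materialize it first (`cubes16` is assumed to be the module-level list of 16 letter cubes):
board = list(random_boggle(4))
len(board)  # 16 -- one letter per cell, in row-major order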
def readQuotes(self, start, end):
''' read quotes '''
rows = self.__hbase.scanTable(self.tableName(HBaseDAM.QUOTE), [HBaseDAM.QUOTE], start, end)
    return [self.__rowResultToQuote(row) for row in rows] | read quotes | Below is the instruction that describes the task:
### Input:
read quotes
### Response:
def readQuotes(self, start, end):
''' read quotes '''
rows = self.__hbase.scanTable(self.tableName(HBaseDAM.QUOTE), [HBaseDAM.QUOTE], start, end)
return [self.__rowResultToQuote(row) for row in rows] |
def _fetch(self, request):
""" Fetch using the OkHttpClient """
client = self.client
#: Dispatch the async call
call = Call(__id__=client.newCall(request.request))
call.enqueue(request.handler)
#: Save the call reference
    request.call = call | Fetch using the OkHttpClient | Below is the instruction that describes the task:
### Input:
Fetch using the OkHttpClient
### Response:
def _fetch(self, request):
""" Fetch using the OkHttpClient """
client = self.client
#: Dispatch the async call
call = Call(__id__=client.newCall(request.request))
call.enqueue(request.handler)
#: Save the call reference
request.call = call |
def revoke_token(self, token, callback):
'''
revoke_token removes the access token from the data_store
'''
yield Task(self.data_store.remove, 'tokens', token=token)
    callback() | revoke_token removes the access token from the data_store | Below is the instruction that describes the task:
### Input:
revoke_token removes the access token from the data_store
### Response:
def revoke_token(self, token, callback):
'''
revoke_token removes the access token from the data_store
'''
yield Task(self.data_store.remove, 'tokens', token=token)
callback() |
def _get_domain_id(self, domain):
"""
    Pulls all domains managed by the authenticated Hetzner account, extracts their IDs
    and returns the ID for the current domain, if it exists. Otherwise raises an error.
"""
api = self.api[self.account]['domain_id']
qdomain = dns.name.from_text(domain).to_unicode(True)
domains, last_count, page = {}, -1, 0
while last_count != len(domains):
last_count = len(domains)
page += 1
url = (api['GET'].copy()).get('url', '/').replace('<index>', str(page))
params = api['GET'].get('params', {}).copy()
for param in params:
params[param] = params[param].replace('<index>', str(page))
response = self._get(url, query_params=params)
domain_tags = Provider._filter_dom(response.text, api['filter'], True)
for domain_tag in domain_tags:
domain_id = Provider._extract_domain_id(dict(domain_tag.attrs)[api['id']['attr']],
api['id']['regex'])
domain = (Provider._filter_dom(domain_tag, api['domain'])
.renderContents().decode('UTF-8'))
domains[domain] = domain_id
if domain == qdomain:
LOGGER.info('Hetzner => Get ID %s for domain %s', domain_id, qdomain)
return domain_id
    LOGGER.error('Hetzner => ID for domain %s does not exist', qdomain)
    raise AssertionError | Pulls all domains managed by the authenticated Hetzner account, extracts their IDs
and returns the ID for the current domain, if it exists. Otherwise raises an error. | Below is the instruction that describes the task:
### Input:
Pulls all domains managed by the authenticated Hetzner account, extracts their IDs
and returns the ID for the current domain, if it exists. Otherwise raises an error.
### Response:
def _get_domain_id(self, domain):
"""
    Pulls all domains managed by the authenticated Hetzner account, extracts their IDs
    and returns the ID for the current domain, if it exists. Otherwise raises an error.
"""
api = self.api[self.account]['domain_id']
qdomain = dns.name.from_text(domain).to_unicode(True)
domains, last_count, page = {}, -1, 0
while last_count != len(domains):
last_count = len(domains)
page += 1
url = (api['GET'].copy()).get('url', '/').replace('<index>', str(page))
params = api['GET'].get('params', {}).copy()
for param in params:
params[param] = params[param].replace('<index>', str(page))
response = self._get(url, query_params=params)
domain_tags = Provider._filter_dom(response.text, api['filter'], True)
for domain_tag in domain_tags:
domain_id = Provider._extract_domain_id(dict(domain_tag.attrs)[api['id']['attr']],
api['id']['regex'])
domain = (Provider._filter_dom(domain_tag, api['domain'])
.renderContents().decode('UTF-8'))
domains[domain] = domain_id
if domain == qdomain:
LOGGER.info('Hetzner => Get ID %s for domain %s', domain_id, qdomain)
return domain_id
    LOGGER.error('Hetzner => ID for domain %s does not exist', qdomain)
raise AssertionError |
def choose_plural(amount, variants):
"""
Choose proper case depending on amount
@param amount: amount of objects
@type amount: C{integer types}
@param variants: variants (forms) of object in such form:
(1 object, 2 objects, 5 objects).
@type variants: 3-element C{sequence} of C{unicode}
    or C{unicode} (three variants with delimiter ',')
@return: proper variant
@rtype: C{unicode}
    @raise ValueError: variants' length is less than 3
"""
if isinstance(variants, six.text_type):
variants = split_values(variants)
check_length(variants, 3)
amount = abs(amount)
if amount % 10 == 1 and amount % 100 != 11:
variant = 0
elif amount % 10 >= 2 and amount % 10 <= 4 and \
(amount % 100 < 10 or amount % 100 >= 20):
variant = 1
else:
variant = 2
return variants[variant] | Choose proper case depending on amount
@param amount: amount of objects
@type amount: C{integer types}
@param variants: variants (forms) of object in such form:
(1 object, 2 objects, 5 objects).
@type variants: 3-element C{sequence} of C{unicode}
or C{unicode} (three variants with delimiter ',')
@return: proper variant
@rtype: C{unicode}
@raise ValueError: variants' length is less than 3 | Below is the instruction that describes the task:
### Input:
Choose proper case depending on amount
@param amount: amount of objects
@type amount: C{integer types}
@param variants: variants (forms) of object in such form:
(1 object, 2 objects, 5 objects).
@type variants: 3-element C{sequence} of C{unicode}
or C{unicode} (three variants with delimiter ',')
@return: proper variant
@rtype: C{unicode}
@raise ValueError: variants' length is less than 3
### Response:
def choose_plural(amount, variants):
"""
Choose proper case depending on amount
@param amount: amount of objects
@type amount: C{integer types}
@param variants: variants (forms) of object in such form:
(1 object, 2 objects, 5 objects).
@type variants: 3-element C{sequence} of C{unicode}
    or C{unicode} (three variants with delimiter ',')
@return: proper variant
@rtype: C{unicode}
    @raise ValueError: variants' length is less than 3
"""
if isinstance(variants, six.text_type):
variants = split_values(variants)
check_length(variants, 3)
amount = abs(amount)
if amount % 10 == 1 and amount % 100 != 11:
variant = 0
elif amount % 10 >= 2 and amount % 10 <= 4 and \
(amount % 100 < 10 or amount % 100 >= 20):
variant = 1
else:
variant = 2
return variants[variant] |
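The three variants follow the Russian pluralization rule (1 / 2-4 / 5+, with the 11-14 exception). Abstract labels make the branch selection visible:
forms = (u'one-form', u'few-form', u'many-form')
choose_plural(1, forms)   # u'one-form'
choose_plural(3, forms)   # u'few-form'
choose_plural(11, forms)  # u'many-form' -- 11 % 100 == 11 skips the first branch
choose_plural(21, forms)  # u'one-form' -- 21 % 10 == 1 and 21 % 100 != 11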
def ow_search(self, vid=0xBC, pid=None, name=None):
"""Search for specific memory id/name and return it"""
for m in self.get_mems(MemoryElement.TYPE_1W):
        if (pid and m.pid == pid) or (name and m.name == name):
return m
    return None | Search for specific memory id/name and return it | Below is the instruction that describes the task:
### Input:
Search for specific memory id/name and return it
### Response:
def ow_search(self, vid=0xBC, pid=None, name=None):
"""Search for specific memory id/name and return it"""
for m in self.get_mems(MemoryElement.TYPE_1W):
        if (pid and m.pid == pid) or (name and m.name == name):
return m
return None |
def compile_fund(workbook, sheet, row, col):
"""
    Compile funding entries. Iterate over both rows at the same time. Keep adding entries until both cells are empty.
:param obj workbook:
:param str sheet:
:param int row:
:param int col:
:return list of dict: l
"""
logger_excel.info("enter compile_fund")
l = []
temp_sheet = workbook.sheet_by_name(sheet)
while col < temp_sheet.ncols:
col += 1
try:
# Make a dictionary for this funding entry.
_curr = {
'agency': temp_sheet.cell_value(row, col),
'grant': temp_sheet.cell_value(row+1, col),
"principalInvestigator": temp_sheet.cell_value(row+2, col),
"country": temp_sheet.cell_value(row + 3, col)
}
            # Collect all four cell values for this entry
_exist = [temp_sheet.cell_value(row, col), temp_sheet.cell_value(row+1, col),
temp_sheet.cell_value(row+2, col), temp_sheet.cell_value(row+3, col)]
# Remove all empty items from the list
_exist = [i for i in _exist if i]
# If we have all empty entries, then don't continue. Quit funding and return what we have.
if not _exist:
return l
# We have funding data. Add this funding block to the growing list.
l.append(_curr)
except IndexError as e:
logger_excel.debug("compile_fund: IndexError: sheet:{} row:{} col:{}, {}".format(sheet, row, col, e))
logger_excel.info("exit compile_fund")
    return l | Compile funding entries. Iterate over both rows at the same time. Keep adding entries until both cells are empty.
:param obj workbook:
:param str sheet:
:param int row:
:param int col:
:return list of dict: l | Below is the instruction that describes the task:
### Input:
Compile funding entries. Iterate over both rows at the same time. Keep adding entries until both cells are empty.
:param obj workbook:
:param str sheet:
:param int row:
:param int col:
:return list of dict: l
### Response:
def compile_fund(workbook, sheet, row, col):
"""
    Compile funding entries. Iterate over both rows at the same time. Keep adding entries until both cells are empty.
:param obj workbook:
:param str sheet:
:param int row:
:param int col:
:return list of dict: l
"""
logger_excel.info("enter compile_fund")
l = []
temp_sheet = workbook.sheet_by_name(sheet)
while col < temp_sheet.ncols:
col += 1
try:
# Make a dictionary for this funding entry.
_curr = {
'agency': temp_sheet.cell_value(row, col),
'grant': temp_sheet.cell_value(row+1, col),
"principalInvestigator": temp_sheet.cell_value(row+2, col),
"country": temp_sheet.cell_value(row + 3, col)
}
            # Collect all four cell values for this entry
_exist = [temp_sheet.cell_value(row, col), temp_sheet.cell_value(row+1, col),
temp_sheet.cell_value(row+2, col), temp_sheet.cell_value(row+3, col)]
# Remove all empty items from the list
_exist = [i for i in _exist if i]
# If we have all empty entries, then don't continue. Quit funding and return what we have.
if not _exist:
return l
# We have funding data. Add this funding block to the growing list.
l.append(_curr)
except IndexError as e:
logger_excel.debug("compile_fund: IndexError: sheet:{} row:{} col:{}, {}".format(sheet, row, col, e))
logger_excel.info("exit compile_fund")
return l |
def get(self, key, default=None, type=None):
"""Returns the first value for a key.
If `type` is not None, the value will be converted by calling
`type` with the value as argument. If type() raises `ValueError`, it
will be treated as if the value didn't exist, and `default` will be
returned instead.
"""
try:
value = self[key]
if type is not None:
return type(value)
return value
except (KeyError, ValueError):
return default | Returns the first value for a key.
If `type` is not None, the value will be converted by calling
`type` with the value as argument. If type() raises `ValueError`, it
will be treated as if the value didn't exist, and `default` will be
returned instead. | Below is the instruction that describes the task:
### Input:
Returns the first value for a key.
If `type` is not None, the value will be converted by calling
`type` with the value as argument. If type() raises `ValueError`, it
will be treated as if the value didn't exist, and `default` will be
returned instead.
### Response:
def get(self, key, default=None, type=None):
"""Returns the first value for a key.
If `type` is not None, the value will be converted by calling
`type` with the value as argument. If type() raises `ValueError`, it
will be treated as if the value didn't exist, and `default` will be
returned instead.
"""
try:
value = self[key]
if type is not None:
return type(value)
return value
except (KeyError, ValueError):
return default |
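Assuming `args` is an instance of this mapping holding {'page': '3', 'q': 'abc'} (both names are illustrative), the convert-with-fallback behavior looks like:
args.get('page', default=1, type=int)     # 3 -- '3' is converted by int()
args.get('missing', default=1, type=int)  # 1 -- the KeyError is swallowed
args.get('q', default=0, type=int)        # 0 -- int('abc') raises ValueError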
def get_info(self, full=False):
" Return printable information about current site. "
if full:
context = self.as_dict()
return "".join("{0:<25} = {1}\n".format(
key, context[key]) for key in sorted(context.iterkeys()))
return "%s [%s]" % (self.get_name(), self.template) | Return printable information about current site. | Below is the the instruction that describes the task:
### Input:
Return printable information about the current site.
### Response:
def get_info(self, full=False):
" Return printable information about current site. "
if full:
context = self.as_dict()
return "".join("{0:<25} = {1}\n".format(
key, context[key]) for key in sorted(context.iterkeys()))
return "%s [%s]" % (self.get_name(), self.template) |
def is_indexed(self, partition):
""" Returns True if partition is already indexed. Otherwise returns False. """
query = text("""
SELECT vid
FROM partition_index
WHERE vid = :vid;
""")
result = self.execute(query, vid=partition.vid)
    return bool(result.fetchall()) | Returns True if partition is already indexed. Otherwise returns False. | Below is the instruction that describes the task:
### Input:
Returns True if partition is already indexed. Otherwise returns False.
### Response:
def is_indexed(self, partition):
""" Returns True if partition is already indexed. Otherwise returns False. """
query = text("""
SELECT vid
FROM partition_index
WHERE vid = :vid;
""")
result = self.execute(query, vid=partition.vid)
return bool(result.fetchall()) |
def write_collection_data(self, path, data):
"""
Write collections rules to disk
"""
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
fd = os.open(path, flags, 0o600)
with os.fdopen(fd, 'w') as dyn_conf_file:
        dyn_conf_file.write(data) | Write collections rules to disk | Below is the instruction that describes the task:
### Input:
Write collections rules to disk
### Response:
def write_collection_data(self, path, data):
"""
Write collections rules to disk
"""
flags = os.O_WRONLY | os.O_CREAT | os.O_TRUNC
fd = os.open(path, flags, 0o600)
with os.fdopen(fd, 'w') as dyn_conf_file:
dyn_conf_file.write(data) |
def _prop_name(self):
"""
Calculate property name from tag name, e.g. a:schemeClr -> schemeClr.
"""
if ':' in self._nsptagname:
start = self._nsptagname.index(':') + 1
else:
start = 0
    return self._nsptagname[start:] | Calculate property name from tag name, e.g. a:schemeClr -> schemeClr. | Below is the instruction that describes the task:
### Input:
Calculate property name from tag name, e.g. a:schemeClr -> schemeClr.
### Response:
def _prop_name(self):
"""
Calculate property name from tag name, e.g. a:schemeClr -> schemeClr.
"""
if ':' in self._nsptagname:
start = self._nsptagname.index(':') + 1
else:
start = 0
return self._nsptagname[start:] |
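The slicing logic can be checked standalone, without the owning class:
for tag in ('a:schemeClr', 'w:rPr', 'plainTag'):
    start = tag.index(':') + 1 if ':' in tag else 0
    print(tag, '->', tag[start:])
# a:schemeClr -> schemeClr
# w:rPr -> rPr
# plainTag -> plainTag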
def set_entries(self, entries, user_scope):
"""SetEntries.
[Preview API] Set the specified setting entry values for the given user/all-users scope
:param {object} entries: The entries to set
:param str user_scope: User-Scope at which to set the values. Should be "me" for the current user or "host" for all users.
"""
route_values = {}
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
content = self._serialize.body(entries, '{object}')
self._send(http_method='PATCH',
location_id='cd006711-163d-4cd4-a597-b05bad2556ff',
version='5.0-preview.1',
route_values=route_values,
content=content) | SetEntries.
[Preview API] Set the specified setting entry values for the given user/all-users scope
:param {object} entries: The entries to set
:param str user_scope: User-Scope at which to set the values. Should be "me" for the current user or "host" for all users. | Below is the instruction that describes the task:
### Input:
SetEntries.
[Preview API] Set the specified setting entry values for the given user/all-users scope
:param {object} entries: The entries to set
:param str user_scope: User-Scope at which to set the values. Should be "me" for the current user or "host" for all users.
### Response:
def set_entries(self, entries, user_scope):
"""SetEntries.
[Preview API] Set the specified setting entry values for the given user/all-users scope
:param {object} entries: The entries to set
:param str user_scope: User-Scope at which to set the values. Should be "me" for the current user or "host" for all users.
"""
route_values = {}
if user_scope is not None:
route_values['userScope'] = self._serialize.url('user_scope', user_scope, 'str')
content = self._serialize.body(entries, '{object}')
self._send(http_method='PATCH',
location_id='cd006711-163d-4cd4-a597-b05bad2556ff',
version='5.0-preview.1',
route_values=route_values,
content=content) |
def gen_key(minion_id, dns_name=None, zone='default', password=None):
'''
    Generate and return a private_key. If a ``dns_name`` is passed in, the
private_key will be cached under that name. The type of key and the
parameters used to generate the key are based on the default certificate
use policy associated with the specified zone.
CLI Example:
.. code-block:: bash
salt-run venafi.gen_key <minion_id> [dns_name] [zone] [password]
'''
# Get the default certificate use policy associated with the zone
# so we can generate keys that conform with policy
# The /v1/zones/tag/{name} API call is a shortcut to get the zoneID
# directly from the name
qdata = __utils__['http.query'](
'{0}/zones/tag/{1}'.format(_base_url(), zone),
method='GET',
decode=True,
decode_type='json',
header_dict={
'tppl-api-key': _api_key(),
'Content-Type': 'application/json',
},
)
zone_id = qdata['dict']['id']
# the /v1/certificatepolicies?zoneId API call returns the default
# certificate use and certificate identity policies
qdata = __utils__['http.query'](
'{0}/certificatepolicies?zoneId={1}'.format(_base_url(), zone_id),
method='GET',
decode=True,
decode_type='json',
header_dict={
'tppl-api-key': _api_key(),
'Content-Type': 'application/json',
},
)
policies = qdata['dict']['certificatePolicies']
# Extract the key length and key type from the certificate use policy
# and generate the private key accordingly
for policy in policies:
if policy['certificatePolicyType'] == "CERTIFICATE_USE":
keyTypes = policy['keyTypes']
# in case multiple keytypes and key lengths are supported
# always use the first key type and key length
keygen_type = keyTypes[0]['keyType']
key_len = keyTypes[0]['keyLengths'][0]
if int(key_len) < 2048:
key_len = 2048
if keygen_type == "RSA":
if HAS_M2:
gen = RSA.gen_key(key_len, 65537)
private_key = gen.as_pem(cipher='des_ede3_cbc', callback=lambda x: six.b(password))
else:
gen = RSA.generate(bits=key_len)
private_key = gen.exportKey('PEM', password)
if dns_name is not None:
bank = 'venafi/domains'
cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
try:
data = cache.fetch(bank, dns_name)
data['private_key'] = private_key
data['minion_id'] = minion_id
except TypeError:
data = {'private_key': private_key,
'minion_id': minion_id}
cache.store(bank, dns_name, data)
    return private_key | Generate and return a private_key. If a ``dns_name`` is passed in, the
private_key will be cached under that name. The type of key and the
parameters used to generate the key are based on the default certificate
use policy associated with the specified zone.
CLI Example:
.. code-block:: bash
salt-run venafi.gen_key <minion_id> [dns_name] [zone] [password] | Below is the instruction that describes the task:
### Input:
Generate and return a private_key. If a ``dns_name`` is passed in, the
private_key will be cached under that name. The type of key and the
parameters used to generate the key are based on the default certificate
use policy associated with the specified zone.
CLI Example:
.. code-block:: bash
salt-run venafi.gen_key <minion_id> [dns_name] [zone] [password]
### Response:
def gen_key(minion_id, dns_name=None, zone='default', password=None):
'''
    Generate and return a private_key. If a ``dns_name`` is passed in, the
private_key will be cached under that name. The type of key and the
parameters used to generate the key are based on the default certificate
use policy associated with the specified zone.
CLI Example:
.. code-block:: bash
salt-run venafi.gen_key <minion_id> [dns_name] [zone] [password]
'''
# Get the default certificate use policy associated with the zone
# so we can generate keys that conform with policy
# The /v1/zones/tag/{name} API call is a shortcut to get the zoneID
# directly from the name
qdata = __utils__['http.query'](
'{0}/zones/tag/{1}'.format(_base_url(), zone),
method='GET',
decode=True,
decode_type='json',
header_dict={
'tppl-api-key': _api_key(),
'Content-Type': 'application/json',
},
)
zone_id = qdata['dict']['id']
# the /v1/certificatepolicies?zoneId API call returns the default
# certificate use and certificate identity policies
qdata = __utils__['http.query'](
'{0}/certificatepolicies?zoneId={1}'.format(_base_url(), zone_id),
method='GET',
decode=True,
decode_type='json',
header_dict={
'tppl-api-key': _api_key(),
'Content-Type': 'application/json',
},
)
policies = qdata['dict']['certificatePolicies']
# Extract the key length and key type from the certificate use policy
# and generate the private key accordingly
for policy in policies:
if policy['certificatePolicyType'] == "CERTIFICATE_USE":
keyTypes = policy['keyTypes']
# in case multiple keytypes and key lengths are supported
# always use the first key type and key length
keygen_type = keyTypes[0]['keyType']
key_len = keyTypes[0]['keyLengths'][0]
if int(key_len) < 2048:
key_len = 2048
if keygen_type == "RSA":
if HAS_M2:
gen = RSA.gen_key(key_len, 65537)
private_key = gen.as_pem(cipher='des_ede3_cbc', callback=lambda x: six.b(password))
else:
gen = RSA.generate(bits=key_len)
private_key = gen.exportKey('PEM', password)
if dns_name is not None:
bank = 'venafi/domains'
cache = salt.cache.Cache(__opts__, syspaths.CACHE_DIR)
try:
data = cache.fetch(bank, dns_name)
data['private_key'] = private_key
data['minion_id'] = minion_id
except TypeError:
data = {'private_key': private_key,
'minion_id': minion_id}
cache.store(bank, dns_name, data)
return private_key |
def _set_port_security(self, v, load=False):
"""
Setter method for port_security, mapped from YANG variable /interface/ethernet/switchport/port_security (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_security is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_security() directly.
YANG Description: Enable port-security feature
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=port_security.port_security, is_container='container', presence=True, yang_name="port-security", rest_name="port-security", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable port-security feature', u'callpoint': u'interface_portsecurity'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_security must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=port_security.port_security, is_container='container', presence=True, yang_name="port-security", rest_name="port-security", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable port-security feature', u'callpoint': u'interface_portsecurity'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__port_security = t
if hasattr(self, '_set'):
self._set() | Setter method for port_security, mapped from YANG variable /interface/ethernet/switchport/port_security (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_security is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_security() directly.
YANG Description: Enable port-security feature | Below is the instruction that describes the task:
### Input:
Setter method for port_security, mapped from YANG variable /interface/ethernet/switchport/port_security (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_security is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_security() directly.
YANG Description: Enable port-security feature
### Response:
def _set_port_security(self, v, load=False):
"""
Setter method for port_security, mapped from YANG variable /interface/ethernet/switchport/port_security (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_port_security is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_port_security() directly.
YANG Description: Enable port-security feature
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=port_security.port_security, is_container='container', presence=True, yang_name="port-security", rest_name="port-security", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable port-security feature', u'callpoint': u'interface_portsecurity'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """port_security must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=port_security.port_security, is_container='container', presence=True, yang_name="port-security", rest_name="port-security", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Enable port-security feature', u'callpoint': u'interface_portsecurity'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__port_security = t
if hasattr(self, '_set'):
self._set() |
def key_rule(self, regex, verifier):
"""Add a rule with a pattern that should apply to all keys.
Any key not explicitly listed in an add_required or add_optional rule
must match ONE OF the rules given in a call to key_rule().
So these rules are all OR'ed together.
In this case you should pass a raw string specifying a regex that is
used to determine if the rule is used to check a given key.
Args:
regex (str): The regular expression used to match the rule or None
if this should apply to all
verifier (Verifier): The verification rule
"""
if regex is not None:
regex = re.compile(regex)
self._additional_key_rules.append((regex, verifier)) | Add a rule with a pattern that should apply to all keys.
Any key not explicitly listed in an add_required or add_optional rule
must match ONE OF the rules given in a call to key_rule().
So these rules are all OR'ed together.
In this case you should pass a raw string specifying a regex that is
used to determine if the rule is used to check a given key.
Args:
regex (str): The regular expression used to match the rule or None
if this should apply to all
verifier (Verifier): The verification rule | Below is the instruction that describes the task:
### Input:
Add a rule with a pattern that should apply to all keys.
Any key not explicitly listed in an add_required or add_optional rule
must match ONE OF the rules given in a call to key_rule().
So these rules are all OR'ed together.
In this case you should pass a raw string specifying a regex that is
used to determine if the rule is used to check a given key.
Args:
regex (str): The regular expression used to match the rule or None
if this should apply to all
verifier (Verifier): The verification rule
### Response:
def key_rule(self, regex, verifier):
"""Add a rule with a pattern that should apply to all keys.
Any key not explicitly listed in an add_required or add_optional rule
must match ONE OF the rules given in a call to key_rule().
So these rules are all OR'ed together.
In this case you should pass a raw string specifying a regex that is
used to determine if the rule is used to check a given key.
Args:
regex (str): The regular expression used to match the rule or None
if this should apply to all
verifier (Verifier): The verification rule
"""
if regex is not None:
regex = re.compile(regex)
self._additional_key_rules.append((regex, verifier)) |
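A minimal usage sketch of the rule-matching semantics above; the surrounding Verifier class is not part of this entry, so the stand-in below only mimics the (regex, verifier) list that key_rule() appends to, and the verifier names are hypothetical.
import re
rules = []  # what self._additional_key_rules would hold
def key_rule(regex, verifier):
    if regex is not None:
        regex = re.compile(regex)
    rules.append((regex, verifier))
key_rule(r'^debug_', 'bool-verifier')  # keys starting with debug_
key_rule(None, 'fallback-verifier')    # applies to every key
def rules_for(key):
    # a key not covered by required/optional rules must match ONE OF these OR'ed rules
    return [v for pattern, v in rules if pattern is None or pattern.match(key)]
print(rules_for('debug_mode'))  # ['bool-verifier', 'fallback-verifier']
print(rules_for('timeout'))     # ['fallback-verifier']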
def cudnnCreateConvolutionDescriptor():
""""
Create a convolution descriptor.
This function creates a convolution descriptor object by allocating the memory needed to
hold its opaque structure.
Returns
-------
convDesc : cudnnConvolutionDescriptor
Handle to newly allocated convolution descriptor.
"""
convDesc = ctypes.c_void_p()
status = _libcudnn.cudnnCreateConvolutionDescriptor(ctypes.byref(convDesc))
cudnnCheckStatus(status)
return convDesc.value | Create a convolution descriptor.
This function creates a convolution descriptor object by allocating the memory needed to
hold its opaque structure.
Returns
-------
convDesc : cudnnConvolutionDescriptor
Handle to newly allocated convolution descriptor. | Below is the instruction that describes the task:
### Input:
Create a convolution descriptor.
This function creates a convolution descriptor object by allocating the memory needed to
hold its opaque structure.
Returns
-------
convDesc : cudnnConvolutionDescriptor
Handle to newly allocated convolution descriptor.
### Response:
def cudnnCreateConvolutionDescriptor():
""""
Create a convolution descriptor.
This function creates a convolution descriptor object by allocating the memory needed to
hold its opaque structure.
Returns
-------
convDesc : cudnnConvolutionDescriptor
Handle to newly allocated convolution descriptor.
"""
convDesc = ctypes.c_void_p()
status = _libcudnn.cudnnCreateConvolutionDescriptor(ctypes.byref(convDesc))
cudnnCheckStatus(status)
return convDesc.value |
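A hedged lifecycle sketch; it assumes the same ctypes wrapper module also exposes cudnnDestroyConvolutionDescriptor() and a setter such as cudnnSetConvolution2dDescriptor(), mirroring this create call.
convDesc = cudnnCreateConvolutionDescriptor()
try:
    # configure before use, e.g. cudnnSetConvolution2dDescriptor(convDesc, ...)
    pass
finally:
    cudnnDestroyConvolutionDescriptor(convDesc)  # free the opaque structure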
def git_list_refs(repo_dir):
"""List references available in the local repo with commit ids.
This is similar to ls-remote, but shows the *local* refs.
Return format:
.. code-block:: python
{<ref1>: <commit_hash1>,
<ref2>: <commit_hash2>,
...,
<refN>: <commit_hashN>,
}
"""
command = ['git', 'show-ref', '--dereference', '--head']
raw = execute_git_command(command, repo_dir=repo_dir).splitlines()
output = [l.strip() for l in raw if l.strip()]
return {ref: commit_hash for commit_hash, ref in
[l.split(None, 1) for l in output]} | List references available in the local repo with commit ids.
This is similar to ls-remote, but shows the *local* refs.
Return format:
.. code-block:: python
{<ref1>: <commit_hash1>,
<ref2>: <commit_hash2>,
...,
<refN>: <commit_hashN>,
} | Below is the instruction that describes the task:
### Input:
List references available in the local repo with commit ids.
This is similar to ls-remote, but shows the *local* refs.
Return format:
.. code-block:: python
{<ref1>: <commit_hash1>,
<ref2>: <commit_hash2>,
...,
<refN>: <commit_hashN>,
}
### Response:
def git_list_refs(repo_dir):
"""List references available in the local repo with commit ids.
This is similar to ls-remote, but shows the *local* refs.
Return format:
.. code-block:: python
{<ref1>: <commit_hash1>,
<ref2>: <commit_hash2>,
...,
<refN>: <commit_hashN>,
}
"""
command = ['git', 'show-ref', '--dereference', '--head']
raw = execute_git_command(command, repo_dir=repo_dir).splitlines()
output = [l.strip() for l in raw if l.strip()]
return {ref: commit_hash for commit_hash, ref in
[l.split(None, 1) for l in output]} |
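A short example of consuming the returned mapping, assuming git_list_refs() and its execute_git_command() helper are importable and the path points at a real checkout.
refs = git_list_refs('/path/to/repo')       # hypothetical repo path
print(refs.get('HEAD'))                     # commit hash HEAD points at
for ref, commit in sorted(refs.items()):
    print('{:7.7} {}'.format(commit, ref))  # e.g. "1a2b3c4 refs/heads/master"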
def distortion_score(X, labels, metric='euclidean'):
"""
Compute the mean distortion of all samples.
The distortion is computed as the sum of the squared distances between
each observation and its closest centroid. Logically, this is the metric
that K-Means attempts to minimize as it is fitting the model.
.. seealso:: http://kldavenport.com/the-cost-function-of-k-means/
Parameters
----------
X : array, shape = [n_samples, n_features] or [n_samples_a, n_samples_a]
Array of pairwise distances between samples if metric == "precomputed"
or a feature array for computing distances against the labels.
labels : array, shape = [n_samples]
Predicted labels for each sample
metric : string
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by `sklearn.metrics.pairwise.pairwise_distances
<http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html#sklearn.metrics.pairwise.pairwise_distances>`_
.. todo:: add sample_size and random_state kwds similar to silhouette_score
"""
# Encode labels to get unique centers and groups
le = LabelEncoder()
labels = le.fit_transform(labels)
unique_labels = le.classes_
# Sum of the distortions
distortion = 0
# Loop through each label (center) to compute the centroid
for current_label in unique_labels:
# Mask the instances that belong to the current label
mask = labels == current_label
instances = X[mask]
# Compute the center of these instances
center = instances.mean(axis=0)
# NOTE: csc_matrix and csr_matrix mean returns a 2D array, numpy.mean
# returns an array of 1 dimension less than the input. We expect
# instances to be a 2D array, therefore to do pairwise computation we
# require center to be a 2D array with a single row (the center).
# See #370 for more detail.
if not sp.issparse(instances):
center = np.array([center])
# Compute the square distances from the instances to the center
distances = pairwise_distances(instances, center, metric=metric)
distances = distances ** 2
# Add the sum of square distance to the distortion
distortion += distances.sum()
return distortion | Compute the mean distortion of all samples.
The distortion is computed as the sum of the squared distances between
each observation and its closest centroid. Logically, this is the metric
that K-Means attempts to minimize as it is fitting the model.
.. seealso:: http://kldavenport.com/the-cost-function-of-k-means/
Parameters
----------
X : array, shape = [n_samples, n_features] or [n_samples_a, n_samples_a]
Array of pairwise distances between samples if metric == "precomputed"
or a feature array for computing distances against the labels.
labels : array, shape = [n_samples]
Predicted labels for each sample
metric : string
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by `sklearn.metrics.pairwise.pairwise_distances
<http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html#sklearn.metrics.pairwise.pairwise_distances>`_
.. todo:: add sample_size and random_state kwds similar to silhouette_score | Below is the instruction that describes the task:
### Input:
Compute the mean distortion of all samples.
The distortion is computed as the sum of the squared distances between
each observation and its closest centroid. Logically, this is the metric
that K-Means attempts to minimize as it is fitting the model.
.. seealso:: http://kldavenport.com/the-cost-function-of-k-means/
Parameters
----------
X : array, shape = [n_samples, n_features] or [n_samples_a, n_samples_a]
Array of pairwise distances between samples if metric == "precomputed"
or a feature array for computing distances against the labels.
labels : array, shape = [n_samples]
Predicted labels for each sample
metric : string
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by `sklearn.metrics.pairwise.pairwise_distances
<http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html#sklearn.metrics.pairwise.pairwise_distances>`_
.. todo:: add sample_size and random_state kwds similar to silhouette_score
### Response:
def distortion_score(X, labels, metric='euclidean'):
"""
Compute the mean distortion of all samples.
The distortion is computed as the sum of the squared distances between
each observation and its closest centroid. Logically, this is the metric
that K-Means attempts to minimize as it is fitting the model.
.. seealso:: http://kldavenport.com/the-cost-function-of-k-means/
Parameters
----------
X : array, shape = [n_samples, n_features] or [n_samples_a, n_samples_a]
Array of pairwise distances between samples if metric == "precomputed"
or a feature array for computing distances against the labels.
labels : array, shape = [n_samples]
Predicted labels for each sample
metric : string
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by `sklearn.metrics.pairwise.pairwise_distances
<http://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise.pairwise_distances.html#sklearn.metrics.pairwise.pairwise_distances>`_
.. todo:: add sample_size and random_state kwds similar to silhouette_score
"""
# Encode labels to get unique centers and groups
le = LabelEncoder()
labels = le.fit_transform(labels)
unique_labels = le.classes_
# Sum of the distortions
distortion = 0
# Loop through each label (center) to compute the centroid
for current_label in unique_labels:
# Mask the instances that belong to the current label
mask = labels == current_label
instances = X[mask]
# Compute the center of these instances
center = instances.mean(axis=0)
# NOTE: csc_matrix and csr_matrix mean returns a 2D array, numpy.mean
# returns an array of 1 dimension less than the input. We expect
# instances to be a 2D array, therefore to do pairwise computation we
# require center to be a 2D array with a single row (the center).
# See #370 for more detail.
if not sp.issparse(instances):
center = np.array([center])
# Compute the square distances from the instances to the center
distances = pairwise_distances(instances, center, metric=metric)
distances = distances ** 2
# Add the sum of square distance to the distortion
distortion += distances.sum()
return distortion |
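A usage sketch scoring a clustering fit; it assumes scikit-learn is installed alongside the distortion_score() defined above.
from sklearn.cluster import KMeans
from sklearn.datasets import make_blobs
X, _ = make_blobs(n_samples=300, centers=4, random_state=42)
labels = KMeans(n_clusters=4, random_state=42).fit_predict(X)
print(distortion_score(X, labels))  # sum of squared sample-to-centroid distances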
def is_virtual_host(endpoint_url, bucket_name):
"""
Check to see if the ``bucket_name`` can be part of virtual host
style.
:param endpoint_url: Endpoint url which will be used for virtual host.
:param bucket_name: Bucket name to be validated against.
"""
is_valid_bucket_name(bucket_name)
parsed_url = urlsplit(endpoint_url)
# bucket_name can be valid but '.' in the hostname will fail
# SSL certificate validation. So do not use host-style for
# such buckets.
if 'https' in parsed_url.scheme and '.' in bucket_name:
return False
for host in ['s3.amazonaws.com', 'aliyuncs.com']:
if host in parsed_url.netloc:
return True
return False | Check to see if the ``bucket_name`` can be part of virtual host
style.
:param endpoint_url: Endpoint url which will be used for virtual host.
:param bucket_name: Bucket name to be validated against. | Below is the instruction that describes the task:
### Input:
Check to see if the ``bucket_name`` can be part of virtual host
style.
:param endpoint_url: Endpoint url which will be used for virtual host.
:param bucket_name: Bucket name to be validated against.
### Response:
def is_virtual_host(endpoint_url, bucket_name):
"""
Check to see if the ``bucket_name`` can be part of virtual host
style.
:param endpoint_url: Endpoint url which will be used for virtual host.
:param bucket_name: Bucket name to be validated against.
"""
is_valid_bucket_name(bucket_name)
parsed_url = urlsplit(endpoint_url)
# bucket_name can be valid but '.' in the hostname will fail
# SSL certificate validation. So do not use host-style for
# such buckets.
if 'https' in parsed_url.scheme and '.' in bucket_name:
return False
for host in ['s3.amazonaws.com', 'aliyuncs.com']:
if host in parsed_url.netloc:
return True
return False |
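A quick illustration of the three branches, assuming is_valid_bucket_name() accepts these names.
print(is_virtual_host('https://s3.amazonaws.com', 'mybucket'))   # True
print(is_virtual_host('https://s3.amazonaws.com', 'my.bucket'))  # False: '.' breaks TLS validation
print(is_virtual_host('https://minio.example.com', 'mybucket'))  # False: host not recognised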
def import_lane_element(lane_element, plane_element):
"""
Method for importing 'lane' element from diagram file.
:param lane_element: XML document element,
:param plane_element: object representing a BPMN XML 'plane' element.
"""
lane_id = lane_element.getAttribute(consts.Consts.id)
lane_name = lane_element.getAttribute(consts.Consts.name)
child_lane_set_attr = {}
flow_node_refs = []
for element in utils.BpmnImportUtils.iterate_elements(lane_element):
if element.nodeType != element.TEXT_NODE:
tag_name = utils.BpmnImportUtils.remove_namespace_from_tag_name(element.tagName)
if tag_name == consts.Consts.child_lane_set:
child_lane_set_attr = BpmnDiagramGraphImport.import_child_lane_set_element(element, plane_element)
elif tag_name == consts.Consts.flow_node_ref:
flow_node_ref_id = element.firstChild.nodeValue
flow_node_refs.append(flow_node_ref_id)
lane_attr = {consts.Consts.id: lane_id, consts.Consts.name: lane_name,
consts.Consts.child_lane_set: child_lane_set_attr,
consts.Consts.flow_node_refs: flow_node_refs}
shape_element = None
for element in utils.BpmnImportUtils.iterate_elements(plane_element):
if element.nodeType != element.TEXT_NODE and element.getAttribute(consts.Consts.bpmn_element) == lane_id:
shape_element = element
if shape_element is not None:
bounds = shape_element.getElementsByTagNameNS("*", "Bounds")[0]
lane_attr[consts.Consts.is_horizontal] = shape_element.getAttribute(consts.Consts.is_horizontal)
lane_attr[consts.Consts.width] = bounds.getAttribute(consts.Consts.width)
lane_attr[consts.Consts.height] = bounds.getAttribute(consts.Consts.height)
lane_attr[consts.Consts.x] = bounds.getAttribute(consts.Consts.x)
lane_attr[consts.Consts.y] = bounds.getAttribute(consts.Consts.y)
return lane_attr | Method for importing 'lane' element from diagram file.
:param lane_element: XML document element,
:param plane_element: object representing a BPMN XML 'plane' element. | Below is the instruction that describes the task:
### Input:
Method for importing 'lane' element from diagram file.
:param lane_element: XML document element,
:param plane_element: object representing a BPMN XML 'plane' element.
### Response:
def import_lane_element(lane_element, plane_element):
"""
Method for importing 'lane' element from diagram file.
:param lane_element: XML document element,
:param plane_element: object representing a BPMN XML 'plane' element.
"""
lane_id = lane_element.getAttribute(consts.Consts.id)
lane_name = lane_element.getAttribute(consts.Consts.name)
child_lane_set_attr = {}
flow_node_refs = []
for element in utils.BpmnImportUtils.iterate_elements(lane_element):
if element.nodeType != element.TEXT_NODE:
tag_name = utils.BpmnImportUtils.remove_namespace_from_tag_name(element.tagName)
if tag_name == consts.Consts.child_lane_set:
child_lane_set_attr = BpmnDiagramGraphImport.import_child_lane_set_element(element, plane_element)
elif tag_name == consts.Consts.flow_node_ref:
flow_node_ref_id = element.firstChild.nodeValue
flow_node_refs.append(flow_node_ref_id)
lane_attr = {consts.Consts.id: lane_id, consts.Consts.name: lane_name,
consts.Consts.child_lane_set: child_lane_set_attr,
consts.Consts.flow_node_refs: flow_node_refs}
shape_element = None
for element in utils.BpmnImportUtils.iterate_elements(plane_element):
if element.nodeType != element.TEXT_NODE and element.getAttribute(consts.Consts.bpmn_element) == lane_id:
shape_element = element
if shape_element is not None:
bounds = shape_element.getElementsByTagNameNS("*", "Bounds")[0]
lane_attr[consts.Consts.is_horizontal] = shape_element.getAttribute(consts.Consts.is_horizontal)
lane_attr[consts.Consts.width] = bounds.getAttribute(consts.Consts.width)
lane_attr[consts.Consts.height] = bounds.getAttribute(consts.Consts.height)
lane_attr[consts.Consts.x] = bounds.getAttribute(consts.Consts.x)
lane_attr[consts.Consts.y] = bounds.getAttribute(consts.Consts.y)
return lane_attr |
def get_ancestors(self, obj):
"""Return a list of all ancestor nodes of the :class:`.Node` *obj*.
The list is ordered from the most shallow ancestor (greatest great-
grandparent) to the direct parent. The node itself is not included in
the list. For example::
>>> text = "{{a|{{b|{{c|{{d}}}}}}}}"
>>> code = mwparserfromhell.parse(text)
>>> node = code.filter_templates(matches=lambda n: n == "{{d}}")[0]
>>> code.get_ancestors(node)
['{{a|{{b|{{c|{{d}}}}}}}}', '{{b|{{c|{{d}}}}}}', '{{c|{{d}}}}']
Will return an empty list if *obj* is at the top level of this Wikicode
object. Will raise :exc:`ValueError` if it wasn't found.
"""
def _get_ancestors(code, needle):
for node in code.nodes:
if node is needle:
return []
for code in node.__children__():
ancestors = _get_ancestors(code, needle)
if ancestors is not None:
return [node] + ancestors
if isinstance(obj, Wikicode):
obj = obj.get(0)
elif not isinstance(obj, Node):
raise ValueError(obj)
ancestors = _get_ancestors(self, obj)
if ancestors is None:
raise ValueError(obj)
return ancestors | Return a list of all ancestor nodes of the :class:`.Node` *obj*.
The list is ordered from the most shallow ancestor (greatest great-
grandparent) to the direct parent. The node itself is not included in
the list. For example::
>>> text = "{{a|{{b|{{c|{{d}}}}}}}}"
>>> code = mwparserfromhell.parse(text)
>>> node = code.filter_templates(matches=lambda n: n == "{{d}}")[0]
>>> code.get_ancestors(node)
['{{a|{{b|{{c|{{d}}}}}}}}', '{{b|{{c|{{d}}}}}}', '{{c|{{d}}}}']
Will return an empty list if *obj* is at the top level of this Wikicode
object. Will raise :exc:`ValueError` if it wasn't found. | Below is the instruction that describes the task:
### Input:
Return a list of all ancestor nodes of the :class:`.Node` *obj*.
The list is ordered from the most shallow ancestor (greatest great-
grandparent) to the direct parent. The node itself is not included in
the list. For example::
>>> text = "{{a|{{b|{{c|{{d}}}}}}}}"
>>> code = mwparserfromhell.parse(text)
>>> node = code.filter_templates(matches=lambda n: n == "{{d}}")[0]
>>> code.get_ancestors(node)
['{{a|{{b|{{c|{{d}}}}}}}}', '{{b|{{c|{{d}}}}}}', '{{c|{{d}}}}']
Will return an empty list if *obj* is at the top level of this Wikicode
object. Will raise :exc:`ValueError` if it wasn't found.
### Response:
def get_ancestors(self, obj):
"""Return a list of all ancestor nodes of the :class:`.Node` *obj*.
The list is ordered from the most shallow ancestor (greatest great-
grandparent) to the direct parent. The node itself is not included in
the list. For example::
>>> text = "{{a|{{b|{{c|{{d}}}}}}}}"
>>> code = mwparserfromhell.parse(text)
>>> node = code.filter_templates(matches=lambda n: n == "{{d}}")[0]
>>> code.get_ancestors(node)
['{{a|{{b|{{c|{{d}}}}}}}}', '{{b|{{c|{{d}}}}}}', '{{c|{{d}}}}']
Will return an empty list if *obj* is at the top level of this Wikicode
object. Will raise :exc:`ValueError` if it wasn't found.
"""
def _get_ancestors(code, needle):
for node in code.nodes:
if node is needle:
return []
for code in node.__children__():
ancestors = _get_ancestors(code, needle)
if ancestors is not None:
return [node] + ancestors
if isinstance(obj, Wikicode):
obj = obj.get(0)
elif not isinstance(obj, Node):
raise ValueError(obj)
ancestors = _get_ancestors(self, obj)
if ancestors is None:
raise ValueError(obj)
return ancestors |
def get_etree_layout_as_dict(layout_tree):
"""
Convert something that looks like this:
<layout>
<item>
<name>color</name>
<value>red</value>
</item>
<item>
<name>shapefile</name>
<value>blah.shp</value>
</item>
</layout>
Into something that looks like this:
{
'color' : 'red',
'shapefile' : 'blah.shp'
}
"""
layout_dict = dict()
for item in layout_tree.findall('item'):
name = item.find('name').text
val_element = item.find('value')
value = val_element.text.strip()
if value == '':
children = val_element.getchildren()
value = etree.tostring(children[0], pretty_print=True, encoding="unicode")
layout_dict[name] = value
return layout_dict | Convert something that looks like this:
<layout>
<item>
<name>color</name>
<value>red</value>
</item>
<item>
<name>shapefile</name>
<value>blah.shp</value>
</item>
</layout>
Into something that looks like this:
{
'color' : 'red',
'shapefile' : 'blah.shp'
} | Below is the instruction that describes the task:
### Input:
Convert something that looks like this:
<layout>
<item>
<name>color</name>
<value>red</value>
</item>
<item>
<name>shapefile</name>
<value>blah.shp</value>
</item>
</layout>
Into something that looks like this:
{
'color' : 'red',
'shapefile' : 'blah.shp'
}
### Response:
def get_etree_layout_as_dict(layout_tree):
"""
Convert something that looks like this:
<layout>
<item>
<name>color</name>
<value>red</value>
</item>
<item>
<name>shapefile</name>
<value>blah.shp</value>
</item>
</layout>
Into something that looks like this:
{
'color' : 'red',
'shapefile' : 'blah.shp'
}
"""
layout_dict = dict()
for item in layout_tree.findall('item'):
name = item.find('name').text
val_element = item.find('value')
value = val_element.text.strip()
if value == '':
children = val_element.getchildren()
value = etree.tostring(children[0], pretty_print=True, encoding="unicode")
layout_dict[name] = value
return layout_dict |
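A usage sketch with lxml, the etree implementation the function assumes; the XML mirrors the docstring example.
from lxml import etree
xml = ('<layout>'
       '<item><name>color</name><value>red</value></item>'
       '<item><name>shapefile</name><value>blah.shp</value></item>'
       '</layout>')
print(get_etree_layout_as_dict(etree.fromstring(xml)))
# {'color': 'red', 'shapefile': 'blah.shp'}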
def guessFormat(self, name):
"""
:param name:
:type name: str
"""
name = name.lower()
generator = self.generator
if re.findall(r'^is[_A-Z]', name): return lambda x:generator.boolean()
if re.findall(r'(_a|A)t$', name): return lambda x:generator.dateTime()
if name in ('first_name','firstname'): return lambda x: generator.firstName()
if name in ('last_name','lastname'): return lambda x: generator.lastName()
if name in ('username','login','nickname'): return lambda x:generator.userName()
if name in ('email','email_address'): return lambda x:generator.email()
if name in ('phone_number','phonenumber','phone'): return lambda x:generator.phoneNumber()
if name == 'address' : return lambda x:generator.address()
if name == 'city' : return lambda x: generator.city()
if name == 'streetaddress' : return lambda x: generator.streetaddress()
if name in ('postcode','zipcode'): return lambda x: generator.postcode()
if name == 'state' : return lambda x: generator.state()
if name == 'country' : return lambda x: generator.country()
if name == 'title' : return lambda x: generator.sentence()
if name in ('body','summary', 'description'): return lambda x: generator.text() | :param name:
:type name: str | Below is the instruction that describes the task:
### Input:
:param name:
:type name: str
### Response:
def guessFormat(self, name):
"""
:param name:
:type name: str
"""
name = name.lower()
generator = self.generator
if re.findall(r'^is[_A-Z]', name): return lambda x:generator.boolean()
if re.findall(r'(_a|A)t$', name): return lambda x:generator.dateTime()
if name in ('first_name','firstname'): return lambda x: generator.firstName()
if name in ('last_name','lastname'): return lambda x: generator.lastName()
if name in ('username','login','nickname'): return lambda x:generator.userName()
if name in ('email','email_address'): return lambda x:generator.email()
if name in ('phone_number','phonenumber','phone'): return lambda x:generator.phoneNumber()
if name == 'address' : return lambda x:generator.address()
if name == 'city' : return lambda x: generator.city()
if name == 'streetaddress' : return lambda x: generator.streetaddress()
if name in ('postcode','zipcode'): return lambda x: generator.postcode()
if name == 'state' : return lambda x: generator.state()
if name == 'country' : return lambda x: generator.country()
if name == 'title' : return lambda x: generator.sentence()
if name in ('body','summary', 'description'): return lambda x: generator.text() |
def _CreateConfig(self, project_id):
"""Create the boto config to support standalone GSUtil.
Args:
project_id: string, the project ID to use in the config file.
"""
project_id = project_id or self._GetNumericProjectId()
# Our project doesn't support service accounts.
if not project_id:
return
self.boto_config_header %= (
self.boto_config_script, self.boto_config_template)
config = config_manager.ConfigManager(
config_file=self.boto_config_template,
config_header=self.boto_config_header)
boto_dir = os.path.dirname(self.boto_config_script)
config.SetOption('GSUtil', 'default_project_id', project_id)
config.SetOption('GSUtil', 'default_api_version', '2')
config.SetOption('GoogleCompute', 'service_account', 'default')
config.SetOption('Plugin', 'plugin_directory', boto_dir)
config.WriteConfig(config_file=self.boto_config) | Create the boto config to support standalone GSUtil.
Args:
project_id: string, the project ID to use in the config file. | Below is the instruction that describes the task:
### Input:
Create the boto config to support standalone GSUtil.
Args:
project_id: string, the project ID to use in the config file.
### Response:
def _CreateConfig(self, project_id):
"""Create the boto config to support standalone GSUtil.
Args:
project_id: string, the project ID to use in the config file.
"""
project_id = project_id or self._GetNumericProjectId()
# Our project doesn't support service accounts.
if not project_id:
return
self.boto_config_header %= (
self.boto_config_script, self.boto_config_template)
config = config_manager.ConfigManager(
config_file=self.boto_config_template,
config_header=self.boto_config_header)
boto_dir = os.path.dirname(self.boto_config_script)
config.SetOption('GSUtil', 'default_project_id', project_id)
config.SetOption('GSUtil', 'default_api_version', '2')
config.SetOption('GoogleCompute', 'service_account', 'default')
config.SetOption('Plugin', 'plugin_directory', boto_dir)
config.WriteConfig(config_file=self.boto_config) |
def rewind(self, position=0):
"""Set the position of the data buffer cursor to 'position'."""
if position < 0 or position > len(self._data):
raise Exception("Invalid position to rewind cursor to: %s." % position)
self._position = position | Set the position of the data buffer cursor to 'position'. | Below is the instruction that describes the task:
### Input:
Set the position of the data buffer cursor to 'position'.
### Response:
def rewind(self, position=0):
"""Set the position of the data buffer cursor to 'position'."""
if position < 0 or position > len(self._data):
raise Exception("Invalid position to rewind cursor to: %s." % position)
self._position = position |
def calc_path_and_create_folders(folder, import_path):
""" calculate the path and create the needed folders """
file_path = abspath(path_join(folder, import_path[:import_path.rfind(".")].replace(".", folder_seperator) + ".py"))
mkdir_p(dirname(file_path))
return file_path | calculate the path and create the needed folders | Below is the instruction that describes the task:
### Input:
calculate the path and create the needed folders
### Response:
def calc_path_and_create_folders(folder, import_path):
""" calculate the path and create the needed folders """
file_path = abspath(path_join(folder, import_path[:import_path.rfind(".")].replace(".", folder_seperator) + ".py"))
mkdir_p(dirname(file_path))
return file_path |
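A worked example of the path math, assuming folder_seperator is the platform separator and mkdir_p behaves like os.makedirs(..., exist_ok=True).
# "pkg.sub.module.attr" drops its final ".attr" segment via rfind("."), the
# remaining dots become directory separators, and ".py" is appended:
print(calc_path_and_create_folders('out', 'pkg.sub.module.attr'))
# -> <abs>/out/pkg/sub/module.py (parent directories created as a side effect)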
def _ExtractPathSpecsFromDirectory(self, file_entry, depth=0):
"""Extracts path specification from a directory.
Args:
file_entry (dfvfs.FileEntry): file entry that refers to the directory.
depth (Optional[int]): current depth where 0 represents the file system
root.
Yields:
dfvfs.PathSpec: path specification of a file entry found in the directory.
"""
if depth >= self._MAXIMUM_DEPTH:
raise errors.MaximumRecursionDepth('Maximum recursion depth reached.')
# Need to do a breadth-first search otherwise we'll hit the Python
# maximum recursion depth.
sub_directories = []
for sub_file_entry in file_entry.sub_file_entries:
try:
if not sub_file_entry.IsAllocated() or sub_file_entry.IsLink():
continue
except dfvfs_errors.BackEndError as exception:
logger.warning(
'Unable to process file: {0:s} with error: {1!s}'.format(
sub_file_entry.path_spec.comparable.replace(
'\n', ';'), exception))
continue
# For TSK-based file entries only, ignore the virtual /$OrphanFiles
# directory.
if sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK:
if file_entry.IsRoot() and sub_file_entry.name == '$OrphanFiles':
continue
if sub_file_entry.IsDirectory():
sub_directories.append(sub_file_entry)
elif sub_file_entry.IsFile():
# If we are dealing with a VSS we want to calculate a hash
# value based on available timestamps and compare that to previously
# calculated hash values, and only include the file into the queue if
# the hash does not match.
if self._duplicate_file_check:
hash_value = self._CalculateNTFSTimeHash(sub_file_entry)
inode = getattr(sub_file_entry.path_spec, 'inode', 0)
if inode in self._hashlist:
if hash_value in self._hashlist[inode]:
continue
self._hashlist.setdefault(inode, []).append(hash_value)
for path_spec in self._ExtractPathSpecsFromFile(sub_file_entry):
yield path_spec
for sub_file_entry in sub_directories:
try:
for path_spec in self._ExtractPathSpecsFromDirectory(
sub_file_entry, depth=(depth + 1)):
yield path_spec
except (
IOError, dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
dfvfs_errors.PathSpecError) as exception:
logger.warning('{0!s}'.format(exception)) | Extracts path specifications from a directory.
Args:
file_entry (dfvfs.FileEntry): file entry that refers to the directory.
depth (Optional[int]): current depth where 0 represents the file system
root.
Yields:
dfvfs.PathSpec: path specification of a file entry found in the directory. | Below is the instruction that describes the task:
### Input:
Extracts path specifications from a directory.
Args:
file_entry (dfvfs.FileEntry): file entry that refers to the directory.
depth (Optional[int]): current depth where 0 represents the file system
root.
Yields:
dfvfs.PathSpec: path specification of a file entry found in the directory.
### Response:
def _ExtractPathSpecsFromDirectory(self, file_entry, depth=0):
"""Extracts path specification from a directory.
Args:
file_entry (dfvfs.FileEntry): file entry that refers to the directory.
depth (Optional[int]): current depth where 0 represents the file system
root.
Yields:
dfvfs.PathSpec: path specification of a file entry found in the directory.
"""
if depth >= self._MAXIMUM_DEPTH:
raise errors.MaximumRecursionDepth('Maximum recursion depth reached.')
# Need to do a breadth-first search otherwise we'll hit the Python
# maximum recursion depth.
sub_directories = []
for sub_file_entry in file_entry.sub_file_entries:
try:
if not sub_file_entry.IsAllocated() or sub_file_entry.IsLink():
continue
except dfvfs_errors.BackEndError as exception:
logger.warning(
'Unable to process file: {0:s} with error: {1!s}'.format(
sub_file_entry.path_spec.comparable.replace(
'\n', ';'), exception))
continue
# For TSK-based file entries only, ignore the virtual /$OrphanFiles
# directory.
if sub_file_entry.type_indicator == dfvfs_definitions.TYPE_INDICATOR_TSK:
if file_entry.IsRoot() and sub_file_entry.name == '$OrphanFiles':
continue
if sub_file_entry.IsDirectory():
sub_directories.append(sub_file_entry)
elif sub_file_entry.IsFile():
# If we are dealing with a VSS we want to calculate a hash
# value based on available timestamps and compare that to previously
# calculated hash values, and only include the file into the queue if
# the hash does not match.
if self._duplicate_file_check:
hash_value = self._CalculateNTFSTimeHash(sub_file_entry)
inode = getattr(sub_file_entry.path_spec, 'inode', 0)
if inode in self._hashlist:
if hash_value in self._hashlist[inode]:
continue
self._hashlist.setdefault(inode, []).append(hash_value)
for path_spec in self._ExtractPathSpecsFromFile(sub_file_entry):
yield path_spec
for sub_file_entry in sub_directories:
try:
for path_spec in self._ExtractPathSpecsFromDirectory(
sub_file_entry, depth=(depth + 1)):
yield path_spec
except (
IOError, dfvfs_errors.AccessError, dfvfs_errors.BackEndError,
dfvfs_errors.PathSpecError) as exception:
logger.warning('{0!s}'.format(exception)) |
def service_endpoint_policy_definitions(self):
"""Instance depends on the API version:
* 2018-07-01: :class:`ServiceEndpointPolicyDefinitionsOperations<azure.mgmt.network.v2018_07_01.operations.ServiceEndpointPolicyDefinitionsOperations>`
* 2018-08-01: :class:`ServiceEndpointPolicyDefinitionsOperations<azure.mgmt.network.v2018_08_01.operations.ServiceEndpointPolicyDefinitionsOperations>`
"""
api_version = self._get_api_version('service_endpoint_policy_definitions')
if api_version == '2018-07-01':
from .v2018_07_01.operations import ServiceEndpointPolicyDefinitionsOperations as OperationClass
elif api_version == '2018-08-01':
from .v2018_08_01.operations import ServiceEndpointPolicyDefinitionsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) | Instance depends on the API version:
* 2018-07-01: :class:`ServiceEndpointPolicyDefinitionsOperations<azure.mgmt.network.v2018_07_01.operations.ServiceEndpointPolicyDefinitionsOperations>`
* 2018-08-01: :class:`ServiceEndpointPolicyDefinitionsOperations<azure.mgmt.network.v2018_08_01.operations.ServiceEndpointPolicyDefinitionsOperations>` | Below is the instruction that describes the task:
### Input:
Instance depends on the API version:
* 2018-07-01: :class:`ServiceEndpointPolicyDefinitionsOperations<azure.mgmt.network.v2018_07_01.operations.ServiceEndpointPolicyDefinitionsOperations>`
* 2018-08-01: :class:`ServiceEndpointPolicyDefinitionsOperations<azure.mgmt.network.v2018_08_01.operations.ServiceEndpointPolicyDefinitionsOperations>`
### Response:
def service_endpoint_policy_definitions(self):
"""Instance depends on the API version:
* 2018-07-01: :class:`ServiceEndpointPolicyDefinitionsOperations<azure.mgmt.network.v2018_07_01.operations.ServiceEndpointPolicyDefinitionsOperations>`
* 2018-08-01: :class:`ServiceEndpointPolicyDefinitionsOperations<azure.mgmt.network.v2018_08_01.operations.ServiceEndpointPolicyDefinitionsOperations>`
"""
api_version = self._get_api_version('service_endpoint_policy_definitions')
if api_version == '2018-07-01':
from .v2018_07_01.operations import ServiceEndpointPolicyDefinitionsOperations as OperationClass
elif api_version == '2018-08-01':
from .v2018_08_01.operations import ServiceEndpointPolicyDefinitionsOperations as OperationClass
else:
raise NotImplementedError("APIVersion {} is not available".format(api_version))
return OperationClass(self._client, self.config, Serializer(self._models_dict(api_version)), Deserializer(self._models_dict(api_version))) |
def poke_8(library, session, address, data):
"""Write an 8-bit value from the specified address.
Corresponds to viPoke8 function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param address: Destination address to write the value.
:param data: value to be written to the bus.
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode`
"""
return library.viPoke8(session, address, data) | Write an 8-bit value to the specified address.
Corresponds to viPoke8 function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param address: Destination address to write the value.
:param data: value to be written to the bus.
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode` | Below is the instruction that describes the task:
### Input:
Write an 8-bit value to the specified address.
Corresponds to viPoke8 function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param address: Destination address to write the value.
:param data: value to be written to the bus.
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode`
### Response:
def poke_8(library, session, address, data):
"""Write an 8-bit value from the specified address.
Corresponds to viPoke8 function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param address: Destination address to write the value.
:param data: value to be written to the bus.
:return: return value of the library call.
:rtype: :class:`pyvisa.constants.StatusCode`
"""
return library.viPoke8(session, address, data) |
def pop_value(instance, path, ref=None):
"""
Pop the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
"""
head, tail = os.path.split(abspath(path, ref))
instance = get_value(instance, head)
if isinstance(instance, list):
tail = int(tail)
return instance.pop(tail) | Pop the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance` | Below is the instruction that describes the task:
### Input:
Pop the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
### Response:
def pop_value(instance, path, ref=None):
"""
Pop the value from `instance` at the given `path`.
Parameters
----------
instance : dict or list
instance from which to retrieve a value
path : str
path to retrieve a value from
ref : str or None
reference path if `path` is relative
Returns
-------
value :
value at `path` in `instance`
"""
head, tail = os.path.split(abspath(path, ref))
instance = get_value(instance, head)
if isinstance(instance, list):
tail = int(tail)
return instance.pop(tail) |
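A semantics sketch, assuming the module's get_value() and abspath() helpers are available so paths resolve like filesystem paths.
doc = {'spam': {'eggs': [10, 20, 30]}}
print(pop_value(doc, '/spam/eggs/1'))  # 20: tail '1' is coerced to a list index
print(doc)                             # {'spam': {'eggs': [10, 30]}}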
def get_monitor_physical_size(monitor):
"""
Returns the physical size of the monitor.
Wrapper for:
void glfwGetMonitorPhysicalSize(GLFWmonitor* monitor, int* width, int* height);
"""
width_value = ctypes.c_int(0)
width = ctypes.pointer(width_value)
height_value = ctypes.c_int(0)
height = ctypes.pointer(height_value)
_glfw.glfwGetMonitorPhysicalSize(monitor, width, height)
return width_value.value, height_value.value | Returns the physical size of the monitor.
Wrapper for:
void glfwGetMonitorPhysicalSize(GLFWmonitor* monitor, int* width, int* height); | Below is the instruction that describes the task:
### Input:
Returns the physical size of the monitor.
Wrapper for:
void glfwGetMonitorPhysicalSize(GLFWmonitor* monitor, int* width, int* height);
### Response:
def get_monitor_physical_size(monitor):
"""
Returns the physical size of the monitor.
Wrapper for:
void glfwGetMonitorPhysicalSize(GLFWmonitor* monitor, int* width, int* height);
"""
width_value = ctypes.c_int(0)
width = ctypes.pointer(width_value)
height_value = ctypes.c_int(0)
height = ctypes.pointer(height_value)
_glfw.glfwGetMonitorPhysicalSize(monitor, width, height)
return width_value.value, height_value.value |
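A hedged usage sketch in the pyGLFW style this wrapper belongs to; init(), get_primary_monitor() and terminate() are assumed to come from the same module.
if init():
    width_mm, height_mm = get_monitor_physical_size(get_primary_monitor())
    print('primary monitor: {} x {} mm'.format(width_mm, height_mm))
    terminate()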
def build(subparsers):
"""
Build source packages.
The mp build program runs all of the resources listed in a Metatab file and
produces one or more Metapack packages with those resources localized. It
will always try to produce a Filesystem package, and may optionally produce
Excel, Zip and CSV packages.
Typical usage is to be run inside a source package directory with
.. code-block:: bash
$ mp build
To build all of the package types:
.. code-block:: bash
$ mp build -fezc
By default, packages are built with versioned names. The
:option:`--nonversion-name` option will create file packages with
non-versioned name, and the :option:`--nonversioned-link` option will
produce a non-versioned soft link pointing to the versioned file.
"""
parser = subparsers.add_parser(
'build',
help='Build derived packages',
description=build.__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='')
parser.set_defaults(run_command=run_metapack)
parser.add_argument('metatabfile', nargs='?',
help="Path or URL to a metatab file. If not provided, defaults to 'metadata.csv'. "
)
parser.add_argument('-p', '--profile', help="Name of a BOTO or AWS credentails profile", required=False)
parser.add_argument('-D', '--package-directory', help="Write Zip, Excel and CSV packages to an alternate directory",
required=False)
parser.add_argument('-F', '--force', action='store_true', default=False,
help='Force some operations, like updating the name and building packages')
parser.add_argument('-R', '--reuse-resources', action='store_true', default=False,
help='When building Filesystem package, try to reuse resources built in prior build')
group = parser.add_mutually_exclusive_group()
group.add_argument('-n', '--nonversion-name', action='store_true', default=False,
help='Write file packages with non-versioned names')
group.add_argument('-N', '--nonversion-link', action='store_true', default=False,
help='Create links with nonversioned names to file packages')
parser.set_defaults(handler=None)
##
## Derived Package Group
derived_group = parser.add_argument_group('Derived Packages', 'Generate other types of packages')
derived_group.add_argument('-e', '--excel', action='store_true', default=False,
help='Create an excel archive from a metatab file')
derived_group.add_argument('-z', '--zip', action='store_true', default=False,
help='Create a zip archive from a metatab file')
derived_group.add_argument('-f', '--filesystem', action='store_true', default=False,
help='Create a filesystem archive from a metatab file')
derived_group.add_argument('-c', '--csv', action='store_true', default=False,
help='Create a CSV archive from a metatab file')
##
## Administration Group
admin_group = parser.add_argument_group('Administration', 'Information and administration')
admin_group.add_argument('--clean-cache', default=False, action='store_true',
help="Clean the download cache")
admin_group.add_argument('-C', '--clean', default=False, action='store_true',
help="For some operations, like updating schemas, clear the section of existing terms first") | Build source packages.
The mp build program runs all of the resources listed in a Metatab file and
produces one or more Metapack packages with those resources localized. It
will always try to produce a Filesystem package, and may optionally produce
Excel, Zip and CSV packages.
Typical usage is to be run inside a source package directory with
.. code-block:: bash
$ mp build
To build all of the package types:
.. code-block:: bash
$ mp build -fezc
By default, packages are built with versioned names. The
:option:`--nonversion-name` option will create file packages with
non-versioned names, and the :option:`--nonversion-link` option will
produce a non-versioned soft link pointing to the versioned file. | Below is the instruction that describes the task:
### Input:
Build source packages.
The mp build program runs all of the resources listed in a Metatab file and
produces one or more Metapack packages with those resources localized. It
will always try to produce a Filesystem package, and may optionally produce
Excel, Zip and CSV packages.
Typical usage is to be run inside a source package directory with
.. code-block:: bash
$ mp build
To build all of the package types:
.. code-block:: bash
$ mp build -fezc
By default, packages are built with versioned names. The
:option:`--nonversion-name` option will create file packages with
non-versioned names, and the :option:`--nonversion-link` option will
produce a non-versioned soft link pointing to the versioned file.
### Response:
def build(subparsers):
"""
Build source packages.
The mp build program runs all of the resources listed in a Metatab file and
produces one or more Metapack packages with those resources localized. It
will always try to produce a Filesystem package, and may optionally produce
Excel, Zip and CSV packages.
Typical usage is to be run inside a source package directory with
.. code-block:: bash
$ mp build
To build all of the package types:
.. code-block:: bash
$ mp build -fezc
By default, packages are built with versioned names. The
:option:`--nonversion-name` option will create file packages with
non-versioned names, and the :option:`--nonversion-link` option will
produce a non-versioned soft link pointing to the versioned file.
"""
parser = subparsers.add_parser(
'build',
help='Build derived packages',
description=build.__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog='')
parser.set_defaults(run_command=run_metapack)
parser.add_argument('metatabfile', nargs='?',
help="Path or URL to a metatab file. If not provided, defaults to 'metadata.csv'. "
)
parser.add_argument('-p', '--profile', help="Name of a BOTO or AWS credentails profile", required=False)
parser.add_argument('-D', '--package-directory', help="Write Zip, Excel and CSV packages to an alternate directory",
required=False)
parser.add_argument('-F', '--force', action='store_true', default=False,
help='Force some operations, like updating the name and building packages')
parser.add_argument('-R', '--reuse-resources', action='store_true', default=False,
help='When building Filesystem package, try to reuse resources built in prior build')
group = parser.add_mutually_exclusive_group()
group.add_argument('-n', '--nonversion-name', action='store_true', default=False,
help='Write file packages with non-versioned names')
group.add_argument('-N', '--nonversion-link', action='store_true', default=False,
help='Create links with nonversioned names to file packages')
parser.set_defaults(handler=None)
##
## Derived Package Group
derived_group = parser.add_argument_group('Derived Packages', 'Generate other types of packages')
derived_group.add_argument('-e', '--excel', action='store_true', default=False,
help='Create an excel archive from a metatab file')
derived_group.add_argument('-z', '--zip', action='store_true', default=False,
help='Create a zip archive from a metatab file')
derived_group.add_argument('-f', '--filesystem', action='store_true', default=False,
help='Create a filesystem archive from a metatab file')
derived_group.add_argument('-c', '--csv', action='store_true', default=False,
help='Create a CSV archive from a metatab file')
##
## Administration Group
admin_group = parser.add_argument_group('Administration', 'Information and administration')
admin_group.add_argument('--clean-cache', default=False, action='store_true',
help="Clean the download cache")
admin_group.add_argument('-C', '--clean', default=False, action='store_true',
help="For some operations, like updating schemas, clear the section of existing terms first") |
def update(self):
"""Update current date/time."""
# Had to convert it to string because datetime is not JSON serializable
self.stats = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# Add the time zone (issue #1249 and issue #1337)
if hasattr(localtime(), 'tm_zone'):
self.stats += ' {}'.format(localtime().tm_zone)
elif len(tzname) > 0:
self.stats += ' {}'.format(tzname[1])
return self.stats | Update current date/time. | Below is the instruction that describes the task:
### Input:
Update current date/time.
### Response:
def update(self):
"""Update current date/time."""
# Had to convert it to string because datetime is not JSON serializable
self.stats = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
# Add the time zone (issue #1249 and issue #1337)
if hasattr(localtime(), 'tm_zone'):
self.stats += ' {}'.format(localtime().tm_zone)
elif len(tzname) > 0:
self.stats += ' {}'.format(tzname[1])
return self.stats |
def _iter_year_month(year_info):
""" Iter the month days in a lunar year.
"""
# info => month, days, leap
leap_month, leap_days = _parse_leap(year_info)
months = [(i, 0) for i in range(1, 13)]
if leap_month > 0:
months.insert(leap_month, (leap_month, 1))
for month, leap in months:
if leap:
days = leap_days
else:
days = (year_info >> (16 - month)) % 2 + 29
yield month, days, leap | Iter the month days in a lunar year. | Below is the instruction that describes the task:
### Input:
Iter the month days in a lunar year.
### Response:
def _iter_year_month(year_info):
""" Iter the month days in a lunar year.
"""
# info => month, days, leap
leap_month, leap_days = _parse_leap(year_info)
months = [(i, 0) for i in range(1, 13)]
if leap_month > 0:
months.insert(leap_month, (leap_month, 1))
for month, leap in months:
if leap:
days = leap_days
else:
days = (year_info >> (16 - month)) % 2 + 29
yield month, days, leap |
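A decoding sketch for the bit layout the generator relies on: bits 15..4 of year_info flag the 30-day months 1..12, while _parse_leap() (stubbed here because its real decoding is not shown) supplies the leap month and its length.
def _parse_leap(year_info):      # stub standing in for the real decoder
    return 0, 0                  # pretend: no leap month this year
year_info = 0b0101011010100000   # hypothetical month-length flags
for month, days, leap in _iter_year_month(year_info):
    print(month, days, leap)     # (1, 29, 0), (2, 30, 0), ...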
def _prnt_min_max_val(var, text, verb):
r"""Print variable; if more than three, just min/max, unless verb > 3."""
if var.size > 3:
print(text, _strvar(var.min()), "-", _strvar(var.max()),
":", _strvar(var.size), " [min-max; #]")
if verb > 3:
print(" : ", _strvar(var))
else:
print(text, _strvar(np.atleast_1d(var))) | r"""Print variable; if more than three, just min/max, unless verb > 3. | Below is the the instruction that describes the task:
### Input:
r"""Print variable; if more than three, just min/max, unless verb > 3.
### Response:
def _prnt_min_max_val(var, text, verb):
r"""Print variable; if more than three, just min/max, unless verb > 3."""
if var.size > 3:
print(text, _strvar(var.min()), "-", _strvar(var.max()),
":", _strvar(var.size), " [min-max; #]")
if verb > 3:
print(" : ", _strvar(var))
else:
print(text, _strvar(np.atleast_1d(var))) |
def results_tc(self, key, value):
"""Write data to results_tc file in TcEX specified directory.
The TcEx platform support persistent values between executions of the App. This
method will store the values for TC to read and put into the Database.
Args:
key (string): The data key to be stored.
value (string): The data value to be stored.
"""
if os.access(self.default_args.tc_out_path, os.W_OK):
results_file = '{}/results.tc'.format(self.default_args.tc_out_path)
else:
results_file = 'results.tc'
new = True
open(results_file, 'a').close() # ensure file exists
with open(results_file, 'r+') as fh:
results = ''
for line in fh.read().strip().split('\n'):
if not line:
continue
try:
k, v = line.split(' = ')
except ValueError:
# handle null/empty value (e.g., "name =")
k, v = line.split(' =')
if k == key:
v = value
new = False
if v is not None:
results += '{} = {}\n'.format(k, v)
if new and value is not None: # indicates the key/value pair didn't already exist
results += '{} = {}\n'.format(key, value)
fh.seek(0)
fh.write(results)
fh.truncate() | Write data to results_tc file in TcEX specified directory.
The TcEx platform support persistent values between executions of the App. This
method will store the values for TC to read and put into the Database.
Args:
key (string): The data key to be stored.
value (string): The data value to be stored. | Below is the instruction that describes the task:
### Input:
Write data to results_tc file in TcEX specified directory.
The TcEx platform support persistent values between executions of the App. This
method will store the values for TC to read and put into the Database.
Args:
key (string): The data key to be stored.
value (string): The data value to be stored.
### Response:
def results_tc(self, key, value):
"""Write data to results_tc file in TcEX specified directory.
The TcEx platform support persistent values between executions of the App. This
method will store the values for TC to read and put into the Database.
Args:
key (string): The data key to be stored.
value (string): The data value to be stored.
"""
if os.access(self.default_args.tc_out_path, os.W_OK):
results_file = '{}/results.tc'.format(self.default_args.tc_out_path)
else:
results_file = 'results.tc'
new = True
open(results_file, 'a').close() # ensure file exists
with open(results_file, 'r+') as fh:
results = ''
for line in fh.read().strip().split('\n'):
if not line:
continue
try:
k, v = line.split(' = ')
except ValueError:
# handle null/empty value (e.g., "name =")
k, v = line.split(' =')
if k == key:
v = value
new = False
if v is not None:
results += '{} = {}\n'.format(k, v)
if new and value is not None: # indicates the key/value pair didn't already exist
results += '{} = {}\n'.format(key, value)
fh.seek(0)
fh.write(results)
fh.truncate() |
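A hedged usage sketch in which tcex stands in for an initialized app instance; it shows the "key = value" persistence format the method maintains.
tcex.results_tc('count', '42')  # appends "count = 42" to results.tc
tcex.results_tc('count', '43')  # rewrites the existing line in place
tcex.results_tc('note', None)   # None values are dropped from the file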
def ge(self, value):
"""Construct a greater than or equal to (``>=``) filter.
:param value: Filter value
:return: :class:`filters.Field <filters.Field>` object
:rtype: filters.Field
"""
self.op = '>='
self.negate_op = '<'
self.value = self._value(value)
return self | Construct a greater than or equal to (``>=``) filter.
:param value: Filter value
:return: :class:`filters.Field <filters.Field>` object
:rtype: filters.Field | Below is the instruction that describes the task:
### Input:
Construct a greater than or equal to (``>=``) filter.
:param value: Filter value
:return: :class:`filters.Field <filters.Field>` object
:rtype: filters.Field
### Response:
def ge(self, value):
"""Construct a greater than or equal to (``>=``) filter.
:param value: Filter value
:return: :class:`filters.Field <filters.Field>` object
:rtype: filters.Field
"""
self.op = '>='
self.negate_op = '<'
self.value = self._value(value)
return self |
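A hedged usage sketch, assuming Field('age') constructs one of these filter fields.
f = Field('age').ge(21)
print(f.op, f.negate_op, f.value)  # >= < 21; negate_op is kept for inversion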