code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def create_provenance(dataset, software_versions=None, db_url=None):
"""Create (or get if already exists) a provenance entity, store it in the database and get back a provenance ID.
Arguments:
:param dataset: Name of the data set.
:param software_versions: (optional) Version of the software components used to get the data. It is a dictionary
that accepts the following fields:
- matlab_version
- spm_version
- spm_revision
- fn_called
- fn_version
- others
:param db_url: (optional) Database URL. If not defined, it looks for an Airflow configuration file.
:return: Provenance ID.
"""
logging.info("Connecting to database...")
db_conn = connection.Connection(db_url)
try:
matlab_version = software_versions['matlab_version']
except (KeyError, TypeError):
matlab_version = None
try:
spm_version = software_versions['spm_version']
except (KeyError, TypeError):
spm_version = None
try:
spm_revision = software_versions['spm_revision']
except (KeyError, TypeError):
spm_revision = None
try:
fn_called = software_versions['fn_called']
except (KeyError, TypeError):
fn_called = None
try:
fn_version = software_versions['fn_version']
except (KeyError, TypeError):
fn_version = None
try:
others = software_versions['others']
except (KeyError, TypeError):
others = None
provenance = db_conn.db_session.query(db_conn.Provenance).filter_by(
dataset=dataset, matlab_version=matlab_version, spm_version=spm_version, spm_revision=spm_revision,
fn_called=fn_called, fn_version=fn_version, others=others
).first()
if not provenance:
provenance = db_conn.Provenance(
dataset=dataset, matlab_version=matlab_version, spm_version=spm_version, spm_revision=spm_revision,
fn_called=fn_called, fn_version=fn_version, others=others
)
db_conn.db_session.merge(provenance)
db_conn.db_session.commit()
provenance = db_conn.db_session.query(db_conn.Provenance).filter_by(
dataset=dataset, matlab_version=matlab_version, spm_version=spm_version, spm_revision=spm_revision,
fn_called=fn_called, fn_version=fn_version, others=others
).first()
provenance_id = provenance.id
logging.info("Closing database connection...")
db_conn.close()
return provenance_id | Create (or get if already exists) a provenance entity, store it in the database and get back a provenance ID.
Arguments:
:param dataset: Name of the data set.
:param software_versions: (optional) Version of the software components used to get the data. It is a dictionary
that accepts the following fields:
- matlab_version
- spm_version
- spm_revision
- fn_called
- fn_version
- others
:param db_url: (optional) Database URL. If not defined, it looks for an Airflow configuration file.
:return: Provenance ID. | Below is the instruction that describes the task:
### Input:
Create (or get if already exists) a provenance entity, store it in the database and get back a provenance ID.
Arguments:
:param dataset: Name of the data set.
:param software_versions: (optional) Version of the software components used to get the data. It is a dictionary
that accepts the following fields:
- matlab_version
- spm_version
- spm_revision
- fn_called
- fn_version
- others
:param db_url: (optional) Database URL. If not defined, it looks for an Airflow configuration file.
:return: Provenance ID.
### Response:
def create_provenance(dataset, software_versions=None, db_url=None):
"""Create (or get if already exists) a provenance entity, store it in the database and get back a provenance ID.
Arguments:
:param dataset: Name of the data set.
:param software_versions: (optional) Version of the software components used to get the data. It is a dictionary
that accepts the following fields:
- matlab_version
- spm_version
- spm_revision
- fn_called
- fn_version
- others
:param db_url: (optional) Database URL. If not defined, it looks for an Airflow configuration file.
:return: Provenance ID.
"""
logging.info("Connecting to database...")
db_conn = connection.Connection(db_url)
try:
matlab_version = software_versions['matlab_version']
except (KeyError, TypeError):
matlab_version = None
try:
spm_version = software_versions['spm_version']
except (KeyError, TypeError):
spm_version = None
try:
spm_revision = software_versions['spm_revision']
except (KeyError, TypeError):
spm_revision = None
try:
fn_called = software_versions['fn_called']
except (KeyError, TypeError):
fn_called = None
try:
fn_version = software_versions['fn_version']
except (KeyError, TypeError):
fn_version = None
try:
others = software_versions['others']
except (KeyError, TypeError):
others = None
provenance = db_conn.db_session.query(db_conn.Provenance).filter_by(
dataset=dataset, matlab_version=matlab_version, spm_version=spm_version, spm_revision=spm_revision,
fn_called=fn_called, fn_version=fn_version, others=others
).first()
if not provenance:
provenance = db_conn.Provenance(
dataset=dataset, matlab_version=matlab_version, spm_version=spm_version, spm_revision=spm_revision,
fn_called=fn_called, fn_version=fn_version, others=others
)
db_conn.db_session.merge(provenance)
db_conn.db_session.commit()
provenance = db_conn.db_session.query(db_conn.Provenance).filter_by(
dataset=dataset, matlab_version=matlab_version, spm_version=spm_version, spm_revision=spm_revision,
fn_called=fn_called, fn_version=fn_version, others=others
).first()
provenance_id = provenance.id
logging.info("Closing database connection...")
db_conn.close()
return provenance_id |
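For illustration only, a minimal calling sketch for create_provenance follows; the dataset name, version strings, and database URL are hypothetical, and the connection module used inside the function is assumed to be configured in the caller's environment.
# Hypothetical usage sketch -- all names and the db_url are made up for illustration.
software_versions = {
    "matlab_version": "R2018b",
    "spm_version": "SPM12",
    "spm_revision": "r7487",
    "fn_called": "spm_preproc_run",
    "fn_version": "1.0",
    "others": "pipeline=mri_preprocessing",
}
provenance_id = create_provenance(
    "example_dataset",
    software_versions=software_versions,
    db_url="postgresql://user:password@localhost/provenance_db",
)
print(provenance_id)  # ID of the new or pre-existing provenance row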
def expect_optional_keyword(lexer: Lexer, value: str) -> Optional[Token]:
"""Expect the next token optionally to be a given keyword.
If the next token is a given keyword, return that token after advancing the lexer.
Otherwise, do not change the parser state and return None.
"""
token = lexer.token
if token.kind == TokenKind.NAME and token.value == value:
lexer.advance()
return token
return None | Expect the next token optionally to be a given keyword.
If the next token is a given keyword, return that token after advancing the lexer.
Otherwise, do not change the parser state and return None. | Below is the instruction that describes the task:
### Input:
Expect the next token optionally to be a given keyword.
If the next token is a given keyword, return that token after advancing the lexer.
Otherwise, do not change the parser state and return None.
### Response:
def expect_optional_keyword(lexer: Lexer, value: str) -> Optional[Token]:
"""Expect the next token optionally to be a given keyword.
If the next token is a given keyword, return that token after advancing the lexer.
Otherwise, do not change the parser state and return None.
"""
token = lexer.token
if token.kind == TokenKind.NAME and token.value == value:
lexer.advance()
return token
return None |
def Laliberte_density(T, ws, CASRNs):
r'''Calculate the density of an aqueous electrolyte mixture using the form proposed by [1]_.
Parameters are loaded by the function as needed. Units are Kelvin and kg/m^3.
.. math::
\rho_m = \left(\frac{w_w}{\rho_w} + \sum_i \frac{w_i}{\rho_{app_i}}\right)^{-1}
Parameters
----------
T : float
Temperature of fluid [K]
ws : array
Weight fractions of fluid components other than water
CASRNs : array
CAS numbers of the fluid components other than water
Returns
-------
rho_i : float
Solution density, [kg/m^3]
Notes
-----
Temperature range check is not used here.
Examples
--------
>>> Laliberte_density(273.15, [0.0037838838], ['7647-14-5'])
1002.6250120185854
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
'''
rho_w = Laliberte_density_w(T)
w_w = 1 - sum(ws)
rho = w_w/rho_w
for i in range(len(CASRNs)):
d = _Laliberte_Density_ParametersDict[CASRNs[i]]
rho_i = Laliberte_density_i(T, w_w, d["C0"], d["C1"], d["C2"], d["C3"], d["C4"])
rho = rho + ws[i]/rho_i
return 1./rho | r'''Calculate the density of an aqueous electrolyte mixture using the form proposed by [1]_.
Parameters are loaded by the function as needed. Units are Kelvin and kg/m^3.
.. math::
\rho_m = \left(\frac{w_w}{\rho_w} + \sum_i \frac{w_i}{\rho_{app_i}}\right)^{-1}
Parameters
----------
T : float
Temperature of fluid [K]
ws : array
Weight fractions of fluid components other than water
CASRNs : array
CAS numbers of the fluid components other than water
Returns
-------
rho_i : float
Solution density, [kg/m^3]
Notes
-----
Temperature range check is not used here.
Examples
--------
>>> Laliberte_density(273.15, [0.0037838838], ['7647-14-5'])
1002.6250120185854
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123 | Below is the instruction that describes the task:
### Input:
r'''Calculate the density of an aqueous electrolyte mixture using the form proposed by [1]_.
Parameters are loaded by the function as needed. Units are Kelvin and kg/m^3.
.. math::
\rho_m = \left(\frac{w_w}{\rho_w} + \sum_i \frac{w_i}{\rho_{app_i}}\right)^{-1}
Parameters
----------
T : float
Temperature of fluid [K]
ws : array
Weight fractions of fluid components other than water
CASRNs : array
CAS numbers of the fluid components other than water
Returns
-------
rho_i : float
Solution density, [kg/m^3]
Notes
-----
Temperature range check is not used here.
Examples
--------
>>> Laliberte_density(273.15, [0.0037838838], ['7647-14-5'])
1002.6250120185854
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
### Response:
def Laliberte_density(T, ws, CASRNs):
r'''Calculate the density of an aqueous electrolyte mixture using the form proposed by [1]_.
Parameters are loaded by the function as needed. Units are Kelvin and kg/m^3.
.. math::
\rho_m = \left(\frac{w_w}{\rho_w} + \sum_i \frac{w_i}{\rho_{app_i}}\right)^{-1}
Parameters
----------
T : float
Temperature of fluid [K]
ws : array
Weight fractions of fluid components other than water
CASRNs : array
CAS numbers of the fluid components other than water
Returns
-------
rho_i : float
Solution density, [kg/m^3]
Notes
-----
Temperature range check is not used here.
Examples
--------
>>> Laliberte_density(273.15, [0.0037838838], ['7647-14-5'])
1002.6250120185854
References
----------
.. [1] Laliberte, Marc. "A Model for Calculating the Heat Capacity of
Aqueous Solutions, with Updated Density and Viscosity Data." Journal of
Chemical & Engineering Data 54, no. 6 (June 11, 2009): 1725-60.
doi:10.1021/je8008123
'''
rho_w = Laliberte_density_w(T)
w_w = 1 - sum(ws)
rho = w_w/rho_w
for i in range(len(CASRNs)):
d = _Laliberte_Density_ParametersDict[CASRNs[i]]
rho_i = Laliberte_density_i(T, w_w, d["C0"], d["C1"], d["C2"], d["C3"], d["C4"])
rho = rho + ws[i]/rho_i
return 1./rho |
def create_span(unirange, is_bytes=False):
"""Clamp the Unicode range."""
if len(unirange) < 2:
unirange.append(unirange[0])
if is_bytes:
if unirange[0] > MAXASCII:
return None
if unirange[1] > MAXASCII:
unirange[1] = MAXASCII
return [x for x in range(unirange[0], unirange[1] + 1)] | Clamp the Unicode range. | Below is the instruction that describes the task:
### Input:
Clamp the Unicode range.
### Response:
def create_span(unirange, is_bytes=False):
"""Clamp the Unicode range."""
if len(unirange) < 2:
unirange.append(unirange[0])
if is_bytes:
if unirange[0] > MAXASCII:
return None
if unirange[1] > MAXASCII:
unirange[1] = MAXASCII
return [x for x in range(unirange[0], unirange[1] + 1)] |
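A short usage sketch for create_span; it assumes MAXASCII is defined as 0x7F in the surrounding module, which is what the byte-mode clamp implies.
print(create_span([0x61, 0x66]))                    # [97, 98, 99, 100, 101, 102]
print(create_span([0x41]))                          # single endpoint is duplicated: [65]
print(create_span([0x70, 0x2603], is_bytes=True))   # upper bound clamped to MAXASCII (assumed 0x7F)
print(create_span([0x100, 0x200], is_bytes=True))   # None: lower bound already beyond ASCII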
def destroy_session(self, session_id):
"""
Destroy an (existing) session.
"""
try:
del self.sessions[session_id]
except KeyError:
pass
# Invoke hooks
invoke_hooks(self.hooks, "session_destroyed", session_id) | Destroy an (existing) session. | Below is the instruction that describes the task:
### Input:
Destroy an (existing) session.
### Response:
def destroy_session(self, session_id):
"""
Destroy an (existing) session.
"""
try:
del self.sessions[session_id]
except KeyError:
pass
# Invoke hooks
invoke_hooks(self.hooks, "session_destroyed", session_id) |
def _write_values(kwargs, variables):
"""Write values of kwargs and return thus-satisfied closures."""
writeto = []
for var_name, value in kwargs.items():
var = variables[var_name]
var.notify_will_write()
var.write(value)
writeto.append(var)
return _notify_reader_writes(writeto) | Write values of kwargs and return thus-satisfied closures. | Below is the instruction that describes the task:
### Input:
Write values of kwargs and return thus-satisfied closures.
### Response:
def _write_values(kwargs, variables):
"""Write values of kwargs and return thus-satisfied closures."""
writeto = []
for var_name, value in kwargs.items():
var = variables[var_name]
var.notify_will_write()
var.write(value)
writeto.append(var)
return _notify_reader_writes(writeto) |
def set_int_param(params, name, value, min=None, max=None):
"""
Set a int parameter if applicable.
:param dict params: A dict containing API call parameters.
:param str name: The name of the parameter to set.
:param int value:
The value of the parameter. If ``None``, the field will not be set. If
an instance of a numeric type or a string that can be turned into a
``int``, the relevant field will be set. Any other value will raise a
`ValueError`.
:param int min:
If provided, values less than this will raise ``ValueError``.
:param int max:
If provided, values greater than this will raise ``ValueError``.
:returns: ``None``
"""
if value is None:
return
try:
value = int(str(value))
except (TypeError, ValueError):
raise ValueError(
"Parameter '%s' must be an integer (or a string representation of"
" an integer) or None, got %r." % (name, value))
if min is not None and value < min:
raise ValueError(
"Parameter '%s' must not be less than %r, got %r." % (
name, min, value))
if max is not None and value > max:
raise ValueError(
"Parameter '%s' must not be greater than %r, got %r." % (
name, max, value))
params[name] = str(value) | Set a int parameter if applicable.
:param dict params: A dict containing API call parameters.
:param str name: The name of the parameter to set.
:param int value:
The value of the parameter. If ``None``, the field will not be set. If
an instance of a numeric type or a string that can be turned into a
``int``, the relevant field will be set. Any other value will raise a
`ValueError`.
:param int min:
If provided, values less than this will raise ``ValueError``.
:param int max:
If provided, values greater than this will raise ``ValueError``.
:returns: ``None`` | Below is the instruction that describes the task:
### Input:
Set a int parameter if applicable.
:param dict params: A dict containing API call parameters.
:param str name: The name of the parameter to set.
:param int value:
The value of the parameter. If ``None``, the field will not be set. If
an instance of a numeric type or a string that can be turned into a
``int``, the relevant field will be set. Any other value will raise a
`ValueError`.
:param int min:
If provided, values less than this will raise ``ValueError``.
:param int max:
If provided, values greater than this will raise ``ValueError``.
:returns: ``None``
### Response:
def set_int_param(params, name, value, min=None, max=None):
"""
Set a int parameter if applicable.
:param dict params: A dict containing API call parameters.
:param str name: The name of the parameter to set.
:param int value:
The value of the parameter. If ``None``, the field will not be set. If
an instance of a numeric type or a string that can be turned into a
``int``, the relevant field will be set. Any other value will raise a
`ValueError`.
:param int min:
If provided, values less than this will raise ``ValueError``.
:param int max:
If provided, values greater than this will raise ``ValueError``.
:returns: ``None``
"""
if value is None:
return
try:
value = int(str(value))
except (TypeError, ValueError):
raise ValueError(
"Parameter '%s' must be an integer (or a string representation of"
" an integer) or None, got %r." % (name, value))
if min is not None and value < min:
raise ValueError(
"Parameter '%s' must not be less than %r, got %r." % (
name, min, value))
if max is not None and value > max:
raise ValueError(
"Parameter '%s' must not be greater than %r, got %r." % (
name, max, value))
params[name] = str(value) |
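A minimal sketch of how set_int_param fills an API-parameter dict and rejects out-of-range values; the parameter names are hypothetical.
params = {}
set_int_param(params, "page_size", "25", min=1, max=100)   # stored as the string '25'
set_int_param(params, "offset", None)                      # None: the key is not set at all
print(params)                                              # {'page_size': '25'}
try:
    set_int_param(params, "page_size", 500, max=100)
except ValueError as exc:
    print(exc)                                             # value above max raises ValueError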
def data_processing(self):
"""
This function separates data, from the file to display curves, and will put them in the good arrays.
"""
the_file_name = str(self.result_file)
the_file = open(the_file_name, 'r')
lines = the_file.readlines()
# We put all lines in an array and we put each cell of the line in a column.
lines_array = []
for line in lines:
line = line.split(',') # Each time there is a tabulation, there is a new cell
lines_array.append(line)
labels_line = lines_array[0]
cell_labels_line = 0 # Iterator on each cell of the line labels_line.
flag = True # Become FALSE when we find the word which separate data from wavelength values.
try:
while flag: # While it is TRUE, so if the word doesn't match, it's an infinite loop,
if "wave length (nm)" in labels_line[cell_labels_line]:
index = labels_line.index(labels_line[cell_labels_line]) # Find the index of the string searched.
flag = False
else:
cell_labels_line += 1
except IndexError: # In case of an infinite loop.
raise sys.exit("Warning : There is no value named 'wavelength' in the file used to plot curves. "
"So, I can't separate data to plot curves and data about tests linking with these curves.")
self.information = [] # This array will contain the data displayed under the curves.
data_wavelength = [] # This array will contain the data to plot curves.
self.num_line = 0 # Iterator on each line of lines_array,
# The array containing data about information and wavelength.
for line in lines_array:
cell_line = 0 # Iterator on each cell of the line.
self.information.append([])
data_wavelength.append([])
while cell_line < len(line):
if cell_line < index:
self.information[self.num_line].append(line[cell_line])
elif cell_line > index:
data_wavelength[self.num_line].append(line[cell_line])
cell_line += 1
self.num_line += 1
# We transform wavelengths from strings to floats.
line_wavelength = 0 # Iterator on each line of data_wavelength
for row_data_wavelength in data_wavelength:
row_data_wavelength = [float(item.strip('\n').strip('\"')) for item in row_data_wavelength]
data_wavelength[line_wavelength] = row_data_wavelength
line_wavelength += 1
self.wavelength = data_wavelength[0] # The first line contains wavelength
self.data_wanted = data_wavelength[1:] # The others contain data useful to plot curves.
the_file.close() | This function separates data, from the file to display curves, and will put them in the good arrays. | Below is the instruction that describes the task:
### Input:
This function separates data, from the file to display curves, and will put them in the good arrays.
### Response:
def data_processing(self):
"""
This function separates data, from the file to display curves, and will put them in the good arrays.
"""
the_file_name = str(self.result_file)
the_file = open(the_file_name, 'r')
lines = the_file.readlines()
# We put all lines in an array and we put each cell of the line in a column.
lines_array = []
for line in lines:
line = line.split(',') # Each time there is a tabulation, there is a new cell
lines_array.append(line)
labels_line = lines_array[0]
cell_labels_line = 0 # Iterator on each cell of the line labels_line.
flag = True # Become FALSE when we find the word which separate data from wavelength values.
try:
while flag: # While it is TRUE, so if the word doesn't match, it's an infinite loop,
if "wave length (nm)" in labels_line[cell_labels_line]:
index = labels_line.index(labels_line[cell_labels_line]) # Find the index of the string searched.
flag = False
else:
cell_labels_line += 1
except IndexError: # In case of an infinite loop.
raise sys.exit("Warning : There is no value named 'wavelength' in the file used to plot curves. "
"So, I can't separate data to plot curves and data about tests linking with these curves.")
self.information = [] # This array will contain the data displayed under the curves.
data_wavelength = [] # This array will contain the data to plot curves.
self.num_line = 0 # Iterator on each line of lines_array,
# The array containing data about information and wavelength.
for line in lines_array:
cell_line = 0 # Iterator on each cell of the line.
self.information.append([])
data_wavelength.append([])
while cell_line < len(line):
if cell_line < index:
self.information[self.num_line].append(line[cell_line])
elif cell_line > index:
data_wavelength[self.num_line].append(line[cell_line])
cell_line += 1
self.num_line += 1
# We transform wavelengths from strings to floats.
line_wavelength = 0 # Iterator on each line of data_wavelength
for row_data_wavelength in data_wavelength:
row_data_wavelength = [float(item.strip('\n').strip('\"')) for item in row_data_wavelength]
data_wavelength[line_wavelength] = row_data_wavelength
line_wavelength += 1
self.wavelength = data_wavelength[0] # The first line contains wavelength
self.data_wanted = data_wavelength[1:] # The others contain data useful to plot curves.
the_file.close() |
def istype(obj, check):
"""Like isinstance(obj, check), but strict.
This won't catch subclasses.
"""
if isinstance(check, tuple):
for cls in check:
if type(obj) is cls:
return True
return False
else:
return type(obj) is check | Like isinstance(obj, check), but strict.
This won't catch subclasses. | Below is the instruction that describes the task:
### Input:
Like isinstance(obj, check), but strict.
This won't catch subclasses.
### Response:
def istype(obj, check):
"""Like isinstance(obj, check), but strict.
This won't catch subclasses.
"""
if isinstance(check, tuple):
for cls in check:
if type(obj) is cls:
return True
return False
else:
return type(obj) is check |
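A brief sketch contrasting istype with isinstance; bool being a subclass of int makes the strictness visible.
print(isinstance(True, int))      # True: bool is a subclass of int
print(istype(True, int))          # False: exact-type check rejects subclasses
print(istype(True, (int, bool)))  # True: bool appears in the tuple
print(istype(3, int))             # True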
async def _send(self, stream_id, pp_id, user_data,
expiry=None, max_retransmits=None, ordered=True):
"""
Send data ULP -> stream.
"""
if ordered:
stream_seq = self._outbound_stream_seq.get(stream_id, 0)
else:
stream_seq = 0
fragments = math.ceil(len(user_data) / USERDATA_MAX_LENGTH)
pos = 0
for fragment in range(0, fragments):
chunk = DataChunk()
chunk.flags = 0
if not ordered:
chunk.flags = SCTP_DATA_UNORDERED
if fragment == 0:
chunk.flags |= SCTP_DATA_FIRST_FRAG
if fragment == fragments - 1:
chunk.flags |= SCTP_DATA_LAST_FRAG
chunk.tsn = self._local_tsn
chunk.stream_id = stream_id
chunk.stream_seq = stream_seq
chunk.protocol = pp_id
chunk.user_data = user_data[pos:pos + USERDATA_MAX_LENGTH]
# initialize counters
chunk._abandoned = False
chunk._acked = False
chunk._book_size = len(chunk.user_data)
chunk._expiry = expiry
chunk._max_retransmits = max_retransmits
chunk._misses = 0
chunk._retransmit = False
chunk._sent_count = 0
chunk._sent_time = None
pos += USERDATA_MAX_LENGTH
self._local_tsn = tsn_plus_one(self._local_tsn)
self._outbound_queue.append(chunk)
if ordered:
self._outbound_stream_seq[stream_id] = uint16_add(stream_seq, 1)
# transmit outbound data
if not self._t3_handle:
await self._transmit() | Send data ULP -> stream. | Below is the instruction that describes the task:
### Input:
Send data ULP -> stream.
### Response:
async def _send(self, stream_id, pp_id, user_data,
expiry=None, max_retransmits=None, ordered=True):
"""
Send data ULP -> stream.
"""
if ordered:
stream_seq = self._outbound_stream_seq.get(stream_id, 0)
else:
stream_seq = 0
fragments = math.ceil(len(user_data) / USERDATA_MAX_LENGTH)
pos = 0
for fragment in range(0, fragments):
chunk = DataChunk()
chunk.flags = 0
if not ordered:
chunk.flags = SCTP_DATA_UNORDERED
if fragment == 0:
chunk.flags |= SCTP_DATA_FIRST_FRAG
if fragment == fragments - 1:
chunk.flags |= SCTP_DATA_LAST_FRAG
chunk.tsn = self._local_tsn
chunk.stream_id = stream_id
chunk.stream_seq = stream_seq
chunk.protocol = pp_id
chunk.user_data = user_data[pos:pos + USERDATA_MAX_LENGTH]
# initialize counters
chunk._abandoned = False
chunk._acked = False
chunk._book_size = len(chunk.user_data)
chunk._expiry = expiry
chunk._max_retransmits = max_retransmits
chunk._misses = 0
chunk._retransmit = False
chunk._sent_count = 0
chunk._sent_time = None
pos += USERDATA_MAX_LENGTH
self._local_tsn = tsn_plus_one(self._local_tsn)
self._outbound_queue.append(chunk)
if ordered:
self._outbound_stream_seq[stream_id] = uint16_add(stream_seq, 1)
# transmit outbound data
if not self._t3_handle:
await self._transmit() |
def isEmpty(self):
"""
Is a given array, string, or object empty?
An "empty" object has no enumerable own-properties.
"""
if self.obj is None:
return True
if self._clean.isString():
ret = self.obj.strip() == ""
elif self._clean.isDict():
ret = len(self.obj.keys()) == 0
else:
ret = len(self.obj) == 0
return self._wrap(ret) | Is a given array, string, or object empty?
An "empty" object has no enumerable own-properties. | Below is the the instruction that describes the task:
### Input:
Is a given array, string, or object empty?
An "empty" object has no enumerable own-properties.
### Response:
def isEmpty(self):
"""
Is a given array, string, or object empty?
An "empty" object has no enumerable own-properties.
"""
if self.obj is None:
return True
if self._clean.isString():
ret = self.obj.strip() == ""
elif self._clean.isDict():
ret = len(self.obj.keys()) == 0
else:
ret = len(self.obj) == 0
return self._wrap(ret) |
def group_nodes_by_annotation_filtered(graph: BELGraph,
node_predicates: NodePredicates = None,
annotation: str = 'Subgraph',
) -> Mapping[str, Set[BaseEntity]]:
"""Group the nodes occurring in edges by the given annotation, with a node filter applied.
:param graph: A BEL graph
:param node_predicates: A predicate or list of predicates (graph, node) -> bool
:param annotation: The annotation to use for grouping
:return: A dictionary of {annotation value: set of nodes}
"""
node_filter = concatenate_node_predicates(node_predicates)
return {
key: {
node
for node in nodes
if node_filter(graph, node)
}
for key, nodes in group_nodes_by_annotation(graph, annotation).items()
} | Group the nodes occurring in edges by the given annotation, with a node filter applied.
:param graph: A BEL graph
:param node_predicates: A predicate or list of predicates (graph, node) -> bool
:param annotation: The annotation to use for grouping
:return: A dictionary of {annotation value: set of nodes} | Below is the instruction that describes the task:
### Input:
Group the nodes occurring in edges by the given annotation, with a node filter applied.
:param graph: A BEL graph
:param node_predicates: A predicate or list of predicates (graph, node) -> bool
:param annotation: The annotation to use for grouping
:return: A dictionary of {annotation value: set of nodes}
### Response:
def group_nodes_by_annotation_filtered(graph: BELGraph,
node_predicates: NodePredicates = None,
annotation: str = 'Subgraph',
) -> Mapping[str, Set[BaseEntity]]:
"""Group the nodes occurring in edges by the given annotation, with a node filter applied.
:param graph: A BEL graph
:param node_predicates: A predicate or list of predicates (graph, node) -> bool
:param annotation: The annotation to use for grouping
:return: A dictionary of {annotation value: set of nodes}
"""
node_filter = concatenate_node_predicates(node_predicates)
return {
key: {
node
for node in nodes
if node_filter(graph, node)
}
for key, nodes in group_nodes_by_annotation(graph, annotation).items()
} |
def _table_limit(table, n, offset=0):
"""
Select the first n rows at beginning of table (may not be deterministic
depending on implementation and presence of a sorting).
Parameters
----------
n : int
Number of rows to include
offset : int, default 0
Number of rows to skip first
Returns
-------
limited : TableExpr
"""
op = ops.Limit(table, n, offset=offset)
return op.to_expr() | Select the first n rows at beginning of table (may not be deterministic
depending on implementation and presence of a sorting).
Parameters
----------
n : int
Number of rows to include
offset : int, default 0
Number of rows to skip first
Returns
-------
limited : TableExpr | Below is the instruction that describes the task:
### Input:
Select the first n rows at beginning of table (may not be deterministic
depending on implementation and presence of a sorting).
Parameters
----------
n : int
Number of rows to include
offset : int, default 0
Number of rows to skip first
Returns
-------
limited : TableExpr
### Response:
def _table_limit(table, n, offset=0):
"""
Select the first n rows at beginning of table (may not be deterministic
depending on implementation and presence of a sorting).
Parameters
----------
n : int
Number of rows to include
offset : int, default 0
Number of rows to skip first
Returns
-------
limited : TableExpr
"""
op = ops.Limit(table, n, offset=offset)
return op.to_expr() |
def api_key(self):
"""Returns the api_key or None.
"""
if not self._api_key:
error_msg = (
f"Email is enabled but API_KEY is not set. "
f"See settings.{self.api_key_attr}"
)
try:
self._api_key = getattr(settings, self.api_key_attr)
except AttributeError:
raise EmailNotEnabledError(error_msg, code="api_key_attribute_error")
else:
if not self._api_key:
raise EmailNotEnabledError(error_msg, code="api_key_is_none")
return self._api_key | Returns the api_key or None. | Below is the instruction that describes the task:
### Input:
Returns the api_key or None.
### Response:
def api_key(self):
"""Returns the api_key or None.
"""
if not self._api_key:
error_msg = (
f"Email is enabled but API_KEY is not set. "
f"See settings.{self.api_key_attr}"
)
try:
self._api_key = getattr(settings, self.api_key_attr)
except AttributeError:
raise EmailNotEnabledError(error_msg, code="api_key_attribute_error")
else:
if not self._api_key:
raise EmailNotEnabledError(error_msg, code="api_key_is_none")
return self._api_key |
def run_multiple_column_experiment():
"""
Compare the ideal observer against a multi-column sensorimotor network.
"""
# Create the objects
featureRange = [5, 10, 20, 30]
pointRange = 1
objectRange = [100]
numLocations = [10]
numPoints = 10
numTrials = 10
columnRange = [1, 2, 3, 4, 5, 6, 7, 8]
useLocation = 1
resultsDir = os.path.dirname(os.path.realpath(__file__))
args = []
for c in reversed(columnRange):
for o in reversed(objectRange):
for l in numLocations:
for f in featureRange:
for t in range(numTrials):
args.append(
{"numObjects": o,
"numLocations": l,
"numFeatures": f,
"numColumns": c,
"trialNum": t,
"pointRange": pointRange,
"numPoints": numPoints,
"useLocation": useLocation
}
)
print "Number of experiments:",len(args)
idealResultsFile = os.path.join(resultsDir,
"ideal_multi_column_useLocation_{}.pkl".format(useLocation))
pool = Pool(processes=cpu_count())
result = pool.map(run_ideal_classifier, args)
# Pickle results for later use
with open(idealResultsFile, "wb") as f:
cPickle.dump(result, f)
htmResultsFile = os.path.join(resultsDir, "column_convergence_results.pkl")
runExperimentPool(
numObjects=objectRange,
numLocations=[10],
numFeatures=featureRange,
numColumns=columnRange,
numPoints=10,
nTrials=numTrials,
numWorkers=cpu_count(),
resultsName=htmResultsFile)
with open(htmResultsFile, "rb") as f:
results = cPickle.load(f)
with open(idealResultsFile, "rb") as f:
resultsIdeal = cPickle.load(f)
plt.figure()
plotConvergenceByColumn(results, columnRange, featureRange, numTrials)
plotConvergenceByColumn(resultsIdeal, columnRange, featureRange, numTrials,
"--")
plt.savefig('plots/ideal_observer_multiple_column.pdf') | Compare the ideal observer against a multi-column sensorimotor network. | Below is the instruction that describes the task:
### Input:
Compare the ideal observer against a multi-column sensorimotor network.
### Response:
def run_multiple_column_experiment():
"""
Compare the ideal observer against a multi-column sensorimotor network.
"""
# Create the objects
featureRange = [5, 10, 20, 30]
pointRange = 1
objectRange = [100]
numLocations = [10]
numPoints = 10
numTrials = 10
columnRange = [1, 2, 3, 4, 5, 6, 7, 8]
useLocation = 1
resultsDir = os.path.dirname(os.path.realpath(__file__))
args = []
for c in reversed(columnRange):
for o in reversed(objectRange):
for l in numLocations:
for f in featureRange:
for t in range(numTrials):
args.append(
{"numObjects": o,
"numLocations": l,
"numFeatures": f,
"numColumns": c,
"trialNum": t,
"pointRange": pointRange,
"numPoints": numPoints,
"useLocation": useLocation
}
)
print "Number of experiments:",len(args)
idealResultsFile = os.path.join(resultsDir,
"ideal_multi_column_useLocation_{}.pkl".format(useLocation))
pool = Pool(processes=cpu_count())
result = pool.map(run_ideal_classifier, args)
# Pickle results for later use
with open(idealResultsFile, "wb") as f:
cPickle.dump(result, f)
htmResultsFile = os.path.join(resultsDir, "column_convergence_results.pkl")
runExperimentPool(
numObjects=objectRange,
numLocations=[10],
numFeatures=featureRange,
numColumns=columnRange,
numPoints=10,
nTrials=numTrials,
numWorkers=cpu_count(),
resultsName=htmResultsFile)
with open(htmResultsFile, "rb") as f:
results = cPickle.load(f)
with open(idealResultsFile, "rb") as f:
resultsIdeal = cPickle.load(f)
plt.figure()
plotConvergenceByColumn(results, columnRange, featureRange, numTrials)
plotConvergenceByColumn(resultsIdeal, columnRange, featureRange, numTrials,
"--")
plt.savefig('plots/ideal_observer_multiple_column.pdf') |
def export_data(target_path):
"""
Exports the data of an application - media files plus database,
:param: target_path:
:return: a zip archive
"""
tasks.export_data_dir(target_path)
tasks.export_database(target_path)
tasks.export_context(target_path)
return target_path | Exports the data of an application - media files plus database,
:param: target_path:
:return: a zip archive | Below is the instruction that describes the task:
### Input:
Exports the data of an application - media files plus database,
:param: target_path:
:return: a zip archive
### Response:
def export_data(target_path):
"""
Exports the data of an application - media files plus database,
:param: target_path:
:return: a zip archive
"""
tasks.export_data_dir(target_path)
tasks.export_database(target_path)
tasks.export_context(target_path)
return target_path |
def get_wx_font(self, s, prop):
"""
Return a wx font. Cache instances in a font dictionary for
efficiency
"""
DEBUG_MSG("get_wx_font()", 1, self)
key = hash(prop)
fontprop = prop
fontname = fontprop.get_name()
font = self.fontd.get(key)
if font is not None:
return font
# Allow use of platform independent and dependent font names
wxFontname = self.fontnames.get(fontname, wx.ROMAN)
wxFacename = '' # Empty => wxPython chooses based on wx_fontname
# Font colour is determined by the active wx.Pen
# TODO: It may be wise to cache font information
size = self.points_to_pixels(fontprop.get_size_in_points())
font = wx.Font(int(size+0.5), # Size
wxFontname, # 'Generic' name
self.fontangles[fontprop.get_style()], # Angle
self.fontweights[fontprop.get_weight()], # Weight
False, # Underline
wxFacename) # Platform font name
# cache the font and gc and return it
self.fontd[key] = font
return font | Return a wx font. Cache instances in a font dictionary for
efficiency | Below is the instruction that describes the task:
### Input:
Return a wx font. Cache instances in a font dictionary for
efficiency
### Response:
def get_wx_font(self, s, prop):
"""
Return a wx font. Cache instances in a font dictionary for
efficiency
"""
DEBUG_MSG("get_wx_font()", 1, self)
key = hash(prop)
fontprop = prop
fontname = fontprop.get_name()
font = self.fontd.get(key)
if font is not None:
return font
# Allow use of platform independent and dependent font names
wxFontname = self.fontnames.get(fontname, wx.ROMAN)
wxFacename = '' # Empty => wxPython chooses based on wx_fontname
# Font colour is determined by the active wx.Pen
# TODO: It may be wise to cache font information
size = self.points_to_pixels(fontprop.get_size_in_points())
font = wx.Font(int(size+0.5), # Size
wxFontname, # 'Generic' name
self.fontangles[fontprop.get_style()], # Angle
self.fontweights[fontprop.get_weight()], # Weight
False, # Underline
wxFacename) # Platform font name
# cache the font and gc and return it
self.fontd[key] = font
return font |
def parse_get_bucket_notification(data):
"""
Parser for a get_bucket_notification response from S3.
:param data: Body of response from get_bucket_notification.
:return: Returns bucket notification configuration
"""
root = S3Element.fromstring('GetBucketNotificationResult', data)
notifications = _parse_add_notifying_service_config(
root, {},
'TopicConfigurations', 'TopicConfiguration'
)
notifications = _parse_add_notifying_service_config(
root, notifications,
'QueueConfigurations', 'QueueConfiguration'
)
notifications = _parse_add_notifying_service_config(
root, notifications,
'CloudFunctionConfigurations', 'CloudFunctionConfiguration'
)
return notifications | Parser for a get_bucket_notification response from S3.
:param data: Body of response from get_bucket_notification.
:return: Returns bucket notification configuration | Below is the instruction that describes the task:
### Input:
Parser for a get_bucket_notification response from S3.
:param data: Body of response from get_bucket_notification.
:return: Returns bucket notification configuration
### Response:
def parse_get_bucket_notification(data):
"""
Parser for a get_bucket_notification response from S3.
:param data: Body of response from get_bucket_notification.
:return: Returns bucket notification configuration
"""
root = S3Element.fromstring('GetBucketNotificationResult', data)
notifications = _parse_add_notifying_service_config(
root, {},
'TopicConfigurations', 'TopicConfiguration'
)
notifications = _parse_add_notifying_service_config(
root, notifications,
'QueueConfigurations', 'QueueConfiguration'
)
notifications = _parse_add_notifying_service_config(
root, notifications,
'CloudFunctionConfigurations', 'CloudFunctionConfiguration'
)
return notifications |
def get_albums(self, search, start=0, max_items=100):
"""Search for albums.
See get_music_service_information for details on the arguments
"""
return self.get_music_service_information('albums', search, start,
max_items) | Search for albums.
See get_music_service_information for details on the arguments | Below is the instruction that describes the task:
### Input:
Search for albums.
See get_music_service_information for details on the arguments
### Response:
def get_albums(self, search, start=0, max_items=100):
"""Search for albums.
See get_music_service_information for details on the arguments
"""
return self.get_music_service_information('albums', search, start,
max_items) |
def worker(job):
"""Primary |worker| coroutine. This is a |pull| object that pulls jobs from
a source and yield evaluated results.
Input should be of type |JobMessage|, output of type |ResultMessage|.
.. |worker| replace:: :py:func::`worker`"""
if job is EndOfQueue:
return
if not isinstance(job, JobMessage):
print("Warning: Job should be communicated using `JobMessage`.",
file=sys.stderr)
key, node = job
return run_job(key, node) | Primary |worker| coroutine. This is a |pull| object that pulls jobs from
a source and yield evaluated results.
Input should be of type |JobMessage|, output of type |ResultMessage|.
.. |worker| replace:: :py:func::`worker` | Below is the instruction that describes the task:
### Input:
Primary |worker| coroutine. This is a |pull| object that pulls jobs from
a source and yield evaluated results.
Input should be of type |JobMessage|, output of type |ResultMessage|.
.. |worker| replace:: :py:func::`worker`
### Response:
def worker(job):
"""Primary |worker| coroutine. This is a |pull| object that pulls jobs from
a source and yield evaluated results.
Input should be of type |JobMessage|, output of type |ResultMessage|.
.. |worker| replace:: :py:func::`worker`"""
if job is EndOfQueue:
return
if not isinstance(job, JobMessage):
print("Warning: Job should be communicated using `JobMessage`.",
file=sys.stderr)
key, node = job
return run_job(key, node) |
def _get_distance_scaling(self, C, dists, mag):
"""
Implements the distance scaling function F(M, R) presented in equations
2 and 3. In the case of Joyner-Boore distance then the fixed-depth
term h is required
"""
r_h = self._get_rh(C, dists)
return (C["c1"] + C["c2"] * (mag - self.CONSTANTS["mref"])) *\
np.log(r_h / self.CONSTANTS["rref"]) +\
C["c3"] * (r_h - self.CONSTANTS["rref"]) | Implements the distance scaling function F(M, R) presented in equations
2 and 3. In the case of Joyner-Boore distance then the fixed-depth
term h is required | Below is the instruction that describes the task:
### Input:
Implements the distance scaling function F(M, R) presented in equations
2 and 3. In the case of Joyner-Boore distance then the fixed-depth
term h is required
### Response:
def _get_distance_scaling(self, C, dists, mag):
"""
Implements the distance scaling function F(M, R) presented in equations
2 and 3. In the case of Joyner-Boore distance then the fixed-depth
term h is required
"""
r_h = self._get_rh(C, dists)
return (C["c1"] + C["c2"] * (mag - self.CONSTANTS["mref"])) *\
np.log(r_h / self.CONSTANTS["rref"]) +\
C["c3"] * (r_h - self.CONSTANTS["rref"]) |
def validate_dataset_string(self, dataset):
""" determine if a dataset string is valid, meaning it is in the format
of {username}/{dataset-slug}.
Parameters
==========
dataset: the dataset name to validate
"""
if dataset:
if '/' not in dataset:
raise ValueError('Dataset must be specified in the form of '
'\'{username}/{dataset-slug}\'')
split = dataset.split('/')
if not split[0] or not split[1]:
raise ValueError('Invalid dataset specification ' + dataset) | determine if a dataset string is valid, meaning it is in the format
of {username}/{dataset-slug}.
Parameters
==========
dataset: the dataset name to validate | Below is the instruction that describes the task:
### Input:
determine if a dataset string is valid, meaning it is in the format
of {username}/{dataset-slug}.
Parameters
==========
dataset: the dataset name to validate
### Response:
def validate_dataset_string(self, dataset):
""" determine if a dataset string is valid, meaning it is in the format
of {username}/{dataset-slug}.
Parameters
==========
dataset: the dataset name to validate
"""
if dataset:
if '/' not in dataset:
raise ValueError('Dataset must be specified in the form of '
'\'{username}/{dataset-slug}\'')
split = dataset.split('/')
if not split[0] or not split[1]:
raise ValueError('Invalid dataset specification ' + dataset) |
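An illustrative sketch of the validation behaviour; since only the dataset argument is inspected, a throwaway object stands in for self here.
validate_dataset_string(object(), "alice/titanic")   # valid: returns None silently
validate_dataset_string(object(), None)              # falsy input is skipped entirely
for bad in ("titanic", "alice/", "/titanic"):
    try:
        validate_dataset_string(object(), bad)
    except ValueError as exc:
        print(bad, "->", exc)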
def remove_callback(self, callback):
"""Remove callback previously registered."""
if callback in self._async_callbacks:
self._async_callbacks.remove(callback) | Remove callback previously registered. | Below is the instruction that describes the task:
### Input:
Remove callback previously registered.
### Response:
def remove_callback(self, callback):
"""Remove callback previously registered."""
if callback in self._async_callbacks:
self._async_callbacks.remove(callback) |
def multivariate_gaussian_samples(matrix, N, mean=None):
"""
Generate samples from a multidimensional Gaussian with a given covariance.
:param matrix: ``(k, k)``
The covariance matrix.
:param N:
The number of samples to generate.
:param mean: ``(k,)`` (optional)
The mean of the Gaussian. Assumed to be zero if not given.
:returns samples: ``(k,)`` or ``(N, k)``
Samples from the given multivariate normal.
"""
if mean is None:
mean = np.zeros(len(matrix))
samples = np.random.multivariate_normal(mean, matrix, N)
if N == 1:
return samples[0]
return samples | Generate samples from a multidimensional Gaussian with a given covariance.
:param matrix: ``(k, k)``
The covariance matrix.
:param N:
The number of samples to generate.
:param mean: ``(k,)`` (optional)
The mean of the Gaussian. Assumed to be zero if not given.
:returns samples: ``(k,)`` or ``(N, k)``
Samples from the given multivariate normal. | Below is the instruction that describes the task:
### Input:
Generate samples from a multidimensional Gaussian with a given covariance.
:param matrix: ``(k, k)``
The covariance matrix.
:param N:
The number of samples to generate.
:param mean: ``(k,)`` (optional)
The mean of the Gaussian. Assumed to be zero if not given.
:returns samples: ``(k,)`` or ``(N, k)``
Samples from the given multivariate normal.
### Response:
def multivariate_gaussian_samples(matrix, N, mean=None):
"""
Generate samples from a multidimensional Gaussian with a given covariance.
:param matrix: ``(k, k)``
The covariance matrix.
:param N:
The number of samples to generate.
:param mean: ``(k,)`` (optional)
The mean of the Gaussian. Assumed to be zero if not given.
:returns samples: ``(k,)`` or ``(N, k)``
Samples from the given multivariate normal.
"""
if mean is None:
mean = np.zeros(len(matrix))
samples = np.random.multivariate_normal(mean, matrix, N)
if N == 1:
return samples[0]
return samples |
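A small sketch of drawing correlated samples with the helper above; it assumes numpy is imported as np, as the function body already requires.
import numpy as np

np.random.seed(0)                                 # reproducible draws for the sketch
cov = np.array([[1.0, 0.8],
                [0.8, 1.0]])                      # correlated 2-D Gaussian
single = multivariate_gaussian_samples(cov, 1)    # shape (2,): N == 1 returns one sample
batch = multivariate_gaussian_samples(cov, 500, mean=np.array([2.0, -1.0]))
print(single.shape, batch.shape)                  # (2,) (500, 2)
print(np.corrcoef(batch.T)[0, 1])                 # empirical correlation, close to 0.8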
def _populate_cmd_lists(self):
""" Populate self.commands"""
self.commands = {}
for cmd_instance in self.cmd_instances:
cmd_name = cmd_instance.name
self.commands[cmd_name] = cmd_instance
pass
return | Populate self.commands | Below is the instruction that describes the task:
### Input:
Populate self.commands
### Response:
def _populate_cmd_lists(self):
""" Populate self.commands"""
self.commands = {}
for cmd_instance in self.cmd_instances:
cmd_name = cmd_instance.name
self.commands[cmd_name] = cmd_instance
pass
return |
def add_route_for(cls, _name, rule, **options):
"""
Add a route for an existing method or view. Useful for modifying routes
that a subclass inherits from a base class::
class BaseView(ClassView):
def latent_view(self):
return 'latent-view'
@route('other')
def other_view(self):
return 'other-view'
@route('/path')
class SubView(BaseView):
pass
SubView.add_route_for('latent_view', 'latent')
SubView.add_route_for('other_view', 'another')
SubView.init_app(app)
# Created routes:
# /path/latent -> SubView.latent (added)
# /path/other -> SubView.other (inherited)
# /path/another -> SubView.other (added)
:param _name: Name of the method or view on the class
:param rule: URL rule to be added
:param options: Additional options for :meth:`~flask.Flask.add_url_rule`
"""
setattr(cls, _name, route(rule, **options)(cls.__get_raw_attr(_name))) | Add a route for an existing method or view. Useful for modifying routes
that a subclass inherits from a base class::
class BaseView(ClassView):
def latent_view(self):
return 'latent-view'
@route('other')
def other_view(self):
return 'other-view'
@route('/path')
class SubView(BaseView):
pass
SubView.add_route_for('latent_view', 'latent')
SubView.add_route_for('other_view', 'another')
SubView.init_app(app)
# Created routes:
# /path/latent -> SubView.latent (added)
# /path/other -> SubView.other (inherited)
# /path/another -> SubView.other (added)
:param _name: Name of the method or view on the class
:param rule: URL rule to be added
:param options: Additional options for :meth:`~flask.Flask.add_url_rule` | Below is the instruction that describes the task:
### Input:
Add a route for an existing method or view. Useful for modifying routes
that a subclass inherits from a base class::
class BaseView(ClassView):
def latent_view(self):
return 'latent-view'
@route('other')
def other_view(self):
return 'other-view'
@route('/path')
class SubView(BaseView):
pass
SubView.add_route_for('latent_view', 'latent')
SubView.add_route_for('other_view', 'another')
SubView.init_app(app)
# Created routes:
# /path/latent -> SubView.latent (added)
# /path/other -> SubView.other (inherited)
# /path/another -> SubView.other (added)
:param _name: Name of the method or view on the class
:param rule: URL rule to be added
:param options: Additional options for :meth:`~flask.Flask.add_url_rule`
### Response:
def add_route_for(cls, _name, rule, **options):
"""
Add a route for an existing method or view. Useful for modifying routes
that a subclass inherits from a base class::
class BaseView(ClassView):
def latent_view(self):
return 'latent-view'
@route('other')
def other_view(self):
return 'other-view'
@route('/path')
class SubView(BaseView):
pass
SubView.add_route_for('latent_view', 'latent')
SubView.add_route_for('other_view', 'another')
SubView.init_app(app)
# Created routes:
# /path/latent -> SubView.latent (added)
# /path/other -> SubView.other (inherited)
# /path/another -> SubView.other (added)
:param _name: Name of the method or view on the class
:param rule: URL rule to be added
:param options: Additional options for :meth:`~flask.Flask.add_url_rule`
"""
setattr(cls, _name, route(rule, **options)(cls.__get_raw_attr(_name))) |
def initialize(self, timestamp=None, user=None, comment=None,
filename=None, source=None, size=None):
self.timestamp = none_or(timestamp, Timestamp)
"""
Upload timestamp : mwtypes.Timestamp | None
"""
self.user = none_or(user, User)
"""
Contributing user metadata : :class:`~mwtypes.User`
"""
self.comment = none_or(comment, str)
"""
Comment left with upload : str | None
"""
self.filename = none_or(filename, str)
"""
File name without "File:" prefix and "_" instead of spaces : str | None
"""
self.source = none_or(source, str)
"""
A URI : str | None
"""
self.size = none_or(size, int)
"""
Number of bytes of content : int | None
""" | Upload timestamp : mwtypes.Timestamp | None | Below is the the instruction that describes the task:
### Input:
Upload timestamp : mwtypes.Timestamp | None
### Response:
def initialize(self, timestamp=None, user=None, comment=None,
filename=None, source=None, size=None):
self.timestamp = none_or(timestamp, Timestamp)
"""
Upload timestamp : mwtypes.Timestamp | None
"""
self.user = none_or(user, User)
"""
Contributing user metadata : :class:`~mwtypes.User`
"""
self.comment = none_or(comment, str)
"""
Comment left with upload : str | None
"""
self.filename = none_or(filename, str)
"""
File name without "File:" prefix and "_" instead of spaces : str | None
"""
self.source = none_or(source, str)
"""
A URI : str | None
"""
self.size = none_or(size, int)
"""
Number of bytes of content : int | None
""" |
def logs(self, **kwargs):
"""
Get logs from this container. Similar to the ``docker logs`` command.
The ``stream`` parameter makes the ``logs`` function return a blocking
generator you can iterate over to retrieve log output as it happens.
Args:
stdout (bool): Get ``STDOUT``. Default ``True``
stderr (bool): Get ``STDERR``. Default ``True``
stream (bool): Stream the response. Default ``False``
timestamps (bool): Show timestamps. Default ``False``
tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
follow (bool): Follow log output. Default ``False``
until (datetime or int): Show logs that occurred before the given
datetime or integer epoch (in seconds)
Returns:
(generator or str): Logs from the container.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.logs(self.id, **kwargs) | Get logs from this container. Similar to the ``docker logs`` command.
The ``stream`` parameter makes the ``logs`` function return a blocking
generator you can iterate over to retrieve log output as it happens.
Args:
stdout (bool): Get ``STDOUT``. Default ``True``
stderr (bool): Get ``STDERR``. Default ``True``
stream (bool): Stream the response. Default ``False``
timestamps (bool): Show timestamps. Default ``False``
tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
follow (bool): Follow log output. Default ``False``
until (datetime or int): Show logs that occurred before the given
datetime or integer epoch (in seconds)
Returns:
(generator or str): Logs from the container.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error. | Below is the instruction that describes the task:
### Input:
Get logs from this container. Similar to the ``docker logs`` command.
The ``stream`` parameter makes the ``logs`` function return a blocking
generator you can iterate over to retrieve log output as it happens.
Args:
stdout (bool): Get ``STDOUT``. Default ``True``
stderr (bool): Get ``STDERR``. Default ``True``
stream (bool): Stream the response. Default ``False``
timestamps (bool): Show timestamps. Default ``False``
tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
follow (bool): Follow log output. Default ``False``
until (datetime or int): Show logs that occurred before the given
datetime or integer epoch (in seconds)
Returns:
(generator or str): Logs from the container.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
### Response:
def logs(self, **kwargs):
"""
Get logs from this container. Similar to the ``docker logs`` command.
The ``stream`` parameter makes the ``logs`` function return a blocking
generator you can iterate over to retrieve log output as it happens.
Args:
stdout (bool): Get ``STDOUT``. Default ``True``
stderr (bool): Get ``STDERR``. Default ``True``
stream (bool): Stream the response. Default ``False``
timestamps (bool): Show timestamps. Default ``False``
tail (str or int): Output specified number of lines at the end of
logs. Either an integer of number of lines or the string
``all``. Default ``all``
since (datetime or int): Show logs since a given datetime or
integer epoch (in seconds)
follow (bool): Follow log output. Default ``False``
until (datetime or int): Show logs that occurred before the given
datetime or integer epoch (in seconds)
Returns:
(generator or str): Logs from the container.
Raises:
:py:class:`docker.errors.APIError`
If the server returns an error.
"""
return self.client.api.logs(self.id, **kwargs) |
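A hedged usage sketch with the docker SDK for Python; the image and command are hypothetical and a reachable Docker daemon is assumed.
import docker

client = docker.from_env()
container = client.containers.run("alpine", "echo hello", detach=True)  # hypothetical image/command
container.wait()                                       # let the command finish before reading logs
print(container.logs(stdout=True, stderr=False, timestamps=True, tail="all"))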
def pop_frame(self):
"""
Remove and return the frame at the top of the stack.
:returns: The top frame
:rtype: Frame
:raises Exception: If there are no frames on the stack
"""
self.frames.pop(0)
if len(self.frames) == 0:
raise Exception("stack is exhausted")
return self.frames[0] | Remove and return the frame at the top of the stack.
:returns: The top frame
:rtype: Frame
:raises Exception: If there are no frames on the stack | Below is the instruction that describes the task:
### Input:
Remove and return the frame at the top of the stack.
:returns: The top frame
:rtype: Frame
:raises Exception: If there are no frames on the stack
### Response:
def pop_frame(self):
"""
Remove and return the frame at the top of the stack.
:returns: The top frame
:rtype: Frame
:raises Exception: If there are no frames on the stack
"""
self.frames.pop(0)
if len(self.frames) == 0:
raise Exception("stack is exhausted")
return self.frames[0] |
def eval(self, script, numkeys, *keys_and_args):
"""Emulate eval"""
sha = self.script_load(script)
return self.evalsha(sha, numkeys, *keys_and_args) | Emulate eval | Below is the instruction that describes the task:
### Input:
Emulate eval
### Response:
def eval(self, script, numkeys, *keys_and_args):
"""Emulate eval"""
sha = self.script_load(script)
return self.evalsha(sha, numkeys, *keys_and_args) |
def put(self, key, value, ttl=0):
"""
Associates the specified value with the specified key in this map. If the map previously contained a mapping for
the key, the old value is replaced by the specified value. If ttl is provided, entry will expire and get evicted
after the ttl.
:param key: (object), the specified key.
:param value: (object), the value to associate with the key.
:param ttl: (int), maximum time in seconds for this entry to stay, if not provided, the value configured on
server side configuration will be used(optional).
:return: (object), previous value associated with key or None if there was no mapping for key.
"""
check_not_none(key, "key can't be None")
check_not_none(key, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(replicated_map_put_codec, key_data, key=key_data, value=value_data,
ttl=to_millis(ttl)) | Associates the specified value with the specified key in this map. If the map previously contained a mapping for
the key, the old value is replaced by the specified value. If ttl is provided, entry will expire and get evicted
after the ttl.
:param key: (object), the specified key.
:param value: (object), the value to associate with the key.
:param ttl: (int), maximum time in seconds for this entry to stay, if not provided, the value configured on
server side configuration will be used (optional).
:return: (object), previous value associated with key or None if there was no mapping for key. | Below is the the instruction that describes the task:
### Input:
Associates the specified value with the specified key in this map. If the map previously contained a mapping for
the key, the old value is replaced by the specified value. If ttl is provided, entry will expire and get evicted
after the ttl.
:param key: (object), the specified key.
:param value: (object), the value to associate with the key.
:param ttl: (int), maximum time in seconds for this entry to stay, if not provided, the value configured on
server side configuration will be used (optional).
:return: (object), previous value associated with key or None if there was no mapping for key.
### Response:
def put(self, key, value, ttl=0):
"""
Associates the specified value with the specified key in this map. If the map previously contained a mapping for
the key, the old value is replaced by the specified value. If ttl is provided, entry will expire and get evicted
after the ttl.
:param key: (object), the specified key.
:param value: (object), the value to associate with the key.
:param ttl: (int), maximum time in seconds for this entry to stay, if not provided, the value configured on
server side configuration will be used (optional).
:return: (object), previous value associated with key or None if there was no mapping for key.
"""
check_not_none(key, "key can't be None")
check_not_none(key, "value can't be None")
key_data = self._to_data(key)
value_data = self._to_data(value)
return self._encode_invoke_on_key(replicated_map_put_codec, key_data, key=key_data, value=value_data,
ttl=to_millis(ttl)) |
def write_worker(q_out, fname, working_dir):
"""Function that will be spawned to fetch processed image
from the output queue and write to the .rec file.
Parameters
----------
q_out: queue
fname: string
working_dir: string
"""
pre_time = time.time()
count = 0
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
buf = {}
more = True
while more:
deq = q_out.get()
if deq is not None:
i, s, item = deq
buf[i] = (s, item)
else:
more = False
while count in buf:
s, item = buf[count]
del buf[count]
if s is not None:
record.write_idx(item[0], s)
if count % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', count)
pre_time = cur_time
count += 1 | Function that will be spawned to fetch processed image
from the output queue and write to the .rec file.
Parameters
----------
q_out: queue
fname: string
working_dir: string | Below is the the instruction that describes the task:
### Input:
Function that will be spawned to fetch processed image
from the output queue and write to the .rec file.
Parameters
----------
q_out: queue
fname: string
working_dir: string
### Response:
def write_worker(q_out, fname, working_dir):
"""Function that will be spawned to fetch processed image
from the output queue and write to the .rec file.
Parameters
----------
q_out: queue
fname: string
working_dir: string
"""
pre_time = time.time()
count = 0
fname = os.path.basename(fname)
fname_rec = os.path.splitext(fname)[0] + '.rec'
fname_idx = os.path.splitext(fname)[0] + '.idx'
record = mx.recordio.MXIndexedRecordIO(os.path.join(working_dir, fname_idx),
os.path.join(working_dir, fname_rec), 'w')
buf = {}
more = True
while more:
deq = q_out.get()
if deq is not None:
i, s, item = deq
buf[i] = (s, item)
else:
more = False
while count in buf:
s, item = buf[count]
del buf[count]
if s is not None:
record.write_idx(item[0], s)
if count % 1000 == 0:
cur_time = time.time()
print('time:', cur_time - pre_time, ' count:', count)
pre_time = cur_time
count += 1 |
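
The core idea in the entry above is the reordering buffer: results arrive out of order from worker processes, get parked in buf keyed by their sequence number, and are flushed strictly in increasing order of count. A self-contained sketch of that pattern, with a plain print standing in for the MXNet record writer, is:

import queue

q_out = queue.Queue()
for item in [(2, "c"), (0, "a"), (3, "d"), (1, "b"), None]:  # None marks end of input
    q_out.put(item)

buf = {}     # holds results that arrived ahead of their turn
count = 0    # next sequence number allowed to be written
more = True
while more:
    deq = q_out.get()
    if deq is not None:
        seq, payload = deq
        buf[seq] = payload
    else:
        more = False
    while count in buf:                  # drain everything contiguous with `count`
        print(count, buf.pop(count))     # the real code calls record.write_idx(...) here
        count += 1
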
def processReqDuringBatch(
self,
req: Request,
cons_time: int):
"""
This method will do dynamic validation and apply requests.
If there are any errors during validation, they will be raised
"""
if self.isMaster:
self.node.doDynamicValidation(req)
self.node.applyReq(req, cons_time) | This method will do dynamic validation and apply requests.
If there are any errors during validation, they will be raised | Below is the the instruction that describes the task:
### Input:
This method will do dynamic validation and apply requests.
If there are any errors during validation, they will be raised
### Response:
def processReqDuringBatch(
self,
req: Request,
cons_time: int):
"""
This method will do dynamic validation and apply requests.
If there are any errors during validation, they will be raised
"""
if self.isMaster:
self.node.doDynamicValidation(req)
self.node.applyReq(req, cons_time) |
def get_projects(self):
""" Get the projects list from database """
repos_list = []
gerrit_projects_db = self.projects_db
db = Database(user="root", passwd="", host="localhost", port=3306,
scrdb=None, shdb=gerrit_projects_db, prjdb=None)
sql = """
SELECT DISTINCT(repository_name)
FROM project_repositories
WHERE data_source='scr'
"""
repos_list_raw = db.execute(sql)
# Convert from review.openstack.org_openstack/rpm-packaging-tools to
# openstack_rpm-packaging-tools
for repo in repos_list_raw:
# repo_name = repo[0].replace("review.openstack.org_","")
repo_name = repo[0].replace(self.repository + "_", "")
repos_list.append(repo_name)
return repos_list | Get the projects list from database | Below is the the instruction that describes the task:
### Input:
Get the projects list from database
### Response:
def get_projects(self):
""" Get the projects list from database """
repos_list = []
gerrit_projects_db = self.projects_db
db = Database(user="root", passwd="", host="localhost", port=3306,
scrdb=None, shdb=gerrit_projects_db, prjdb=None)
sql = """
SELECT DISTINCT(repository_name)
FROM project_repositories
WHERE data_source='scr'
"""
repos_list_raw = db.execute(sql)
# Convert from review.openstack.org_openstack/rpm-packaging-tools to
# openstack_rpm-packaging-tools
for repo in repos_list_raw:
# repo_name = repo[0].replace("review.openstack.org_","")
repo_name = repo[0].replace(self.repository + "_", "")
repos_list.append(repo_name)
return repos_list |
def _load_plugins(self):
'''
Sets up all plugins, defaults and settings.py
'''
plugins = self.settings['PLUGINS']
self.plugins_dict = {}
for key in plugins:
# skip loading the plugin if its value is None
if plugins[key] is None:
continue
# valid plugin, import and setup
self.logger.debug("Trying to load plugin {cls}".format(cls=key))
the_class = self._import_class(key)
instance = the_class()
instance._set_logger(self.logger)
if not self.unit_test:
instance.setup(self.settings)
the_schema = None
with open(self.settings['PLUGIN_DIR'] + instance.schema) as the_file:
the_schema = json.load(the_file)
mini = {}
mini['instance'] = instance
mini['schema'] = the_schema
self.logger.debug("Successfully loaded plugin {cls}".format(cls=key))
self.plugins_dict[plugins[key]] = mini
self.plugins_dict = OrderedDict(sorted(list(self.plugins_dict.items()),
key=lambda t: t[0])) | Sets up all plugins, defaults and settings.py | Below is the the instruction that describes the task:
### Input:
Sets up all plugins, defaults and settings.py
### Response:
def _load_plugins(self):
'''
Sets up all plugins, defaults and settings.py
'''
plugins = self.settings['PLUGINS']
self.plugins_dict = {}
for key in plugins:
# skip loading the plugin if its value is None
if plugins[key] is None:
continue
# valid plugin, import and setup
self.logger.debug("Trying to load plugin {cls}".format(cls=key))
the_class = self._import_class(key)
instance = the_class()
instance._set_logger(self.logger)
if not self.unit_test:
instance.setup(self.settings)
the_schema = None
with open(self.settings['PLUGIN_DIR'] + instance.schema) as the_file:
the_schema = json.load(the_file)
mini = {}
mini['instance'] = instance
mini['schema'] = the_schema
self.logger.debug("Successfully loaded plugin {cls}".format(cls=key))
self.plugins_dict[plugins[key]] = mini
self.plugins_dict = OrderedDict(sorted(list(self.plugins_dict.items()),
key=lambda t: t[0])) |
def __startSearch(self):
"""Starts HyperSearch as a worker or runs it inline for the "dryRun" action
Parameters:
----------------------------------------------------------------------
retval: the new _HyperSearchJob instance representing the
HyperSearch job
"""
# This search uses a pre-existing permutations script
params = _ClientJobUtils.makeSearchJobParamsDict(options=self._options,
forRunning=True)
if self._options["action"] == "dryRun":
args = [sys.argv[0], "--params=%s" % (json.dumps(params))]
print
print "=================================================================="
print "RUNNING PERMUTATIONS INLINE as \"DRY RUN\"..."
print "=================================================================="
jobID = hypersearch_worker.main(args)
else:
cmdLine = _setUpExports(self._options["exports"])
# Begin the new search. The {JOBID} string is replaced by the actual
# jobID returned from jobInsert.
cmdLine += "$HYPERSEARCH"
maxWorkers = self._options["maxWorkers"]
jobID = self.__cjDAO.jobInsert(
client="GRP",
cmdLine=cmdLine,
params=json.dumps(params),
minimumWorkers=1,
maximumWorkers=maxWorkers,
jobType=self.__cjDAO.JOB_TYPE_HS)
cmdLine = "python -m nupic.swarming.hypersearch_worker" \
" --jobID=%d" % (jobID)
self._launchWorkers(cmdLine, maxWorkers)
searchJob = _HyperSearchJob(jobID)
# Save search ID to file (this is used for report generation)
self.__saveHyperSearchJobID(
permWorkDir=self._options["permWorkDir"],
outputLabel=self._options["outputLabel"],
hyperSearchJob=searchJob)
if self._options["action"] == "dryRun":
print "Successfully executed \"dry-run\" hypersearch, jobID=%d" % (jobID)
else:
print "Successfully submitted new HyperSearch job, jobID=%d" % (jobID)
_emit(Verbosity.DEBUG,
"Each worker executing the command line: %s" % (cmdLine,))
return searchJob | Starts HyperSearch as a worker or runs it inline for the "dryRun" action
Parameters:
----------------------------------------------------------------------
retval: the new _HyperSearchJob instance representing the
HyperSearch job | Below is the the instruction that describes the task:
### Input:
Starts HyperSearch as a worker or runs it inline for the "dryRun" action
Parameters:
----------------------------------------------------------------------
retval: the new _HyperSearchJob instance representing the
HyperSearch job
### Response:
def __startSearch(self):
"""Starts HyperSearch as a worker or runs it inline for the "dryRun" action
Parameters:
----------------------------------------------------------------------
retval: the new _HyperSearchJob instance representing the
HyperSearch job
"""
# This search uses a pre-existing permutations script
params = _ClientJobUtils.makeSearchJobParamsDict(options=self._options,
forRunning=True)
if self._options["action"] == "dryRun":
args = [sys.argv[0], "--params=%s" % (json.dumps(params))]
print
print "=================================================================="
print "RUNNING PERMUTATIONS INLINE as \"DRY RUN\"..."
print "=================================================================="
jobID = hypersearch_worker.main(args)
else:
cmdLine = _setUpExports(self._options["exports"])
# Begin the new search. The {JOBID} string is replaced by the actual
# jobID returned from jobInsert.
cmdLine += "$HYPERSEARCH"
maxWorkers = self._options["maxWorkers"]
jobID = self.__cjDAO.jobInsert(
client="GRP",
cmdLine=cmdLine,
params=json.dumps(params),
minimumWorkers=1,
maximumWorkers=maxWorkers,
jobType=self.__cjDAO.JOB_TYPE_HS)
cmdLine = "python -m nupic.swarming.hypersearch_worker" \
" --jobID=%d" % (jobID)
self._launchWorkers(cmdLine, maxWorkers)
searchJob = _HyperSearchJob(jobID)
# Save search ID to file (this is used for report generation)
self.__saveHyperSearchJobID(
permWorkDir=self._options["permWorkDir"],
outputLabel=self._options["outputLabel"],
hyperSearchJob=searchJob)
if self._options["action"] == "dryRun":
print "Successfully executed \"dry-run\" hypersearch, jobID=%d" % (jobID)
else:
print "Successfully submitted new HyperSearch job, jobID=%d" % (jobID)
_emit(Verbosity.DEBUG,
"Each worker executing the command line: %s" % (cmdLine,))
return searchJob |
def getSubgraphFieldCount(self, parent_name, graph_name):
"""Returns number of fields for subgraph with name graph_name and parent
graph with name parent_name.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@return: Number of fields for subgraph.
"""
graph = self._getSubGraph(parent_name, graph_name, True)
return graph.getFieldCount() | Returns number of fields for subgraph with name graph_name and parent
graph with name parent_name.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@return: Number of fields for subgraph. | Below is the the instruction that describes the task:
### Input:
Returns number of fields for subgraph with name graph_name and parent
graph with name parent_name.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@return: Number of fields for subgraph.
### Response:
def getSubgraphFieldCount(self, parent_name, graph_name):
"""Returns number of fields for subgraph with name graph_name and parent
graph with name parent_name.
@param parent_name: Root Graph Name
@param graph_name: Subgraph Name
@return: Number of fields for subgraph.
"""
graph = self._getSubGraph(parent_name, graph_name, True)
return graph.getFieldCount() |
def do_plot(args):
""" Create plots of mcmc output """
import ugali.utils.plotting
import pylab as plt
config,name,label,coord = args
filenames = make_filenames(config,label)
srcfile = filenames['srcfile']
samfile = filenames['samfile']
memfile = filenames['memfile']
if not exists(srcfile):
logger.warning("Couldn't find %s; skipping..."%srcfile)
return
if not exists(samfile):
logger.warning("Couldn't find %s; skipping..."%samfile)
return
config = ugali.utils.config.Config(config)
burn = config['mcmc']['nburn']*config['mcmc']['nwalkers']
source = ugali.analysis.source.Source()
source.load(srcfile,section='source')
outfile = samfile.replace('.npy','.png')
ugali.utils.plotting.plotTriangle(srcfile,samfile,burn=burn)
logger.info(" Writing %s..."%outfile)
plt.savefig(outfile,bbox_inches='tight',dpi=60)
plt.close()
plotter = ugali.utils.plotting.SourcePlotter(source,config,radius=0.5)
data = fitsio.read(memfile,trim_strings=True) if exists(memfile) else None
if data is not None:
plt.figure()
kernel,isochrone = source.kernel,source.isochrone
ugali.utils.plotting.plotMembership(config,data,kernel,isochrone)
outfile = samfile.replace('.npy','_mem.png')
logger.info(" Writing %s..."%outfile)
plt.savefig(outfile,bbox_inches='tight',dpi=60)
plt.close()
plotter.plot6(data)
outfile = samfile.replace('.npy','_6panel.png')
logger.info(" Writing %s..."%outfile)
plt.savefig(outfile,bbox_inches='tight',dpi=60)
outfile = samfile.replace('.npy','_6panel.pdf')
logger.info(" Writing %s..."%outfile)
plt.savefig(outfile,bbox_inches='tight',dpi=60)
plt.close()
try:
title = name
plotter.plot4()
outfile = samfile.replace('.npy','_4panel.png')
logger.info(" Writing %s..."%outfile)
plt.suptitle(title)
plt.savefig(outfile,bbox_inches='tight',dpi=60)
plt.close()
except:
logger.warning(" Failed to create plotter.plot4()") | Create plots of mcmc output | Below is the the instruction that describes the task:
### Input:
Create plots of mcmc output
### Response:
def do_plot(args):
""" Create plots of mcmc output """
import ugali.utils.plotting
import pylab as plt
config,name,label,coord = args
filenames = make_filenames(config,label)
srcfile = filenames['srcfile']
samfile = filenames['samfile']
memfile = filenames['memfile']
if not exists(srcfile):
logger.warning("Couldn't find %s; skipping..."%srcfile)
return
if not exists(samfile):
logger.warning("Couldn't find %s; skipping..."%samfile)
return
config = ugali.utils.config.Config(config)
burn = config['mcmc']['nburn']*config['mcmc']['nwalkers']
source = ugali.analysis.source.Source()
source.load(srcfile,section='source')
outfile = samfile.replace('.npy','.png')
ugali.utils.plotting.plotTriangle(srcfile,samfile,burn=burn)
logger.info(" Writing %s..."%outfile)
plt.savefig(outfile,bbox_inches='tight',dpi=60)
plt.close()
plotter = ugali.utils.plotting.SourcePlotter(source,config,radius=0.5)
data = fitsio.read(memfile,trim_strings=True) if exists(memfile) else None
if data is not None:
plt.figure()
kernel,isochrone = source.kernel,source.isochrone
ugali.utils.plotting.plotMembership(config,data,kernel,isochrone)
outfile = samfile.replace('.npy','_mem.png')
logger.info(" Writing %s..."%outfile)
plt.savefig(outfile,bbox_inches='tight',dpi=60)
plt.close()
plotter.plot6(data)
outfile = samfile.replace('.npy','_6panel.png')
logger.info(" Writing %s..."%outfile)
plt.savefig(outfile,bbox_inches='tight',dpi=60)
outfile = samfile.replace('.npy','_6panel.pdf')
logger.info(" Writing %s..."%outfile)
plt.savefig(outfile,bbox_inches='tight',dpi=60)
plt.close()
try:
title = name
plotter.plot4()
outfile = samfile.replace('.npy','_4panel.png')
logger.info(" Writing %s..."%outfile)
plt.suptitle(title)
plt.savefig(outfile,bbox_inches='tight',dpi=60)
plt.close()
except:
logger.warning(" Failed to create plotter.plot4()") |
def Query(self, query):
"""Queries the database.
Args:
query (str): SQL query.
Returns:
sqlite3.Cursor: results.
Raises:
sqlite3.DatabaseError: if querying the database fails.
"""
cursor = self._database.cursor()
cursor.execute(query)
return cursor | Queries the database.
Args:
query (str): SQL query.
Returns:
sqlite3.Cursor: results.
Raises:
sqlite3.DatabaseError: if querying the database fails. | Below is the the instruction that describes the task:
### Input:
Queries the database.
Args:
query (str): SQL query.
Returns:
sqlite3.Cursor: results.
Raises:
sqlite3.DatabaseError: if querying the database fails.
### Response:
def Query(self, query):
"""Queries the database.
Args:
query (str): SQL query.
Returns:
sqlite3.Cursor: results.
Raises:
sqlite3.DatabaseError: if querying the database fails.
"""
cursor = self._database.cursor()
cursor.execute(query)
return cursor |
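
Since the wrapper above simply delegates to the standard sqlite3 cursor, its behaviour can be pictured with the stdlib alone; the in-memory database and sample rows below are made up for illustration.

import sqlite3

database = sqlite3.connect(":memory:")   # stands in for self._database
setup = database.cursor()
setup.execute("CREATE TABLE t (id INTEGER, name TEXT)")
setup.executemany("INSERT INTO t VALUES (?, ?)", [(1, "a"), (2, "b")])

# Equivalent of Query("SELECT ..."): execute() returns the cursor, so the
# caller can iterate it or call fetchall()/fetchone() on the returned object.
cursor = database.cursor()
cursor.execute("SELECT id, name FROM t ORDER BY id")
print(cursor.fetchall())   # [(1, 'a'), (2, 'b')]
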
def compute_classpath_entries(cls, targets, classpath_products, extra_classpath_tuples, confs):
"""Return the list of classpath entries for a classpath covering the passed targets.
Filters and adds paths from extra_classpath_tuples to the end of the resulting list.
:param targets: The targets to generate a classpath for.
:param ClasspathProducts classpath_products: Product containing classpath elements.
:param extra_classpath_tuples: Additional classpath entries as tuples of
(string, ClasspathEntry).
:param confs: The list of confs for use by this classpath.
:returns: The classpath entries as a list of path elements.
:rtype: list of ClasspathEntry
"""
classpath_iter = cls._classpath_iter(
classpath_products.get_classpath_entries_for_targets(targets),
confs=confs,
)
total_classpath = OrderedSet(classpath_iter)
filtered_extra_classpath_iter = cls._filtered_classpath_by_confs_iter(
extra_classpath_tuples,
confs,
)
extra_classpath_iter = cls._entries_iter(filtered_extra_classpath_iter)
total_classpath.update(extra_classpath_iter)
return list(total_classpath) | Return the list of classpath entries for a classpath covering the passed targets.
Filters and adds paths from extra_classpath_tuples to the end of the resulting list.
:param targets: The targets to generate a classpath for.
:param ClasspathProducts classpath_products: Product containing classpath elements.
:param extra_classpath_tuples: Additional classpath entries as tuples of
(string, ClasspathEntry).
:param confs: The list of confs for use by this classpath.
:returns: The classpath entries as a list of path elements.
:rtype: list of ClasspathEntry | Below is the the instruction that describes the task:
### Input:
Return the list of classpath entries for a classpath covering the passed targets.
Filters and adds paths from extra_classpath_tuples to the end of the resulting list.
:param targets: The targets to generate a classpath for.
:param ClasspathProducts classpath_products: Product containing classpath elements.
:param extra_classpath_tuples: Additional classpath entries as tuples of
(string, ClasspathEntry).
:param confs: The list of confs for use by this classpath.
:returns: The classpath entries as a list of path elements.
:rtype: list of ClasspathEntry
### Response:
def compute_classpath_entries(cls, targets, classpath_products, extra_classpath_tuples, confs):
"""Return the list of classpath entries for a classpath covering the passed targets.
Filters and adds paths from extra_classpath_tuples to the end of the resulting list.
:param targets: The targets to generate a classpath for.
:param ClasspathProducts classpath_products: Product containing classpath elements.
:param extra_classpath_tuples: Additional classpath entries as tuples of
(string, ClasspathEntry).
:param confs: The list of confs for use by this classpath.
:returns: The classpath entries as a list of path elements.
:rtype: list of ClasspathEntry
"""
classpath_iter = cls._classpath_iter(
classpath_products.get_classpath_entries_for_targets(targets),
confs=confs,
)
total_classpath = OrderedSet(classpath_iter)
filtered_extra_classpath_iter = cls._filtered_classpath_by_confs_iter(
extra_classpath_tuples,
confs,
)
extra_classpath_iter = cls._entries_iter(filtered_extra_classpath_iter)
total_classpath.update(extra_classpath_iter)
return list(total_classpath) |
def image_height(image):
"""
Returns the height of the image found at the path supplied by `image`
relative to your project's images directory.
"""
image_size_cache = _get_cache('image_size_cache')
if not Image:
raise SassMissingDependency('PIL', 'image manipulation')
filepath = String.unquoted(image).value
path = None
try:
height = image_size_cache[filepath][1]
except KeyError:
height = 0
IMAGES_ROOT = _images_root()
if callable(IMAGES_ROOT):
try:
_file, _storage = list(IMAGES_ROOT(filepath))[0]
except IndexError:
pass
else:
path = _storage.open(_file)
else:
_path = os.path.join(IMAGES_ROOT, filepath.strip(os.sep))
if os.path.exists(_path):
path = open(_path, 'rb')
if path:
image = Image.open(path)
size = image.size
height = size[1]
image_size_cache[filepath] = size
return Number(height, 'px') | Returns the height of the image found at the path supplied by `image`
relative to your project's images directory. | Below is the the instruction that describes the task:
### Input:
Returns the height of the image found at the path supplied by `image`
relative to your project's images directory.
### Response:
def image_height(image):
"""
Returns the height of the image found at the path supplied by `image`
relative to your project's images directory.
"""
image_size_cache = _get_cache('image_size_cache')
if not Image:
raise SassMissingDependency('PIL', 'image manipulation')
filepath = String.unquoted(image).value
path = None
try:
height = image_size_cache[filepath][1]
except KeyError:
height = 0
IMAGES_ROOT = _images_root()
if callable(IMAGES_ROOT):
try:
_file, _storage = list(IMAGES_ROOT(filepath))[0]
except IndexError:
pass
else:
path = _storage.open(_file)
else:
_path = os.path.join(IMAGES_ROOT, filepath.strip(os.sep))
if os.path.exists(_path):
path = open(_path, 'rb')
if path:
image = Image.open(path)
size = image.size
height = size[1]
image_size_cache[filepath] = size
return Number(height, 'px') |
def decode_async_options(options):
"""Decode Async options from JSON decoding."""
async_options = copy.deepcopy(options)
# JSON doesn't like datetimes.
eta = async_options.get('task_args', {}).get('eta')
if eta:
from datetime import datetime
async_options['task_args']['eta'] = datetime.fromtimestamp(eta)
# If there are callbacks, reconstitute them.
callbacks = async_options.get('callbacks', {})
if callbacks:
async_options['callbacks'] = decode_callbacks(callbacks)
if '__context_checker' in options:
_checker = options['__context_checker']
async_options['_context_checker'] = path_to_reference(_checker)
if '__process_results' in options:
_processor = options['__process_results']
async_options['_process_results'] = path_to_reference(_processor)
return async_options | Decode Async options from JSON decoding. | Below is the the instruction that describes the task:
### Input:
Decode Async options from JSON decoding.
### Response:
def decode_async_options(options):
"""Decode Async options from JSON decoding."""
async_options = copy.deepcopy(options)
# JSON doesn't like datetimes.
eta = async_options.get('task_args', {}).get('eta')
if eta:
from datetime import datetime
async_options['task_args']['eta'] = datetime.fromtimestamp(eta)
# If there are callbacks, reconstitute them.
callbacks = async_options.get('callbacks', {})
if callbacks:
async_options['callbacks'] = decode_callbacks(callbacks)
if '__context_checker' in options:
_checker = options['__context_checker']
async_options['_context_checker'] = path_to_reference(_checker)
if '__process_results' in options:
_processor = options['__process_results']
async_options['_process_results'] = path_to_reference(_processor)
return async_options |
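
The eta handling above is the usual epoch-seconds round trip, needed because JSON has no native datetime type; a minimal stdlib illustration with an arbitrary timestamp:

import json
from datetime import datetime

eta = datetime(2030, 1, 1, 12, 0, 0)
encoded = json.dumps({"task_args": {"eta": eta.timestamp()}})   # datetime -> float seconds
options = json.loads(encoded)
options["task_args"]["eta"] = datetime.fromtimestamp(options["task_args"]["eta"])
print(options["task_args"]["eta"])   # 2030-01-01 12:00:00
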
def to_clipboard(self, excel=True, sep=None, **kwargs):
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs) | r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6 | Below is the the instruction that describes the task:
### Input:
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
### Response:
def to_clipboard(self, excel=True, sep=None, **kwargs):
r"""
Copy object to the system clipboard.
Write a text representation of object to the system clipboard.
This can be pasted into Excel, for example.
Parameters
----------
excel : bool, default True
- True, use the provided separator, writing in a csv format for
allowing easy pasting into excel.
- False, write a string representation of the object to the
clipboard.
sep : str, default ``'\t'``
Field delimiter.
**kwargs
These parameters will be passed to DataFrame.to_csv.
See Also
--------
DataFrame.to_csv : Write a DataFrame to a comma-separated values
(csv) file.
read_clipboard : Read text from clipboard and pass to read_table.
Notes
-----
Requirements for your platform.
- Linux : `xclip`, or `xsel` (with `gtk` or `PyQt4` modules)
- Windows : none
- OS X : none
Examples
--------
Copy the contents of a DataFrame to the clipboard.
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], columns=['A', 'B', 'C'])
>>> df.to_clipboard(sep=',')
... # Wrote the following to the system clipboard:
... # ,A,B,C
... # 0,1,2,3
... # 1,4,5,6
We can omit the index by passing the keyword `index` and setting
it to false.
>>> df.to_clipboard(sep=',', index=False)
... # Wrote the following to the system clipboard:
... # A,B,C
... # 1,2,3
... # 4,5,6
"""
from pandas.io import clipboards
clipboards.to_clipboard(self, excel=excel, sep=sep, **kwargs) |
def update_devices(self, selection=None):
"""Determines the order, in which the |Node| and |Element| objects
currently handled by the |HydPy| objects need to be processed during
a simulation time step. Optionally, a |Selection| object for defining
new |Node| and |Element| objects can be passed."""
if selection is not None:
self.nodes = selection.nodes
self.elements = selection.elements
self._update_deviceorder() | Determines the order in which the |Node| and |Element| objects
currently handled by the |HydPy| objects need to be processed during
a simulation time step. Optionally, a |Selection| object for defining
new |Node| and |Element| objects can be passed. | Below is the the instruction that describes the task:
### Input:
Determines the order in which the |Node| and |Element| objects
currently handled by the |HydPy| objects need to be processed during
a simulation time step. Optionally, a |Selection| object for defining
new |Node| and |Element| objects can be passed.
### Response:
def update_devices(self, selection=None):
"""Determines the order, in which the |Node| and |Element| objects
currently handled by the |HydPy| objects need to be processed during
a simulation time step. Optionally, a |Selection| object for defining
new |Node| and |Element| objects can be passed."""
if selection is not None:
self.nodes = selection.nodes
self.elements = selection.elements
self._update_deviceorder() |
def rename(self, channel_name, new_name):
""" https://api.slack.com/methods/channels.rename
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({
'channel': channel_id,
'name': new_name,
})
return FromUrl('https://slack.com/api/channels.rename', self._requests)(data=self.params).post() | https://api.slack.com/methods/channels.rename | Below is the the instruction that describes the task:
### Input:
https://api.slack.com/methods/channels.rename
### Response:
def rename(self, channel_name, new_name):
""" https://api.slack.com/methods/channels.rename
"""
channel_id = self.get_channel_id(channel_name)
self.params.update({
'channel': channel_id,
'name': new_name,
})
return FromUrl('https://slack.com/api/channels.rename', self._requests)(data=self.params).post() |
def _adjust_n_years(other, n, month, reference_day):
"""Adjust the number of times an annual offset is applied based on
another date, and the reference day provided"""
if n > 0:
if other.month < month or (other.month == month and
other.day < reference_day):
n -= 1
else:
if other.month > month or (other.month == month and
other.day > reference_day):
n += 1
return n | Adjust the number of times an annual offset is applied based on
another date, and the reference day provided | Below is the the instruction that describes the task:
### Input:
Adjust the number of times an annual offset is applied based on
another date, and the reference day provided
### Response:
def _adjust_n_years(other, n, month, reference_day):
"""Adjust the number of times an annual offset is applied based on
another date, and the reference day provided"""
if n > 0:
if other.month < month or (other.month == month and
other.day < reference_day):
n -= 1
else:
if other.month > month or (other.month == month and
other.day > reference_day):
n += 1
return n |
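
A short worked example of the adjustment above, using an illustrative March-31 anchor: starting from a date that has not yet reached the anchor in its own year, the first forward step lands on this year's anchor, so the count is decremented by one.

import datetime

other = datetime.date(2024, 1, 15)   # arbitrary example date before the anchor
n, month, reference_day = 1, 3, 31   # +1 year relative to a March-31 anchor
if n > 0:
    if other.month < month or (other.month == month and other.day < reference_day):
        n -= 1
print(n)   # 0 -> only the current year's March 31 remains to be applied
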
def stack(self, k=5, stratify=False, shuffle=True, seed=100, full_test=True, add_diff=False):
"""Stacks sequence of models.
Parameters
----------
k : int, default 5
Number of folds.
stratify : bool, default False
shuffle : bool, default True
seed : int, default 100
full_test : bool, default True
If True then evaluate test dataset on the full data otherwise take the mean of every fold.
add_diff : bool, default False
Returns
-------
`DataFrame`
Examples
--------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> stack_ds = pipeline.stack(k=10, seed=111)
"""
result_train = []
result_test = []
y = None
for model in self.models:
result = model.stack(k=k, stratify=stratify, shuffle=shuffle, seed=seed, full_test=full_test)
train_df = pd.DataFrame(result.X_train, columns=generate_columns(result.X_train, model.name))
test_df = pd.DataFrame(result.X_test, columns=generate_columns(result.X_test, model.name))
result_train.append(train_df)
result_test.append(test_df)
if y is None:
y = result.y_train
result_train = pd.concat(result_train, axis=1)
result_test = pd.concat(result_test, axis=1)
if add_diff:
result_train = feature_combiner(result_train)
result_test = feature_combiner(result_test)
ds = Dataset(X_train=result_train, y_train=y, X_test=result_test)
return ds | Stacks sequence of models.
Parameters
----------
k : int, default 5
Number of folds.
stratify : bool, default False
shuffle : bool, default True
seed : int, default 100
full_test : bool, default True
If True then evaluate test dataset on the full data otherwise take the mean of every fold.
add_diff : bool, default False
Returns
-------
`DataFrame`
Examples
--------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> stack_ds = pipeline.stack(k=10, seed=111) | Below is the the instruction that describes the task:
### Input:
Stacks sequence of models.
Parameters
----------
k : int, default 5
Number of folds.
stratify : bool, default False
shuffle : bool, default True
seed : int, default 100
full_test : bool, default True
If True then evaluate test dataset on the full data otherwise take the mean of every fold.
add_diff : bool, default False
Returns
-------
`DataFrame`
Examples
--------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> stack_ds = pipeline.stack(k=10, seed=111)
### Response:
def stack(self, k=5, stratify=False, shuffle=True, seed=100, full_test=True, add_diff=False):
"""Stacks sequence of models.
Parameters
----------
k : int, default 5
Number of folds.
stratify : bool, default False
shuffle : bool, default True
seed : int, default 100
full_test : bool, default True
If True then evaluate test dataset on the full data otherwise take the mean of every fold.
add_diff : bool, default False
Returns
-------
`DataFrame`
Examples
--------
>>> pipeline = ModelsPipeline(model_rf,model_lr)
>>> stack_ds = pipeline.stack(k=10, seed=111)
"""
result_train = []
result_test = []
y = None
for model in self.models:
result = model.stack(k=k, stratify=stratify, shuffle=shuffle, seed=seed, full_test=full_test)
train_df = pd.DataFrame(result.X_train, columns=generate_columns(result.X_train, model.name))
test_df = pd.DataFrame(result.X_test, columns=generate_columns(result.X_test, model.name))
result_train.append(train_df)
result_test.append(test_df)
if y is None:
y = result.y_train
result_train = pd.concat(result_train, axis=1)
result_test = pd.concat(result_test, axis=1)
if add_diff:
result_train = feature_combiner(result_train)
result_test = feature_combiner(result_test)
ds = Dataset(X_train=result_train, y_train=y, X_test=result_test)
return ds |
def write(self, w, val):
"""
Writes remaining part of TVP_TYPE_INFO structure, resuming from TVP_COLMETADATA
specs:
https://msdn.microsoft.com/en-us/library/dd302994.aspx
https://msdn.microsoft.com/en-us/library/dd305261.aspx
https://msdn.microsoft.com/en-us/library/dd303230.aspx
@param w: TdsWriter
@param val: TableValuedParam or None
@return:
"""
if val.is_null():
w.put_usmallint(tds_base.TVP_NULL_TOKEN)
else:
columns = self._table_type.columns
w.put_usmallint(len(columns))
for i, column in enumerate(columns):
w.put_uint(column.column_usertype)
w.put_usmallint(column.flags)
# TYPE_INFO structure: https://msdn.microsoft.com/en-us/library/dd358284.aspx
serializer = self._columns_serializers[i]
type_id = serializer.type
w.put_byte(type_id)
serializer.write_info(w)
w.write_b_varchar('') # ColName, must be empty in TVP according to spec
# here can optionally send TVP_ORDER_UNIQUE and TVP_COLUMN_ORDERING
# https://msdn.microsoft.com/en-us/library/dd305261.aspx
# terminating optional metadata
w.put_byte(tds_base.TVP_END_TOKEN)
# now sending rows using TVP_ROW
# https://msdn.microsoft.com/en-us/library/dd305261.aspx
if val.rows:
for row in val.rows:
w.put_byte(tds_base.TVP_ROW_TOKEN)
for i, col in enumerate(self._table_type.columns):
if not col.flags & tds_base.TVP_COLUMN_DEFAULT_FLAG:
self._columns_serializers[i].write(w, row[i])
# terminating rows
w.put_byte(tds_base.TVP_END_TOKEN) | Writes remaining part of TVP_TYPE_INFO structure, resuming from TVP_COLMETADATA
specs:
https://msdn.microsoft.com/en-us/library/dd302994.aspx
https://msdn.microsoft.com/en-us/library/dd305261.aspx
https://msdn.microsoft.com/en-us/library/dd303230.aspx
@param w: TdsWriter
@param val: TableValuedParam or None
@return: | Below is the the instruction that describes the task:
### Input:
Writes remaining part of TVP_TYPE_INFO structure, resuming from TVP_COLMETADATA
specs:
https://msdn.microsoft.com/en-us/library/dd302994.aspx
https://msdn.microsoft.com/en-us/library/dd305261.aspx
https://msdn.microsoft.com/en-us/library/dd303230.aspx
@param w: TdsWriter
@param val: TableValuedParam or None
@return:
### Response:
def write(self, w, val):
"""
Writes remaining part of TVP_TYPE_INFO structure, resuming from TVP_COLMETADATA
specs:
https://msdn.microsoft.com/en-us/library/dd302994.aspx
https://msdn.microsoft.com/en-us/library/dd305261.aspx
https://msdn.microsoft.com/en-us/library/dd303230.aspx
@param w: TdsWriter
@param val: TableValuedParam or None
@return:
"""
if val.is_null():
w.put_usmallint(tds_base.TVP_NULL_TOKEN)
else:
columns = self._table_type.columns
w.put_usmallint(len(columns))
for i, column in enumerate(columns):
w.put_uint(column.column_usertype)
w.put_usmallint(column.flags)
# TYPE_INFO structure: https://msdn.microsoft.com/en-us/library/dd358284.aspx
serializer = self._columns_serializers[i]
type_id = serializer.type
w.put_byte(type_id)
serializer.write_info(w)
w.write_b_varchar('') # ColName, must be empty in TVP according to spec
# here can optionally send TVP_ORDER_UNIQUE and TVP_COLUMN_ORDERING
# https://msdn.microsoft.com/en-us/library/dd305261.aspx
# terminating optional metadata
w.put_byte(tds_base.TVP_END_TOKEN)
# now sending rows using TVP_ROW
# https://msdn.microsoft.com/en-us/library/dd305261.aspx
if val.rows:
for row in val.rows:
w.put_byte(tds_base.TVP_ROW_TOKEN)
for i, col in enumerate(self._table_type.columns):
if not col.flags & tds_base.TVP_COLUMN_DEFAULT_FLAG:
self._columns_serializers[i].write(w, row[i])
# terminating rows
w.put_byte(tds_base.TVP_END_TOKEN) |
def leader_get(attribute=None, rid=None):
"""Wrapper to ensure that settings are migrated from the peer relation.
This is to support upgrading an environment that does not support
Juju leadership election to one that does.
If a setting is not extant in the leader-get but is on the relation-get
peer rel, it is migrated and marked as such so that it is not re-migrated.
"""
migration_key = '__leader_get_migrated_settings__'
if not is_leader():
return _leader_get(attribute=attribute)
settings_migrated = False
leader_settings = _leader_get(attribute=attribute)
previously_migrated = _leader_get(attribute=migration_key)
if previously_migrated:
migrated = set(json.loads(previously_migrated))
else:
migrated = set([])
try:
if migration_key in leader_settings:
del leader_settings[migration_key]
except TypeError:
pass
if attribute:
if attribute in migrated:
return leader_settings
# If attribute not present in leader db, check if this unit has set
# the attribute in the peer relation
if not leader_settings:
peer_setting = _relation_get(attribute=attribute, unit=local_unit(),
rid=rid)
if peer_setting:
leader_set(settings={attribute: peer_setting})
leader_settings = peer_setting
if leader_settings:
settings_migrated = True
migrated.add(attribute)
else:
r_settings = _relation_get(unit=local_unit(), rid=rid)
if r_settings:
for key in set(r_settings.keys()).difference(migrated):
# Leader setting wins
if not leader_settings.get(key):
leader_settings[key] = r_settings[key]
settings_migrated = True
migrated.add(key)
if settings_migrated:
leader_set(**leader_settings)
if migrated and settings_migrated:
migrated = json.dumps(list(migrated))
leader_set(settings={migration_key: migrated})
return leader_settings | Wrapper to ensure that settings are migrated from the peer relation.
This is to support upgrading an environment that does not support
Juju leadership election to one that does.
If a setting is not extant in the leader-get but is on the relation-get
peer rel, it is migrated and marked as such so that it is not re-migrated. | Below is the the instruction that describes the task:
### Input:
Wrapper to ensure that settings are migrated from the peer relation.
This is to support upgrading an environment that does not support
Juju leadership election to one that does.
If a setting is not extant in the leader-get but is on the relation-get
peer rel, it is migrated and marked as such so that it is not re-migrated.
### Response:
def leader_get(attribute=None, rid=None):
"""Wrapper to ensure that settings are migrated from the peer relation.
This is to support upgrading an environment that does not support
Juju leadership election to one that does.
If a setting is not extant in the leader-get but is on the relation-get
peer rel, it is migrated and marked as such so that it is not re-migrated.
"""
migration_key = '__leader_get_migrated_settings__'
if not is_leader():
return _leader_get(attribute=attribute)
settings_migrated = False
leader_settings = _leader_get(attribute=attribute)
previously_migrated = _leader_get(attribute=migration_key)
if previously_migrated:
migrated = set(json.loads(previously_migrated))
else:
migrated = set([])
try:
if migration_key in leader_settings:
del leader_settings[migration_key]
except TypeError:
pass
if attribute:
if attribute in migrated:
return leader_settings
# If attribute not present in leader db, check if this unit has set
# the attribute in the peer relation
if not leader_settings:
peer_setting = _relation_get(attribute=attribute, unit=local_unit(),
rid=rid)
if peer_setting:
leader_set(settings={attribute: peer_setting})
leader_settings = peer_setting
if leader_settings:
settings_migrated = True
migrated.add(attribute)
else:
r_settings = _relation_get(unit=local_unit(), rid=rid)
if r_settings:
for key in set(r_settings.keys()).difference(migrated):
# Leader setting wins
if not leader_settings.get(key):
leader_settings[key] = r_settings[key]
settings_migrated = True
migrated.add(key)
if settings_migrated:
leader_set(**leader_settings)
if migrated and settings_migrated:
migrated = json.dumps(list(migrated))
leader_set(settings={migration_key: migrated})
return leader_settings |
def readlines(self):
"""Read a command from the terminal.
Returns a list of tokens containing the user's input.
"""
continuation = False
while True:
yield self.readline(continuation)
continuation = True | Read a command from the terminal.
Returns a list of tokens containing the user's input. | Below is the the instruction that describes the task:
### Input:
Read a command from the terminal.
Returns a list of tokens containing the user's input.
### Response:
def readlines(self):
"""Read a command from the terminal.
Returns a list of tokens containing the user's input.
"""
continuation = False
while True:
yield self.readline(continuation)
continuation = True |
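
To make the continuation-flag pattern above concrete, here is a self-contained stand-in whose readline is faked with a fixed list of inputs; the prompts and class name are invented for the sketch.

class FakeShell:
    def __init__(self, lines):
        self._lines = iter(lines)

    def readline(self, continuation):
        prompt = "... " if continuation else ">>> "
        text = next(self._lines)
        print(prompt + text)
        return text.split()          # "tokens" for the purposes of the sketch

    def readlines(self):
        continuation = False
        while True:
            yield self.readline(continuation)
            continuation = True

reader = FakeShell(["select *", "from logs"]).readlines()
print(next(reader))   # first line: primary prompt, continuation=False
print(next(reader))   # every later line: continuation prompt
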
def _calculate_influence(self, neighborhood):
"""
Pre-calculate the influence for a given value of sigma.
The neighborhood has size num_neurons * num_neurons, so for a
30 * 30 map, the neighborhood will be size (900, 900).
Parameters
----------
neighborhood : float
The neighborhood value.
Returns
-------
neighborhood : numpy array
The influence from each neuron to each other neuron.
"""
n = (self.beta - 1) * np.log(1 + neighborhood*(np.e-1)) + 1
grid = np.exp((-self.distance_grid) / n**2)
return grid.reshape(self.num_neurons, self.num_neurons)[:, :, None] | Pre-calculate the influence for a given value of sigma.
The neighborhood has size num_neurons * num_neurons, so for a
30 * 30 map, the neighborhood will be size (900, 900).
Parameters
----------
neighborhood : float
The neighborhood value.
Returns
-------
neighborhood : numpy array
The influence from each neuron to each other neuron. | Below is the the instruction that describes the task:
### Input:
Pre-calculate the influence for a given value of sigma.
The neighborhood has size num_neurons * num_neurons, so for a
30 * 30 map, the neighborhood will be size (900, 900).
Parameters
----------
neighborhood : float
The neighborhood value.
Returns
-------
neighborhood : numpy array
The influence from each neuron to each other neuron.
### Response:
def _calculate_influence(self, neighborhood):
"""
Pre-calculate the influence for a given value of sigma.
The neighborhood has size num_neurons * num_neurons, so for a
30 * 30 map, the neighborhood will be size (900, 900).
Parameters
----------
neighborhood : float
The neighborhood value.
Returns
-------
neighborhood : numpy array
The influence from each neuron to each other neuron.
"""
n = (self.beta - 1) * np.log(1 + neighborhood*(np.e-1)) + 1
grid = np.exp((-self.distance_grid) / n**2)
return grid.reshape(self.num_neurons, self.num_neurons)[:, :, None] |
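
Numerically, the entry above first squashes the neighborhood value through a beta-controlled log curve and then converts squared distances into influence weights; a small sketch with invented numbers (three neurons on a line, beta = 2) is:

import numpy as np

beta, neighborhood, num_neurons = 2.0, 0.5, 3
# Hypothetical flattened grid of squared distances between 3 neurons.
distance_grid = np.array([[0, 1, 4],
                          [1, 0, 1],
                          [4, 1, 0]], dtype=float).ravel()

n = (beta - 1) * np.log(1 + neighborhood * (np.e - 1)) + 1
influence = np.exp(-distance_grid / n**2).reshape(num_neurons, num_neurons)[:, :, None]
print(influence.shape)             # (3, 3, 1)
print(influence[:, :, 0].round(3))
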
def _import_data(self, import_header_only=False):
"""Import data from an epw file.
Hourly data will be saved in self.data and the various header data
will be saved in the properties above.
"""
# perform checks on the file before opening it.
assert os.path.isfile(self._file_path), 'Cannot find an epw file at {}'.format(
self._file_path)
assert self._file_path.lower().endswith('epw'), '{} is not an .epw file. \n' \
'It does not possess the .epw file extension.'.format(self._file_path)
with open(self._file_path, readmode) as epwin:
line = epwin.readline()
original_header_load = bool(self._is_header_loaded)
if not self._is_header_loaded:
# import location data
# first line has location data - Here is an example
# LOCATION,Denver Golden Nr,CO,USA,TMY3,724666,39.74,-105.18,-7.0,1829.0
location_data = line.strip().split(',')
self._location = Location()
self._location.city = location_data[1].replace('\\', ' ') \
.replace('/', ' ')
self._location.state = location_data[2]
self._location.country = location_data[3]
self._location.source = location_data[4]
self._location.station_id = location_data[5]
self._location.latitude = location_data[6]
self._location.longitude = location_data[7]
self._location.time_zone = location_data[8]
self._location.elevation = location_data[9]
# assemble a dictionary of metadata
self._metadata = {
'source': self._location.source,
'country': self._location.country,
'city': self._location.city
}
self._header = [line] + [epwin.readline() for i in xrange(7)]
# parse the heating, cooling and extreme design conditions.
dday_data = self._header[1].strip().split(',')
if len(dday_data) >= 2 and int(dday_data[1]) == 1:
if dday_data[4] == 'Heating':
for key, val in zip(DesignDay.heating_keys, dday_data[5:20]):
self._heating_dict[key] = val
if dday_data[20] == 'Cooling':
for key, val in zip(DesignDay.cooling_keys, dday_data[21:53]):
self._cooling_dict[key] = val
if dday_data[53] == 'Extremes':
for key, val in zip(DesignDay.extreme_keys, dday_data[54:70]):
self._extremes_dict[key] = val
# parse typical and extreme periods into analysis periods.
week_data = self._header[2].split(',')
num_weeks = int(week_data[1]) if len(week_data) >= 2 else 0
st_ind = 2
for i in xrange(num_weeks):
week_dat = week_data[st_ind:st_ind + 4]
st_ind += 4
st = [int(num) for num in week_dat[2].split('/')]
end = [int(num) for num in week_dat[3].split('/')]
if len(st) == 3:
a_per = AnalysisPeriod(st[1], st[2], 0, end[1], end[2], 23)
elif len(st) == 2:
a_per = AnalysisPeriod(st[0], st[1], 0, end[0], end[1], 23)
if 'Max' in week_dat[0] and week_dat[1] == 'Extreme':
self._extreme_hot_weeks[week_dat[0]] = a_per
elif 'Min' in week_dat[0] and week_dat[1] == 'Extreme':
self._extreme_cold_weeks[week_dat[0]] = a_per
elif week_dat[1] == 'Typical':
self._typical_weeks[week_dat[0]] = a_per
# parse the monthly ground temperatures in the header.
grnd_data = self._header[3].strip().split(',')
num_depths = int(grnd_data[1]) if len(grnd_data) >= 2 else 0
st_ind = 2
for i in xrange(num_depths):
header_meta = dict(self._metadata) # copying the metadata dictionary
header_meta['depth'] = float(grnd_data[st_ind])
header_meta['soil conductivity'] = grnd_data[st_ind + 1]
header_meta['soil density'] = grnd_data[st_ind + 2]
header_meta['soil specific heat'] = grnd_data[st_ind + 3]
grnd_header = Header(temperature.GroundTemperature(), 'C',
AnalysisPeriod(), header_meta)
grnd_vlas = [float(x) for x in grnd_data[st_ind + 4: st_ind + 16]]
self._monthly_ground_temps[float(grnd_data[st_ind])] = \
MonthlyCollection(grnd_header, grnd_vlas, list(xrange(12)))
st_ind += 16
# parse leap year, daylight savings and comments.
leap_dl_sav = self._header[4].strip().split(',')
self._is_leap_year = True if leap_dl_sav[1] == 'Yes' else False
self.daylight_savings_start = leap_dl_sav[2]
self.daylight_savings_end = leap_dl_sav[3]
comments_1 = self._header[5].strip().split(',')
if len(comments_1) > 0:
self.comments_1 = ','.join(comments_1[1:])
comments_2 = self._header[6].strip().split(',')
if len(comments_2) > 0:
self.comments_2 = ','.join(comments_2[1:])
self._is_header_loaded = True
if import_header_only:
return
# read first line of data to overwrite the number of fields
if original_header_load is True:
for i in xrange(7):
epwin.readline()
line = epwin.readline()
self._num_of_fields = min(len(line.strip().split(',')), 35)
# create an annual analysis period
analysis_period = AnalysisPeriod(is_leap_year=self.is_leap_year)
# create headers and an empty list for each field in epw file
headers = []
for field_number in xrange(self._num_of_fields):
field = EPWFields.field_by_number(field_number)
header = Header(data_type=field.name, unit=field.unit,
analysis_period=analysis_period,
metadata=dict(self._metadata))
headers.append(header)
self._data.append([])
# collect hourly data
while line:
data = line.strip().split(',')
for field_number in xrange(self._num_of_fields):
value_type = EPWFields.field_by_number(field_number).value_type
try:
value = value_type(data[field_number])
except ValueError as e:
# failed to convert the value for the specific TypeError
if value_type != int:
raise ValueError(e)
value = int(round(float(data[field_number])))
self._data[field_number].append(value)
line = epwin.readline()
# if the first value is at 1 AM, move last item to start position
for field_number in xrange(self._num_of_fields):
point_in_time = headers[field_number].data_type.point_in_time
if point_in_time is True:
# move the last hour to first position
last_hour = self._data[field_number].pop()
self._data[field_number].insert(0, last_hour)
# finally, build the data collection objects from the headers and data
for i in xrange(self._num_of_fields):
self._data[i] = HourlyContinuousCollection(headers[i], self._data[i])
self._is_data_loaded = True | Import data from an epw file.
Hourly data will be saved in self.data and the various header data
will be saved in the properties above. | Below is the the instruction that describes the task:
### Input:
Import data from an epw file.
Hourly data will be saved in self.data and the various header data
will be saved in the properties above.
### Response:
def _import_data(self, import_header_only=False):
"""Import data from an epw file.
Hourly data will be saved in self.data and the various header data
will be saved in the properties above.
"""
# perform checks on the file before opening it.
assert os.path.isfile(self._file_path), 'Cannot find an epw file at {}'.format(
self._file_path)
assert self._file_path.lower().endswith('epw'), '{} is not an .epw file. \n' \
'It does not possess the .epw file extension.'.format(self._file_path)
with open(self._file_path, readmode) as epwin:
line = epwin.readline()
original_header_load = bool(self._is_header_loaded)
if not self._is_header_loaded:
# import location data
# first line has location data - Here is an example
# LOCATION,Denver Golden Nr,CO,USA,TMY3,724666,39.74,-105.18,-7.0,1829.0
location_data = line.strip().split(',')
self._location = Location()
self._location.city = location_data[1].replace('\\', ' ') \
.replace('/', ' ')
self._location.state = location_data[2]
self._location.country = location_data[3]
self._location.source = location_data[4]
self._location.station_id = location_data[5]
self._location.latitude = location_data[6]
self._location.longitude = location_data[7]
self._location.time_zone = location_data[8]
self._location.elevation = location_data[9]
# assemble a dictionary of metadata
self._metadata = {
'source': self._location.source,
'country': self._location.country,
'city': self._location.city
}
self._header = [line] + [epwin.readline() for i in xrange(7)]
# parse the heating, cooling and extreme design conditions.
dday_data = self._header[1].strip().split(',')
if len(dday_data) >= 2 and int(dday_data[1]) == 1:
if dday_data[4] == 'Heating':
for key, val in zip(DesignDay.heating_keys, dday_data[5:20]):
self._heating_dict[key] = val
if dday_data[20] == 'Cooling':
for key, val in zip(DesignDay.cooling_keys, dday_data[21:53]):
self._cooling_dict[key] = val
if dday_data[53] == 'Extremes':
for key, val in zip(DesignDay.extreme_keys, dday_data[54:70]):
self._extremes_dict[key] = val
# parse typical and extreme periods into analysis periods.
week_data = self._header[2].split(',')
num_weeks = int(week_data[1]) if len(week_data) >= 2 else 0
st_ind = 2
for i in xrange(num_weeks):
week_dat = week_data[st_ind:st_ind + 4]
st_ind += 4
st = [int(num) for num in week_dat[2].split('/')]
end = [int(num) for num in week_dat[3].split('/')]
if len(st) == 3:
a_per = AnalysisPeriod(st[1], st[2], 0, end[1], end[2], 23)
elif len(st) == 2:
a_per = AnalysisPeriod(st[0], st[1], 0, end[0], end[1], 23)
if 'Max' in week_dat[0] and week_dat[1] == 'Extreme':
self._extreme_hot_weeks[week_dat[0]] = a_per
elif 'Min' in week_dat[0] and week_dat[1] == 'Extreme':
self._extreme_cold_weeks[week_dat[0]] = a_per
elif week_dat[1] == 'Typical':
self._typical_weeks[week_dat[0]] = a_per
# parse the monthly ground temperatures in the header.
grnd_data = self._header[3].strip().split(',')
num_depths = int(grnd_data[1]) if len(grnd_data) >= 2 else 0
st_ind = 2
for i in xrange(num_depths):
header_meta = dict(self._metadata) # copying the metadata dictionary
header_meta['depth'] = float(grnd_data[st_ind])
header_meta['soil conductivity'] = grnd_data[st_ind + 1]
header_meta['soil density'] = grnd_data[st_ind + 2]
header_meta['soil specific heat'] = grnd_data[st_ind + 3]
grnd_header = Header(temperature.GroundTemperature(), 'C',
AnalysisPeriod(), header_meta)
grnd_vlas = [float(x) for x in grnd_data[st_ind + 4: st_ind + 16]]
self._monthly_ground_temps[float(grnd_data[st_ind])] = \
MonthlyCollection(grnd_header, grnd_vlas, list(xrange(12)))
st_ind += 16
# parse leap year, daylight savings and comments.
leap_dl_sav = self._header[4].strip().split(',')
self._is_leap_year = True if leap_dl_sav[1] == 'Yes' else False
self.daylight_savings_start = leap_dl_sav[2]
self.daylight_savings_end = leap_dl_sav[3]
comments_1 = self._header[5].strip().split(',')
if len(comments_1) > 0:
self.comments_1 = ','.join(comments_1[1:])
comments_2 = self._header[6].strip().split(',')
if len(comments_2) > 0:
self.comments_2 = ','.join(comments_2[1:])
self._is_header_loaded = True
if import_header_only:
return
# read first line of data to overwrite the number of fields
if original_header_load is True:
for i in xrange(7):
epwin.readline()
line = epwin.readline()
self._num_of_fields = min(len(line.strip().split(',')), 35)
# create an annual analysis period
analysis_period = AnalysisPeriod(is_leap_year=self.is_leap_year)
# create headers and an empty list for each field in epw file
headers = []
for field_number in xrange(self._num_of_fields):
field = EPWFields.field_by_number(field_number)
header = Header(data_type=field.name, unit=field.unit,
analysis_period=analysis_period,
metadata=dict(self._metadata))
headers.append(header)
self._data.append([])
# collect hourly data
while line:
data = line.strip().split(',')
for field_number in xrange(self._num_of_fields):
value_type = EPWFields.field_by_number(field_number).value_type
try:
value = value_type(data[field_number])
except ValueError as e:
# failed to convert the value; for int fields, fall back to rounding the float
if value_type != int:
raise ValueError(e)
value = int(round(float(data[field_number])))
self._data[field_number].append(value)
line = epwin.readline()
# if the first value is at 1 AM, move last item to start position
for field_number in xrange(self._num_of_fields):
point_in_time = headers[field_number].data_type.point_in_time
if point_in_time is True:
# move the last hour to first position
last_hour = self._data[field_number].pop()
self._data[field_number].insert(0, last_hour)
# finally, build the data collection objects from the headers and data
for i in xrange(self._num_of_fields):
self._data[i] = HourlyContinuousCollection(headers[i], self._data[i])
self._is_data_loaded = True |
def id_fix(value):
""" fix @prefix values for ttl """
if value.startswith('KSC_M'):
pass
else:
value = value.replace(':','_')
if value.startswith('ERO') or value.startswith('OBI') or value.startswith('GO') or value.startswith('UBERON') or value.startswith('IAO'):
value = 'obo:' + value
elif value.startswith('birnlex') or value.startswith('nlx'):
value = 'NIFSTD:' + value
elif value.startswith('MESH'):
value = ':'.join(value.split('_'))
else:
value = ':' + value
return OntId(value).URIRef | fix @prefix values for ttl | Below is the the instruction that describes the task:
### Input:
fix @prefix values for ttl
### Response:
def id_fix(value):
""" fix @prefix values for ttl """
if value.startswith('KSC_M'):
pass
else:
value = value.replace(':','_')
if value.startswith('ERO') or value.startswith('OBI') or value.startswith('GO') or value.startswith('UBERON') or value.startswith('IAO'):
value = 'obo:' + value
elif value.startswith('birnlex') or value.startswith('nlx'):
value = 'NIFSTD:' + value
elif value.startswith('MESH'):
value = ':'.join(value.split('_'))
else:
value = ':' + value
return OntId(value).URIRef |
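For illustration, the standalone sketch below mirrors the branching in id_fix but returns plain CURIE strings instead of OntId(...).URIRef objects; the example identifiers are made up.
def fix_prefix(value):
    # Same prefix-normalization rules as above, minus the OntId dependency.
    if value.startswith('KSC_M'):
        return value
    value = value.replace(':', '_')
    if value.startswith(('ERO', 'OBI', 'GO', 'UBERON', 'IAO')):
        return 'obo:' + value
    if value.startswith(('birnlex', 'nlx')):
        return 'NIFSTD:' + value
    if value.startswith('MESH'):
        return ':'.join(value.split('_'))
    return ':' + value
print(fix_prefix('UBERON:0000955'))  # obo:UBERON_0000955
print(fix_prefix('MESH:D009369'))    # MESH:D009369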
def AAAA(host, nameserver=None):
'''
Return the AAAA record(s) for ``host``.
Always returns a list.
.. versionadded:: 2014.7.5
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.AAAA www.google.com
'''
if _has_dig():
return __salt__['dig.AAAA'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET6, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available' | Return the AAAA record(s) for ``host``.
Always returns a list.
.. versionadded:: 2014.7.5
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.AAAA www.google.com | Below is the the instruction that describes the task:
### Input:
Return the AAAA record(s) for ``host``.
Always returns a list.
.. versionadded:: 2014.7.5
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.AAAA www.google.com
### Response:
def AAAA(host, nameserver=None):
'''
Return the AAAA record(s) for ``host``.
Always returns a list.
.. versionadded:: 2014.7.5
CLI Example:
.. code-block:: bash
salt ns1 dnsutil.AAAA www.google.com
'''
if _has_dig():
return __salt__['dig.AAAA'](host, nameserver)
elif nameserver is None:
# fall back to the socket interface, if we don't care who resolves
try:
addresses = [sock[4][0] for sock in socket.getaddrinfo(host, None, socket.AF_INET6, 0, socket.SOCK_RAW)]
return addresses
except socket.gaierror:
return 'Unable to resolve {0}'.format(host)
return 'This function requires dig, which is not currently available' |
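The socket-based fallback branch above can be exercised on its own; the sketch below is an illustrative rewrite that uses only the standard library and returns an empty list instead of an error string when resolution fails.
import socket
def resolve_aaaa(host):
    try:
        infos = socket.getaddrinfo(host, None, socket.AF_INET6)
        # the AF_INET6 sockaddr tuple is (address, port, flow_info, scope_id)
        return sorted({info[4][0] for info in infos})
    except socket.gaierror:
        return []
print(resolve_aaaa('localhost'))  # typically ['::1'] on IPv6-enabled hosts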
def optimize(self, piter=3, pmaxf=300, ppert=0.1):
'''
Runs :py:obj:`pPLD` on the target in an attempt to further optimize the
values of the PLD priors. See :py:class:`everest.detrender.pPLD`.
'''
self._save_npz()
optimized = pPLD(self.ID, piter=piter, pmaxf=pmaxf,
ppert=ppert, debug=True, clobber=True)
optimized.publish()
self.reset() | Runs :py:obj:`pPLD` on the target in an attempt to further optimize the
values of the PLD priors. See :py:class:`everest.detrender.pPLD`. | Below is the the instruction that describes the task:
### Input:
Runs :py:obj:`pPLD` on the target in an attempt to further optimize the
values of the PLD priors. See :py:class:`everest.detrender.pPLD`.
### Response:
def optimize(self, piter=3, pmaxf=300, ppert=0.1):
'''
Runs :py:obj:`pPLD` on the target in an attempt to further optimize the
values of the PLD priors. See :py:class:`everest.detrender.pPLD`.
'''
self._save_npz()
optimized = pPLD(self.ID, piter=piter, pmaxf=pmaxf,
ppert=ppert, debug=True, clobber=True)
optimized.publish()
self.reset() |
def generate_data_key(key_id, encryption_context=None, number_of_bytes=None,
key_spec=None, grant_tokens=None, region=None, key=None,
keyid=None, profile=None):
'''
Generate a secure data key.
CLI example::
salt myminion boto_kms.generate_data_key 'alias/mykey' number_of_bytes=1024 key_spec=AES_128
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
data_key = conn.generate_data_key(
key_id,
encryption_context=encryption_context,
number_of_bytes=number_of_bytes,
key_spec=key_spec,
grant_tokens=grant_tokens
)
r['data_key'] = data_key
except boto.exception.BotoServerError as e:
r['error'] = __utils__['boto.get_error'](e)
return r | Generate a secure data key.
CLI example::
salt myminion boto_kms.generate_data_key 'alias/mykey' number_of_bytes=1024 key_spec=AES_128 | Below is the the instruction that describes the task:
### Input:
Generate a secure data key.
CLI example::
salt myminion boto_kms.generate_data_key 'alias/mykey' number_of_bytes=1024 key_spec=AES_128
### Response:
def generate_data_key(key_id, encryption_context=None, number_of_bytes=None,
key_spec=None, grant_tokens=None, region=None, key=None,
keyid=None, profile=None):
'''
Generate a secure data key.
CLI example::
salt myminion boto_kms.generate_data_key 'alias/mykey' number_of_bytes=1024 key_spec=AES_128
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
data_key = conn.generate_data_key(
key_id,
encryption_context=encryption_context,
number_of_bytes=number_of_bytes,
key_spec=key_spec,
grant_tokens=grant_tokens
)
r['data_key'] = data_key
except boto.exception.BotoServerError as e:
r['error'] = __utils__['boto.get_error'](e)
return r |
def get_intent_filters(self, itemtype, name):
"""
Find intent filters for a given item and name.
Intent filters are attached to activities, services or receivers.
You can search for the intent filters of such items and get a dictionary of all
attached actions and intent categories.
:param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
:param name: the `android:name` of the parent item, e.g. activity name
:returns: a dictionary with the keys `action` and `category` containing the `android:name` of those items
"""
d = {"action": [], "category": []}
for i in self.xml:
# TODO: this can probably be solved using a single xpath
for item in self.xml[i].findall(".//" + itemtype):
if self._format_value(item.get(self._ns("name"))) == name:
for sitem in item.findall(".//intent-filter"):
for ssitem in sitem.findall("action"):
if ssitem.get(self._ns("name")) not in d["action"]:
d["action"].append(ssitem.get(self._ns("name")))
for ssitem in sitem.findall("category"):
if ssitem.get(self._ns("name")) not in d["category"]:
d["category"].append(ssitem.get(self._ns("name")))
if not d["action"]:
del d["action"]
if not d["category"]:
del d["category"]
return d | Find intent filters for a given item and name.
Intent filters are attached to activities, services or receivers.
You can search for the intent filters of such items and get a dictionary of all
attached actions and intent categories.
:param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
:param name: the `android:name` of the parent item, e.g. activity name
:returns: a dictionary with the keys `action` and `category` containing the `android:name` of those items | Below is the the instruction that describes the task:
### Input:
Find intent filters for a given item and name.
Intent filters are attached to activities, services or receivers.
You can search for the intent filters of such items and get a dictionary of all
attached actions and intent categories.
:param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
:param name: the `android:name` of the parent item, e.g. activity name
:returns: a dictionary with the keys `action` and `category` containing the `android:name` of those items
### Response:
def get_intent_filters(self, itemtype, name):
"""
Find intent filters for a given item and name.
Intent filters are attached to activities, services or receivers.
You can search for the intent filters of such items and get a dictionary of all
attached actions and intent categories.
:param itemtype: the type of parent item to look for, e.g. `activity`, `service` or `receiver`
:param name: the `android:name` of the parent item, e.g. activity name
:returns: a dictionary with the keys `action` and `category` containing the `android:name` of those items
"""
d = {"action": [], "category": []}
for i in self.xml:
# TODO: this can probably be solved using a single xpath
for item in self.xml[i].findall(".//" + itemtype):
if self._format_value(item.get(self._ns("name"))) == name:
for sitem in item.findall(".//intent-filter"):
for ssitem in sitem.findall("action"):
if ssitem.get(self._ns("name")) not in d["action"]:
d["action"].append(ssitem.get(self._ns("name")))
for ssitem in sitem.findall("category"):
if ssitem.get(self._ns("name")) not in d["category"]:
d["category"].append(ssitem.get(self._ns("name")))
if not d["action"]:
del d["action"]
if not d["category"]:
del d["category"]
return d |
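For illustration, the self-contained sketch below applies the same idea with the standard-library ElementTree on a tiny inline manifest (the real method walks parsed APK XML); the manifest text and the intent_filters helper are invented for the example.
import xml.etree.ElementTree as ET
ANDROID_NS = 'http://schemas.android.com/apk/res/android'
MANIFEST = """
<manifest xmlns:android="http://schemas.android.com/apk/res/android">
  <application>
    <activity android:name=".MainActivity">
      <intent-filter>
        <action android:name="android.intent.action.MAIN"/>
        <category android:name="android.intent.category.LAUNCHER"/>
      </intent-filter>
    </activity>
  </application>
</manifest>
"""
def intent_filters(xml_text, itemtype, name):
    # Collect action/category names from intent-filters of the named item.
    root = ET.fromstring(xml_text)
    ns_name = '{%s}name' % ANDROID_NS
    found = {'action': [], 'category': []}
    for item in root.iter(itemtype):
        if item.get(ns_name) != name:
            continue
        for intent in item.iter('intent-filter'):
            for tag in ('action', 'category'):
                for child in intent.iter(tag):
                    value = child.get(ns_name)
                    if value not in found[tag]:
                        found[tag].append(value)
    return {k: v for k, v in found.items() if v}
print(intent_filters(MANIFEST, 'activity', '.MainActivity'))
# {'action': ['android.intent.action.MAIN'], 'category': ['android.intent.category.LAUNCHER']}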
def sentence_bytes(self, sentence):
""" Return bytes of a sentence.
This is a very simple parser. Sentence is a list of strings and numbers.
1st element of sentence MUST match a token.
"""
result = [TOKENS[sentence[0]]]
for i in sentence[1:]: # Remaining bytes
if isinstance(i, str):
result.extend(self.literal(i))
elif isinstance(i, float) or isinstance(i, int): # A number?
result.extend(self.number(i))
else:
result.extend(i) # Must be another thing
return result | Return bytes of a sentence.
This is a very simple parser. Sentence is a list of strings and numbers.
1st element of sentence MUST match a token. | Below is the the instruction that describes the task:
### Input:
Return bytes of a sentence.
This is a very simple parser. Sentence is a list of strings and numbers.
1st element of sentence MUST match a token.
### Response:
def sentence_bytes(self, sentence):
""" Return bytes of a sentence.
This is a very simple parser. Sentence is a list of strings and numbers.
1st element of sentence MUST match a token.
"""
result = [TOKENS[sentence[0]]]
for i in sentence[1:]: # Remaining bytes
if isinstance(i, str):
result.extend(self.literal(i))
elif isinstance(i, float) or isinstance(i, int): # A number?
result.extend(self.number(i))
else:
result.extend(i) # Must be another thing
return result |
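A toy, self-contained illustration of the same encoding pattern follows; the TOKENS table and the literal/number helpers are invented for the example and do not reflect any real token set.
TOKENS = {'PRINT': 0xF5, 'GOTO': 0xEC}
def literal(text):
    # Strings are emitted as their ASCII byte values.
    return list(text.encode('ascii'))
def number(value):
    # Simplistic: emit the number's ASCII representation.
    return list(str(value).encode('ascii'))
def toy_sentence_bytes(sentence):
    # First element must be a token; the rest are strings, numbers or raw bytes.
    result = [TOKENS[sentence[0]]]
    for item in sentence[1:]:
        if isinstance(item, str):
            result.extend(literal(item))
        elif isinstance(item, (int, float)):
            result.extend(number(item))
        else:
            result.extend(item)
    return result
print(toy_sentence_bytes(['PRINT', '"HI"']))  # [245, 34, 72, 73, 34]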
def set_plot_CO_mass(self,fig=3123,xaxis='mass',linestyle=['-'],marker=['o'],color=['r'],age_years=True,sparsity=500,markersparsity=200,withoutZlabel=False,t0_model=[]):
'''
Plots C/O surface number fraction
'''
if len(t0_model)==0:
t0_model = len(self.runs_H5_surf)*[0]
plt.figure(fig)
for i in range(len(self.runs_H5_surf)):
sefiles=se(self.runs_H5_surf[i])
cycles=range(int(sefiles.se.cycles[0]),int(sefiles.se.cycles[-1]),sparsity)
mini=sefiles.get("mini")
zini=sefiles.get("zini")
label=str(mini)+'$M_{\odot}$, Z='+str(zini)
if xaxis=='cycles':
x=cycles
if xaxis=='age':
x=sefiles.get(cycles,'age')
if age_years==True:
x=np.array(x)*sefiles.get('age_unit')/(365*24*3600)
x = x - x[t0_model[i]]
if xaxis=='mass':
x=sefiles.get(cycles,'mass')
x=x[t0_model[i]:]
c12=sefiles.get(cycles,'C-12')[t0_model[i]:]
o16=sefiles.get(cycles,'O-16')[t0_model[i]:]
if withoutZlabel==True:
plt.plot(x,4./3.*np.array(c12)/np.array(o16),label=label.split(',')[0],marker=marker[i],linestyle=linestyle[i],markevery=markersparsity,color=color[i])
else:
plt.plot(x,4./3.*np.array(c12)/np.array(o16),label=label,marker=marker[i],linestyle=linestyle[i],markevery=markersparsity,color=color[i])
if xaxis=='mass':
plt.xlim(7,0.5)
#plt.gca().invert_xaxis()
plt.xlabel('$M/M_{\odot}$',fontsize=18)
plt.ylabel('C/O Ratio', fontsize=18)
plt.legend(loc=1) | Plots C/O surface number fraction | Below is the the instruction that describes the task:
### Input:
Plots C/O surface number fraction
### Response:
def set_plot_CO_mass(self,fig=3123,xaxis='mass',linestyle=['-'],marker=['o'],color=['r'],age_years=True,sparsity=500,markersparsity=200,withoutZlabel=False,t0_model=[]):
'''
Plots C/O surface number fraction
'''
if len(t0_model)==0:
t0_model = len(self.runs_H5_surf)*[0]
plt.figure(fig)
for i in range(len(self.runs_H5_surf)):
sefiles=se(self.runs_H5_surf[i])
cycles=range(int(sefiles.se.cycles[0]),int(sefiles.se.cycles[-1]),sparsity)
mini=sefiles.get("mini")
zini=sefiles.get("zini")
label=str(mini)+'$M_{\odot}$, Z='+str(zini)
if xaxis=='cycles':
x=cycles
if xaxis=='age':
x=sefiles.get(cycles,'age')
if age_years==True:
x=np.array(x)*sefiles.get('age_unit')/(365*24*3600)
x = x - x[t0_model[i]]
if xaxis=='mass':
x=sefiles.get(cycles,'mass')
x=x[t0_model[i]:]
c12=sefiles.get(cycles,'C-12')[t0_model[i]:]
o16=sefiles.get(cycles,'O-16')[t0_model[i]:]
if withoutZlabel==True:
plt.plot(x,4./3.*np.array(c12)/np.array(o16),label=label.split(',')[0],marker=marker[i],linestyle=linestyle[i],markevery=markersparsity,color=color[i])
else:
plt.plot(x,4./3.*np.array(c12)/np.array(o16),label=label,marker=marker[i],linestyle=linestyle[i],markevery=markersparsity,color=color[i])
if xaxis=='mass':
plt.xlim(7,0.5)
#plt.gca().invert_xaxis()
plt.xlabel('$M/M_{\odot}$',fontsize=18)
plt.ylabel('C/O Ratio', fontsize=18)
plt.legend(loc=1) |
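The quantity being plotted above can be computed in isolation; the sketch below derives the C/O number ratio from C-12 and O-16 mass fractions (the factor 4/3 converts the mass-fraction ratio into a number ratio), using made-up sample values.
import numpy as np
def co_number_ratio(x_c12, x_o16):
    # C/O number ratio from C-12 and O-16 surface mass fractions.
    return 4.0 / 3.0 * np.asarray(x_c12) / np.asarray(x_o16)
x_c12 = [2.5e-3, 4.0e-3, 8.0e-3]   # hypothetical surface mass fractions
x_o16 = [6.0e-3, 6.0e-3, 6.0e-3]
print(co_number_ratio(x_c12, x_o16))  # roughly [0.556, 0.889, 1.778]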
def __calculate_nearest_distance(self, index_cluster1, index_cluster2):
"""!
@brief Finds two nearest objects in two specified clusters and returns distance between them.
@param[in] (uint) Index of the first cluster.
@param[in] (uint) Index of the second cluster.
@return The nearest euclidean distance between two clusters.
"""
candidate_minimum_distance = float('Inf');
for index_object1 in self.__clusters[index_cluster1]:
for index_object2 in self.__clusters[index_cluster2]:
distance = euclidean_distance_square(self.__pointer_data[index_object1], self.__pointer_data[index_object2]);
if (distance < candidate_minimum_distance):
candidate_minimum_distance = distance;
return candidate_minimum_distance; | !
@brief Finds two nearest objects in two specified clusters and returns distance between them.
@param[in] (uint) Index of the first cluster.
@param[in] (uint) Index of the second cluster.
@return The nearest euclidean distance between two clusters. | Below is the the instruction that describes the task:
### Input:
!
@brief Finds two nearest objects in two specified clusters and returns distance between them.
@param[in] (uint) Index of the first cluster.
@param[in] (uint) Index of the second cluster.
@return The nearest euclidean distance between two clusters.
### Response:
def __calculate_nearest_distance(self, index_cluster1, index_cluster2):
"""!
@brief Finds two nearest objects in two specified clusters and returns distance between them.
@param[in] (uint) Index of the first cluster.
@param[in] (uint) Index of the second cluster.
@return The nearest euclidean distance between two clusters.
"""
candidate_minimum_distance = float('Inf');
for index_object1 in self.__clusters[index_cluster1]:
for index_object2 in self.__clusters[index_cluster2]:
distance = euclidean_distance_square(self.__pointer_data[index_object1], self.__pointer_data[index_object2]);
if (distance < candidate_minimum_distance):
candidate_minimum_distance = distance;
return candidate_minimum_distance; |
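A plain-function sketch of the same single-linkage step follows; note that, like the method above (which calls euclidean_distance_square), it compares squared Euclidean distances, which is sufficient for picking the minimum.
def nearest_squared_distance(points, cluster1, cluster2):
    # Smallest pairwise squared distance between two clusters of point indices.
    best = float('inf')
    for i in cluster1:
        for j in cluster2:
            d = sum((a - b) ** 2 for a, b in zip(points[i], points[j]))
            if d < best:
                best = d
    return best
points = [(0.0, 0.0), (1.0, 0.0), (4.0, 0.0), (5.0, 1.0)]
print(nearest_squared_distance(points, [0, 1], [2, 3]))  # 9.0 (points 1 and 2)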
def isExpired(certificate):
""" Check if certificate is expired """
if isinstance(certificate, six.string_types):
certificate = json.loads(certificate)
expiry = certificate.get('expiry', 0)
return expiry < int(time.time() * 1000) + 20 * 60 | Check if certificate is expired | Below is the the instruction that describes the task:
### Input:
Check if certificate is expired
### Response:
def isExpired(certificate):
""" Check if certificate is expired """
if isinstance(certificate, six.string_types):
certificate = json.loads(certificate)
expiry = certificate.get('expiry', 0)
return expiry < int(time.time() * 1000) + 20 * 60 |
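A standalone sketch of the same check follows, with the safety margin made an explicit parameter; margin_ms and the sample certificate are invented for illustration.
import json
import time
def is_expired(certificate, margin_ms=0):
    # A certificate counts as expired once now (ms since epoch) + margin passes expiry.
    if isinstance(certificate, str):
        certificate = json.loads(certificate)
    expiry = certificate.get('expiry', 0)
    return expiry < int(time.time() * 1000) + margin_ms
cert = json.dumps({'expiry': int(time.time() * 1000) + 3600 * 1000})  # one hour out
print(is_expired(cert))                             # False
print(is_expired(cert, margin_ms=2 * 3600 * 1000))  # True (inside a 2 h margin)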
def use_custom_term_frequencies(self, custom_term_frequencies):
'''
Parameters
----------
pd.Series
term -> frequency
Returns
-------
PriorFactory
'''
self.priors += custom_term_frequencies.reindex(self.priors.index).fillna(0)
return self | Parameters
----------
pd.Series
term -> frequency
Returns
-------
PriorFactory | Below is the the instruction that describes the task:
### Input:
Parameters
----------
pd.Series
term -> frequency
Returns
-------
PriorFactory
### Response:
def use_custom_term_frequencies(self, custom_term_frequencies):
'''
Parameters
----------
pd.Series
term -> frequency
Returns
-------
PriorFactory
'''
self.priors += custom_term_frequencies.reindex(self.priors.index).fillna(0)
return self |
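The reindex/fillna/add pattern above in a minimal pandas illustration; the series contents are made up.
import pandas as pd
priors = pd.Series({'cat': 2.0, 'dog': 3.0, 'fish': 1.0})
custom = pd.Series({'dog': 5.0, 'bird': 7.0})  # 'bird' is not in the priors
# Align custom counts to the prior index, treat missing terms as zero, then add.
priors += custom.reindex(priors.index).fillna(0)
print(priors.to_dict())  # {'cat': 2.0, 'dog': 8.0, 'fish': 1.0}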
def _CompareStores(self, storage_reader, compare_storage_reader):
"""Compares the contents of two stores.
Args:
storage_reader (StorageReader): storage reader.
compare_storage_reader (StorageReader): storage to compare against.
Returns:
bool: True if the content of the stores is identical.
"""
storage_counters = self._CalculateStorageCounters(storage_reader)
compare_storage_counters = self._CalculateStorageCounters(
compare_storage_reader)
# TODO: improve comparison, currently only total numbers are compared.
return storage_counters == compare_storage_counters | Compares the contents of two stores.
Args:
storage_reader (StorageReader): storage reader.
compare_storage_reader (StorageReader): storage to compare against.
Returns:
bool: True if the content of the stores is identical. | Below is the the instruction that describes the task:
### Input:
Compares the contents of two stores.
Args:
storage_reader (StorageReader): storage reader.
compare_storage_reader (StorageReader): storage to compare against.
Returns:
bool: True if the content of the stores is identical.
### Response:
def _CompareStores(self, storage_reader, compare_storage_reader):
"""Compares the contents of two stores.
Args:
storage_reader (StorageReader): storage reader.
compare_storage_reader (StorageReader): storage to compare against.
Returns:
bool: True if the content of the stores is identical.
"""
storage_counters = self._CalculateStorageCounters(storage_reader)
compare_storage_counters = self._CalculateStorageCounters(
compare_storage_reader)
# TODO: improve comparison, currently only total numbers are compared.
return storage_counters == compare_storage_counters |
def assign_enterprise_learner_role(sender, instance, **kwargs): # pylint: disable=unused-argument
"""
Assign an enterprise learner role to EnterpriseCustomerUser whenever a new record is created.
"""
if kwargs['created'] and instance.user:
enterprise_learner_role, __ = SystemWideEnterpriseRole.objects.get_or_create(name=ENTERPRISE_LEARNER_ROLE)
SystemWideEnterpriseUserRoleAssignment.objects.get_or_create(
user=instance.user,
role=enterprise_learner_role
) | Assign an enterprise learner role to EnterpriseCustomerUser whenever a new record is created. | Below is the the instruction that describes the task:
### Input:
Assign an enterprise learner role to EnterpriseCustomerUser whenever a new record is created.
### Response:
def assign_enterprise_learner_role(sender, instance, **kwargs): # pylint: disable=unused-argument
"""
Assign an enterprise learner role to EnterpriseCustomerUser whenever a new record is created.
"""
if kwargs['created'] and instance.user:
enterprise_learner_role, __ = SystemWideEnterpriseRole.objects.get_or_create(name=ENTERPRISE_LEARNER_ROLE)
SystemWideEnterpriseUserRoleAssignment.objects.get_or_create(
user=instance.user,
role=enterprise_learner_role
) |
def daily_hours(self,local=False):
""" This returns a number from 0 to 24 that describes the number
of hours passed in a day. This is very useful for hr.attendances
"""
data = self.get(local)
daily_hours = (data.hour +
data.minute / 60.0 +
data.second / 3600.0)
return round(daily_hours,2) | This returns a number from 0 to 24 that describes the number
of hours passed in a day. This is very useful for hr.attendances | Below is the the instruction that describes the task:
### Input:
This returns a number from 0 to 24 that describes the number
of hours passed in a day. This is very useful for hr.attendances
### Response:
def daily_hours(self,local=False):
""" This returns a number from 0 to 24 that describes the number
of hours passed in a day. This is very useful for hr.attendances
"""
data = self.get(local)
daily_hours = (data.hour +
data.minute / 60.0 +
data.second / 3600.0)
return round(daily_hours,2) |
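The conversion above in isolation: a time of day expressed as a decimal number of hours, rounded to two places.
from datetime import datetime
def fractional_hours(moment):
    # hours + minutes/60 + seconds/3600, rounded to two decimals
    return round(moment.hour + moment.minute / 60.0 + moment.second / 3600.0, 2)
print(fractional_hours(datetime(2024, 1, 1, 9, 30, 0)))    # 9.5
print(fractional_hours(datetime(2024, 1, 1, 17, 45, 36)))  # 17.76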
def _recurmatch(path, aug):
'''
Recursive generator providing the infrastructure for
augtools print behavior.
This function is based on test_augeas.py from
Harald Hoyer <[email protected]> in the python-augeas
repository
'''
if path:
clean_path = path.rstrip('/*')
yield (clean_path, aug.get(path))
for i in aug.match(clean_path + '/*'):
i = i.replace('!', '\\!') # escape some dirs
for _match in _recurmatch(i, aug):
yield _match | Recursive generator providing the infrastructure for
augtools print behavior.
This function is based on test_augeas.py from
Harald Hoyer <[email protected]> in the python-augeas
repository | Below is the the instruction that describes the task:
### Input:
Recursive generator providing the infrastructure for
augtools print behavior.
This function is based on test_augeas.py from
Harald Hoyer <[email protected]> in the python-augeas
repository
### Response:
def _recurmatch(path, aug):
'''
Recursive generator providing the infrastructure for
augtools print behavior.
This function is based on test_augeas.py from
Harald Hoyer <[email protected]> in the python-augeas
repository
'''
if path:
clean_path = path.rstrip('/*')
yield (clean_path, aug.get(path))
for i in aug.match(clean_path + '/*'):
i = i.replace('!', '\\!') # escape some dirs
for _match in _recurmatch(i, aug):
yield _match |
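The same recursive-generator shape, sketched over a plain nested dict instead of an Augeas tree to show how (path, value) pairs are yielded depth-first; the sample tree is made up.
def recur_items(path, tree):
    if path:
        node = tree
        for part in path.strip('/').split('/'):
            node = node[part]
        # Interior nodes yield None as their value, leaves yield their content.
        yield (path, None if isinstance(node, dict) else node)
        if isinstance(node, dict):
            for child in node:
                for match in recur_items(path.rstrip('/') + '/' + child, tree):
                    yield match
tree = {'files': {'hosts': {'1': '127.0.0.1 localhost', 'comment': 'static table'}}}
for path, value in recur_items('/files', tree):
    print(path, value)
# /files None
# /files/hosts None
# /files/hosts/1 127.0.0.1 localhost
# /files/hosts/comment static table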
def schedule(events, slots, objective_function=None, solver=None, **kwargs):
"""Compute a schedule in schedule form
Parameters
----------
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
solver : pulp.solver
a pulp solver
objective_function : callable
from lp_problem.objective_functions
kwargs : keyword arguments
arguments for the objective function
Returns
-------
list
A list of instances of :py:class:`resources.ScheduledItem`
"""
return conv.solution_to_schedule(
solution(events, slots, objective_function, solver=solver, **kwargs),
events, slots
) | Compute a schedule in schedule form
Parameters
----------
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
solver : pulp.solver
a pulp solver
objective_function : callable
from lp_problem.objective_functions
kwargs : keyword arguments
arguments for the objective function
Returns
-------
list
A list of instances of :py:class:`resources.ScheduledItem` | Below is the the instruction that describes the task:
### Input:
Compute a schedule in schedule form
Parameters
----------
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
solver : pulp.solver
a pulp solver
objective_function : callable
from lp_problem.objective_functions
kwargs : keyword arguments
arguments for the objective function
Returns
-------
list
A list of instances of :py:class:`resources.ScheduledItem`
### Response:
def schedule(events, slots, objective_function=None, solver=None, **kwargs):
"""Compute a schedule in schedule form
Parameters
----------
events : list or tuple
of :py:class:`resources.Event` instances
slots : list or tuple
of :py:class:`resources.Slot` instances
solver : pulp.solver
a pulp solver
objective_function : callable
from lp_problem.objective_functions
kwargs : keyword arguments
arguments for the objective function
Returns
-------
list
A list of instances of :py:class:`resources.ScheduledItem`
"""
return conv.solution_to_schedule(
solution(events, slots, objective_function, solver=solver, **kwargs),
events, slots
) |
def allocate(self):
"""Initializes libvirt resources."""
network_name = None
self._hypervisor = libvirt.open(
self.configuration.get('hypervisor', 'qemu:///system'))
self._storage_pool = self._retrieve_pool()
if 'network' in self.configuration:
self._network = network.create(self._hypervisor, self.identifier,
self.configuration['network'])
network_name = self._network.name()
disk_path = self._retrieve_disk_path()
if self._storage_pool is not None:
self._storage_pool.refresh()
self._domain = domain_create(self._hypervisor, self.identifier,
self.configuration['domain'],
disk_path, network_name=network_name)
if self._network is None:
self._network = network.lookup(self._domain) | Initializes libvirt resources. | Below is the the instruction that describes the task:
### Input:
Initializes libvirt resources.
### Response:
def allocate(self):
"""Initializes libvirt resources."""
network_name = None
self._hypervisor = libvirt.open(
self.configuration.get('hypervisor', 'qemu:///system'))
self._storage_pool = self._retrieve_pool()
if 'network' in self.configuration:
self._network = network.create(self._hypervisor, self.identifier,
self.configuration['network'])
network_name = self._network.name()
disk_path = self._retrieve_disk_path()
if self._storage_pool is not None:
self._storage_pool.refresh()
self._domain = domain_create(self._hypervisor, self.identifier,
self.configuration['domain'],
disk_path, network_name=network_name)
if self._network is None:
self._network = network.lookup(self._domain) |
def start(self):
"""Start listening for incoming connections."""
self.service_info = ServiceInfo(
'_webthing._tcp.local.',
'{}._webthing._tcp.local.'.format(self.name),
address=socket.inet_aton(get_ip()),
port=self.port,
properties={
'path': '/',
},
server='{}.local.'.format(socket.gethostname()))
self.zeroconf = Zeroconf()
self.zeroconf.register_service(self.service_info)
self.server.listen(self.port)
tornado.ioloop.IOLoop.current().start() | Start listening for incoming connections. | Below is the the instruction that describes the task:
### Input:
Start listening for incoming connections.
### Response:
def start(self):
"""Start listening for incoming connections."""
self.service_info = ServiceInfo(
'_webthing._tcp.local.',
'{}._webthing._tcp.local.'.format(self.name),
address=socket.inet_aton(get_ip()),
port=self.port,
properties={
'path': '/',
},
server='{}.local.'.format(socket.gethostname()))
self.zeroconf = Zeroconf()
self.zeroconf.register_service(self.service_info)
self.server.listen(self.port)
tornado.ioloop.IOLoop.current().start() |
def _extract_cause(cls, exc_val):
"""Helper routine to extract nested cause (if any)."""
# See: https://www.python.org/dev/peps/pep-3134/ for why/what
# these are...
#
# '__cause__' attribute for explicitly chained exceptions
# '__context__' attribute for implicitly chained exceptions
# '__traceback__' attribute for the traceback
#
# See: https://www.python.org/dev/peps/pep-0415/ for why/what
# the '__suppress_context__' is/means/implies...
nested_exc_vals = []
seen = [exc_val]
while True:
suppress_context = getattr(
exc_val, '__suppress_context__', False)
if suppress_context:
attr_lookups = ['__cause__']
else:
attr_lookups = ['__cause__', '__context__']
nested_exc_val = None
for attr_name in attr_lookups:
attr_val = getattr(exc_val, attr_name, None)
if attr_val is None:
continue
nested_exc_val = attr_val
if nested_exc_val is None or nested_exc_val in seen:
break
seen.append(nested_exc_val)
nested_exc_vals.append(nested_exc_val)
exc_val = nested_exc_val
last_cause = None
for exc_val in reversed(nested_exc_vals):
f = cls.from_exception(exc_val, cause=last_cause,
find_cause=False)
last_cause = f
return last_cause | Helper routine to extract nested cause (if any). | Below is the the instruction that describes the task:
### Input:
Helper routine to extract nested cause (if any).
### Response:
def _extract_cause(cls, exc_val):
"""Helper routine to extract nested cause (if any)."""
# See: https://www.python.org/dev/peps/pep-3134/ for why/what
# these are...
#
# '__cause__' attribute for explicitly chained exceptions
# '__context__' attribute for implicitly chained exceptions
# '__traceback__' attribute for the traceback
#
# See: https://www.python.org/dev/peps/pep-0415/ for why/what
# the '__suppress_context__' is/means/implies...
nested_exc_vals = []
seen = [exc_val]
while True:
suppress_context = getattr(
exc_val, '__suppress_context__', False)
if suppress_context:
attr_lookups = ['__cause__']
else:
attr_lookups = ['__cause__', '__context__']
nested_exc_val = None
for attr_name in attr_lookups:
attr_val = getattr(exc_val, attr_name, None)
if attr_val is None:
continue
nested_exc_val = attr_val
if nested_exc_val is None or nested_exc_val in seen:
break
seen.append(nested_exc_val)
nested_exc_vals.append(nested_exc_val)
exc_val = nested_exc_val
last_cause = None
for exc_val in reversed(nested_exc_vals):
f = cls.from_exception(exc_val, cause=last_cause,
find_cause=False)
last_cause = f
return last_cause |
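A compact, standalone illustration of the chain-walking idea above: follow __cause__ (and __context__ unless suppressed) outward from an exception, guarding against cycles.
def exception_chain(exc):
    chain, seen = [], {id(exc)}
    while True:
        if getattr(exc, '__suppress_context__', False):
            nxt = exc.__cause__
        else:
            nxt = exc.__cause__ or exc.__context__
        if nxt is None or id(nxt) in seen:
            return chain
        seen.add(id(nxt))
        chain.append(nxt)
        exc = nxt
try:
    try:
        1 / 0
    except ZeroDivisionError as err:
        raise ValueError('bad input') from err
except ValueError as outer:
    print([type(e).__name__ for e in exception_chain(outer)])  # ['ZeroDivisionError']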
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return Authentication(key)
if key not in Authentication._member_map_:
extend_enum(Authentication, key, default)
return Authentication[key] | Backport support for original codes. | Below is the the instruction that describes the task:
### Input:
Backport support for original codes.
### Response:
def get(key, default=-1):
"""Backport support for original codes."""
if isinstance(key, int):
return Authentication(key)
if key not in Authentication._member_map_:
extend_enum(Authentication, key, default)
return Authentication[key] |
def execute(self):
"""
execute the commands inside the nested pipeline.
This causes all queued up commands to be passed upstream to the
parent, including callbacks.
The state of this pipeline object gets cleaned up.
:return:
"""
stack = self._stack
callbacks = self._callbacks
self._stack = []
self._callbacks = []
deferred = []
build = self._nested_future
pipe = self._pipeline(self.connection_name)
for item, args, kwargs, ref in stack:
f = getattr(pipe, item)
deferred.append(build(f(*args, **kwargs), ref))
inject_callbacks = getattr(self.parent, '_inject_callbacks')
inject_callbacks(deferred + callbacks) | execute the commands inside the nested pipeline.
This causes all queued up commands to be passed upstream to the
parent, including callbacks.
The state of this pipeline object gets cleaned up.
:return: | Below is the the instruction that describes the task:
### Input:
execute the commands inside the nested pipeline.
This causes all queued up commands to be passed upstream to the
parent, including callbacks.
The state of this pipeline object gets cleaned up.
:return:
### Response:
def execute(self):
"""
execute the commands inside the nested pipeline.
This causes all queued up commands to be passed upstream to the
parent, including callbacks.
The state of this pipeline object gets cleaned up.
:return:
"""
stack = self._stack
callbacks = self._callbacks
self._stack = []
self._callbacks = []
deferred = []
build = self._nested_future
pipe = self._pipeline(self.connection_name)
for item, args, kwargs, ref in stack:
f = getattr(pipe, item)
deferred.append(build(f(*args, **kwargs), ref))
inject_callbacks = getattr(self.parent, '_inject_callbacks')
inject_callbacks(deferred + callbacks) |
def data_cosine(N=1024, A=0.1, sampling=1024., freq=200):
r"""Return a noisy cosine at a given frequency.
:param N: the final data size
:param A: the strength of the noise
:param float sampling: sampling frequency of the input :attr:`data`.
:param float freq: the frequency :math:`f_0` of the cosine.
.. math:: x[t] = cos(2\pi t * f_0) + A w[t]
where w[t] is a white noise of variance 1.
.. doctest::
>>> from spectrum import data_cosine
>>> a = data_cosine(N=1024, sampling=1024, A=0.5, freq=100)
"""
t = arange(0, float(N)/sampling, 1./sampling)
x = cos(2.*pi*t*freq) + A * randn(t.size)
return x | r"""Return a noisy cosine at a given frequency.
:param N: the final data size
:param A: the strength of the noise
:param float sampling: sampling frequency of the input :attr:`data`.
:param float freq: the frequency :math:`f_0` of the cosine.
.. math:: x[t] = cos(2\pi t * f_0) + A w[t]
where w[t] is a white noise of variance 1.
.. doctest::
>>> from spectrum import data_cosine
>>> a = data_cosine(N=1024, sampling=1024, A=0.5, freq=100) | Below is the the instruction that describes the task:
### Input:
r"""Return a noisy cosine at a given frequency.
:param N: the final data size
:param A: the strength of the noise
:param float sampling: sampling frequency of the input :attr:`data`.
:param float freq: the frequency :math:`f_0` of the cosine.
.. math:: x[t] = cos(2\pi t * f_0) + A w[t]
where w[t] is a white noise of variance 1.
.. doctest::
>>> from spectrum import data_cosine
>>> a = data_cosine(N=1024, sampling=1024, A=0.5, freq=100)
### Response:
def data_cosine(N=1024, A=0.1, sampling=1024., freq=200):
r"""Return a noisy cosine at a given frequency.
:param N: the final data size
:param A: the strength of the noise
:param float sampling: sampling frequency of the input :attr:`data`.
:param float freq: the frequency :math:`f_0` of the cosine.
.. math:: x[t] = cos(2\pi t * f_0) + A w[t]
where w[t] is a white noise of variance 1.
.. doctest::
>>> from spectrum import data_cosine
>>> a = data_cosine(N=1024, sampling=1024, A=0.5, freq=100)
"""
t = arange(0, float(N)/sampling, 1./sampling)
x = cos(2.*pi*t*freq) + A * randn(t.size)
return x |
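The same construction written out directly with NumPy for reference; noisy_cosine below is an illustrative re-implementation, not the spectrum package's function.
import numpy as np
def noisy_cosine(N=1024, A=0.1, sampling=1024.0, freq=200.0, seed=0):
    # N samples of a cosine at freq Hz taken at the given sampling rate,
    # plus white Gaussian noise of standard deviation A.
    rng = np.random.default_rng(seed)
    t = np.arange(N) / sampling
    return np.cos(2.0 * np.pi * freq * t) + A * rng.standard_normal(N)
x = noisy_cosine(N=8, A=0.0, sampling=8.0, freq=1.0)
print(np.round(x, 3))  # approximately [1, 0.707, 0, -0.707, -1, -0.707, 0, 0.707]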
def igmpize(self, ip=None, ether=None):
"""Called to explicitly fixup associated IP and Ethernet headers
Parameters:
self The instantiation of an IGMP class.
ip The instantiation of the associated IP class.
ether The instantiation of the associated Ethernet.
Returns:
True The tuple ether/ip/self passed all check and represents
a proper IGMP packet.
False One or more validation checks failed and no fields
were adjusted.
The function will examine the IGMP message to assure proper format.
Corrections will be attempted if possible. The IP header is then properly
adjusted to ensure correct formatting and assignment. The Ethernet header
is then adjusted to the proper IGMP packet format.
"""
# The rules are:
# 1. the Max Response time is meaningful only in Membership Queries and should be zero
# otherwise (RFC 2236, section 2.2)
if (self.type != 0x11): #rule 1
self.mrtime = 0
if (self.adjust_ip(ip) == True):
if (self.adjust_ether(ip, ether) == True): return True
return False | Called to explicitly fixup associated IP and Ethernet headers
Parameters:
self The instantiation of an IGMP class.
ip The instantiation of the associated IP class.
ether The instantiation of the associated Ethernet.
Returns:
True The tuple ether/ip/self passed all check and represents
a proper IGMP packet.
False One or more validation checks failed and no fields
were adjusted.
The function will examine the IGMP message to assure proper format.
Corrections will be attempted if possible. The IP header is then properly
adjusted to ensure correct formatting and assignment. The Ethernet header
is then adjusted to the proper IGMP packet format. | Below is the the instruction that describes the task:
### Input:
Called to explicitly fixup associated IP and Ethernet headers
Parameters:
self The instantiation of an IGMP class.
ip The instantiation of the associated IP class.
ether The instantiation of the associated Ethernet.
Returns:
True The tuple ether/ip/self passed all check and represents
a proper IGMP packet.
False One or more validation checks failed and no fields
were adjusted.
The function will examine the IGMP message to assure proper format.
Corrections will be attempted if possible. The IP header is then properly
adjusted to ensure correct formatting and assignment. The Ethernet header
is then adjusted to the proper IGMP packet format.
### Response:
def igmpize(self, ip=None, ether=None):
"""Called to explicitly fixup associated IP and Ethernet headers
Parameters:
self The instantiation of an IGMP class.
ip The instantiation of the associated IP class.
ether The instantiation of the associated Ethernet.
Returns:
True The tuple ether/ip/self passed all check and represents
a proper IGMP packet.
False One or more validation checks failed and no fields
were adjusted.
The function will examine the IGMP message to assure proper format.
Corrections will be attempted if possible. The IP header is then properly
adjusted to ensure correct formatting and assignment. The Ethernet header
is then adjusted to the proper IGMP packet format.
"""
# The rules are:
# 1. the Max Response time is meaningful only in Membership Queries and should be zero
# otherwise (RFC 2236, section 2.2)
if (self.type != 0x11): #rule 1
self.mrtime = 0
if (self.adjust_ip(ip) == True):
if (self.adjust_ether(ip, ether) == True): return True
return False |
def build(self, link_type, path):
super(HeadLink, self).build()
"""
:param link_type: Link type
:param target: Link target
"""
self.target = path
self.link_type = link_type
self.autoclosing = True | :param link_type: Link type
:param target: Link target | Below is the the instruction that describes the task:
### Input:
:param link_type: Link type
:param target: Link target
### Response:
def build(self, link_type, path):
super(HeadLink, self).build()
"""
:param link_type: Link type
:param target: Link target
"""
self.target = path
self.link_type = link_type
self.autoclosing = True |
def filter(filter_creator):
"""
Creates a decorator that can be used as a filter.
.. warning::
This is currently not compatible with most other decorators, if
you are using a decorator that isn't part of `hurler` you should
take caution.
"""
filter_func = [None]
def function_getter(function):
if isinstance(function, Filter):
function.add_filter(filter)
return function
else:
return Filter(
filter=filter_func[0],
callback=function,
)
def filter_decorator(*args, **kwargs):
filter_function = filter_creator(*args, **kwargs)
filter_func[0] = filter_function
return function_getter
return filter_decorator | Creates a decorator that can be used as a filter.
.. warning::
This is currently not compatible with most other decorators, if
you are using a decorator that isn't part of `hurler` you should
take caution. | Below is the the instruction that describes the task:
### Input:
Creates a decorator that can be used as a filter.
.. warning::
This is currently not compatible with most other decorators, if
you are using a decorator that isn't part of `hurler` you should
take caution.
### Response:
def filter(filter_creator):
"""
Creates a decorator that can be used as a filter.
.. warning::
This is currently not compatible with most other decorators, if
you are using a decorator that isn't part of `hurler` you should
take caution.
"""
filter_func = [None]
def function_getter(function):
if isinstance(function, Filter):
function.add_filter(filter)
return function
else:
return Filter(
filter=filter_func[0],
callback=function,
)
def filter_decorator(*args, **kwargs):
filter_function = filter_creator(*args, **kwargs)
filter_func[0] = filter_function
return function_getter
return filter_decorator |
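A generic, self-contained sketch of the decorator-factory pattern used above (it does not reproduce hurler's Filter class); make_filter, divisible_by and handle are invented names.
import functools
def make_filter(predicate_factory):
    # Arguments to the decorator build a predicate; the predicate gates the callback.
    def decorator_factory(*args, **kwargs):
        predicate = predicate_factory(*args, **kwargs)
        def decorator(func):
            @functools.wraps(func)
            def wrapper(value):
                if predicate(value):
                    return func(value)
                return None
            return wrapper
        return decorator
    return decorator_factory
@make_filter
def divisible_by(n):
    return lambda value: value % n == 0
@divisible_by(2)
def handle(value):
    return 'handled %d' % value
print(handle(4), handle(5))  # handled 4 None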
def _lazy_urls(self):
"""Lazy loading for URL patterns.
This method avoids problems associated with attempting to evaluate
the URLconf before the settings module has been loaded.
"""
def url_patterns():
return self._urls()[0]
return LazyURLPattern(url_patterns), self.namespace, self.slug | Lazy loading for URL patterns.
This method avoids problems associated with attempting to evaluate
the URLconf before the settings module has been loaded. | Below is the the instruction that describes the task:
### Input:
Lazy loading for URL patterns.
This method avoids problems associated with attempting to evaluate
the URLconf before the settings module has been loaded.
### Response:
def _lazy_urls(self):
"""Lazy loading for URL patterns.
This method avoids problems associated with attempting to evaluate
the URLconf before the settings module has been loaded.
"""
def url_patterns():
return self._urls()[0]
return LazyURLPattern(url_patterns), self.namespace, self.slug |
def get(zpool, prop=None, show_source=False, parsable=True):
'''
.. versionadded:: 2016.3.0
Retrieves the given list of properties
zpool : string
Name of storage pool
prop : string
Optional name of property to retrieve
show_source : boolean
Show source of property
parsable : boolean
Display numbers in parsable (exact) values
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' zpool.get myzpool
'''
ret = OrderedDict()
value_properties = ['name', 'property', 'value', 'source']
## collect get output
res = __salt__['cmd.run_all'](
__utils__['zfs.zpool_command'](
command='get',
flags=['-H'],
property_name=prop if prop else 'all',
target=zpool,
),
python_shell=False,
)
if res['retcode'] != 0:
return __utils__['zfs.parse_command_result'](res)
# NOTE: command output for reference
# ========================================================================
# ...
# data mountpoint /data local
# data compression off default
# ...
# =========================================================================
# parse get output
for line in res['stdout'].splitlines():
# NOTE: transform data into dict
prop_data = OrderedDict(list(zip(
value_properties,
[x for x in line.strip().split('\t') if x not in ['']],
)))
# NOTE: older zfs does not have -o, fall back to manually stripping the name field
del prop_data['name']
# NOTE: normalize values
if parsable:
# NOTE: raw numbers and pythonic types
prop_data['value'] = __utils__['zfs.from_auto'](prop_data['property'], prop_data['value'])
else:
# NOTE: human readable zfs types
prop_data['value'] = __utils__['zfs.to_auto'](prop_data['property'], prop_data['value'])
# NOTE: show source if requested
if show_source:
ret[prop_data['property']] = prop_data
del ret[prop_data['property']]['property']
else:
ret[prop_data['property']] = prop_data['value']
return ret | .. versionadded:: 2016.3.0
Retrieves the given list of properties
zpool : string
Name of storage pool
prop : string
Optional name of property to retrieve
show_source : boolean
Show source of property
parsable : boolean
Display numbers in parsable (exact) values
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' zpool.get myzpool | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2016.3.0
Retrieves the given list of properties
zpool : string
Name of storage pool
prop : string
Optional name of property to retrieve
show_source : boolean
Show source of property
parsable : boolean
Display numbers in parsable (exact) values
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' zpool.get myzpool
### Response:
def get(zpool, prop=None, show_source=False, parsable=True):
'''
.. versionadded:: 2016.3.0
Retrieves the given list of properties
zpool : string
Name of storage pool
prop : string
Optional name of property to retrieve
show_source : boolean
Show source of property
parsable : boolean
Display numbers in parsable (exact) values
.. versionadded:: 2018.3.0
CLI Example:
.. code-block:: bash
salt '*' zpool.get myzpool
'''
ret = OrderedDict()
value_properties = ['name', 'property', 'value', 'source']
## collect get output
res = __salt__['cmd.run_all'](
__utils__['zfs.zpool_command'](
command='get',
flags=['-H'],
property_name=prop if prop else 'all',
target=zpool,
),
python_shell=False,
)
if res['retcode'] != 0:
return __utils__['zfs.parse_command_result'](res)
# NOTE: command output for reference
# ========================================================================
# ...
# data mountpoint /data local
# data compression off default
# ...
# =========================================================================
# parse get output
for line in res['stdout'].splitlines():
# NOTE: transform data into dict
prop_data = OrderedDict(list(zip(
value_properties,
[x for x in line.strip().split('\t') if x not in ['']],
)))
# NOTE: older zfs does not have -o, fall back to manually stripping the name field
del prop_data['name']
# NOTE: normalize values
if parsable:
# NOTE: raw numbers and pythonic types
prop_data['value'] = __utils__['zfs.from_auto'](prop_data['property'], prop_data['value'])
else:
# NOTE: human readable zfs types
prop_data['value'] = __utils__['zfs.to_auto'](prop_data['property'], prop_data['value'])
# NOTE: show source if requested
if show_source:
ret[prop_data['property']] = prop_data
del ret[prop_data['property']]['property']
else:
ret[prop_data['property']] = prop_data['value']
return ret |
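A standalone sketch of the parsing step above: turning tab-separated `zpool get -H` style lines into a property-to-value mapping; the sample output is fabricated.
sample_stdout = "data\tmountpoint\t/data\tlocal\ndata\tcompression\toff\tdefault"
def parse_zpool_get(stdout, show_source=False):
    props = {}
    for line in stdout.splitlines():
        # Each -H line is: name <tab> property <tab> value <tab> source
        name, prop, value, source = [x for x in line.strip().split('\t') if x]
        props[prop] = {'value': value, 'source': source} if show_source else value
    return props
print(parse_zpool_get(sample_stdout))
# {'mountpoint': '/data', 'compression': 'off'}
print(parse_zpool_get(sample_stdout, show_source=True)['compression'])
# {'value': 'off', 'source': 'default'}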
def do_check_artifact_cache(self, vts, post_process_cached_vts=None):
"""Checks the artifact cache for the specified list of VersionedTargetSets.
Returns a tuple (cached, uncached, uncached_causes) of VersionedTargets that were
satisfied/unsatisfied from the cache.
"""
if not vts:
return [], [], []
read_cache = self._cache_factory.get_read_cache()
items = [(read_cache, vt.cache_key, vt.current_results_dir if self.cache_target_dirs else None)
for vt in vts]
res = self.context.subproc_map(call_use_cached_files, items)
cached_vts = []
uncached_vts = []
uncached_causes = []
# Note that while the input vts may represent multiple targets (for tasks that override
# check_artifact_cache_for), the ones we return must represent single targets.
# Once flattened, cached/uncached vts are in separate lists. Each uncached vts is paired
# with why it is missed for stat reporting purpose.
for vt, was_in_cache in zip(vts, res):
if was_in_cache:
cached_vts.extend(vt.versioned_targets)
else:
uncached_vts.extend(vt.versioned_targets)
uncached_causes.extend(repeat(was_in_cache, len(vt.versioned_targets)))
if isinstance(was_in_cache, UnreadableArtifact):
self._cache_key_errors.update(was_in_cache.key)
if post_process_cached_vts:
post_process_cached_vts(cached_vts)
for vt in cached_vts:
vt.update()
return cached_vts, uncached_vts, uncached_causes | Checks the artifact cache for the specified list of VersionedTargetSets.
Returns a tuple (cached, uncached, uncached_causes) of VersionedTargets that were
satisfied/unsatisfied from the cache. | Below is the the instruction that describes the task:
### Input:
Checks the artifact cache for the specified list of VersionedTargetSets.
Returns a tuple (cached, uncached, uncached_causes) of VersionedTargets that were
satisfied/unsatisfied from the cache.
### Response:
def do_check_artifact_cache(self, vts, post_process_cached_vts=None):
"""Checks the artifact cache for the specified list of VersionedTargetSets.
Returns a tuple (cached, uncached, uncached_causes) of VersionedTargets that were
satisfied/unsatisfied from the cache.
"""
if not vts:
return [], [], []
read_cache = self._cache_factory.get_read_cache()
items = [(read_cache, vt.cache_key, vt.current_results_dir if self.cache_target_dirs else None)
for vt in vts]
res = self.context.subproc_map(call_use_cached_files, items)
cached_vts = []
uncached_vts = []
uncached_causes = []
# Note that while the input vts may represent multiple targets (for tasks that override
# check_artifact_cache_for), the ones we return must represent single targets.
# Once flattened, cached/uncached vts are in separate lists. Each uncached vts is paired
# with why it is missed for stat reporting purpose.
for vt, was_in_cache in zip(vts, res):
if was_in_cache:
cached_vts.extend(vt.versioned_targets)
else:
uncached_vts.extend(vt.versioned_targets)
uncached_causes.extend(repeat(was_in_cache, len(vt.versioned_targets)))
if isinstance(was_in_cache, UnreadableArtifact):
self._cache_key_errors.update(was_in_cache.key)
if post_process_cached_vts:
post_process_cached_vts(cached_vts)
for vt in cached_vts:
vt.update()
return cached_vts, uncached_vts, uncached_causes |
def login_required(function=None, username=None, basic=False, must=None):
"""Decorate views to require login
@login_required
@login_required()
@login_required(username='admin')
@login_required(username=['admin', 'jon'])
@login_required(basic=True)
@login_required(must=[function, another_function])
"""
if function and not callable(function):
raise ValueError(
'Decorator receives only named arguments, '
'try login_required(username="foo")'
)
def check(validators):
"""Return in the first validation error, else return None"""
if validators is None:
return
if not isinstance(validators, (list, tuple)):
validators = [validators]
for validator in validators:
error = validator(get_username())
if error is not None:
return SimpleLogin.get_message('auth_error', error), 403
def dispatch(fun, *args, **kwargs):
if basic and request.is_json:
return dispatch_basic_auth(fun, *args, **kwargs)
if is_logged_in(username=username):
return check(must) or fun(*args, **kwargs)
elif is_logged_in():
return SimpleLogin.get_message('access_denied'), 403
else:
flash(SimpleLogin.get_message('login_required'), 'warning')
return redirect(url_for('simplelogin.login', next=request.path))
def dispatch_basic_auth(fun, *args, **kwargs):
simplelogin = current_app.extensions['simplelogin']
auth_response = simplelogin.basic_auth()
if auth_response is True:
return check(must) or fun(*args, **kwargs)
else:
return auth_response
if function:
@wraps(function)
def simple_decorator(*args, **kwargs):
"""This is for when decorator is @login_required"""
return dispatch(function, *args, **kwargs)
return simple_decorator
def decorator(f):
"""This is for when decorator is @login_required(...)"""
@wraps(f)
def wrap(*args, **kwargs):
return dispatch(f, *args, **kwargs)
return wrap
return decorator | Decorate views to require login
@login_required
@login_required()
@login_required(username='admin')
@login_required(username=['admin', 'jon'])
@login_required(basic=True)
@login_required(must=[function, another_function]) | Below is the the instruction that describes the task:
### Input:
Decorate views to require login
@login_required
@login_required()
@login_required(username='admin')
@login_required(username=['admin', 'jon'])
@login_required(basic=True)
@login_required(must=[function, another_function])
### Response:
def login_required(function=None, username=None, basic=False, must=None):
"""Decorate views to require login
@login_required
@login_required()
@login_required(username='admin')
@login_required(username=['admin', 'jon'])
@login_required(basic=True)
@login_required(must=[function, another_function])
"""
if function and not callable(function):
raise ValueError(
'Decorator receives only named arguments, '
'try login_required(username="foo")'
)
def check(validators):
"""Return in the first validation error, else return None"""
if validators is None:
return
if not isinstance(validators, (list, tuple)):
validators = [validators]
for validator in validators:
error = validator(get_username())
if error is not None:
return SimpleLogin.get_message('auth_error', error), 403
def dispatch(fun, *args, **kwargs):
if basic and request.is_json:
return dispatch_basic_auth(fun, *args, **kwargs)
if is_logged_in(username=username):
return check(must) or fun(*args, **kwargs)
elif is_logged_in():
return SimpleLogin.get_message('access_denied'), 403
else:
flash(SimpleLogin.get_message('login_required'), 'warning')
return redirect(url_for('simplelogin.login', next=request.path))
def dispatch_basic_auth(fun, *args, **kwargs):
simplelogin = current_app.extensions['simplelogin']
auth_response = simplelogin.basic_auth()
if auth_response is True:
return check(must) or fun(*args, **kwargs)
else:
return auth_response
if function:
@wraps(function)
def simple_decorator(*args, **kwargs):
"""This is for when decorator is @login_required"""
return dispatch(function, *args, **kwargs)
return simple_decorator
def decorator(f):
"""This is for when decorator is @login_required(...)"""
@wraps(f)
def wrap(*args, **kwargs):
return dispatch(f, *args, **kwargs)
return wrap
return decorator |
def get(self, key, **kw):
"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
return self.impl.get(key, **self._get_cache_kw(kw, None)) | Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend. | Below is the the instruction that describes the task:
### Input:
Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
### Response:
def get(self, key, **kw):
"""Retrieve a value from the cache.
:param key: the value's key.
:param \**kw: cache configuration arguments. The
backend is configured using these arguments upon first request.
Subsequent requests that use the same series of configuration
values will use that same backend.
"""
return self.impl.get(key, **self._get_cache_kw(kw, None)) |
def fit(
self,
df,
duration_col,
event_col=None,
ancillary_df=None,
show_progress=False,
timeline=None,
weights_col=None,
robust=False,
initial_point=None,
entry_col=None,
):
"""
Fit the accelerated failure time model to a right-censored dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' event was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence
diagnostics. Useful if convergence is failing.
ancillary_df: None, boolean, or DataFrame, optional (default=None)
Choose to model the ancillary parameters.
If None or False, explicitly do not fit the ancillary parameters using any covariates.
If True, model the ancillary parameters with the same covariates as ``df``.
If DataFrame, provide covariates to model the ancillary parameters. Must be the same row count as ``df``.
timeline: array, optional
Specify a timeline that will be used for plotting and prediction
weights_col: string
the column in DataFrame that specifies weights per observation.
robust: boolean, optional (default=False)
Compute the robust errors using the Huber sandwich estimator.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
entry_col: specify a column in the DataFrame that denotes any late-entries (left truncation) that occurred. See
the docs on `left truncation <https://lifelines.readthedocs.io/en/latest/Survival%20analysis%20with%20lifelines.html#left-truncated-late-entry-data>`__
Returns
-------
self:
self with additional new properties: ``print_summary``, ``params_``, ``confidence_intervals_`` and more
Examples
--------
>>> from lifelines import WeibullAFTFitter, LogNormalAFTFitter, LogLogisticAFTFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> aft = WeibullAFTFitter()
>>> aft.fit(df, 'T', 'E')
>>> aft.print_summary()
>>> aft.predict_median(df)
>>>
>>> aft = WeibullAFTFitter()
>>> aft.fit(df, 'T', 'E', ancillary_df=df)
>>> aft.print_summary()
>>> aft.predict_median(df)
"""
self.duration_col = duration_col
self._time_cols = [duration_col]
self._censoring_type = CensoringType.RIGHT
df = df.copy()
T = pass_for_numeric_dtypes_or_raise_array(df.pop(duration_col)).astype(float)
self.durations = T.copy()
self._fit(
self._log_likelihood_right_censoring,
df,
(T.values, None),
event_col=event_col,
ancillary_df=ancillary_df,
show_progress=show_progress,
timeline=timeline,
weights_col=weights_col,
robust=robust,
initial_point=initial_point,
entry_col=entry_col,
)
return self | Fit the accelerated failure time model to a right-censored dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' events was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence
diagnostics. Useful if convergence is failing.
ancillary_df: None, boolean, or DataFrame, optional (default=None)
Choose to model the ancillary parameters.
If None or False, explicitly do not fit the ancillary parameters using any covariates.
If True, model the ancillary parameters with the same covariates as ``df``.
If DataFrame, provide covariates to model the ancillary parameters. Must be the same row count as ``df``.
timeline: array, optional
Specify a timeline that will be used for plotting and prediction
weights_col: string
the column in DataFrame that specifies weights per observation.
robust: boolean, optional (default=False)
Compute the robust errors using the Huber sandwich estimator.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
entry_col: specify a column in the DataFrame that denotes any late-entries (left truncation) that occurred. See
the docs on `left truncation <https://lifelines.readthedocs.io/en/latest/Survival%20analysis%20with%20lifelines.html#left-truncated-late-entry-data>`__
Returns
-------
self:
self with additional new properties: ``print_summary``, ``params_``, ``confidence_intervals_`` and more
Examples
--------
>>> from lifelines import WeibullAFTFitter, LogNormalAFTFitter, LogLogisticAFTFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> aft = WeibullAFTFitter()
>>> aft.fit(df, 'T', 'E')
>>> aft.print_summary()
>>> aft.predict_median(df)
>>>
>>> aft = WeibullAFTFitter()
>>> aft.fit(df, 'T', 'E', ancillary_df=df)
>>> aft.print_summary()
>>> aft.predict_median(df) | Below is the instruction that describes the task:
### Input:
Fit the accelerated failure time model to a right-censored dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' event was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence
diagnostics. Useful if convergence is failing.
ancillary_df: None, boolean, or DataFrame, optional (default=None)
Choose to model the ancillary parameters.
If None or False, explicitly do not fit the ancillary parameters using any covariates.
If True, model the ancillary parameters with the same covariates as ``df``.
If DataFrame, provide covariates to model the ancillary parameters. Must be the same row count as ``df``.
timeline: array, optional
Specify a timeline that will be used for plotting and prediction
weights_col: string
the column in DataFrame that specifies weights per observation.
robust: boolean, optional (default=False)
Compute the robust errors using the Huber sandwich estimator.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
entry_col: specify a column in the DataFrame that denotes any late-entries (left truncation) that occurred. See
the docs on `left truncation <https://lifelines.readthedocs.io/en/latest/Survival%20analysis%20with%20lifelines.html#left-truncated-late-entry-data>`__
Returns
-------
self:
self with additional new properties: ``print_summary``, ``params_``, ``confidence_intervals_`` and more
Examples
--------
>>> from lifelines import WeibullAFTFitter, LogNormalAFTFitter, LogLogisticAFTFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> aft = WeibullAFTFitter()
>>> aft.fit(df, 'T', 'E')
>>> aft.print_summary()
>>> aft.predict_median(df)
>>>
>>> aft = WeibullAFTFitter()
>>> aft.fit(df, 'T', 'E', ancillary_df=df)
>>> aft.print_summary()
>>> aft.predict_median(df)
### Response:
def fit(
self,
df,
duration_col,
event_col=None,
ancillary_df=None,
show_progress=False,
timeline=None,
weights_col=None,
robust=False,
initial_point=None,
entry_col=None,
):
"""
Fit the accelerated failure time model to a right-censored dataset.
Parameters
----------
df: DataFrame
a Pandas DataFrame with necessary columns `duration_col` and
`event_col` (see below), covariates columns, and special columns (weights).
`duration_col` refers to
the lifetimes of the subjects. `event_col` refers to whether
the 'death' event was observed: 1 if observed, 0 else (censored).
duration_col: string
the name of the column in DataFrame that contains the subjects'
lifetimes.
event_col: string, optional
the name of the column in DataFrame that contains the subjects' death
observation. If left as None, assume all individuals are uncensored.
show_progress: boolean, optional (default=False)
since the fitter is iterative, show convergence
diagnostics. Useful if convergence is failing.
ancillary_df: None, boolean, or DataFrame, optional (default=None)
Choose to model the ancillary parameters.
If None or False, explicitly do not fit the ancillary parameters using any covariates.
If True, model the ancillary parameters with the same covariates as ``df``.
If DataFrame, provide covariates to model the ancillary parameters. Must be the same row count as ``df``.
timeline: array, optional
Specify a timeline that will be used for plotting and prediction
weights_col: string
the column in DataFrame that specifies weights per observation.
robust: boolean, optional (default=False)
Compute the robust errors using the Huber sandwich estimator.
initial_point: (d,) numpy array, optional
initialize the starting point of the iterative
algorithm. Default is the zero vector.
entry_col: specify a column in the DataFrame that denotes any late-entries (left truncation) that occurred. See
the docs on `left truncation <https://lifelines.readthedocs.io/en/latest/Survival%20analysis%20with%20lifelines.html#left-truncated-late-entry-data>`__
Returns
-------
self:
self with additional new properties: ``print_summary``, ``params_``, ``confidence_intervals_`` and more
Examples
--------
>>> from lifelines import WeibullAFTFitter, LogNormalAFTFitter, LogLogisticAFTFitter
>>>
>>> df = pd.DataFrame({
>>> 'T': [5, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> 'E': [1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0],
>>> 'var': [0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2],
>>> 'age': [4, 3, 9, 8, 7, 4, 4, 3, 2, 5, 6, 7],
>>> })
>>>
>>> aft = WeibullAFTFitter()
>>> aft.fit(df, 'T', 'E')
>>> aft.print_summary()
>>> aft.predict_median(df)
>>>
>>> aft = WeibullAFTFitter()
>>> aft.fit(df, 'T', 'E', ancillary_df=df)
>>> aft.print_summary()
>>> aft.predict_median(df)
"""
self.duration_col = duration_col
self._time_cols = [duration_col]
self._censoring_type = CensoringType.RIGHT
df = df.copy()
T = pass_for_numeric_dtypes_or_raise_array(df.pop(duration_col)).astype(float)
self.durations = T.copy()
self._fit(
self._log_likelihood_right_censoring,
df,
(T.values, None),
event_col=event_col,
ancillary_df=ancillary_df,
show_progress=show_progress,
timeline=timeline,
weights_col=weights_col,
robust=robust,
initial_point=initial_point,
entry_col=entry_col,
)
return self |
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a structure of tokens derived from a line of a text file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
"""
if key != 'line':
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
msg_value = structure.get('msg')
if not msg_value:
parser_mediator.ProduceExtractionWarning(
'missing msg value: {0!s}'.format(structure))
return
try:
seconds = int(msg_value[0], 10)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'unsupported number of seconds in msg value: {0!s}'.format(
structure))
return
try:
milliseconds = int(msg_value[1], 10)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'unsupported number of milliseconds in msg value: {0!s}'.format(
structure))
return
timestamp = ((seconds * 1000) + milliseconds) * 1000
body_text = structure[2][0]
try:
# Try to parse the body text as key value pairs. Note that not
# all log lines will be properly formatted key value pairs.
key_value_dict = self._SELINUX_KEY_VALUE_DICT.parseString(body_text)
except pyparsing.ParseException:
key_value_dict = {}
event_data = SELinuxLogEventData()
event_data.audit_type = structure.get('type', None)
event_data.body = body_text
event_data.pid = key_value_dict.get('pid', None)
# TODO: pass line number to offset or remove.
event_data.offset = 0
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) | Parses a structure of tokens derived from a line of a text file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown. | Below is the instruction that describes the task:
### Input:
Parses a structure of tokens derived from a line of a text file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
### Response:
def ParseRecord(self, parser_mediator, key, structure):
"""Parses a structure of tokens derived from a line of a text file.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
key (str): name of the parsed structure.
structure (pyparsing.ParseResults): structure of tokens derived from
a line of a text file.
Raises:
ParseError: when the structure type is unknown.
"""
if key != 'line':
raise errors.ParseError(
'Unable to parse record, unknown structure: {0:s}'.format(key))
msg_value = structure.get('msg')
if not msg_value:
parser_mediator.ProduceExtractionWarning(
'missing msg value: {0!s}'.format(structure))
return
try:
seconds = int(msg_value[0], 10)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'unsupported number of seconds in msg value: {0!s}'.format(
structure))
return
try:
milliseconds = int(msg_value[1], 10)
except ValueError:
parser_mediator.ProduceExtractionWarning(
'unsupported number of milliseconds in msg value: {0!s}'.format(
structure))
return
timestamp = ((seconds * 1000) + milliseconds) * 1000
body_text = structure[2][0]
try:
# Try to parse the body text as key value pairs. Note that not
# all log lines will be properly formatted key value pairs.
key_value_dict = self._SELINUX_KEY_VALUE_DICT.parseString(body_text)
except pyparsing.ParseException:
key_value_dict = {}
event_data = SELinuxLogEventData()
event_data.audit_type = structure.get('type', None)
event_data.body = body_text
event_data.pid = key_value_dict.get('pid', None)
# TODO: pass line number to offset or remove.
event_data.offset = 0
event = time_events.TimestampEvent(
timestamp, definitions.TIME_DESCRIPTION_WRITTEN)
parser_mediator.ProduceEventWithEventData(event, event_data) |
def remove_node(cls, cluster_id_label, private_dns, parameters=None):
"""
Remove a node from an existing cluster
"""
conn = Qubole.agent(version=Cluster.api_version)
parameters = {} if not parameters else parameters
data = {"private_dns" : private_dns, "parameters" : parameters}
return conn.delete(cls.element_path(cluster_id_label) + "/nodes", data) | Remove a node from an existing cluster | Below is the instruction that describes the task:
### Input:
Remove a node from an existing cluster
### Response:
def remove_node(cls, cluster_id_label, private_dns, parameters=None):
"""
Remove a node from an existing cluster
"""
conn = Qubole.agent(version=Cluster.api_version)
parameters = {} if not parameters else parameters
data = {"private_dns" : private_dns, "parameters" : parameters}
return conn.delete(cls.element_path(cluster_id_label) + "/nodes", data) |
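A minimal usage sketch for remove_node above, assuming it is the Cluster classmethod from the Qubole qds_sdk package; the auth token, cluster label, and private DNS name below are placeholders.

from qds_sdk.qubole import Qubole
from qds_sdk.cluster import Cluster

# Configure the SDK once per process with an account auth token (placeholder).
Qubole.configure(api_token='<auth-token>')
# Issues a DELETE on <cluster>/nodes for the node identified by its private DNS name.
Cluster.remove_node('my-cluster-label', private_dns='ip-10-0-0-12.ec2.internal')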
def run(self):
"""Run analysis"""
try:
self.results = self.checker(self.source_code)
except Exception as e:
logger.error(e, exc_info=True) | Run analysis | Below is the instruction that describes the task:
### Input:
Run analysis
### Response:
def run(self):
"""Run analysis"""
try:
self.results = self.checker(self.source_code)
except Exception as e:
logger.error(e, exc_info=True) |
def calculate_pore_shape(elements, coordinates, adjust=1, increment=0.1,
**kwargs):
"""Return average diameter for a molecule."""
# Copy the coordinates as we will perform many operations on them
coordinates = deepcopy(coordinates)
# Center of our cartesian system is always at origin
origin = np.array([0, 0, 0])
# Initial center of mass to reverse translation at the end
initial_com = center_of_mass(elements, coordinates)
# We just shift the cage to the origin.
coordinates = shift_com(elements, coordinates)
# We create an array of vdw radii of elements.
elements_vdw = np.array([[atomic_vdw_radius[x.upper()]] for x in elements])
# We calculate the maximum diameter of a molecule to determine the radius
# of a sampling sphere necessary to enclose the whole molecule.
shpere_radius = max_dim(elements, coordinates)[2]/2
sphere_surface_area = 4 * np.pi * shpere_radius**2
# Here we determine the number of sampling points necessary for a fine
# sampling. Smaller molecules require a finer density of sampling
# points on the sampling sphere's surface, whereas larger ones require less.
# This formula was created so that larger molecules do not take much longer
# to analyse, as number_sampling_points*length_of_sampling_vectors
# results in a quadratic increase of sampling time. The 250 factor was
# specifically determined to produce close to 1 sampling point /Angstrom^2
# for a sphere of radius ~ 24 Angstrom. We can adjust how fine the
# sampling is by changing the adjust factor.
number_of_points = int(np.log10(sphere_surface_area) * 250 * adjust)
# Here I use code by Alexandre Devert for spreading points on a sphere:
# http://blog.marmakoide.org/?p=1
golden_angle = np.pi * (3 - np.sqrt(5))
theta = golden_angle * np.arange(number_of_points)
z = np.linspace(1 - 1.0 / number_of_points, 1.0 / number_of_points - 1.0,
number_of_points)
radius = np.sqrt(1 - z * z)
points = np.zeros((number_of_points, 3))
points[:, 0] = radius * np.cos(theta) * shpere_radius
points[:, 1] = radius * np.sin(theta) * shpere_radius
points[:, 2] = z * shpere_radius
# Here we will compute the eps parameter for the sklearn.cluster.DBSCAN
# (3-dimensional spatial clustering algorithm) which is the mean distance
# to the closest point of all points.
values = []
tree = KDTree(points)
for i in points:
dist, ind = tree.query(i.reshape(1, -1), k=10)
values.extend(dist)
mean_distance = np.mean(values)
# The best eps is parametrized by adding the mean distance and its square root.
eps = mean_distance + mean_distance**0.5
# Here we either run the sampling point vector analysis in serial
# or parallel. The vectors that go through molecular voids are returned
# as an analysed list with the increment along the vector's path with the
# largest included sphere and the coordinates of this narrow channel point.
# Vectors that find a molecule on their path are returned as NoneType objects.
results = [
vector_analysis_pore_shape(point, coordinates, elements_vdw)
for point in points
]
results_cleaned = [x for x in results if x is not None]
ele = np.array(['X'] * len(results_cleaned))
coor = np.array(results_cleaned)
return coor | Return average diameter for a molecule. | Below is the instruction that describes the task:
### Input:
Return average diameter for a molecule.
### Response:
def calculate_pore_shape(elements, coordinates, adjust=1, increment=0.1,
**kwargs):
"""Return average diameter for a molecule."""
# Copy the coordinates as we will perform many operations on them
coordinates = deepcopy(coordinates)
# Center of our cartesian system is always at origin
origin = np.array([0, 0, 0])
# Initial center of mass to reverse translation at the end
initial_com = center_of_mass(elements, coordinates)
# We just shift the cage to the origin.
coordinates = shift_com(elements, coordinates)
# We create an array of vdw radii of elements.
elements_vdw = np.array([[atomic_vdw_radius[x.upper()]] for x in elements])
# We calculate the maximum diameter of a molecule to determine the radius
# of a sampling sphere necessary to enclose the whole molecule.
shpere_radius = max_dim(elements, coordinates)[2]/2
sphere_surface_area = 4 * np.pi * shpere_radius**2
# Here we determine the number of sampling points necessary for a fine
# sampling. Smaller molecules require a finer density of sampling
# points on the sampling sphere's surface, whereas larger ones require less.
# This formula was created so that larger molecules do not take much longer
# to analyse, as number_sampling_points*length_of_sampling_vectors
# results in a quadratic increase of sampling time. The 250 factor was
# specifically determined to produce close to 1 sampling point /Angstrom^2
# for a sphere of radius ~ 24 Angstrom. We can adjust how fine the
# sampling is by changing the adjust factor.
number_of_points = int(np.log10(sphere_surface_area) * 250 * adjust)
# Here I use code by Alexandre Devert for spreading points on a sphere:
# http://blog.marmakoide.org/?p=1
golden_angle = np.pi * (3 - np.sqrt(5))
theta = golden_angle * np.arange(number_of_points)
z = np.linspace(1 - 1.0 / number_of_points, 1.0 / number_of_points - 1.0,
number_of_points)
radius = np.sqrt(1 - z * z)
points = np.zeros((number_of_points, 3))
points[:, 0] = radius * np.cos(theta) * shpere_radius
points[:, 1] = radius * np.sin(theta) * shpere_radius
points[:, 2] = z * shpere_radius
# Here we will compute the eps parameter for the sklearn.cluster.DBSCAN
# (3-dimensional spatial clustering algorithm) which is the mean distance
# to the closest point of all points.
values = []
tree = KDTree(points)
for i in points:
dist, ind = tree.query(i.reshape(1, -1), k=10)
values.extend(dist)
mean_distance = np.mean(values)
# The best eps is parametrized by adding the mean distance and its square root.
eps = mean_distance + mean_distance**0.5
# Here we either run the sampling point vector analysis in serial
# or parallel. The vectors that go through molecular voids are returned
# as an analysed list with the increment along the vector's path with the
# largest included sphere and the coordinates of this narrow channel point.
# Vectors that find a molecule on their path are returned as NoneType objects.
results = [
vector_analysis_pore_shape(point, coordinates, elements_vdw)
for point in points
]
results_cleaned = [x for x in results if x is not None]
ele = np.array(['X'] * len(results_cleaned))
coor = np.array(results_cleaned)
return coor |
def exec_rabbitmqctl(self, command, args=[], rabbitmqctl_opts=['-q']):
"""
Execute a ``rabbitmqctl`` command inside a running container.
:param command: the command to run
:param args: a list of args for the command
:param rabbitmqctl_opts:
a list of extra options to pass to ``rabbitmqctl``
:returns: a tuple of the command exit code and output
"""
cmd = ['rabbitmqctl'] + rabbitmqctl_opts + [command] + args
return self.inner().exec_run(cmd) | Execute a ``rabbitmqctl`` command inside a running container.
:param command: the command to run
:param args: a list of args for the command
:param rabbitmqctl_opts:
a list of extra options to pass to ``rabbitmqctl``
:returns: a tuple of the command exit code and output | Below is the instruction that describes the task:
### Input:
Execute a ``rabbitmqctl`` command inside a running container.
:param command: the command to run
:param args: a list of args for the command
:param rabbitmqctl_opts:
a list of extra options to pass to ``rabbitmqctl``
:returns: a tuple of the command exit code and output
### Response:
def exec_rabbitmqctl(self, command, args=[], rabbitmqctl_opts=['-q']):
"""
Execute a ``rabbitmqctl`` command inside a running container.
:param command: the command to run
:param args: a list of args for the command
:param rabbitmqctl_opts:
a list of extra options to pass to ``rabbitmqctl``
:returns: a tuple of the command exit code and output
"""
cmd = ['rabbitmqctl'] + rabbitmqctl_opts + [command] + args
return self.inner().exec_run(cmd) |
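A short usage sketch for exec_rabbitmqctl above; the rabbitmq_container object and its running Docker container come from the surrounding test-helper library and are assumed here.

# List queue names and message counts in the default vhost; '-q' suppresses the
# banner and '-p /' selects the vhost, mirroring plain rabbitmqctl usage.
exit_code, output = rabbitmq_container.exec_rabbitmqctl(
    'list_queues', args=['name', 'messages'], rabbitmqctl_opts=['-q', '-p', '/'])
if exit_code != 0:
    raise RuntimeError('rabbitmqctl failed: {!r}'.format(output))
print(output.decode() if isinstance(output, bytes) else output)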
def diagnose_embedding(emb, source, target):
"""A detailed diagnostic for minor embeddings.
This diagnostic produces a generator, which lists all issues with `emb`. The errors
are yielded in the form
ExceptionClass, arg1, arg2,...
where the arguments following the class are used to construct the exception object.
User-friendly variants of this function are :func:`is_valid_embedding`, which returns a
bool, and :func:`verify_embedding` which raises the first observed error. All exceptions
are subclasses of :exc:`.EmbeddingError`.
Args:
emb (dict):
Dictionary mapping source nodes to arrays of target nodes.
source (list/:obj:`networkx.Graph`):
Graph to be embedded as a NetworkX graph or a list of edges.
target (list/:obj:`networkx.Graph`):
Graph being embedded into as a NetworkX graph or a list of edges.
Yields:
One of:
:exc:`.MissingChainError`, snode: a source node label that does not occur as a key of `emb`, or for which emb[snode] is empty
:exc:`.ChainOverlapError`, tnode, snode0, snode1: a target node which occurs in both `emb[snode0]` and `emb[snode1]`
:exc:`.DisconnectedChainError`, snode: a source node label whose chain is not a connected subgraph of `target`
:exc:`.InvalidNodeError`, tnode, snode: a source node label and putative target node label which is not a node of `target`
:exc:`.MissingEdgeError`, snode0, snode1: a pair of source node labels defining an edge which is not present between their chains
"""
if not hasattr(source, 'edges'):
source = nx.Graph(source)
if not hasattr(target, 'edges'):
target = nx.Graph(target)
label = {}
embedded = set()
for x in source:
try:
embx = emb[x]
missing_chain = len(embx) == 0
except KeyError:
missing_chain = True
if missing_chain:
yield MissingChainError, x
continue
all_present = True
for q in embx:
if label.get(q, x) != x:
yield ChainOverlapError, q, x, label[q]
elif q not in target:
all_present = False
yield InvalidNodeError, x, q
else:
label[q] = x
if all_present:
embedded.add(x)
if not nx.is_connected(target.subgraph(embx)):
yield DisconnectedChainError, x
yielded = nx.Graph()
for p, q in target.subgraph(label).edges():
yielded.add_edge(label[p], label[q])
for x, y in source.edges():
if x == y:
continue
if x in embedded and y in embedded and not yielded.has_edge(x, y):
yield MissingEdgeError, x, y | A detailed diagnostic for minor embeddings.
This diagnostic produces a generator, which lists all issues with `emb`. The errors
are yielded in the form
ExceptionClass, arg1, arg2,...
where the arguments following the class are used to construct the exception object.
User-friendly variants of this function are :func:`is_valid_embedding`, which returns a
bool, and :func:`verify_embedding` which raises the first observed error. All exceptions
are subclasses of :exc:`.EmbeddingError`.
Args:
emb (dict):
Dictionary mapping source nodes to arrays of target nodes.
source (list/:obj:`networkx.Graph`):
Graph to be embedded as a NetworkX graph or a list of edges.
target (list/:obj:`networkx.Graph`):
Graph being embedded into as a NetworkX graph or a list of edges.
Yields:
One of:
:exc:`.MissingChainError`, snode: a source node label that does not occur as a key of `emb`, or for which emb[snode] is empty
:exc:`.ChainOverlapError`, tnode, snode0, snode1: a target node which occurs in both `emb[snode0]` and `emb[snode1]`
:exc:`.DisconnectedChainError`, snode: a source node label whose chain is not a connected subgraph of `target`
:exc:`.InvalidNodeError`, tnode, snode: a source node label and putative target node label which is not a node of `target`
:exc:`.MissingEdgeError`, snode0, snode1: a pair of source node labels defining an edge which is not present between their chains | Below is the instruction that describes the task:
### Input:
A detailed diagnostic for minor embeddings.
This diagnostic produces a generator, which lists all issues with `emb`. The errors
are yielded in the form
ExceptionClass, arg1, arg2,...
where the arguments following the class are used to construct the exception object.
User-friendly variants of this function are :func:`is_valid_embedding`, which returns a
bool, and :func:`verify_embedding` which raises the first observed error. All exceptions
are subclasses of :exc:`.EmbeddingError`.
Args:
emb (dict):
Dictionary mapping source nodes to arrays of target nodes.
source (list/:obj:`networkx.Graph`):
Graph to be embedded as a NetworkX graph or a list of edges.
target (list/:obj:`networkx.Graph`):
Graph being embedded into as a NetworkX graph or a list of edges.
Yields:
One of:
:exc:`.MissingChainError`, snode: a source node label that does not occur as a key of `emb`, or for which emb[snode] is empty
:exc:`.ChainOverlapError`, tnode, snode0, snode1: a target node which occurs in both `emb[snode0]` and `emb[snode1]`
:exc:`.DisconnectedChainError`, snode: a source node label whose chain is not a connected subgraph of `target`
:exc:`.InvalidNodeError`, tnode, snode: a source node label and putative target node label which is not a node of `target`
:exc:`.MissingEdgeError`, snode0, snode1: a pair of source node labels defining an edge which is not present between their chains
### Response:
def diagnose_embedding(emb, source, target):
"""A detailed diagnostic for minor embeddings.
This diagnostic produces a generator, which lists all issues with `emb`. The errors
are yielded in the form
ExceptionClass, arg1, arg2,...
where the arguments following the class are used to construct the exception object.
User-friendly variants of this function are :func:`is_valid_embedding`, which returns a
bool, and :func:`verify_embedding` which raises the first observed error. All exceptions
are subclasses of :exc:`.EmbeddingError`.
Args:
emb (dict):
Dictionary mapping source nodes to arrays of target nodes.
source (list/:obj:`networkx.Graph`):
Graph to be embedded as a NetworkX graph or a list of edges.
target (list/:obj:`networkx.Graph`):
Graph being embedded into as a NetworkX graph or a list of edges.
Yields:
One of:
:exc:`.MissingChainError`, snode: a source node label that does not occur as a key of `emb`, or for which emb[snode] is empty
:exc:`.ChainOverlapError`, tnode, snode0, snode1: a target node which occurs in both `emb[snode0]` and `emb[snode1]`
:exc:`.DisconnectedChainError`, snode: a source node label whose chain is not a connected subgraph of `target`
:exc:`.InvalidNodeError`, tnode, snode: a source node label and putative target node label which is not a node of `target`
:exc:`.MissingEdgeError`, snode0, snode1: a pair of source node labels defining an edge which is not present between their chains
"""
if not hasattr(source, 'edges'):
source = nx.Graph(source)
if not hasattr(target, 'edges'):
target = nx.Graph(target)
label = {}
embedded = set()
for x in source:
try:
embx = emb[x]
missing_chain = len(embx) == 0
except KeyError:
missing_chain = True
if missing_chain:
yield MissingChainError, x
continue
all_present = True
for q in embx:
if label.get(q, x) != x:
yield ChainOverlapError, q, x, label[q]
elif q not in target:
all_present = False
yield InvalidNodeError, x, q
else:
label[q] = x
if all_present:
embedded.add(x)
if not nx.is_connected(target.subgraph(embx)):
yield DisconnectedChainError, x
yielded = nx.Graph()
for p, q in target.subgraph(label).edges():
yielded.add_edge(label[p], label[q])
for x, y in source.edges():
if x == y:
continue
if x in embedded and y in embedded and not yielded.has_edge(x, y):
yield MissingEdgeError, x, y |
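A self-contained sketch of driving diagnose_embedding above on toy graphs, assuming the function and the exception classes it yields are in scope.

import networkx as nx

source = nx.complete_graph(3)        # triangle to embed
target = nx.path_graph(4)            # path 0-1-2-3 to embed into
emb = {0: [0], 1: [1], 2: [2, 3]}    # candidate chains

# Each yielded item is (ExceptionClass, arg1, arg2, ...). Here the chains for
# source nodes 0 and 2 share no target edge, so a MissingEdgeError is reported.
for exc_class, *exc_args in diagnose_embedding(emb, source, target):
    print(exc_class.__name__, exc_args)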
def set_power_configuration(policy=None, delayType=None, delayValue=None):
'''
Sets the power configuration on the device. This is only available for some
C-Series servers.
.. versionadded:: 2019.2.0
Args:
policy(str): The action to be taken when chassis power is restored after
an unexpected power loss. This can be one of the following:
reset: The server is allowed to boot up normally when power is
restored. The server can restart immediately or, optionally, after a
fixed or random delay.
stay-off: The server remains off until it is manually restarted.
last-state: The server restarts and the system attempts to restore
any processes that were running before power was lost.
delayType(str): If the selected policy is reset, the restart can be
delayed with this option. This can be one of the following:
fixed: The server restarts after a fixed delay.
random: The server restarts after a random delay.
delayValue(int): If a fixed delay is selected, once chassis power is
restored and the Cisco IMC has finished rebooting, the system waits for
the specified number of seconds before restarting the server. Enter an
integer between 0 and 240.
CLI Example:
.. code-block:: bash
salt '*' cimc.set_power_configuration stay-off
salt '*' cimc.set_power_configuration reset fixed 0
'''
query = ""
if policy == "reset":
query = ' vpResumeOnACPowerLoss="reset"'
if delayType:
if delayType == "fixed":
query += ' delayType="fixed"'
if delayValue:
query += ' delay="{0}"'.format(delayValue)
elif delayType == "random":
query += ' delayType="random"'
else:
raise salt.exceptions.CommandExecutionError("Invalid delay type entered.")
elif policy == "stay-off":
query = ' vpResumeOnACPowerLoss="stay-off"'
elif policy == "last-state":
query = ' vpResumeOnACPowerLoss="last-state"'
else:
raise salt.exceptions.CommandExecutionError("The power state must be specified.")
dn = "sys/rack-unit-1/board/Resume-on-AC-power-loss"
inconfig = """<biosVfResumeOnACPowerLoss
dn="sys/rack-unit-1/board/Resume-on-AC-power-loss"{0}>
</biosVfResumeOnACPowerLoss>""".format(query)
ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False)
return ret | Sets the power configuration on the device. This is only available for some
C-Series servers.
.. versionadded:: 2019.2.0
Args:
policy(str): The action to be taken when chassis power is restored after
an unexpected power loss. This can be one of the following:
reset: The server is allowed to boot up normally when power is
restored. The server can restart immediately or, optionally, after a
fixed or random delay.
stay-off: The server remains off until it is manually restarted.
last-state: The server restarts and the system attempts to restore
any processes that were running before power was lost.
delayType(str): If the selected policy is reset, the restart can be
delayed with this option. This can be one of the following:
fixed: The server restarts after a fixed delay.
random: The server restarts after a random delay.
delayValue(int): If a fixed delay is selected, once chassis power is
restored and the Cisco IMC has finished rebooting, the system waits for
the specified number of seconds before restarting the server. Enter an
integer between 0 and 240.
CLI Example:
.. code-block:: bash
salt '*' cimc.set_power_configuration stay-off
salt '*' cimc.set_power_configuration reset fixed 0 | Below is the instruction that describes the task:
### Input:
Sets the power configuration on the device. This is only available for some
C-Series servers.
.. versionadded:: 2019.2.0
Args:
policy(str): The action to be taken when chassis power is restored after
an unexpected power loss. This can be one of the following:
reset: The server is allowed to boot up normally when power is
restored. The server can restart immediately or, optionally, after a
fixed or random delay.
stay-off: The server remains off until it is manually restarted.
last-state: The server restarts and the system attempts to restore
any processes that were running before power was lost.
delayType(str): If the selected policy is reset, the restart can be
delayed with this option. This can be one of the following:
fixed: The server restarts after a fixed delay.
random: The server restarts after a random delay.
delayValue(int): If a fixed delay is selected, once chassis power is
restored and the Cisco IMC has finished rebooting, the system waits for
the specified number of seconds before restarting the server. Enter an
integer between 0 and 240.
CLI Example:
.. code-block:: bash
salt '*' cimc.set_power_configuration stay-off
salt '*' cimc.set_power_configuration reset fixed 0
### Response:
def set_power_configuration(policy=None, delayType=None, delayValue=None):
'''
Sets the power configuration on the device. This is only available for some
C-Series servers.
.. versionadded:: 2019.2.0
Args:
policy(str): The action to be taken when chassis power is restored after
an unexpected power loss. This can be one of the following:
reset: The server is allowed to boot up normally when power is
restored. The server can restart immediately or, optionally, after a
fixed or random delay.
stay-off: The server remains off until it is manually restarted.
last-state: The server restarts and the system attempts to restore
any processes that were running before power was lost.
delayType(str): If the selected policy is reset, the restart can be
delayed with this option. This can be one of the following:
fixed: The server restarts after a fixed delay.
random: The server restarts after a random delay.
delayValue(int): If a fixed delay is selected, once chassis power is
restored and the Cisco IMC has finished rebooting, the system waits for
the specified number of seconds before restarting the server. Enter an
integer between 0 and 240.
CLI Example:
.. code-block:: bash
salt '*' cimc.set_power_configuration stay-off
salt '*' cimc.set_power_configuration reset fixed 0
'''
query = ""
if policy == "reset":
query = ' vpResumeOnACPowerLoss="reset"'
if delayType:
if delayType == "fixed":
query += ' delayType="fixed"'
if delayValue:
query += ' delay="{0}"'.format(delayValue)
elif delayType == "random":
query += ' delayType="random"'
else:
raise salt.exceptions.CommandExecutionError("Invalid delay type entered.")
elif policy == "stay-off":
query = ' vpResumeOnACPowerLoss="stay-off"'
elif policy == "last-state":
query = ' vpResumeOnACPowerLoss="last-state"'
else:
raise salt.exceptions.CommandExecutionError("The power state must be specified.")
dn = "sys/rack-unit-1/board/Resume-on-AC-power-loss"
inconfig = """<biosVfResumeOnACPowerLoss
dn="sys/rack-unit-1/board/Resume-on-AC-power-loss"{0}>
</biosVfResumeOnACPowerLoss>""".format(query)
ret = __proxy__['cimc.set_config_modify'](dn, inconfig, False)
return ret |
def _get_host(name, array):
'''Private function to check host'''
host = None
for temp in array.list_hosts():
if temp['name'] == name:
host = temp
break
return host | Private function to check host | Below is the instruction that describes the task:
### Input:
Private function to check host
### Response:
def _get_host(name, array):
'''Private function to check host'''
host = None
for temp in array.list_hosts():
if temp['name'] == name:
host = temp
break
return host |
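A brief sketch of exercising the private helper above, assuming the array handle comes from the purestorage REST client; the target address and token are placeholders.

import purestorage

array = purestorage.FlashArray('flasharray.example.com', api_token='<api-token>')
host = _get_host('oracle-db-01', array)
print(host['name'] if host else 'host not found on array')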
def set_font(self, family,style='',size=0):
"Select a font; size given in points"
family=family.lower()
if(family==''):
family=self.font_family
if(family=='arial'):
family='helvetica'
elif(family=='symbol' or family=='zapfdingbats'):
style=''
style=style.upper()
if('U' in style):
self.underline=1
style=style.replace('U','')
else:
self.underline=0
if(style=='IB'):
style='BI'
if(size==0):
size=self.font_size_pt
#Test if font is already selected
if(self.font_family==family and self.font_style==style and self.font_size_pt==size):
return
#Test if used for the first time
fontkey=family+style
if fontkey not in self.fonts:
#Check if one of the standard fonts
if fontkey in self.core_fonts:
if fontkey not in fpdf_charwidths:
#Load metric file
name=os.path.join(FPDF_FONT_DIR,family)
if(family=='times' or family=='helvetica'):
name+=style.lower()
exec(compile(open(name+'.font').read(), name+'.font', 'exec'))
if fontkey not in fpdf_charwidths:
self.error('Could not include font metric file for'+fontkey)
i=len(self.fonts)+1
self.fonts[fontkey]={'i':i,'type':'core','name':self.core_fonts[fontkey],'up':-100,'ut':50,'cw':fpdf_charwidths[fontkey]}
else:
self.error('Undefined font: '+family+' '+style)
#Select it
self.font_family=family
self.font_style=style
self.font_size_pt=size
self.font_size=size/self.k
self.current_font=self.fonts[fontkey]
self.unifontsubset = (self.fonts[fontkey]['type'] == 'TTF')
if(self.page>0):
self._out(sprintf('BT /F%d %.2f Tf ET',self.current_font['i'],self.font_size_pt)) | Select a font; size given in points | Below is the instruction that describes the task:
### Input:
Select a font; size given in points
### Response:
def set_font(self, family,style='',size=0):
"Select a font; size given in points"
family=family.lower()
if(family==''):
family=self.font_family
if(family=='arial'):
family='helvetica'
elif(family=='symbol' or family=='zapfdingbats'):
style=''
style=style.upper()
if('U' in style):
self.underline=1
style=style.replace('U','')
else:
self.underline=0
if(style=='IB'):
style='BI'
if(size==0):
size=self.font_size_pt
#Test if font is already selected
if(self.font_family==family and self.font_style==style and self.font_size_pt==size):
return
#Test if used for the first time
fontkey=family+style
if fontkey not in self.fonts:
#Check if one of the standard fonts
if fontkey in self.core_fonts:
if fontkey not in fpdf_charwidths:
#Load metric file
name=os.path.join(FPDF_FONT_DIR,family)
if(family=='times' or family=='helvetica'):
name+=style.lower()
exec(compile(open(name+'.font').read(), name+'.font', 'exec'))
if fontkey not in fpdf_charwidths:
self.error('Could not include font metric file for'+fontkey)
i=len(self.fonts)+1
self.fonts[fontkey]={'i':i,'type':'core','name':self.core_fonts[fontkey],'up':-100,'ut':50,'cw':fpdf_charwidths[fontkey]}
else:
self.error('Undefined font: '+family+' '+style)
#Select it
self.font_family=family
self.font_style=style
self.font_size_pt=size
self.font_size=size/self.k
self.current_font=self.fonts[fontkey]
self.unifontsubset = (self.fonts[fontkey]['type'] == 'TTF')
if(self.page>0):
self._out(sprintf('BT /F%d %.2f Tf ET',self.current_font['i'],self.font_size_pt)) |
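A usage sketch for set_font above in the classic PyFPDF document flow; 'BU' combines bold and underline, and 14 is the size in points.

from fpdf import FPDF

pdf = FPDF()
pdf.add_page()
pdf.set_font('Arial', 'BU', 14)   # 'arial' is mapped to the core Helvetica font above
pdf.cell(0, 10, 'Hello, core fonts')
pdf.output('hello.pdf')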
def _get_irsb(self, v):
"""
Get the IRSB object from an address, a SimRun, or a CFGNode.
:param v: Can be one of the following: an address, or a CFGNode.
:return: The IRSB instance.
:rtype: pyvex.IRSB
"""
if isinstance(v, CFGNode):
v = v.addr
if type(v) is int:
# Generate an IRSB from self._project
if v in self._run_cache:
return self._run_cache[v]
if self.project:
irsb = self.project.factory.block(v, backup_state=self._base_state).vex
self._run_cache[v] = irsb
return irsb
else:
raise AngrBladeError("Project must be specified if you give me all addresses for SimRuns")
else:
raise AngrBladeError('Unsupported SimRun argument type %s', type(v)) | Get the IRSB object from an address, a SimRun, or a CFGNode.
:param v: Can be one of the following: an address, or a CFGNode.
:return: The IRSB instance.
:rtype: pyvex.IRSB | Below is the instruction that describes the task:
### Input:
Get the IRSB object from an address, a SimRun, or a CFGNode.
:param v: Can be one of the following: an address, or a CFGNode.
:return: The IRSB instance.
:rtype: pyvex.IRSB
### Response:
def _get_irsb(self, v):
"""
Get the IRSB object from an address, a SimRun, or a CFGNode.
:param v: Can be one of the following: an address, or a CFGNode.
:return: The IRSB instance.
:rtype: pyvex.IRSB
"""
if isinstance(v, CFGNode):
v = v.addr
if type(v) is int:
# Generate an IRSB from self._project
if v in self._run_cache:
return self._run_cache[v]
if self.project:
irsb = self.project.factory.block(v, backup_state=self._base_state).vex
self._run_cache[v] = irsb
return irsb
else:
raise AngrBladeError("Project must be specified if you give me all addresses for SimRuns")
else:
raise AngrBladeError('Unsupported SimRun argument type %s', type(v)) |
def _parse_args_forward_mode(self, *args) -> Tuple[dict, dict]:
"""
Parse input arguments used in forward mode differentiation.
End result will be two arrays X and dX, each of shape (n) or (T, n)
Allowed input shapes are:
(1) ARRAY_N: two arrays of size n
(2) ARRAY_TxN: two arrays of size Txn
(3) DICT: two dictionaries, each mapping variable names to values
(4) ARGS: a list of 2n values; n variables followed by n seeds
(5) KWARGS: a kwargs list (currently not supported)
"""
# Types for arg_vars and arg_seed
arg_vars: np.ndarray = {}
arg_seed: np.ndarray = {}
# Get the number of arguments and inputs
argc: int = len(args)
m: int = self.m
# Check each type in turn
# The most common case is two arguments were passed for the vars and seeds
# They can both be numpy arrays or dicts
if argc == 0:
return (None, None)
if argc == 1:
# The lone argument
arg = args[0]
# Case zero: None
if arg is None:
return (None, None)
# Case one: a dict
elif isinstance(arg, dict):
arg_vars = self._parse_var_dict(arg)
# Case two: a scalar
elif isinstance(arg, scalar_instance_types):
arg_vars = self._parse_var_scalar(arg)
# Case three: a numpy array
elif isinstance(arg, np.ndarray):
arg_vars = self._parse_var_array(arg)
T_vars = self._check_forward_mode_input_dict(arg_vars)
self.T = T_vars
return (arg_vars, self._default_seed(arg_vars))
if argc == 2:
# Case one: a pair of dicts
if isinstance(args[0], dict) and isinstance(args[1], dict):
arg_vars = self._parse_var_dict(args[0])
arg_seed = self._parse_seed_dict(args[1])
# Case two: two scalars
elif isinstance(args[0], scalar_instance_types) and isinstance(args[1], scalar_instance_types):
arg_vars = self._parse_var_scalar(args[0])
arg_seed = self._parse_var_scalar(args[1])
# Case three: two numpy arrays
elif isinstance(args[0], np.ndarray) and isinstance(args[1], np.ndarray):
arg_vars = self._parse_var_array(args[0])
arg_seed = self._parse_var_array(args[1])
else:
raise ValueError(f'Input types must either be dict, scalar, or np.ndarray for Fluxion.parse_args.')
T_vars = self._check_forward_mode_input_dict(arg_vars)
T_seed = self._check_forward_mode_input_dict(arg_seed)
self.T = T_vars
if T_seed in (1,T_vars):
return (arg_vars, arg_seed)
else:
raise ValueError(f'Bound variables in {args[0]} inconsistent with bound variables in {args[1]}')
# If we reach here, we either got ARGS or KWARGS
if argc == 2 * m:
# Initialize X and dX in the correct shape
X = np.array(args[0:m],dtype=np.float64)
dX = np.array(args[m:2*m],dtype=np.float64)
# Reevaluate the two arrays
return self._parse_args_forward_mode(X, dX)
# KWARGS not yet supported
msg = f'argc={argc}'
for arg in args:
msg += f'{arg}'
raise ValueError(f'Unrecognized input type for Fluxion.parse_args. Details:\n{msg}') | Parse input arguments used in forward mode differentiation.
End result will be two arrays X and dX, each of shape (n) or (T, n)
Allowed input shapes are:
(1) ARRAY_N: two arrays of size n
(2) ARRAY_TxN: two arrays of size Txn
(3) DICT: two dictionaries, each mapping variable names to values
(4) ARGS: a list of 2n values; n variables followed by n seeds
(5) KWARGS: a kwargs list (currently not supported) | Below is the instruction that describes the task:
### Input:
Parse input arguments used in forward mode differentiation.
End result will be two arrays X and dX, each of shape (n) or (T, n)
Allowed input shapes are:
(1) ARRAY_N: two arrays of size n
(2) ARRAY_TxN: two arrays of size Txn
(3) DICT: two dictionaries, each mapping variable names to values
(4) ARGS: a list of 2n values; n variables followed by n seeds
(5) KWARGS: a kwargs list (currently not supported)
### Response:
def _parse_args_forward_mode(self, *args) -> Tuple[dict, dict]:
"""
Parse input arguments used in forward mode differentiation.
End result will be two arrays X and dX, each of shape (n) or (T, n)
Allowed input shapes are:
(1) ARRAY_N: two arrays of size n
(2) ARRAY_TxN: two arrays of size Txn
(3) DICT: two dictionaries, each mapping variable names to values
(4) ARGS: a list of 2n values; n variables followed by n seeds
(5) KWARGS: a kwargs list (currently not supported)
"""
# Types for arg_vars and arg_seed
arg_vars: np.ndarray = {}
arg_seed: np.ndarray = {}
# Get the number of arguments and inputs
argc: int = len(args)
m: int = self.m
# Check each type in turn
# The most common case is two arguments were passed for the vars and seeds
# They can both be numpy arrays or dicts
if argc == 0:
return (None, None)
if argc == 1:
# The lone argument
arg = args[0]
# Case zero: None
if arg is None:
return (None, None)
# Case one: a dict
elif isinstance(arg, dict):
arg_vars = self._parse_var_dict(arg)
# Case two: a scalar
elif isinstance(arg, scalar_instance_types):
arg_vars = self._parse_var_scalar(arg)
# Case three: a numpy array
elif isinstance(arg, np.ndarray):
arg_vars = self._parse_var_array(arg)
T_vars = self._check_forward_mode_input_dict(arg_vars)
self.T = T_vars
return (arg_vars, self._default_seed(arg_vars))
if argc == 2:
# Case one: a pair of dicts
if isinstance(args[0], dict) and isinstance(args[1], dict):
arg_vars = self._parse_var_dict(args[0])
arg_seed = self._parse_seed_dict(args[1])
# Case two: two scalars
elif isinstance(args[0], scalar_instance_types) and isinstance(args[1], scalar_instance_types):
arg_vars = self._parse_var_scalar(args[0])
arg_seed = self._parse_var_scalar(args[1])
# Case three: two numpy arrays
elif isinstance(args[0], np.ndarray) and isinstance(args[1], np.ndarray):
arg_vars = self._parse_var_array(args[0])
arg_seed = self._parse_var_array(args[1])
else:
raise ValueError(f'Input types must either be dict, scalar, or np.ndarray for Fluxion.parse_args.')
T_vars = self._check_forward_mode_input_dict(arg_vars)
T_seed = self._check_forward_mode_input_dict(arg_seed)
self.T = T_vars
if T_seed in (1,T_vars):
return (arg_vars, arg_seed)
else:
raise ValueError(f'Bound variables in {args[0]} inconsistent with bound variables in {args[1]}')
# If we reach here, we either got ARGS or KWARGS
if argc == 2 * m:
# Initialize X and dX in the correct shape
X = np.array(args[0:m],dtype=np.float64)
dX = np.array(args[m:2*m],dtype=np.float64)
# Reevaluate the two arrays
return self._parse_args_forward_mode(X, dX)
# KWARGS not yet supported
msg = f'argc={argc}'
for arg in args:
msg += f'{arg}'
raise ValueError(f'Unrecognized input type for Fluxion.parse_args. Details:\n{msg}') |
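A sketch of the accepted input shapes for the parser above, assuming an instance f of the surrounding class with f.m == 2 bound variables named 'x' and 'y' (the instance and variable names are assumptions).

import numpy as np

X = np.array([1.0, 2.0])    # variable values, shape (n,)
dX = np.array([1.0, 0.0])   # seed values, shape (n,)

f._parse_args_forward_mode(X, dX)                      # two arrays of size n
f._parse_args_forward_mode({'x': 1.0, 'y': 2.0},
                           {'x': 1.0, 'y': 0.0})       # two dicts of values and seeds
f._parse_args_forward_mode(1.0, 2.0, 1.0, 0.0)         # 2n scalars: variables, then seeds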
def _add_tumor_params(paired, items, gatk_type):
"""Add tumor/normal BAM input parameters to command line.
"""
params = []
if not paired:
raise ValueError("Specified MuTect2 calling but 'tumor' phenotype not present in batch\n"
"https://bcbio-nextgen.readthedocs.org/en/latest/contents/"
"pipelines.html#cancer-variant-calling\n"
"for samples: %s" % ", " .join([dd.get_sample_name(x) for x in items]))
if gatk_type == "gatk4":
params += ["-I", paired.tumor_bam]
params += ["--tumor-sample", paired.tumor_name]
else:
params += ["-I:tumor", paired.tumor_bam]
if paired.normal_bam is not None:
if gatk_type == "gatk4":
params += ["-I", paired.normal_bam]
params += ["--normal-sample", paired.normal_name]
else:
params += ["-I:normal", paired.normal_bam]
if paired.normal_panel is not None:
panel_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(items[0]), "mutect2", "panels"))
normal_panel = vcfutils.bgzip_and_index(paired.normal_panel, items[0]["config"], out_dir=panel_dir)
if gatk_type == "gatk4":
params += ["--panel-of-normals", normal_panel]
else:
params += ["--normal_panel", normal_panel]
return params | Add tumor/normal BAM input parameters to command line. | Below is the instruction that describes the task:
### Input:
Add tumor/normal BAM input parameters to command line.
### Response:
def _add_tumor_params(paired, items, gatk_type):
"""Add tumor/normal BAM input parameters to command line.
"""
params = []
if not paired:
raise ValueError("Specified MuTect2 calling but 'tumor' phenotype not present in batch\n"
"https://bcbio-nextgen.readthedocs.org/en/latest/contents/"
"pipelines.html#cancer-variant-calling\n"
"for samples: %s" % ", " .join([dd.get_sample_name(x) for x in items]))
if gatk_type == "gatk4":
params += ["-I", paired.tumor_bam]
params += ["--tumor-sample", paired.tumor_name]
else:
params += ["-I:tumor", paired.tumor_bam]
if paired.normal_bam is not None:
if gatk_type == "gatk4":
params += ["-I", paired.normal_bam]
params += ["--normal-sample", paired.normal_name]
else:
params += ["-I:normal", paired.normal_bam]
if paired.normal_panel is not None:
panel_dir = utils.safe_makedir(os.path.join(dd.get_work_dir(items[0]), "mutect2", "panels"))
normal_panel = vcfutils.bgzip_and_index(paired.normal_panel, items[0]["config"], out_dir=panel_dir)
if gatk_type == "gatk4":
params += ["--panel-of-normals", normal_panel]
else:
params += ["--normal_panel", normal_panel]
return params |
def parse_time(s):
"""
Like datetime.datetime.strptime(s, "%w %Y/%m/%d %H:%M:%S") but 5x faster.
"""
result = None
if "epoch" in s:
epoch_time = float(s.rstrip().split(' ')[1][:-1])
result = datetime.datetime.utcfromtimestamp(epoch_time)
else:
_, date_part, time_part = s.split(' ')
year, mon, day = date_part.split('/')
hour, minute, sec = time_part.split(':')
result = datetime.datetime(*map(int, (year, mon, day, hour, minute, sec)))
return result | Like datetime.datetime.strptime(s, "%w %Y/%m/%d %H:%M:%S") but 5x faster. | Below is the instruction that describes the task:
### Input:
Like datetime.datetime.strptime(s, "%w %Y/%m/%d %H:%M:%S") but 5x faster.
### Response:
def parse_time(s):
"""
Like datetime.datetime.strptime(s, "%w %Y/%m/%d %H:%M:%S") but 5x faster.
"""
result = None
if "epoch" in s:
epoch_time = float(s.rstrip().split(' ')[1][:-1])
result = datetime.datetime.utcfromtimestamp(epoch_time)
else:
_, date_part, time_part = s.split(' ')
year, mon, day = date_part.split('/')
hour, minute, sec = time_part.split(':')
result = datetime.datetime(*map(int, (year, mon, day, hour, minute, sec)))
return result |
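A quick consistency check for parse_time above against strptime, assuming the helper is in scope; the sample string follows the "%w %Y/%m/%d %H:%M:%S" layout from the docstring (0 = Sunday).

import datetime

sample = "0 2020/03/15 12:30:45"   # Sunday, 15 March 2020
expected = datetime.datetime.strptime(sample, "%w %Y/%m/%d %H:%M:%S")
assert parse_time(sample) == expected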
def add(workflow_definition: dict, templates_root: str):
"""Add a workflow definition to the Configuration Database.
Templates are expected to be found in a directory tree with the following
structure:
- workflow_id:
|- workflow_version
|- stage_id
|- stage_version
|- <templates>
Args:
workflow_definition (dict): Workflow definition.
templates_root (str): Workflow templates root path
"""
schema_path = join(dirname(__file__), 'schema', 'workflow_definition.json')
with open(schema_path, 'r') as file:
schema = json.loads(file.read())
jsonschema.validate(workflow_definition, schema)
_id = workflow_definition['id']
_version = workflow_definition['version']
_load_templates(workflow_definition, templates_root)
workflow_id = workflow_definition['id']
version = workflow_definition['version']
name = "workflow_definitions:{}:{}".format(workflow_id, version)
if DB.get_keys(name):
raise KeyError('Workflow definition already exists: {}'.format(name))
# DB.set_hash_values(name, workflow_definition)
DB.save_dict(name, workflow_definition, hierarchical=False) | Add a workflow definition to the Configuration Database.
Templates are expected to be found in a directory tree with the following
structure:
- workflow_id:
|- workflow_version
|- stage_id
|- stage_version
|- <templates>
Args:
workflow_definition (dict): Workflow definition.
templates_root (str): Workflow templates root path | Below is the instruction that describes the task:
### Input:
Add a workflow definition to the Configuration Database.
Templates are expected to be found in a directory tree with the following
structure:
- workflow_id:
|- workflow_version
|- stage_id
|- stage_version
|- <templates>
Args:
workflow_definition (dict): Workflow definition.
templates_root (str): Workflow templates root path
### Response:
def add(workflow_definition: dict, templates_root: str):
"""Add a workflow definition to the Configuration Database.
Templates are expected to be found in a directory tree with the following
structure:
- workflow_id:
|- workflow_version
|- stage_id
|- stage_version
|- <templates>
Args:
workflow_definition (dict): Workflow definition.
templates_root (str): Workflow templates root path
"""
schema_path = join(dirname(__file__), 'schema', 'workflow_definition.json')
with open(schema_path, 'r') as file:
schema = json.loads(file.read())
jsonschema.validate(workflow_definition, schema)
_id = workflow_definition['id']
_version = workflow_definition['version']
_load_templates(workflow_definition, templates_root)
workflow_id = workflow_definition['id']
version = workflow_definition['version']
name = "workflow_definitions:{}:{}".format(workflow_id, version)
if DB.get_keys(name):
raise KeyError('Workflow definition already exists: {}'.format(name))
# DB.set_hash_values(name, workflow_definition)
DB.save_dict(name, workflow_definition, hierarchical=False) |
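A hedged sketch of calling add() above; only 'id' and 'version' are read directly in the code shown, while the JSON schema and _load_templates will expect a complete definition plus a matching template tree, so the dict below is illustrative rather than valid.

workflow = {
    'id': 'vis_ingest',
    'version': '1.0.0',
    # ...remaining fields required by workflow_definition.json and the
    # stage/template tree described in the docstring...
}
add(workflow, templates_root='workflow-definitions')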
def gcs_files(prefix_filter=None):
"""List all files in GCS bucket."""
top_level_xml_str = download_gcs_file("", prefix_filter=prefix_filter)
xml_root = ElementTree.fromstring(top_level_xml_str)
filenames = [el[0].text for el in xml_root if el.tag.endswith("Contents")]
return filenames | List all files in GCS bucket. | Below is the instruction that describes the task:
### Input:
List all files in GCS bucket.
### Response:
def gcs_files(prefix_filter=None):
"""List all files in GCS bucket."""
top_level_xml_str = download_gcs_file("", prefix_filter=prefix_filter)
xml_root = ElementTree.fromstring(top_level_xml_str)
filenames = [el[0].text for el in xml_root if el.tag.endswith("Contents")]
return filenames |
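A small sketch of calling gcs_files above; download_gcs_file and the bucket it reads are defined elsewhere in the same module, so only the call pattern is shown, with a placeholder prefix.

filenames = gcs_files(prefix_filter='data/')
for name in filenames[:5]:
    print(name)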
def shrink_local_fsdb(self, dangling=True, corrupted=True, dryrun=False):
'''shrink local fsdb by removing dangling and/or corrupted files
return number of deleted files
'''
log.debug('shrinking local fsdb [danglings={}, corrupted={}]'.format(dangling, corrupted))
count = 0
if dangling:
for fid in self.dangling_files():
log.info("shrinking: removing dangling '{}'".format(fid))
if not dryrun:
self._fsdb.remove(fid)
count += 1
if corrupted:
for fid in self._fsdb.corrupted():
log.info("shrinking: removing corrupted '{}'".format(fid))
if not dryrun:
self._fsdb.remove(fid)
count += 1
return count | shrink local fsdb by removing dangling and/or corrupted files
return number of deleted files | Below is the instruction that describes the task:
### Input:
shrink local fsdb by removing dangling and/or corrupted files
return number of deleted files
### Response:
def shrink_local_fsdb(self, dangling=True, corrupted=True, dryrun=False):
'''shrink local fsdb by removing dangling and/or corrupted files
return number of deleted files
'''
log.debug('shrinking local fsdb [danglings={}, corrupted={}]'.format(dangling, corrupted))
count = 0
if dangling:
for fid in self.dangling_files():
log.info("shrinking: removing dangling '{}'".format(fid))
if not dryrun:
self._fsdb.remove(fid)
count += 1
if corrupted:
for fid in self._fsdb.corrupted():
log.info("shrinking: removing corrupted '{}'".format(fid))
if not dryrun:
self._fsdb.remove(fid)
count += 1
return count |
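A usage sketch under the assumption that some object (called store here) exposes the method above; the dry run is shown first so the preview deletes nothing.
# Hypothetical caller; 'store' stands in for whatever object provides shrink_local_fsdb().
would_remove = store.shrink_local_fsdb(dangling=True, corrupted=True, dryrun=True)
print("dry run: {} files would be removed".format(would_remove))
removed = store.shrink_local_fsdb(dangling=True, corrupted=True, dryrun=False)
print("removed {} files".format(removed))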
def on_post(self, req, resp, handler=None, **kwargs):
"""Respond on POST HTTP request assuming resource creation flow.
This request handler assumes that POST requests are associated with
resource creation. Thus default flow for such requests is:
* Create new resource instance and prepare its representation by
calling its creation method handler.
* Try to retrieve URI of newly created object using
``self.get_object_location()``. If it succeeds use that URI as the
value of ``Location`` header in response object instance.
* Set response status code to ``201 Created``.
Args:
req (falcon.Request): request object instance.
resp (falcon.Response): response object instance to be modified
handler (method): creation method handler to be called. Defaults
to ``self.create``.
**kwargs: additional keyword arguments retrieved from url template.
"""
obj = self.handle(
handler or self.create, req, resp, **kwargs
)
try:
resp.location = self.get_object_location(obj)
except NotImplementedError:
pass
resp.status = falcon.HTTP_CREATED | Respond on POST HTTP request assuming resource creation flow.
This request handler assumes that POST requests are associated with
resource creation. Thus default flow for such requests is:
* Create new resource instance and prepare its representation by
calling its creation method handler.
* Try to retrieve URI of newly created object using
``self.get_object_location()``. If it succeeds use that URI as the
value of ``Location`` header in response object instance.
* Set response status code to ``201 Created``.
Args:
req (falcon.Request): request object instance.
resp (falcon.Response): response object instance to be modified
handler (method): creation method handler to be called. Defaults
to ``self.create``.
**kwargs: additional keyword arguments retrieved from url template. | Below is the instruction that describes the task:
### Input:
Respond on POST HTTP request assuming resource creation flow.
This request handler assumes that POST requests are associated with
resource creation. Thus default flow for such requests is:
* Create new resource instance and prepare its representation by
calling its creation method handler.
* Try to retrieve URI of newly created object using
``self.get_object_location()``. If it succeeds use that URI as the
value of ``Location`` header in response object instance.
* Set response status code to ``201 Created``.
Args:
req (falcon.Request): request object instance.
resp (falcon.Response): response object instance to be modified
handler (method): creation method handler to be called. Defaults
to ``self.create``.
**kwargs: additional keyword arguments retrieved from url template.
### Response:
def on_post(self, req, resp, handler=None, **kwargs):
"""Respond on POST HTTP request assuming resource creation flow.
This request handler assumes that POST requests are associated with
resource creation. Thus default flow for such requests is:
* Create new resource instance and prepare its representation by
calling its creation method handler.
* Try to retrieve URI of newly created object using
``self.get_object_location()``. If it succeeds use that URI as the
value of ``Location`` header in response object instance.
* Set response status code to ``201 Created``.
Args:
req (falcon.Request): request object instance.
resp (falcon.Response): response object instance to be modified
handler (method): creation method handler to be called. Defaults
to ``self.create``.
**kwargs: additional keyword arguments retrieved from url template.
"""
obj = self.handle(
handler or self.create, req, resp, **kwargs
)
try:
resp.location = self.get_object_location(obj)
except NotImplementedError:
pass
resp.status = falcon.HTTP_CREATED |
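A sketch of how a concrete resource might plug into this handler. The base class name, storage call, and URI pattern are assumptions; only the create/get_object_location hooks come from the code above.
class UsersResource(BaseResource):  # BaseResource is assumed to define on_post/handle as shown above
    def create(self, req, resp, **kwargs):
        payload = req.media                   # parsed request body (falcon Request.media)
        return storage.insert_user(payload)   # hypothetical persistence call; returned obj feeds Location

    def get_object_location(self, obj):
        # the returned URI becomes the Location header of the 201 Created response
        return "/users/{}".format(obj.id)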
def sed(self, photon_energy, distance=1 * u.kpc, seed=None):
"""Spectral energy distribution at a given distance from the source
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` float or array
Photon energy array.
distance : :class:`~astropy.units.Quantity` float, optional
Distance to the source. If set to 0, the intrinsic luminosity will
be returned. Default is 1 kpc.
seed : int, str or None
Number or name of seed photon field for which the IC contribution
is required. If set to None it will return the sum of all
contributions (default).
"""
sed = super(InverseCompton, self).sed(photon_energy, distance=distance)
if seed is not None:
if distance != 0:
out_unit = "erg/(cm2 s)"
else:
out_unit = "erg/s"
sed = (
self.flux(photon_energy, distance=distance, seed=seed)
* photon_energy ** 2.0
).to(out_unit)
return sed | Spectral energy distribution at a given distance from the source
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` float or array
Photon energy array.
distance : :class:`~astropy.units.Quantity` float, optional
Distance to the source. If set to 0, the intrinsic luminosity will
be returned. Default is 1 kpc.
seed : int, str or None
Number or name of seed photon field for which the IC contribution
is required. If set to None it will return the sum of all
contributions (default). | Below is the instruction that describes the task:
### Input:
Spectral energy distribution at a given distance from the source
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` float or array
Photon energy array.
distance : :class:`~astropy.units.Quantity` float, optional
Distance to the source. If set to 0, the intrinsic luminosity will
be returned. Default is 1 kpc.
seed : int, str or None
Number or name of seed photon field for which the IC contribution
is required. If set to None it will return the sum of all
contributions (default).
### Response:
def sed(self, photon_energy, distance=1 * u.kpc, seed=None):
"""Spectral energy distribution at a given distance from the source
Parameters
----------
photon_energy : :class:`~astropy.units.Quantity` float or array
Photon energy array.
distance : :class:`~astropy.units.Quantity` float, optional
Distance to the source. If set to 0, the intrinsic luminosity will
be returned. Default is 1 kpc.
seed : int, str or None
Number or name of seed photon field for which the IC contribution
is required. If set to None it will return the sum of all
contributions (default).
"""
sed = super(InverseCompton, self).sed(photon_energy, distance=distance)
if seed is not None:
if distance != 0:
out_unit = "erg/(cm2 s)"
else:
out_unit = "erg/s"
sed = (
self.flux(photon_energy, distance=distance, seed=seed)
* photon_energy ** 2.0
).to(out_unit)
return sed |
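A usage sketch assuming an InverseCompton instance named ic has already been constructed with a 'CMB' seed photon field; the energies and distance are illustrative.
import astropy.units as u

# Hypothetical call; compare the total SED with the CMB-only contribution.
energies = [0.1, 1.0, 10.0] * u.TeV
total_sed = ic.sed(energies, distance=2.0 * u.kpc)            # sum over all seed fields
cmb_sed = ic.sed(energies, distance=2.0 * u.kpc, seed="CMB")  # single-seed contribution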