code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def generate_base_provider_parser():
"""Function that generates the base provider to be used by all dns providers."""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('action', help='specify the action to take', default='list',
choices=['create', 'list', 'update', 'delete'])
parser.add_argument(
'domain', help='specify the domain, supports subdomains as well')
parser.add_argument('type', help='specify the entry type', default='TXT',
choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'LOC'])
parser.add_argument('--name', help='specify the record name')
parser.add_argument('--content', help='specify the record content')
parser.add_argument('--ttl', type=int,
help='specify the record time-to-live')
parser.add_argument('--priority', help='specify the record priority')
parser.add_argument(
'--identifier', help='specify the record for update or delete actions')
parser.add_argument('--log_level', help='specify the log level', default='ERROR',
choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'])
parser.add_argument('--output',
help=('specify the type of output: by default a formatted table (TABLE), '
'a formatted table without header (TABLE-NO-HEADER), '
'a JSON string (JSON) or no output (QUIET)'),
default='TABLE', choices=['TABLE', 'TABLE-NO-HEADER', 'JSON', 'QUIET'])
return parser | Function that generates the base provider to be used by all dns providers. | Below is the instruction that describes the task:
### Input:
Function that generates the base provider to be used by all dns providers.
### Response:
def generate_base_provider_parser():
"""Function that generates the base provider to be used by all dns providers."""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('action', help='specify the action to take', default='list',
choices=['create', 'list', 'update', 'delete'])
parser.add_argument(
'domain', help='specify the domain, supports subdomains as well')
parser.add_argument('type', help='specify the entry type', default='TXT',
choices=['A', 'AAAA', 'CNAME', 'MX', 'NS', 'SOA', 'TXT', 'SRV', 'LOC'])
parser.add_argument('--name', help='specify the record name')
parser.add_argument('--content', help='specify the record content')
parser.add_argument('--ttl', type=int,
help='specify the record time-to-live')
parser.add_argument('--priority', help='specify the record priority')
parser.add_argument(
'--identifier', help='specify the record for update or delete actions')
parser.add_argument('--log_level', help='specify the log level', default='ERROR',
choices=['CRITICAL', 'ERROR', 'WARNING', 'INFO', 'DEBUG', 'NOTSET'])
parser.add_argument('--output',
help=('specify the type of output: by default a formatted table (TABLE), '
'a formatted table without header (TABLE-NO-HEADER), '
'a JSON string (JSON) or no output (QUIET)'),
default='TABLE', choices=['TABLE', 'TABLE-NO-HEADER', 'JSON', 'QUIET'])
return parser |
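A minimal usage sketch for the parser returned above; the command-line values are hypothetical, and the function is assumed to be in scope together with the argparse import its module needs:
parser = generate_base_provider_parser()
# 'action', 'domain' and 'type' are positionals; the remaining flags are optional
args = parser.parse_args(['list', 'example.com', 'TXT', '--name', 'www', '--ttl', '300'])
print(args.action, args.domain, args.type, args.ttl)  # list example.com TXT 300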
def _AskUser(self):
"""Prompt the user for the next action.
Returns:
A string, the character entered by the user.
"""
if self._show_percent:
progress = int(self._displayed*100 / (len(self._text.splitlines())))
progress_text = ' (%d%%)' % progress
else:
progress_text = ''
question = AnsiText(
'Enter: next line, Space: next page, '
'b: prev page, q: quit.%s' %
progress_text, ['green'])
sys.stdout.write(question)
sys.stdout.flush()
ch = self._GetCh()
sys.stdout.write('\r%s\r' % (' '*len(question)))
sys.stdout.flush()
return ch | Prompt the user for the next action.
Returns:
A string, the character entered by the user. | Below is the instruction that describes the task:
### Input:
Prompt the user for the next action.
Returns:
A string, the character entered by the user.
### Response:
def _AskUser(self):
"""Prompt the user for the next action.
Returns:
A string, the character entered by the user.
"""
if self._show_percent:
progress = int(self._displayed*100 / (len(self._text.splitlines())))
progress_text = ' (%d%%)' % progress
else:
progress_text = ''
question = AnsiText(
'Enter: next line, Space: next page, '
'b: prev page, q: quit.%s' %
progress_text, ['green'])
sys.stdout.write(question)
sys.stdout.flush()
ch = self._GetCh()
sys.stdout.write('\r%s\r' % (' '*len(question)))
sys.stdout.flush()
return ch |
def design_matrix(phases, degree):
r"""
Constructs an :math:`N \times 2n+1` matrix of the form:
.. math::
\begin{bmatrix}
1
& \sin(1 \cdot 2\pi \cdot \phi_0)
& \cos(1 \cdot 2\pi \cdot \phi_0)
& \ldots
& \sin(n \cdot 2\pi \cdot \phi_0)
& \cos(n \cdot 2\pi \cdot \phi_0)
\\
\vdots
& \vdots
& \vdots
& \ddots
& \vdots
& \vdots
\\
1
& \sin(1 \cdot 2\pi \cdot \phi_N)
& \cos(1 \cdot 2\pi \cdot \phi_N)
& \ldots
& \sin(n \cdot 2\pi \cdot \phi_N)
& \cos(n \cdot 2\pi \cdot \phi_N)
\end{bmatrix}
where :math:`n =` *degree*, :math:`N =` *n_samples*, and
:math:`\phi_i =` *phases[i]*.
Parameters
----------
phases : array-like, shape = [n_samples]
"""
n_samples = phases.size
# initialize coefficient matrix
M = numpy.empty((n_samples, 2*degree+1))
# indices
i = numpy.arange(1, degree+1)
# initialize the Nxn matrix that is repeated within the
# sine and cosine terms
x = numpy.empty((n_samples, degree))
# the Nxn matrix now has N copies of the same row, and each row is
# integer multiples of pi counting from 1 to the degree
x[:,:] = i*2*numpy.pi
# multiply each row of x by the phases
x.T[:,:] *= phases
# place 1's in the first column of the coefficient matrix
M[:,0] = 1
# the odd indices of the coefficient matrix have sine terms
M[:,1::2] = numpy.sin(x)
# the even indices of the coefficient matrix have cosine terms
M[:,2::2] = numpy.cos(x)
return M | r"""
Constructs an :math:`N \times 2n+1` matrix of the form:
.. math::
\begin{bmatrix}
1
& \sin(1 \cdot 2\pi \cdot \phi_0)
& \cos(1 \cdot 2\pi \cdot \phi_0)
& \ldots
& \sin(n \cdot 2\pi \cdot \phi_0)
& \cos(n \cdot 2\pi \cdot \phi_0)
\\
\vdots
& \vdots
& \vdots
& \ddots
& \vdots
& \vdots
\\
1
& \sin(1 \cdot 2\pi \cdot \phi_N)
& \cos(1 \cdot 2\pi \cdot \phi_N)
& \ldots
& \sin(n \cdot 2\pi \cdot \phi_N)
& \cos(n \cdot 2\pi \cdot \phi_N)
\end{bmatrix}
where :math:`n =` *degree*, :math:`N =` *n_samples*, and
:math:`\phi_i =` *phases[i]*.
Parameters
----------
phases : array-like, shape = [n_samples] | Below is the instruction that describes the task:
### Input:
r"""
Constructs an :math:`N \times 2n+1` matrix of the form:
.. math::
\begin{bmatrix}
1
& \sin(1 \cdot 2\pi \cdot \phi_0)
& \cos(1 \cdot 2\pi \cdot \phi_0)
& \ldots
& \sin(n \cdot 2\pi \cdot \phi_0)
& \cos(n \cdot 2\pi \cdot \phi_0)
\\
\vdots
& \vdots
& \vdots
& \ddots
& \vdots
& \vdots
\\
1
& \sin(1 \cdot 2\pi \cdot \phi_N)
& \cos(1 \cdot 2\pi \cdot \phi_N)
& \ldots
& \sin(n \cdot 2\pi \cdot \phi_N)
& \cos(n \cdot 2\pi \cdot \phi_N)
\end{bmatrix}
where :math:`n =` *degree*, :math:`N =` *n_samples*, and
:math:`\phi_i =` *phases[i]*.
Parameters
----------
phases : array-like, shape = [n_samples]
### Response:
def design_matrix(phases, degree):
r"""
Constructs an :math:`N \times 2n+1` matrix of the form:
.. math::
\begin{bmatrix}
1
& \sin(1 \cdot 2\pi \cdot \phi_0)
& \cos(1 \cdot 2\pi \cdot \phi_0)
& \ldots
& \sin(n \cdot 2\pi \cdot \phi_0)
& \cos(n \cdot 2\pi \cdot \phi_0)
\\
\vdots
& \vdots
& \vdots
& \ddots
& \vdots
& \vdots
\\
1
& \sin(1 \cdot 2\pi \cdot \phi_N)
& \cos(1 \cdot 2\pi \cdot \phi_N)
& \ldots
& \sin(n \cdot 2\pi \cdot \phi_N)
& \cos(n \cdot 2\pi \cdot \phi_N)
\end{bmatrix}
where :math:`n =` *degree*, :math:`N =` *n_samples*, and
:math:`\phi_i =` *phases[i]*.
Parameters
----------
phases : array-like, shape = [n_samples]
"""
n_samples = phases.size
# initialize coefficient matrix
M = numpy.empty((n_samples, 2*degree+1))
# indices
i = numpy.arange(1, degree+1)
# initialize the Nxn matrix that is repeated within the
# sine and cosine terms
x = numpy.empty((n_samples, degree))
# the Nxn matrix now has N copies of the same row, and each row is
# integer multiples of pi counting from 1 to the degree
x[:,:] = i*2*numpy.pi
# multiply each row of x by the phases
x.T[:,:] *= phases
# place 1's in the first column of the coefficient matrix
M[:,0] = 1
# the odd indices of the coefficient matrix have sine terms
M[:,1::2] = numpy.sin(x)
# the even indices of the coefficient matrix have cosine terms
M[:,2::2] = numpy.cos(x)
return M |
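A short, hypothetical call to design_matrix to make the shape and column layout concrete (numpy must be imported, as the function itself requires):
import numpy

phases = numpy.array([0.0, 0.25, 0.5, 0.75])
M = design_matrix(phases, degree=2)
# shape is (4, 5); columns are [1, sin(2*pi*phi), cos(2*pi*phi), sin(4*pi*phi), cos(4*pi*phi)]
print(M.shape)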
def set_pre_handler(self, handler):
'''pre handler push
return: ret_error or ret_ok
'''
set_flag = False
for protoc in self._pre_handler_table:
if isinstance(handler, self._pre_handler_table[protoc]["type"]):
self._pre_handler_table[protoc]["obj"] = handler
return RET_OK
if set_flag is False:
return RET_ERROR | pre handler push
return: ret_error or ret_ok | Below is the instruction that describes the task:
### Input:
pre handler push
return: ret_error or ret_ok
### Response:
def set_pre_handler(self, handler):
'''pre handler push
return: ret_error or ret_ok
'''
set_flag = False
for protoc in self._pre_handler_table:
if isinstance(handler, self._pre_handler_table[protoc]["type"]):
self._pre_handler_table[protoc]["obj"] = handler
return RET_OK
if set_flag is False:
return RET_ERROR |
def sync_time(self):
"""Sets the time on the pyboard to match the time on the host."""
now = time.localtime(time.time())
self.remote(set_time, (now.tm_year, now.tm_mon, now.tm_mday, now.tm_wday + 1,
now.tm_hour, now.tm_min, now.tm_sec, 0))
return now | Sets the time on the pyboard to match the time on the host. | Below is the instruction that describes the task:
### Input:
Sets the time on the pyboard to match the time on the host.
### Response:
def sync_time(self):
"""Sets the time on the pyboard to match the time on the host."""
now = time.localtime(time.time())
self.remote(set_time, (now.tm_year, now.tm_mon, now.tm_mday, now.tm_wday + 1,
now.tm_hour, now.tm_min, now.tm_sec, 0))
return now |
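The 8-tuple shipped to the board appears to follow the MicroPython RTC datetime layout (year, month, day, weekday, hour, minute, second, subseconds), with the weekday shifted by one as above. A host-side-only sketch of that tuple construction; the remote set_time call is board-specific and not reproduced here:
import time

now = time.localtime(time.time())
rtc_tuple = (now.tm_year, now.tm_mon, now.tm_mday, now.tm_wday + 1,
             now.tm_hour, now.tm_min, now.tm_sec, 0)
print(rtc_tuple)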
def torestfrequency(self, f0, d0):
"""Convert a frequency measure and a doppler measure (e.g.
obtained from another spectral line with a known rest frequency) to
a rest frequency.
:param f0: frequency reference code (see :meth:`frequency`)
:param v0: a doppler measure
Example::
dp = dm.doppler('radio', '2196.24984km/s') # a measured doppler speed
f = dm.frequency('lsrk','1410MHz') # a measured frequency
dm.torestfrequency(f, dp) # the corresponding rest frequency
"""
if is_measure(f0) and f0['type'] == 'frequency' \
and is_measure(d0) and d0['type'] == 'doppler':
return self.torest(f0, d0)
else:
raise TypeError('Illegal Doppler or rest frequency specified') | Convert a frequency measure and a doppler measure (e.g.
obtained from another spectral line with a known rest frequency) to
a rest frequency.
:param f0: frequency reference code (see :meth:`frequency`)
:param v0: a doppler measure
Example::
dp = dm.doppler('radio', '2196.24984km/s') # a measured doppler speed
f = dm.frequency('lsrk','1410MHz') # a measured frequency
dm.torestfrequency(f, dp) # the corresponding rest frequency | Below is the instruction that describes the task:
### Input:
Convert a frequency measure and a doppler measure (e.g.
obtained from another spectral line with a known rest frequency) to
a rest frequency.
:param f0: frequency reference code (see :meth:`frequency`)
:param v0: a doppler measure
Example::
dp = dm.doppler('radio', '2196.24984km/s') # a measured doppler speed
f = dm.frequency('lsrk','1410MHz') # a measured frequency
dm.torestfrequency(f, dp) # the corresponding rest frequency
### Response:
def torestfrequency(self, f0, d0):
"""Convert a frequency measure and a doppler measure (e.g.
obtained from another spectral line with a known rest frequency) to
a rest frequency.
:param f0: frequency reference code (see :meth:`frequency`)
:param v0: a doppler measure
Example::
dp = dm.doppler('radio', '2196.24984km/s') # a measured doppler speed
f = dm.frequency('lsrk','1410MHz') # a measured frequency
dm.torestfrequency(f, dp) # the corresponding rest frequency
"""
if is_measure(f0) and f0['type'] == 'frequency' \
and is_measure(d0) and d0['type'] == 'doppler':
return self.torest(f0, d0)
else:
raise TypeError('Illegal Doppler or rest frequency specified') |
def get_repository_ids_by_asset(self, asset_id):
"""Gets the list of ``Repository`` ``Ids`` mapped to an ``Asset``.
arg: asset_id (osid.id.Id): ``Id`` of an ``Asset``
return: (osid.id.IdList) - list of repository ``Ids``
raise: NotFound - ``asset_id`` is not found
raise: NullArgument - ``asset_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
mgr = self._get_provider_manager('REPOSITORY', local=True)
lookup_session = mgr.get_asset_lookup_session(proxy=self._proxy)
lookup_session.use_federated_repository_view()
asset = lookup_session.get_asset(asset_id)
id_list = []
for idstr in asset._my_map['assignedRepositoryIds']:
id_list.append(Id(idstr))
return IdList(id_list) | Gets the list of ``Repository`` ``Ids`` mapped to an ``Asset``.
arg: asset_id (osid.id.Id): ``Id`` of an ``Asset``
return: (osid.id.IdList) - list of repository ``Ids``
raise: NotFound - ``asset_id`` is not found
raise: NullArgument - ``asset_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the list of ``Repository`` ``Ids`` mapped to an ``Asset``.
arg: asset_id (osid.id.Id): ``Id`` of an ``Asset``
return: (osid.id.IdList) - list of repository ``Ids``
raise: NotFound - ``asset_id`` is not found
raise: NullArgument - ``asset_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_repository_ids_by_asset(self, asset_id):
"""Gets the list of ``Repository`` ``Ids`` mapped to an ``Asset``.
arg: asset_id (osid.id.Id): ``Id`` of an ``Asset``
return: (osid.id.IdList) - list of repository ``Ids``
raise: NotFound - ``asset_id`` is not found
raise: NullArgument - ``asset_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
mgr = self._get_provider_manager('REPOSITORY', local=True)
lookup_session = mgr.get_asset_lookup_session(proxy=self._proxy)
lookup_session.use_federated_repository_view()
asset = lookup_session.get_asset(asset_id)
id_list = []
for idstr in asset._my_map['assignedRepositoryIds']:
id_list.append(Id(idstr))
return IdList(id_list) |
def _write_mosaic(self, key, outfile):
"""Write out mosaic data (or any new data generated within Ginga)
to single-extension FITS.
"""
maxsize = self.settings.get('max_mosaic_size', 1e8) # Default 10k x 10k
channel = self.fv.get_channel(self.chname)
image = channel.datasrc[key]
# Prevent writing very large mosaic
if (image.width * image.height) > maxsize:
s = 'Mosaic too large to be written {0}'.format(image.shape)
self.w.status.set_text(s)
self.logger.error(s)
return
# Insert mosaic data and header into output HDU
hdu = fits.PrimaryHDU(image.get_data())
self._write_header(image, hdu)
# Write history to PRIMARY
self._write_history(key, hdu)
# Write to file
if minversion(astropy, '1.3'):
hdu.writeto(outfile, overwrite=True)
else:
hdu.writeto(outfile, clobber=True) | Write out mosaic data (or any new data generated within Ginga)
to single-extension FITS. | Below is the instruction that describes the task:
### Input:
Write out mosaic data (or any new data generated within Ginga)
to single-extension FITS.
### Response:
def _write_mosaic(self, key, outfile):
"""Write out mosaic data (or any new data generated within Ginga)
to single-extension FITS.
"""
maxsize = self.settings.get('max_mosaic_size', 1e8) # Default 10k x 10k
channel = self.fv.get_channel(self.chname)
image = channel.datasrc[key]
# Prevent writing very large mosaic
if (image.width * image.height) > maxsize:
s = 'Mosaic too large to be written {0}'.format(image.shape)
self.w.status.set_text(s)
self.logger.error(s)
return
# Insert mosaic data and header into output HDU
hdu = fits.PrimaryHDU(image.get_data())
self._write_header(image, hdu)
# Write history to PRIMARY
self._write_history(key, hdu)
# Write to file
if minversion(astropy, '1.3'):
hdu.writeto(outfile, overwrite=True)
else:
hdu.writeto(outfile, clobber=True) |
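A stripped-down sketch of the same write path on a synthetic array, outside the Ginga plugin (assumes astropy and numpy are installed; the overwrite= keyword exists from astropy 1.3 on, which is exactly the version split checked above):
import numpy as np
from astropy.io import fits

hdu = fits.PrimaryHDU(np.zeros((10, 10), dtype=np.float32))
hdu.header['HISTORY'] = 'written by a mosaic sketch'  # stand-in for _write_history
hdu.writeto('mosaic.fits', overwrite=True)  # older astropy releases used clobber=True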
def execute_no_results(self, sock_info, generator):
"""Execute all operations, returning no results (w=0).
"""
if self.uses_collation:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
if self.uses_array_filters:
raise ConfigurationError(
'arrayFilters is unsupported for unacknowledged writes.')
# Cannot have both unacknowledged writes and bypass document validation.
if self.bypass_doc_val and sock_info.max_wire_version >= 4:
raise OperationFailure("Cannot set bypass_document_validation with"
" unacknowledged write concern")
# OP_MSG
if sock_info.max_wire_version > 5:
if self.ordered:
return self.execute_command_no_results(sock_info, generator)
return self.execute_op_msg_no_results(sock_info, generator)
coll = self.collection
# If ordered is True we have to send GLE or use write
# commands so we can abort on the first error.
write_concern = WriteConcern(w=int(self.ordered))
op_id = _randint()
next_run = next(generator)
while next_run:
# An ordered bulk write needs to send acknowledged writes to short
# circuit the next run. However, the final message on the final
# run can be unacknowledged.
run = next_run
next_run = next(generator, None)
needs_ack = self.ordered and next_run is not None
try:
if run.op_type == _INSERT:
self.execute_insert_no_results(
sock_info, run, op_id, needs_ack)
elif run.op_type == _UPDATE:
for operation in run.ops:
doc = operation['u']
check_keys = True
if doc and next(iter(doc)).startswith('$'):
check_keys = False
coll._update(
sock_info,
operation['q'],
doc,
operation['upsert'],
check_keys,
operation['multi'],
write_concern=write_concern,
op_id=op_id,
ordered=self.ordered,
bypass_doc_val=self.bypass_doc_val)
else:
for operation in run.ops:
coll._delete(sock_info,
operation['q'],
not operation['limit'],
write_concern,
op_id,
self.ordered)
except OperationFailure:
if self.ordered:
break | Execute all operations, returning no results (w=0). | Below is the instruction that describes the task:
### Input:
Execute all operations, returning no results (w=0).
### Response:
def execute_no_results(self, sock_info, generator):
"""Execute all operations, returning no results (w=0).
"""
if self.uses_collation:
raise ConfigurationError(
'Collation is unsupported for unacknowledged writes.')
if self.uses_array_filters:
raise ConfigurationError(
'arrayFilters is unsupported for unacknowledged writes.')
# Cannot have both unacknowledged writes and bypass document validation.
if self.bypass_doc_val and sock_info.max_wire_version >= 4:
raise OperationFailure("Cannot set bypass_document_validation with"
" unacknowledged write concern")
# OP_MSG
if sock_info.max_wire_version > 5:
if self.ordered:
return self.execute_command_no_results(sock_info, generator)
return self.execute_op_msg_no_results(sock_info, generator)
coll = self.collection
# If ordered is True we have to send GLE or use write
# commands so we can abort on the first error.
write_concern = WriteConcern(w=int(self.ordered))
op_id = _randint()
next_run = next(generator)
while next_run:
# An ordered bulk write needs to send acknowledged writes to short
# circuit the next run. However, the final message on the final
# run can be unacknowledged.
run = next_run
next_run = next(generator, None)
needs_ack = self.ordered and next_run is not None
try:
if run.op_type == _INSERT:
self.execute_insert_no_results(
sock_info, run, op_id, needs_ack)
elif run.op_type == _UPDATE:
for operation in run.ops:
doc = operation['u']
check_keys = True
if doc and next(iter(doc)).startswith('$'):
check_keys = False
coll._update(
sock_info,
operation['q'],
doc,
operation['upsert'],
check_keys,
operation['multi'],
write_concern=write_concern,
op_id=op_id,
ordered=self.ordered,
bypass_doc_val=self.bypass_doc_val)
else:
for operation in run.ops:
coll._delete(sock_info,
operation['q'],
not operation['limit'],
write_concern,
op_id,
self.ordered)
except OperationFailure:
if self.ordered:
break |
def eager_send(chainlet, chunks):
"""
Eager version of `lazy_send` evaluating the return value immediately
:note: The return value by an ``n`` to ``m`` link is considered fully evaluated.
:param chainlet: the chainlet to receive and return data
:type chainlet: chainlink.ChainLink
:param chunks: the stream slice of data to pass to ``chainlet``
:type chunks: iterable
:return: the resulting stream slice of data returned by ``chainlet``
:rtype: iterable
"""
fork, join = chainlet.chain_fork, chainlet.chain_join
if fork and join:
return _send_n_get_m(chainlet, chunks)
elif fork:
return tuple(_lazy_send_1_get_m(chainlet, chunks))
elif join:
return tuple(_lazy_send_n_get_1(chainlet, chunks))
else:
return tuple(_lazy_send_1_get_1(chainlet, chunks)) | Eager version of `lazy_send` evaluating the return value immediately
:note: The return value by an ``n`` to ``m`` link is considered fully evaluated.
:param chainlet: the chainlet to receive and return data
:type chainlet: chainlink.ChainLink
:param chunks: the stream slice of data to pass to ``chainlet``
:type chunks: iterable
:return: the resulting stream slice of data returned by ``chainlet``
:rtype: iterable | Below is the instruction that describes the task:
### Input:
Eager version of `lazy_send` evaluating the return value immediately
:note: The return value by an ``n`` to ``m`` link is considered fully evaluated.
:param chainlet: the chainlet to receive and return data
:type chainlet: chainlink.ChainLink
:param chunks: the stream slice of data to pass to ``chainlet``
:type chunks: iterable
:return: the resulting stream slice of data returned by ``chainlet``
:rtype: iterable
### Response:
def eager_send(chainlet, chunks):
"""
Eager version of `lazy_send` evaluating the return value immediately
:note: The return value by an ``n`` to ``m`` link is considered fully evaluated.
:param chainlet: the chainlet to receive and return data
:type chainlet: chainlink.ChainLink
:param chunks: the stream slice of data to pass to ``chainlet``
:type chunks: iterable
:return: the resulting stream slice of data returned by ``chainlet``
:rtype: iterable
"""
fork, join = chainlet.chain_fork, chainlet.chain_join
if fork and join:
return _send_n_get_m(chainlet, chunks)
elif fork:
return tuple(_lazy_send_1_get_m(chainlet, chunks))
elif join:
return tuple(_lazy_send_n_get_1(chainlet, chunks))
else:
return tuple(_lazy_send_1_get_1(chainlet, chunks)) |
def urlsafe_nopadding_b64decode(data):
'''URL safe Base64 decode without padding (=)'''
padding = len(data) % 4
if padding != 0:
padding = 4 - padding
padding = '=' * padding
data = data + padding
return urlsafe_b64decode(data) | URL safe Base64 decode without padding (=) | Below is the instruction that describes the task:
### Input:
URL safe Base64 decode without padding (=)
### Response:
def urlsafe_nopadding_b64decode(data):
'''URL safe Base64 decode without padding (=)'''
padding = len(data) % 4
if padding != 0:
padding = 4 - padding
padding = '=' * padding
data = data + padding
return urlsafe_b64decode(data) |
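A round-trip sketch for the helper above; the encoded value is passed as a text string so the '=' concatenation inside the function works on both Python 2 and 3:
from base64 import urlsafe_b64encode

encoded = urlsafe_b64encode(b'hello world').decode('ascii').rstrip('=')  # padding stripped
decoded = urlsafe_nopadding_b64decode(encoded)
print(decoded)  # b'hello world'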
def NDP_Attack_Fake_Router(ra, iface=None, mac_src_filter=None,
ip_src_filter=None):
"""
The purpose of this function is to send provided RA message at layer 2
(i.e. providing a packet starting with IPv6 will not work) in response
to received RS messages. In the end, the function is a simple wrapper
around sendp() that monitor the link for RS messages.
It is probably better explained with an example:
>>> ra = Ether()/IPv6()/ICMPv6ND_RA()
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:1::", prefixlen=64)
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:2::", prefixlen=64)
>>> ra /= ICMPv6NDOptSrcLLAddr(lladdr="00:11:22:33:44:55")
>>> NDP_Attack_Fake_Router(ra, iface="eth0")
Fake RA sent in response to RS from fe80::213:58ff:fe8c:b573
Fake RA sent in response to RS from fe80::213:72ff:fe8c:b9ae
...
Following arguments can be used to change the behavior:
ra: the RA message to send in response to received RS message.
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If none is provided, conf.iface is
used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only RS messages received from this source will trigger a reply.
Note that no changes to provided RA is done which imply that if
you intend to target only the source of the RS using this option,
you will have to set the Ethernet destination address to the same
value in your RA.
The default value for this parameter is None: no filtering on the
source of RS is done.
ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter
on. Only RS messages received from this source address will trigger
replies. Same comment as for previous argument apply: if you use
the option, you will probably want to set a specific Ethernet
destination address in the RA.
"""
def is_request(req, mac_src_filter, ip_src_filter):
"""
Check if packet req is a request
"""
if not (Ether in req and IPv6 in req and ICMPv6ND_RS in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
ip_src = req[IPv6].src
if ip_src_filter and ip_src != ip_src_filter:
return 0
return 1
def ra_reply_callback(req, iface):
"""
Callback that sends an RA in reply to an RS
"""
src = req[IPv6].src
sendp(ra, iface=iface, verbose=0)
print("Fake RA sent in response to RS from %s" % src)
if not iface:
iface = conf.iface
sniff_filter = "icmp6"
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter),
prn=lambda x: ra_reply_callback(x, iface),
iface=iface) | The purpose of this function is to send provided RA message at layer 2
(i.e. providing a packet starting with IPv6 will not work) in response
to received RS messages. In the end, the function is a simple wrapper
around sendp() that monitor the link for RS messages.
It is probably better explained with an example:
>>> ra = Ether()/IPv6()/ICMPv6ND_RA()
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:1::", prefixlen=64)
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:2::", prefixlen=64)
>>> ra /= ICMPv6NDOptSrcLLAddr(lladdr="00:11:22:33:44:55")
>>> NDP_Attack_Fake_Router(ra, iface="eth0")
Fake RA sent in response to RS from fe80::213:58ff:fe8c:b573
Fake RA sent in response to RS from fe80::213:72ff:fe8c:b9ae
...
Following arguments can be used to change the behavior:
ra: the RA message to send in response to received RS message.
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If none is provided, conf.iface is
used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only RS messages received from this source will trigger a reply.
Note that no changes to provided RA is done which imply that if
you intend to target only the source of the RS using this option,
you will have to set the Ethernet destination address to the same
value in your RA.
The default value for this parameter is None: no filtering on the
source of RS is done.
ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter
on. Only RS messages received from this source address will trigger
replies. Same comment as for previous argument apply: if you use
the option, you will probably want to set a specific Ethernet
destination address in the RA. | Below is the instruction that describes the task:
### Input:
The purpose of this function is to send provided RA message at layer 2
(i.e. providing a packet starting with IPv6 will not work) in response
to received RS messages. In the end, the function is a simple wrapper
around sendp() that monitor the link for RS messages.
It is probably better explained with an example:
>>> ra = Ether()/IPv6()/ICMPv6ND_RA()
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:1::", prefixlen=64)
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:2::", prefixlen=64)
>>> ra /= ICMPv6NDOptSrcLLAddr(lladdr="00:11:22:33:44:55")
>>> NDP_Attack_Fake_Router(ra, iface="eth0")
Fake RA sent in response to RS from fe80::213:58ff:fe8c:b573
Fake RA sent in response to RS from fe80::213:72ff:fe8c:b9ae
...
Following arguments can be used to change the behavior:
ra: the RA message to send in response to received RS message.
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If none is provided, conf.iface is
used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only RS messages received from this source will trigger a reply.
Note that no changes to provided RA is done which imply that if
you intend to target only the source of the RS using this option,
you will have to set the Ethernet destination address to the same
value in your RA.
The default value for this parameter is None: no filtering on the
source of RS is done.
ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter
on. Only RS messages received from this source address will trigger
replies. Same comment as for previous argument apply: if you use
the option, you will probably want to set a specific Ethernet
destination address in the RA.
### Response:
def NDP_Attack_Fake_Router(ra, iface=None, mac_src_filter=None,
ip_src_filter=None):
"""
The purpose of this function is to send provided RA message at layer 2
(i.e. providing a packet starting with IPv6 will not work) in response
to received RS messages. In the end, the function is a simple wrapper
around sendp() that monitor the link for RS messages.
It is probably better explained with an example:
>>> ra = Ether()/IPv6()/ICMPv6ND_RA()
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:1::", prefixlen=64)
>>> ra /= ICMPv6NDOptPrefixInfo(prefix="2001:db8:2::", prefixlen=64)
>>> ra /= ICMPv6NDOptSrcLLAddr(lladdr="00:11:22:33:44:55")
>>> NDP_Attack_Fake_Router(ra, iface="eth0")
Fake RA sent in response to RS from fe80::213:58ff:fe8c:b573
Fake RA sent in response to RS from fe80::213:72ff:fe8c:b9ae
...
Following arguments can be used to change the behavior:
ra: the RA message to send in response to received RS message.
iface: a specific interface (e.g. "eth0") of the system on which the
DoS should be launched. If none is provided, conf.iface is
used.
mac_src_filter: a mac address (e.g "00:13:72:8c:b5:69") to filter on.
Only RS messages received from this source will trigger a reply.
Note that no changes to provided RA is done which imply that if
you intend to target only the source of the RS using this option,
you will have to set the Ethernet destination address to the same
value in your RA.
The default value for this parameter is None: no filtering on the
source of RS is done.
ip_src_filter: an IPv6 address (e.g. fe80::21e:bff:fe4e:3b2) to filter
on. Only RS messages received from this source address will trigger
replies. Same comment as for previous argument apply: if you use
the option, you will probably want to set a specific Ethernet
destination address in the RA.
"""
def is_request(req, mac_src_filter, ip_src_filter):
"""
Check if packet req is a request
"""
if not (Ether in req and IPv6 in req and ICMPv6ND_RS in req):
return 0
mac_src = req[Ether].src
if mac_src_filter and mac_src != mac_src_filter:
return 0
ip_src = req[IPv6].src
if ip_src_filter and ip_src != ip_src_filter:
return 0
return 1
def ra_reply_callback(req, iface):
"""
Callback that sends an RA in reply to an RS
"""
src = req[IPv6].src
sendp(ra, iface=iface, verbose=0)
print("Fake RA sent in response to RS from %s" % src)
if not iface:
iface = conf.iface
sniff_filter = "icmp6"
sniff(store=0,
filter=sniff_filter,
lfilter=lambda x: is_request(x, mac_src_filter, ip_src_filter),
prn=lambda x: ra_reply_callback(x, iface),
iface=iface) |
def handle_stmt(self, stmt, p_elem, pset={}):
"""
Run handler method for statement `stmt`.
`p_elem` is the parent node in the output schema. `pset` is
the current "patch set" - a dictionary with keys being QNames
of schema nodes at the current level of hierarchy for which
(or descendants thereof) any pending patches exist. The values
are instances of the Patch class.
All handler methods are defined below and must have the same
arguments as this method. They should create the output schema
fragment corresponding to `stmt`, apply all patches from
`pset` belonging to `stmt`, insert the fragment under `p_elem`
and perform all side effects as necessary.
"""
if self.debug > 0:
sys.stderr.write("Handling '%s %s'\n" %
(util.keyword_to_str(stmt.raw_keyword), stmt.arg))
try:
method = self.stmt_handler[stmt.keyword]
except KeyError:
if isinstance(stmt.keyword, tuple):
try:
method = self.ext_handler[stmt.keyword[0]][stmt.keyword[1]]
except KeyError:
method = self.rng_annotation
method(stmt, p_elem)
return
else:
raise error.EmitError(
"Unknown keyword %s - this should not happen.\n"
% stmt.keyword)
method(stmt, p_elem, pset) | Run handler method for statement `stmt`.
`p_elem` is the parent node in the output schema. `pset` is
the current "patch set" - a dictionary with keys being QNames
of schema nodes at the current level of hierarchy for which
(or descendants thereof) any pending patches exist. The values
are instances of the Patch class.
All handler methods are defined below and must have the same
arguments as this method. They should create the output schema
fragment corresponding to `stmt`, apply all patches from
`pset` belonging to `stmt`, insert the fragment under `p_elem`
and perform all side effects as necessary. | Below is the instruction that describes the task:
### Input:
Run handler method for statement `stmt`.
`p_elem` is the parent node in the output schema. `pset` is
the current "patch set" - a dictionary with keys being QNames
of schema nodes at the current level of hierarchy for which
(or descendants thereof) any pending patches exist. The values
are instances of the Patch class.
All handler methods are defined below and must have the same
arguments as this method. They should create the output schema
fragment corresponding to `stmt`, apply all patches from
`pset` belonging to `stmt`, insert the fragment under `p_elem`
and perform all side effects as necessary.
### Response:
def handle_stmt(self, stmt, p_elem, pset={}):
"""
Run handler method for statement `stmt`.
`p_elem` is the parent node in the output schema. `pset` is
the current "patch set" - a dictionary with keys being QNames
of schema nodes at the current level of hierarchy for which
(or descendants thereof) any pending patches exist. The values
are instances of the Patch class.
All handler methods are defined below and must have the same
arguments as this method. They should create the output schema
fragment corresponding to `stmt`, apply all patches from
`pset` belonging to `stmt`, insert the fragment under `p_elem`
and perform all side effects as necessary.
"""
if self.debug > 0:
sys.stderr.write("Handling '%s %s'\n" %
(util.keyword_to_str(stmt.raw_keyword), stmt.arg))
try:
method = self.stmt_handler[stmt.keyword]
except KeyError:
if isinstance(stmt.keyword, tuple):
try:
method = self.ext_handler[stmt.keyword[0]][stmt.keyword[1]]
except KeyError:
method = self.rng_annotation
method(stmt, p_elem)
return
else:
raise error.EmitError(
"Unknown keyword %s - this should not happen.\n"
% stmt.keyword)
method(stmt, p_elem, pset) |
def post(action, params=None, version=6):
"""
For the documentation, see https://foosoft.net/projects/anki-connect/
:param str action:
:param dict params:
:param int version:
:return:
"""
if params is None:
params = dict()
to_send = {
'action': action,
'version': version,
'params': params
}
r = requests.post(AnkiConnect.URL, json=to_send)
return r.json() | For the documentation, see https://foosoft.net/projects/anki-connect/
:param str action:
:param dict params:
:param int version:
:return: | Below is the instruction that describes the task:
### Input:
For the documentation, see https://foosoft.net/projects/anki-connect/
:param str action:
:param dict params:
:param int version:
:return:
### Response:
def post(action, params=None, version=6):
"""
For the documentation, see https://foosoft.net/projects/anki-connect/
:param str action:
:param dict params:
:param int version:
:return:
"""
if params is None:
params = dict()
to_send = {
'action': action,
'version': version,
'params': params
}
r = requests.post(AnkiConnect.URL, json=to_send)
return r.json() |
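A hypothetical call against a running AnkiConnect add-on; AnkiConnect.URL is assumed to point at the default http://localhost:8765 listener, and 'deckNames' / 'findNotes' are standard AnkiConnect actions:
decks = post('deckNames')                               # no params needed
notes = post('findNotes', {'query': 'deck:Default'})
# version-6 replies are JSON envelopes like {'result': [...], 'error': None}
print(decks, notes)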
def convert_to_mosek(sdp):
"""Convert an SDP relaxation to a MOSEK task.
:param sdp: The SDP relaxation to convert.
:type sdp: :class:`ncpol2sdpa.sdp`.
:returns: :class:`mosek.Task`.
"""
import mosek
# Cheat when variables are complex and convert with PICOS
if sdp.complex_matrix:
from .picos_utils import convert_to_picos
Problem = convert_to_picos(sdp).to_real()
Problem._make_mosek_instance()
task = Problem.msk_task
if sdp.verbose > 0:
task.set_Stream(mosek.streamtype.log, streamprinter)
return task
barci, barcj, barcval, barai, baraj, baraval = \
convert_to_mosek_matrix(sdp)
bkc = [mosek.boundkey.fx] * sdp.n_vars
blc = [-v for v in sdp.obj_facvar]
buc = [-v for v in sdp.obj_facvar]
env = mosek.Env()
task = env.Task(0, 0)
if sdp.verbose > 0:
task.set_Stream(mosek.streamtype.log, streamprinter)
numvar = 0
numcon = len(bkc)
BARVARDIM = [sum(sdp.block_struct)]
task.appendvars(numvar)
task.appendcons(numcon)
task.appendbarvars(BARVARDIM)
for i in range(numcon):
task.putconbound(i, bkc[i], blc[i], buc[i])
symc = task.appendsparsesymmat(BARVARDIM[0], barci, barcj, barcval)
task.putbarcj(0, [symc], [1.0])
for i in range(len(barai)):
syma = task.appendsparsesymmat(BARVARDIM[0], barai[i], baraj[i],
baraval[i])
task.putbaraij(i, 0, [syma], [1.0])
# Input the objective sense (minimize/maximize)
task.putobjsense(mosek.objsense.minimize)
return task | Convert an SDP relaxation to a MOSEK task.
:param sdp: The SDP relaxation to convert.
:type sdp: :class:`ncpol2sdpa.sdp`.
:returns: :class:`mosek.Task`. | Below is the instruction that describes the task:
### Input:
Convert an SDP relaxation to a MOSEK task.
:param sdp: The SDP relaxation to convert.
:type sdp: :class:`ncpol2sdpa.sdp`.
:returns: :class:`mosek.Task`.
### Response:
def convert_to_mosek(sdp):
"""Convert an SDP relaxation to a MOSEK task.
:param sdp: The SDP relaxation to convert.
:type sdp: :class:`ncpol2sdpa.sdp`.
:returns: :class:`mosek.Task`.
"""
import mosek
# Cheat when variables are complex and convert with PICOS
if sdp.complex_matrix:
from .picos_utils import convert_to_picos
Problem = convert_to_picos(sdp).to_real()
Problem._make_mosek_instance()
task = Problem.msk_task
if sdp.verbose > 0:
task.set_Stream(mosek.streamtype.log, streamprinter)
return task
barci, barcj, barcval, barai, baraj, baraval = \
convert_to_mosek_matrix(sdp)
bkc = [mosek.boundkey.fx] * sdp.n_vars
blc = [-v for v in sdp.obj_facvar]
buc = [-v for v in sdp.obj_facvar]
env = mosek.Env()
task = env.Task(0, 0)
if sdp.verbose > 0:
task.set_Stream(mosek.streamtype.log, streamprinter)
numvar = 0
numcon = len(bkc)
BARVARDIM = [sum(sdp.block_struct)]
task.appendvars(numvar)
task.appendcons(numcon)
task.appendbarvars(BARVARDIM)
for i in range(numcon):
task.putconbound(i, bkc[i], blc[i], buc[i])
symc = task.appendsparsesymmat(BARVARDIM[0], barci, barcj, barcval)
task.putbarcj(0, [symc], [1.0])
for i in range(len(barai)):
syma = task.appendsparsesymmat(BARVARDIM[0], barai[i], baraj[i],
baraval[i])
task.putbaraij(i, 0, [syma], [1.0])
# Input the objective sense (minimize/maximize)
task.putobjsense(mosek.objsense.minimize)
return task |
def select_inputs(self, address, nfees, ntokens, min_confirmations=6):
"""
Selects the inputs for the spool transaction.
Args:
address (str): bitcoin address to select inputs for
nfees (int): number of fees
ntokens (int): number of tokens
min_confirmations (Optional[int]): minimum number of required
confirmations; defaults to 6
"""
unspents = self._t.get(address, min_confirmations=min_confirmations)['unspents']
unspents = [u for u in unspents if u not in self._spents.queue]
if len(unspents) == 0:
raise Exception("No spendable outputs found")
fees = [u for u in unspents if u['amount'] == self.fee][:nfees]
tokens = [u for u in unspents if u['amount'] == self.token][:ntokens]
if len(fees) != nfees or len(tokens) != ntokens:
raise SpoolFundsError("Not enough outputs to spend. Refill your wallet")
if self._spents.qsize() > self.SPENTS_QUEUE_MAXSIZE - (nfees + ntokens):
[self._spents.get() for i in range(self._spents.qsize() + nfees + ntokens - self.SPENTS_QUEUE_MAXSIZE)]
[self._spents.put(fee) for fee in fees]
[self._spents.put(token) for token in tokens]
return fees + tokens | Selects the inputs for the spool transaction.
Args:
address (str): bitcoin address to select inputs for
nfees (int): number of fees
ntokens (int): number of tokens
min_confirmations (Optional[int]): minimum number of required
confirmations; defaults to 6 | Below is the instruction that describes the task:
### Input:
Selects the inputs for the spool transaction.
Args:
address (str): bitcoin address to select inputs for
nfees (int): number of fees
ntokens (int): number of tokens
min_confirmations (Optional[int]): minimum number of required
confirmations; defaults to 6
### Response:
def select_inputs(self, address, nfees, ntokens, min_confirmations=6):
"""
Selects the inputs for the spool transaction.
Args:
address (str): bitcoin address to select inputs for
nfees (int): number of fees
ntokens (int): number of tokens
min_confirmations (Optional[int]): minimum number of required
confirmations; defaults to 6
"""
unspents = self._t.get(address, min_confirmations=min_confirmations)['unspents']
unspents = [u for u in unspents if u not in self._spents.queue]
if len(unspents) == 0:
raise Exception("No spendable outputs found")
fees = [u for u in unspents if u['amount'] == self.fee][:nfees]
tokens = [u for u in unspents if u['amount'] == self.token][:ntokens]
if len(fees) != nfees or len(tokens) != ntokens:
raise SpoolFundsError("Not enough outputs to spend. Refill your wallet")
if self._spents.qsize() > self.SPENTS_QUEUE_MAXSIZE - (nfees + ntokens):
[self._spents.get() for i in range(self._spents.qsize() + nfees + ntokens - self.SPENTS_QUEUE_MAXSIZE)]
[self._spents.put(fee) for fee in fees]
[self._spents.put(token) for token in tokens]
return fees + tokens |
def find_closest(db, pos):
"""Find the closest point in db to pos.
:returns: Closest dataset as well as the distance in meters.
"""
def get_dist(d1, d2):
"""Get distance between d1 and d2 in meters."""
lat1, lon1 = d1['latitude'], d1['longitude']
lat2, lon2 = d2['latitude'], d2['longitude']
R = 6371000.0 # metres
phi1 = math.radians(lat1)
phi2 = math.radians(lat2)
delta_phi = math.radians(lat2-lat1)
delta_delta = math.radians(lon2-lon1)
a = math.sin(delta_phi/2) * math.sin(delta_phi/2) + \
math.cos(phi1) * math.cos(phi2) * \
math.sin(delta_delta/2) * math.sin(delta_delta/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = R * c
return d
closest_dataset, closest_dist = db[0], get_dist(pos, db[0])
for dataset in db:
dist = get_dist(pos, dataset)
if dist < closest_dist:
closest_dataset = dataset
closest_dist = dist
return closest_dataset, closest_dist | Find the closest point in db to pos.
:returns: Closest dataset as well as the distance in meters. | Below is the instruction that describes the task:
### Input:
Find the closest point in db to pos.
:returns: Closest dataset as well as the distance in meters.
### Response:
def find_closest(db, pos):
"""Find the closest point in db to pos.
:returns: Closest dataset as well as the distance in meters.
"""
def get_dist(d1, d2):
"""Get distance between d1 and d2 in meters."""
lat1, lon1 = d1['latitude'], d1['longitude']
lat2, lon2 = d2['latitude'], d2['longitude']
R = 6371000.0 # metres
phi1 = math.radians(lat1)
phi2 = math.radians(lat2)
delta_phi = math.radians(lat2-lat1)
delta_delta = math.radians(lon2-lon1)
a = math.sin(delta_phi/2) * math.sin(delta_phi/2) + \
math.cos(phi1) * math.cos(phi2) * \
math.sin(delta_delta/2) * math.sin(delta_delta/2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = R * c
return d
closest_dataset, closest_dist = db[0], get_dist(pos, db[0])
for dataset in db:
dist = get_dist(pos, dataset)
if dist < closest_dist:
closest_dataset = dataset
closest_dist = dist
return closest_dataset, closest_dist |
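A small, made-up position database to show the return convention of find_closest (the math module it relies on is already imported in its defining file):
db = [
    {'latitude': 48.8566, 'longitude': 2.3522},    # Paris
    {'latitude': 52.5200, 'longitude': 13.4050},   # Berlin
]
pos = {'latitude': 50.1109, 'longitude': 8.6821}   # Frankfurt
closest, dist_m = find_closest(db, pos)
print(closest, round(dist_m / 1000.0), 'km')       # the Berlin entry, roughly 420 km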
def url_to_text(self, url):
'''
Download PDF file and transform its document to string.
Args:
url: PDF url.
Returns:
string.
'''
path, headers = urllib.request.urlretrieve(url)
return self.path_to_text(path) | Download PDF file and transform its document to string.
Args:
url: PDF url.
Returns:
string. | Below is the instruction that describes the task:
### Input:
Download PDF file and transform its document to string.
Args:
url: PDF url.
Returns:
string.
### Response:
def url_to_text(self, url):
'''
Download PDF file and transform its document to string.
Args:
url: PDF url.
Returns:
string.
'''
path, headers = urllib.request.urlretrieve(url)
return self.path_to_text(path) |
def is_manage_allowed(self):
"""Check if manage is allowed
"""
checkPermission = self.context.portal_membership.checkPermission
return checkPermission(ManageWorksheets, self.context) | Check if manage is allowed | Below is the instruction that describes the task:
### Input:
Check if manage is allowed
### Response:
def is_manage_allowed(self):
"""Check if manage is allowed
"""
checkPermission = self.context.portal_membership.checkPermission
return checkPermission(ManageWorksheets, self.context) |
def nextChainableJobGraph(jobGraph, jobStore):
"""Returns the next chainable jobGraph after this jobGraph if one
exists, or None if the chain must terminate.
"""
#If no more jobs to run or services not finished, quit
if len(jobGraph.stack) == 0 or len(jobGraph.services) > 0 or jobGraph.checkpoint != None:
logger.debug("Stopping running chain of jobs: length of stack: %s, services: %s, checkpoint: %s",
len(jobGraph.stack), len(jobGraph.services), jobGraph.checkpoint != None)
return None
#Get the next set of jobs to run
jobs = jobGraph.stack[-1]
assert len(jobs) > 0
#If there are 2 or more jobs to run in parallel we quit
if len(jobs) >= 2:
logger.debug("No more jobs can run in series by this worker,"
" it's got %i children", len(jobs)-1)
return None
#We check the requirements of the jobGraph to see if we can run it
#within the current worker
successorJobNode = jobs[0]
if successorJobNode.memory > jobGraph.memory:
logger.debug("We need more memory for the next job, so finishing")
return None
if successorJobNode.cores > jobGraph.cores:
logger.debug("We need more cores for the next job, so finishing")
return None
if successorJobNode.disk > jobGraph.disk:
logger.debug("We need more disk for the next job, so finishing")
return None
if successorJobNode.preemptable != jobGraph.preemptable:
logger.debug("Preemptability is different for the next job, returning to the leader")
return None
if successorJobNode.predecessorNumber > 1:
logger.debug("The jobGraph has multiple predecessors, we must return to the leader.")
return None
# Load the successor jobGraph
successorJobGraph = jobStore.load(successorJobNode.jobStoreID)
# Somewhat ugly, but check if job is a checkpoint job and quit if
# so
if successorJobGraph.command.startswith("_toil "):
#Load the job
successorJob = Job._loadJob(successorJobGraph.command, jobStore)
# Check it is not a checkpoint
if successorJob.checkpoint:
logger.debug("Next job is checkpoint, so finishing")
return None
# Made it through! This job is chainable.
return successorJobGraph | Returns the next chainable jobGraph after this jobGraph if one
exists, or None if the chain must terminate. | Below is the instruction that describes the task:
### Input:
Returns the next chainable jobGraph after this jobGraph if one
exists, or None if the chain must terminate.
### Response:
def nextChainableJobGraph(jobGraph, jobStore):
"""Returns the next chainable jobGraph after this jobGraph if one
exists, or None if the chain must terminate.
"""
#If no more jobs to run or services not finished, quit
if len(jobGraph.stack) == 0 or len(jobGraph.services) > 0 or jobGraph.checkpoint != None:
logger.debug("Stopping running chain of jobs: length of stack: %s, services: %s, checkpoint: %s",
len(jobGraph.stack), len(jobGraph.services), jobGraph.checkpoint != None)
return None
#Get the next set of jobs to run
jobs = jobGraph.stack[-1]
assert len(jobs) > 0
#If there are 2 or more jobs to run in parallel we quit
if len(jobs) >= 2:
logger.debug("No more jobs can run in series by this worker,"
" it's got %i children", len(jobs)-1)
return None
#We check the requirements of the jobGraph to see if we can run it
#within the current worker
successorJobNode = jobs[0]
if successorJobNode.memory > jobGraph.memory:
logger.debug("We need more memory for the next job, so finishing")
return None
if successorJobNode.cores > jobGraph.cores:
logger.debug("We need more cores for the next job, so finishing")
return None
if successorJobNode.disk > jobGraph.disk:
logger.debug("We need more disk for the next job, so finishing")
return None
if successorJobNode.preemptable != jobGraph.preemptable:
logger.debug("Preemptability is different for the next job, returning to the leader")
return None
if successorJobNode.predecessorNumber > 1:
logger.debug("The jobGraph has multiple predecessors, we must return to the leader.")
return None
# Load the successor jobGraph
successorJobGraph = jobStore.load(successorJobNode.jobStoreID)
# Somewhat ugly, but check if job is a checkpoint job and quit if
# so
if successorJobGraph.command.startswith("_toil "):
#Load the job
successorJob = Job._loadJob(successorJobGraph.command, jobStore)
# Check it is not a checkpoint
if successorJob.checkpoint:
logger.debug("Next job is checkpoint, so finishing")
return None
# Made it through! This job is chainable.
return successorJobGraph |
def _write_avg_gradient(self)->None:
"Writes the average of the gradients to Tensorboard."
avg_gradient = sum(x.data.mean() for x in self.gradients)/len(self.gradients)
self._add_gradient_scalar('avg_gradient', scalar_value=avg_gradient) | Writes the average of the gradients to Tensorboard. | Below is the instruction that describes the task:
### Input:
Writes the average of the gradients to Tensorboard.
### Response:
def _write_avg_gradient(self)->None:
"Writes the average of the gradients to Tensorboard."
avg_gradient = sum(x.data.mean() for x in self.gradients)/len(self.gradients)
self._add_gradient_scalar('avg_gradient', scalar_value=avg_gradient) |
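The same statistic computed over a hypothetical list of plain PyTorch tensors, detached from the fastai callback above (self.gradients presumably holds the model's gradient tensors collected during training):
import torch

gradients = [torch.randn(3, 3), torch.randn(5)]
avg_gradient = sum(g.mean() for g in gradients) / len(gradients)
print(float(avg_gradient))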
def send_command_return_multilines(self, obj, command, *arguments):
""" Send command with no output.
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
:return: list of command output lines.
:rtype: list(str)
"""
return self._perform_command('{}/{}'.format(self.session_url, obj.ref), command,
OperReturnType.multiline_output, *arguments).json() | Send command with no output.
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
:return: list of command output lines.
:rtype: list(str) | Below is the instruction that describes the task:
### Input:
Send command with no output.
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
:return: list of command output lines.
:rtype: list(str)
### Response:
def send_command_return_multilines(self, obj, command, *arguments):
""" Send command with no output.
:param obj: requested object.
:param command: command to send.
:param arguments: list of command arguments.
:return: list of command output lines.
:rtype: list(str)
"""
return self._perform_command('{}/{}'.format(self.session_url, obj.ref), command,
OperReturnType.multiline_output, *arguments).json() |
def main():
"""This main function saves the stdin termios settings, calls real_main,
and restores stdin termios settings when it returns.
"""
save_settings = None
stdin_fd = -1
try:
import termios
stdin_fd = sys.stdin.fileno()
save_settings = termios.tcgetattr(stdin_fd)
except:
pass
try:
real_main()
finally:
if save_settings:
termios.tcsetattr(stdin_fd, termios.TCSANOW, save_settings) | This main function saves the stdin termios settings, calls real_main,
and restores stdin termios settings when it returns. | Below is the the instruction that describes the task:
### Input:
This main function saves the stdin termios settings, calls real_main,
and restores stdin termios settings when it returns.
### Response:
def main():
"""This main function saves the stdin termios settings, calls real_main,
and restores stdin termios settings when it returns.
"""
save_settings = None
stdin_fd = -1
try:
import termios
stdin_fd = sys.stdin.fileno()
save_settings = termios.tcgetattr(stdin_fd)
except:
pass
try:
real_main()
finally:
if save_settings:
termios.tcsetattr(stdin_fd, termios.TCSANOW, save_settings) |
def calculate_derivative_T(self, T, P, zs, ws, method, order=1):
r'''Method to calculate a derivative of a mixture property with respect
to temperature at constant pressure and composition
of a given order using a specified method. Uses SciPy's derivative
function, with a delta of 1E-6 K and a number of points equal to
2*order + 1.
This method can be overwritten by subclasses who may perfer to add
analytical methods for some or all methods as this is much faster.
If the calculation does not succeed, returns the actual error
encountered.
Parameters
----------
T : float
Temperature at which to calculate the derivative, [K]
P : float
Pressure at which to calculate the derivative, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Method for which to find the derivative
order : int
Order of the derivative, >= 1
Returns
-------
d_prop_d_T_at_P : float
Calculated derivative property at constant pressure,
[`units/K^order`]
'''
return derivative(self.calculate, T, dx=1e-6, args=[P, zs, ws, method], n=order, order=1+order*2) | r'''Method to calculate a derivative of a mixture property with respect
to temperature at constant pressure and composition
of a given order using a specified method. Uses SciPy's derivative
function, with a delta of 1E-6 K and a number of points equal to
2*order + 1.
This method can be overwritten by subclasses who may perfer to add
analytical methods for some or all methods as this is much faster.
If the calculation does not succeed, returns the actual error
encountered.
Parameters
----------
T : float
Temperature at which to calculate the derivative, [K]
P : float
Pressure at which to calculate the derivative, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Method for which to find the derivative
order : int
Order of the derivative, >= 1
Returns
-------
d_prop_d_T_at_P : float
Calculated derivative property at constant pressure,
[`units/K^order`] | Below is the instruction that describes the task:
### Input:
r'''Method to calculate a derivative of a mixture property with respect
to temperature at constant pressure and composition
of a given order using a specified method. Uses SciPy's derivative
function, with a delta of 1E-6 K and a number of points equal to
2*order + 1.
This method can be overwritten by subclasses who may perfer to add
analytical methods for some or all methods as this is much faster.
If the calculation does not succeed, returns the actual error
encountered.
Parameters
----------
T : float
Temperature at which to calculate the derivative, [K]
P : float
Pressure at which to calculate the derivative, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Method for which to find the derivative
order : int
Order of the derivative, >= 1
Returns
-------
d_prop_d_T_at_P : float
Calculated derivative property at constant pressure,
[`units/K^order`]
### Response:
def calculate_derivative_T(self, T, P, zs, ws, method, order=1):
r'''Method to calculate a derivative of a mixture property with respect
to temperature at constant pressure and composition
of a given order using a specified method. Uses SciPy's derivative
function, with a delta of 1E-6 K and a number of points equal to
2*order + 1.
This method can be overwritten by subclasses who may perfer to add
analytical methods for some or all methods as this is much faster.
If the calculation does not succeed, returns the actual error
encountered.
Parameters
----------
T : float
Temperature at which to calculate the derivative, [K]
P : float
Pressure at which to calculate the derivative, [Pa]
zs : list[float]
Mole fractions of all species in the mixture, [-]
ws : list[float]
Weight fractions of all species in the mixture, [-]
method : str
Method for which to find the derivative
order : int
Order of the derivative, >= 1
Returns
-------
d_prop_d_T_at_P : float
Calculated derivative property at constant pressure,
[`units/K^order`]
'''
return derivative(self.calculate, T, dx=1e-6, args=[P, zs, ws, method], n=order, order=1+order*2) |
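A minimal standalone sketch of the central-difference differentiation used above (scipy.misc.derivative, since removed from SciPy, computes the same kind of stencil); the property function and values below are invented for illustration:
def central_difference(func, x0, dx=1e-6, args=()):
    # 3-point central difference, the order=3 stencil used for a first derivative
    return (func(x0 + dx, *args) - func(x0 - dx, *args)) / (2 * dx)
def fake_property(T, P):
    # stand-in for self.calculate(T, P, zs, ws, method)
    return 1e-3 * T ** 2 + 1e-8 * P
print(central_difference(fake_property, 300.0, args=(101325.0,)))  # ~0.6, i.e. 2e-3 * 300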
def validate_course_run_id(self, value):
"""
Validates that the course run id is part of the Enterprise Customer's catalog.
"""
enterprise_customer = self.context.get('enterprise_customer')
if not enterprise_customer.catalog_contains_course(value):
raise serializers.ValidationError(
'The course run id {course_run_id} is not in the catalog '
'for Enterprise Customer {enterprise_customer}'.format(
course_run_id=value,
enterprise_customer=enterprise_customer.name,
)
)
return value | Validates that the course run id is part of the Enterprise Customer's catalog. | Below is the the instruction that describes the task:
### Input:
Validates that the course run id is part of the Enterprise Customer's catalog.
### Response:
def validate_course_run_id(self, value):
"""
Validates that the course run id is part of the Enterprise Customer's catalog.
"""
enterprise_customer = self.context.get('enterprise_customer')
if not enterprise_customer.catalog_contains_course(value):
raise serializers.ValidationError(
'The course run id {course_run_id} is not in the catalog '
'for Enterprise Customer {enterprise_customer}'.format(
course_run_id=value,
enterprise_customer=enterprise_customer.name,
)
)
return value |
def upload_file(self, localFileName, remoteFileName=None, connId='default'):
"""
Sends file from local drive to current directory on FTP server in binary mode.
Returns server output.
Parameters:
- localFileName - file name or path to a file on a local drive.
- remoteFileName (optional) - a name or path containing name under which file should be saved.
- connId(optional) - connection identifier. By default equals 'default'
If remoteFileName agument is not given, local name will be used.
Examples:
| upload file | x.txt | connId=ftp1 |
| upload file | D:/rfftppy/y.txt | |
| upload file | u.txt | uu.txt |
| upload file | D:/rfftppy/z.txt | zz.txt |
| upload file | D:\\rfftppy\\v.txt | |
"""
thisConn = self.__getConnection(connId)
outputMsg = ""
remoteFileName_ = ""
localFilePath = os.path.normpath(localFileName)
if not os.path.isfile(localFilePath):
raise FtpLibraryError("Valid file path should be provided.")
else:
if remoteFileName==None:
fileTuple = os.path.split(localFileName)
if len(fileTuple)==2:
remoteFileName_ = fileTuple[1]
else:
remoteFileName_ = 'defaultFileName'
else:
remoteFileName_ = remoteFileName
try:
outputMsg += thisConn.storbinary("STOR " + remoteFileName_, open(localFilePath, "rb"))
except ftplib.all_errors as e:
raise FtpLibraryError(str(e))
if self.printOutput:
logger.info(outputMsg)
return outputMsg | Sends file from local drive to current directory on FTP server in binary mode.
Returns server output.
Parameters:
- localFileName - file name or path to a file on a local drive.
- remoteFileName (optional) - a name or path containing name under which file should be saved.
- connId(optional) - connection identifier. By default equals 'default'
If remoteFileName argument is not given, local name will be used.
Examples:
| upload file | x.txt | connId=ftp1 |
| upload file | D:/rfftppy/y.txt | |
| upload file | u.txt | uu.txt |
| upload file | D:/rfftppy/z.txt | zz.txt |
| upload file | D:\\rfftppy\\v.txt | | | Below is the the instruction that describes the task:
### Input:
Sends file from local drive to current directory on FTP server in binary mode.
Returns server output.
Parameters:
- localFileName - file name or path to a file on a local drive.
- remoteFileName (optional) - a name or path containing name under which file should be saved.
- connId(optional) - connection identifier. By default equals 'default'
If remoteFileName argument is not given, local name will be used.
Examples:
| upload file | x.txt | connId=ftp1 |
| upload file | D:/rfftppy/y.txt | |
| upload file | u.txt | uu.txt |
| upload file | D:/rfftppy/z.txt | zz.txt |
| upload file | D:\\rfftppy\\v.txt | |
### Response:
def upload_file(self, localFileName, remoteFileName=None, connId='default'):
"""
Sends file from local drive to current directory on FTP server in binary mode.
Returns server output.
Parameters:
- localFileName - file name or path to a file on a local drive.
- remoteFileName (optional) - a name or path containing name under which file should be saved.
- connId(optional) - connection identifier. By default equals 'default'
If remoteFileName argument is not given, local name will be used.
Examples:
| upload file | x.txt | connId=ftp1 |
| upload file | D:/rfftppy/y.txt | |
| upload file | u.txt | uu.txt |
| upload file | D:/rfftppy/z.txt | zz.txt |
| upload file | D:\\rfftppy\\v.txt | |
"""
thisConn = self.__getConnection(connId)
outputMsg = ""
remoteFileName_ = ""
localFilePath = os.path.normpath(localFileName)
if not os.path.isfile(localFilePath):
raise FtpLibraryError("Valid file path should be provided.")
else:
if remoteFileName==None:
fileTuple = os.path.split(localFileName)
if len(fileTuple)==2:
remoteFileName_ = fileTuple[1]
else:
remoteFileName_ = 'defaultFileName'
else:
remoteFileName_ = remoteFileName
try:
outputMsg += thisConn.storbinary("STOR " + remoteFileName_, open(localFilePath, "rb"))
except ftplib.all_errors as e:
raise FtpLibraryError(str(e))
if self.printOutput:
logger.info(outputMsg)
return outputMsg |
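A hedged standalone sketch of the underlying ftplib call used above; the host, credentials and file names are placeholders:
import ftplib
ftp = ftplib.FTP("ftp.example.com")
ftp.login("user", "password")
with open("report.txt", "rb") as handle:
    reply = ftp.storbinary("STOR report.txt", handle)  # binary upload to the current remote dir
print(reply)  # e.g. "226 Transfer complete."
ftp.quit()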
def center(self, coords):
""" center the map on a "map pixel"
"""
x, y = [round(i, 0) for i in coords]
self.view_rect.center = x, y
tw, th = self.data.tile_size
left, ox = divmod(x, tw)
top, oy = divmod(y, th)
vec = int(ox / 2), int(oy)
iso = vector2_to_iso(vec)
self._x_offset = iso[0]
self._y_offset = iso[1]
print(self._tile_view.size)
print(self._buffer.get_size())
# center the buffer on the screen
self._x_offset += (self._buffer.get_width() - self.view_rect.width) // 2
self._y_offset += (self._buffer.get_height() - self.view_rect.height) // 4
# adjust the view if the view has changed without a redraw
dx = int(left - self._tile_view.left)
dy = int(top - self._tile_view.top)
view_change = max(abs(dx), abs(dy))
# force redraw every time: edge queuing not supported yet
self._redraw_cutoff = 0
if view_change and (view_change <= self._redraw_cutoff):
self._buffer.scroll(-dx * tw, -dy * th)
self._tile_view.move_ip(dx, dy)
self._queue_edge_tiles(dx, dy)
self._flush_tile_queue()
elif view_change > self._redraw_cutoff:
# logger.info('scrolling too quickly. redraw forced')
self._tile_view.move_ip(dx, dy)
self.redraw_tiles() | center the map on a "map pixel" | Below is the the instruction that describes the task:
### Input:
center the map on a "map pixel"
### Response:
def center(self, coords):
""" center the map on a "map pixel"
"""
x, y = [round(i, 0) for i in coords]
self.view_rect.center = x, y
tw, th = self.data.tile_size
left, ox = divmod(x, tw)
top, oy = divmod(y, th)
vec = int(ox / 2), int(oy)
iso = vector2_to_iso(vec)
self._x_offset = iso[0]
self._y_offset = iso[1]
print(self._tile_view.size)
print(self._buffer.get_size())
# center the buffer on the screen
self._x_offset += (self._buffer.get_width() - self.view_rect.width) // 2
self._y_offset += (self._buffer.get_height() - self.view_rect.height) // 4
# adjust the view if the view has changed without a redraw
dx = int(left - self._tile_view.left)
dy = int(top - self._tile_view.top)
view_change = max(abs(dx), abs(dy))
# force redraw every time: edge queuing not supported yet
self._redraw_cutoff = 0
if view_change and (view_change <= self._redraw_cutoff):
self._buffer.scroll(-dx * tw, -dy * th)
self._tile_view.move_ip(dx, dy)
self._queue_edge_tiles(dx, dy)
self._flush_tile_queue()
elif view_change > self._redraw_cutoff:
# logger.info('scrolling too quickly. redraw forced')
self._tile_view.move_ip(dx, dy)
self.redraw_tiles() |
def _mmInit(self):
"""Create the minimum match dictionary of keys"""
# cache references to speed up loop a bit
mmkeys = {}
mmkeysGet = mmkeys.setdefault
minkeylength = self.minkeylength
for key in self.data.keys():
# add abbreviations as short as minkeylength
# always add at least one entry (even for key="")
lenkey = len(key)
start = min(minkeylength,lenkey)
for i in range(start,lenkey+1):
mmkeysGet(key[0:i],[]).append(key)
self.mmkeys = mmkeys | Create the minimum match dictionary of keys | Below is the the instruction that describes the task:
### Input:
Create the minimum match dictionary of keys
### Response:
def _mmInit(self):
"""Create the minimum match dictionary of keys"""
# cache references to speed up loop a bit
mmkeys = {}
mmkeysGet = mmkeys.setdefault
minkeylength = self.minkeylength
for key in self.data.keys():
# add abbreviations as short as minkeylength
# always add at least one entry (even for key="")
lenkey = len(key)
start = min(minkeylength,lenkey)
for i in range(start,lenkey+1):
mmkeysGet(key[0:i],[]).append(key)
self.mmkeys = mmkeys |
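A standalone sketch of the minimum-match abbreviation index built above; the function and variable names here are invented:
def build_mmkeys(keys, minkeylength=2):
    # map every prefix of length >= minkeylength to the keys it could expand to
    mmkeys = {}
    for key in keys:
        start = min(minkeylength, len(key))
        for i in range(start, len(key) + 1):
            mmkeys.setdefault(key[:i], []).append(key)
    return mmkeys
index = build_mmkeys(["verbose", "version"])
print(index["ve"])    # ['verbose', 'version'] -> ambiguous abbreviation
print(index["verb"])  # ['verbose'] -> unambiguous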
def get_attrs_declarations(item_type):
"""
Helper method to return a dictionary of tuples. Each key is attr_name, and value is (attr_type, attr_is_optional)
:param item_type:
:return:
"""
# this will raise an error if the type is not an attr-created type
attribs = fields(item_type)
res = dict()
for attr in attribs:
attr_name = attr.name
# -- is the attribute mandatory ?
optional = is_optional(attr)
# -- get and check the attribute type
typ = guess_type_from_validators(attr)
# -- store both info in result dict
res[attr_name] = (typ, optional)
return res | Helper method to return a dictionary of tuples. Each key is attr_name, and value is (attr_type, attr_is_optional)
:param item_type:
:return: | Below is the the instruction that describes the task:
### Input:
Helper method to return a dictionary of tuples. Each key is attr_name, and value is (attr_type, attr_is_optional)
:param item_type:
:return:
### Response:
def get_attrs_declarations(item_type):
"""
Helper method to return a dictionary of tuples. Each key is attr_name, and value is (attr_type, attr_is_optional)
:param item_type:
:return:
"""
# this will raise an error if the type is not an attr-created type
attribs = fields(item_type)
res = dict()
for attr in attribs:
attr_name = attr.name
# -- is the attribute mandatory ?
optional = is_optional(attr)
# -- get and check the attribute type
typ = guess_type_from_validators(attr)
# -- store both info in result dict
res[attr_name] = (typ, optional)
return res |
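A simplified sketch using only the attrs API; the project's is_optional and guess_type_from_validators helpers are replaced here by attrs' own default/type metadata, so this is an approximation rather than the original behaviour:
import attr
@attr.s
class Point:
    x = attr.ib(type=int)
    y = attr.ib(type=int, default=0)
declarations = {
    field.name: (field.type, field.default is not attr.NOTHING)
    for field in attr.fields(Point)
}
print(declarations)  # {'x': (<class 'int'>, False), 'y': (<class 'int'>, True)}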
def nonce_solid(name, n_inputs, n_outputs):
"""Creates a solid with the given number of (meaningless) inputs and outputs.
Config controls the behavior of the nonce solid."""
@solid(
name=name,
inputs=[
InputDefinition(name='input_{}'.format(i)) for i in range(n_inputs)
],
outputs=[
OutputDefinition(name='output_{}'.format(i))
for i in range(n_outputs)
],
)
def solid_fn(context, **_kwargs):
for i in range(200):
time.sleep(0.02)
if i % 1000 == 420:
context.log.error(
'Error message seq={i} from solid {name}'.format(
i=i, name=name
)
)
elif i % 100 == 0:
context.log.warning(
'Warning message seq={i} from solid {name}'.format(
i=i, name=name
)
)
elif i % 10 == 0:
context.log.info(
'Info message seq={i} from solid {name}'.format(
i=i, name=name
)
)
else:
context.log.debug(
'Debug message seq={i} from solid {name}'.format(
i=i, name=name
)
)
return MultipleResults.from_dict(
{'output_{}'.format(i): 'foo' for i in range(n_outputs)}
)
return solid_fn | Creates a solid with the given number of (meaningless) inputs and outputs.
Config controls the behavior of the nonce solid. | Below is the the instruction that describes the task:
### Input:
Creates a solid with the given number of (meaningless) inputs and outputs.
Config controls the behavior of the nonce solid.
### Response:
def nonce_solid(name, n_inputs, n_outputs):
"""Creates a solid with the given number of (meaningless) inputs and outputs.
Config controls the behavior of the nonce solid."""
@solid(
name=name,
inputs=[
InputDefinition(name='input_{}'.format(i)) for i in range(n_inputs)
],
outputs=[
OutputDefinition(name='output_{}'.format(i))
for i in range(n_outputs)
],
)
def solid_fn(context, **_kwargs):
for i in range(200):
time.sleep(0.02)
if i % 1000 == 420:
context.log.error(
'Error message seq={i} from solid {name}'.format(
i=i, name=name
)
)
elif i % 100 == 0:
context.log.warning(
'Warning message seq={i} from solid {name}'.format(
i=i, name=name
)
)
elif i % 10 == 0:
context.log.info(
'Info message seq={i} from solid {name}'.format(
i=i, name=name
)
)
else:
context.log.debug(
'Debug message seq={i} from solid {name}'.format(
i=i, name=name
)
)
return MultipleResults.from_dict(
{'output_{}'.format(i): 'foo' for i in range(n_outputs)}
)
return solid_fn |
def _GetStat(self):
"""Retrieves information about the file entry.
Returns:
VFSStat: a stat object.
"""
stat_object = super(APFSFileEntry, self)._GetStat()
# File data stat information.
stat_object.size = self._fsapfs_file_entry.size
# Ownership and permissions stat information.
stat_object.mode = self._fsapfs_file_entry.file_mode & 0x0fff
stat_object.uid = self._fsapfs_file_entry.owner_identifier
stat_object.gid = self._fsapfs_file_entry.group_identifier
# File entry type stat information.
stat_object.type = self.entry_type
# Other stat information.
stat_object.ino = self._fsapfs_file_entry.identifier
stat_object.fs_type = 'APFS'
stat_object.is_allocated = True
return stat_object | Retrieves information about the file entry.
Returns:
VFSStat: a stat object. | Below is the the instruction that describes the task:
### Input:
Retrieves information about the file entry.
Returns:
VFSStat: a stat object.
### Response:
def _GetStat(self):
"""Retrieves information about the file entry.
Returns:
VFSStat: a stat object.
"""
stat_object = super(APFSFileEntry, self)._GetStat()
# File data stat information.
stat_object.size = self._fsapfs_file_entry.size
# Ownership and permissions stat information.
stat_object.mode = self._fsapfs_file_entry.file_mode & 0x0fff
stat_object.uid = self._fsapfs_file_entry.owner_identifier
stat_object.gid = self._fsapfs_file_entry.group_identifier
# File entry type stat information.
stat_object.type = self.entry_type
# Other stat information.
stat_object.ino = self._fsapfs_file_entry.identifier
stat_object.fs_type = 'APFS'
stat_object.is_allocated = True
return stat_object |
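A small sketch of the permission masking used above: the low 12 bits of a Unix-style mode hold the permission/suid bits, while the high bits hold the entry type:
import stat
file_mode = 0o100644                 # regular file, rw-r--r--
print(oct(file_mode & 0x0FFF))       # 0o644 -> permissions only
print(stat.S_ISREG(file_mode))       # True -> entry type from the high bits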
def _expectation(p, mean, none, kern, feat, nghp=None):
"""
Compute the expectation:
expectation[n] = <x_n K_{x_n, Z}>_p(x_n)
- K_{.,.} :: RBF kernel
:return: NxDxM
"""
Xmu, Xcov = p.mu, p.cov
with tf.control_dependencies([tf.assert_equal(
tf.shape(Xmu)[1], tf.constant(kern.input_dim, settings.int_type),
message="Currently cannot handle slicing in exKxz.")]):
Xmu = tf.identity(Xmu)
with params_as_tensors_for(kern, feat):
D = tf.shape(Xmu)[1]
lengthscales = kern.lengthscales if kern.ARD \
else tf.zeros((D,), dtype=settings.float_type) + kern.lengthscales
chol_L_plus_Xcov = tf.cholesky(tf.matrix_diag(lengthscales ** 2) + Xcov) # NxDxD
all_diffs = tf.transpose(feat.Z) - tf.expand_dims(Xmu, 2) # NxDxM
sqrt_det_L = tf.reduce_prod(lengthscales)
sqrt_det_L_plus_Xcov = tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(chol_L_plus_Xcov)), axis=1))
determinants = sqrt_det_L / sqrt_det_L_plus_Xcov # N
exponent_mahalanobis = tf.cholesky_solve(chol_L_plus_Xcov, all_diffs) # NxDxM
non_exponent_term = tf.matmul(Xcov, exponent_mahalanobis, transpose_a=True)
non_exponent_term = tf.expand_dims(Xmu, 2) + non_exponent_term # NxDxM
exponent_mahalanobis = tf.reduce_sum(all_diffs * exponent_mahalanobis, 1) # NxM
exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis) # NxM
return kern.variance * (determinants[:, None] * exponent_mahalanobis)[:, None, :] * non_exponent_term | Compute the expectation:
expectation[n] = <x_n K_{x_n, Z}>_p(x_n)
- K_{.,.} :: RBF kernel
:return: NxDxM | Below is the the instruction that describes the task:
### Input:
Compute the expectation:
expectation[n] = <x_n K_{x_n, Z}>_p(x_n)
- K_{.,.} :: RBF kernel
:return: NxDxM
### Response:
def _expectation(p, mean, none, kern, feat, nghp=None):
"""
Compute the expectation:
expectation[n] = <x_n K_{x_n, Z}>_p(x_n)
- K_{.,.} :: RBF kernel
:return: NxDxM
"""
Xmu, Xcov = p.mu, p.cov
with tf.control_dependencies([tf.assert_equal(
tf.shape(Xmu)[1], tf.constant(kern.input_dim, settings.int_type),
message="Currently cannot handle slicing in exKxz.")]):
Xmu = tf.identity(Xmu)
with params_as_tensors_for(kern, feat):
D = tf.shape(Xmu)[1]
lengthscales = kern.lengthscales if kern.ARD \
else tf.zeros((D,), dtype=settings.float_type) + kern.lengthscales
chol_L_plus_Xcov = tf.cholesky(tf.matrix_diag(lengthscales ** 2) + Xcov) # NxDxD
all_diffs = tf.transpose(feat.Z) - tf.expand_dims(Xmu, 2) # NxDxM
sqrt_det_L = tf.reduce_prod(lengthscales)
sqrt_det_L_plus_Xcov = tf.exp(tf.reduce_sum(tf.log(tf.matrix_diag_part(chol_L_plus_Xcov)), axis=1))
determinants = sqrt_det_L / sqrt_det_L_plus_Xcov # N
exponent_mahalanobis = tf.cholesky_solve(chol_L_plus_Xcov, all_diffs) # NxDxM
non_exponent_term = tf.matmul(Xcov, exponent_mahalanobis, transpose_a=True)
non_exponent_term = tf.expand_dims(Xmu, 2) + non_exponent_term # NxDxM
exponent_mahalanobis = tf.reduce_sum(all_diffs * exponent_mahalanobis, 1) # NxM
exponent_mahalanobis = tf.exp(-0.5 * exponent_mahalanobis) # NxM
return kern.variance * (determinants[:, None] * exponent_mahalanobis)[:, None, :] * non_exponent_term |
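A NumPy sketch of the per-point determinant and Mahalanobis terms that the TensorFlow code above computes; shapes and values are invented for illustration and only a single input point is handled:
import numpy as np
D, M = 3, 5
rng = np.random.default_rng(0)
Xmu = rng.normal(size=D)                       # mean of one input point
A = rng.normal(size=(D, D))
Xcov = A @ A.T + np.eye(D)                     # its covariance
Z = rng.normal(size=(M, D))                    # inducing points
lengthscales = np.full(D, 1.5)
L = np.linalg.cholesky(np.diag(lengthscales ** 2) + Xcov)     # chol(L + Xcov)
diffs = Z.T - Xmu[:, None]                                    # D x M
sol = np.linalg.solve(L.T, np.linalg.solve(L, diffs))         # cholesky_solve equivalent
mahalanobis = np.sum(diffs * sol, axis=0)                     # length M
determinant = np.prod(lengthscales) / np.prod(np.diag(L))     # sqrt-determinant ratio
weights = determinant * np.exp(-0.5 * mahalanobis)            # one weight per inducing point
print(weights.shape)  # (5,)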
def get_metrics(self, from_time=None, to_time=None, metrics=None,
ifs=[], storageIds=[], view=None):
"""
This endpoint is not supported as of v6. Use the timeseries API
instead. To get all metrics for a host with the timeseries API use
the query:
'select * where hostId = $HOST_ID'.
To get specific metrics for a host use a comma-separated list of
the metric names as follows:
'select $METRIC_NAME1, $METRIC_NAME2 where hostId = $HOST_ID'.
For more information see http://tiny.cloudera.com/tsquery_doc
@param from_time: A datetime; start of the period to query (optional).
@param to_time: A datetime; end of the period to query (default = now).
@param metrics: List of metrics to query (default = all).
@param ifs: network interfaces to query. Default all, use None to disable.
@param storageIds: storage IDs to query. Default all, use None to disable.
@param view: View to materialize ('full' or 'summary')
@return: List of metrics and their readings.
"""
params = { }
if ifs:
params['ifs'] = ifs
elif ifs is None:
params['queryNw'] = 'false'
if storageIds:
params['storageIds'] = storageIds
elif storageIds is None:
params['queryStorage'] = 'false'
return self._get_resource_root().get_metrics(self._path() + '/metrics',
from_time, to_time, metrics, view, params) | This endpoint is not supported as of v6. Use the timeseries API
instead. To get all metrics for a host with the timeseries API use
the query:
'select * where hostId = $HOST_ID'.
To get specific metrics for a host use a comma-separated list of
the metric names as follows:
'select $METRIC_NAME1, $METRIC_NAME2 where hostId = $HOST_ID'.
For more information see http://tiny.cloudera.com/tsquery_doc
@param from_time: A datetime; start of the period to query (optional).
@param to_time: A datetime; end of the period to query (default = now).
@param metrics: List of metrics to query (default = all).
@param ifs: network interfaces to query. Default all, use None to disable.
@param storageIds: storage IDs to query. Default all, use None to disable.
@param view: View to materialize ('full' or 'summary')
@return: List of metrics and their readings. | Below is the the instruction that describes the task:
### Input:
This endpoint is not supported as of v6. Use the timeseries API
instead. To get all metrics for a host with the timeseries API use
the query:
'select * where hostId = $HOST_ID'.
To get specific metrics for a host use a comma-separated list of
the metric names as follows:
'select $METRIC_NAME1, $METRIC_NAME2 where hostId = $HOST_ID'.
For more information see http://tiny.cloudera.com/tsquery_doc
@param from_time: A datetime; start of the period to query (optional).
@param to_time: A datetime; end of the period to query (default = now).
@param metrics: List of metrics to query (default = all).
@param ifs: network interfaces to query. Default all, use None to disable.
@param storageIds: storage IDs to query. Default all, use None to disable.
@param view: View to materialize ('full' or 'summary')
@return: List of metrics and their readings.
### Response:
def get_metrics(self, from_time=None, to_time=None, metrics=None,
ifs=[], storageIds=[], view=None):
"""
This endpoint is not supported as of v6. Use the timeseries API
instead. To get all metrics for a host with the timeseries API use
the query:
'select * where hostId = $HOST_ID'.
To get specific metrics for a host use a comma-separated list of
the metric names as follows:
'select $METRIC_NAME1, $METRIC_NAME2 where hostId = $HOST_ID'.
For more information see http://tiny.cloudera.com/tsquery_doc
@param from_time: A datetime; start of the period to query (optional).
@param to_time: A datetime; end of the period to query (default = now).
@param metrics: List of metrics to query (default = all).
@param ifs: network interfaces to query. Default all, use None to disable.
@param storageIds: storage IDs to query. Default all, use None to disable.
@param view: View to materialize ('full' or 'summary')
@return: List of metrics and their readings.
"""
params = { }
if ifs:
params['ifs'] = ifs
elif ifs is None:
params['queryNw'] = 'false'
if storageIds:
params['storageIds'] = storageIds
elif storageIds is None:
params['queryStorage'] = 'false'
return self._get_resource_root().get_metrics(self._path() + '/metrics',
from_time, to_time, metrics, view, params) |
def get_csv_rows_for_installed(
old_csv_rows, # type: Iterable[List[str]]
installed, # type: Dict[str, str]
changed, # type: set
generated, # type: List[str]
lib_dir, # type: str
):
# type: (...) -> List[InstalledCSVRow]
"""
:param installed: A map from archive RECORD path to installation RECORD
path.
"""
installed_rows = [] # type: List[InstalledCSVRow]
for row in old_csv_rows:
if len(row) > 3:
logger.warning(
'RECORD line has more than three elements: {}'.format(row)
)
# Make a copy because we are mutating the row.
row = list(row)
old_path = row[0]
new_path = installed.pop(old_path, old_path)
row[0] = new_path
if new_path in changed:
digest, length = rehash(new_path)
row[1] = digest
row[2] = length
installed_rows.append(tuple(row))
for f in generated:
digest, length = rehash(f)
installed_rows.append((normpath(f, lib_dir), digest, str(length)))
for f in installed:
installed_rows.append((installed[f], '', ''))
return installed_rows | :param installed: A map from archive RECORD path to installation RECORD
path. | Below is the the instruction that describes the task:
### Input:
:param installed: A map from archive RECORD path to installation RECORD
path.
### Response:
def get_csv_rows_for_installed(
old_csv_rows, # type: Iterable[List[str]]
installed, # type: Dict[str, str]
changed, # type: set
generated, # type: List[str]
lib_dir, # type: str
):
# type: (...) -> List[InstalledCSVRow]
"""
:param installed: A map from archive RECORD path to installation RECORD
path.
"""
installed_rows = [] # type: List[InstalledCSVRow]
for row in old_csv_rows:
if len(row) > 3:
logger.warning(
'RECORD line has more than three elements: {}'.format(row)
)
# Make a copy because we are mutating the row.
row = list(row)
old_path = row[0]
new_path = installed.pop(old_path, old_path)
row[0] = new_path
if new_path in changed:
digest, length = rehash(new_path)
row[1] = digest
row[2] = length
installed_rows.append(tuple(row))
for f in generated:
digest, length = rehash(f)
installed_rows.append((normpath(f, lib_dir), digest, str(length)))
for f in installed:
installed_rows.append((installed[f], '', ''))
return installed_rows |
def disambiguate_url(url, location=None):
"""turn multi-ip interfaces '0.0.0.0' and '*' into connectable
ones, based on the location (default interpretation is localhost).
This is for zeromq urls, such as tcp://*:10101."""
try:
proto,ip,port = split_url(url)
except AssertionError:
# probably not tcp url; could be ipc, etc.
return url
ip = disambiguate_ip_address(ip,location)
return "%s://%s:%s"%(proto,ip,port) | turn multi-ip interfaces '0.0.0.0' and '*' into connectable
ones, based on the location (default interpretation is localhost).
This is for zeromq urls, such as tcp://*:10101. | Below is the the instruction that describes the task:
### Input:
turn multi-ip interfaces '0.0.0.0' and '*' into connectable
ones, based on the location (default interpretation is localhost).
This is for zeromq urls, such as tcp://*:10101.
### Response:
def disambiguate_url(url, location=None):
"""turn multi-ip interfaces '0.0.0.0' and '*' into connectable
ones, based on the location (default interpretation is localhost).
This is for zeromq urls, such as tcp://*:10101."""
try:
proto,ip,port = split_url(url)
except AssertionError:
# probably not tcp url; could be ipc, etc.
return url
ip = disambiguate_ip_address(ip,location)
return "%s://%s:%s"%(proto,ip,port) |
def resize(im, short, max_size):
"""
only resize input image to target size and return scale
:param im: BGR image input by opencv
:param short: one dimensional size (the short side)
:param max_size: one dimensional max size (the long side)
:return: resized image (NDArray) and scale (float)
"""
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(short) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
return im, im_scale | only resize input image to target size and return scale
:param im: BGR image input by opencv
:param short: one dimensional size (the short side)
:param max_size: one dimensional max size (the long side)
:return: resized image (NDArray) and scale (float) | Below is the the instruction that describes the task:
### Input:
only resize input image to target size and return scale
:param im: BGR image input by opencv
:param short: one dimensional size (the short side)
:param max_size: one dimensional max size (the long side)
:return: resized image (NDArray) and scale (float)
### Response:
def resize(im, short, max_size):
"""
only resize input image to target size and return scale
:param im: BGR image input by opencv
:param short: one dimensional size (the short side)
:param max_size: one dimensional max size (the long side)
:return: resized image (NDArray) and scale (float)
"""
im_shape = im.shape
im_size_min = np.min(im_shape[0:2])
im_size_max = np.max(im_shape[0:2])
im_scale = float(short) / float(im_size_min)
# prevent bigger axis from being more than max_size:
if np.round(im_scale * im_size_max) > max_size:
im_scale = float(max_size) / float(im_size_max)
im = cv2.resize(im, None, None, fx=im_scale, fy=im_scale, interpolation=cv2.INTER_LINEAR)
return im, im_scale |
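The scale computation above shown without OpenCV; only the scale factor is derived here and the actual interpolation call is omitted:
import numpy as np
def compute_scale(shape, short, max_size):
    size_min = np.min(shape[0:2])
    size_max = np.max(shape[0:2])
    scale = float(short) / float(size_min)
    if np.round(scale * size_max) > max_size:
        scale = float(max_size) / float(size_max)
    return scale
print(compute_scale((480, 1920, 3), short=600, max_size=1000))  # ~0.52, capped by the long side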
def run(host=DEFAULT_HOST, port=DEFAULT_PORT, path='.'):
"""Run the development server
"""
path = abspath(path)
c = Clay(path)
c.run(host=host, port=port) | Run the development server | Below is the the instruction that describes the task:
### Input:
Run the development server
### Response:
def run(host=DEFAULT_HOST, port=DEFAULT_PORT, path='.'):
"""Run the development server
"""
path = abspath(path)
c = Clay(path)
c.run(host=host, port=port) |
def from_whypo(cls, xml, encoding='utf-8'):
"""Constructor from xml element *WHYPO*
:param xml.etree.ElementTree xml: the xml *WHYPO* element
:param string encoding: encoding of the xml
"""
word = unicode(xml.get('WORD'), encoding)
confidence = float(xml.get('CM'))
return cls(word, confidence) | Constructor from xml element *WHYPO*
:param xml.etree.ElementTree xml: the xml *WHYPO* element
:param string encoding: encoding of the xml | Below is the the instruction that describes the task:
### Input:
Constructor from xml element *WHYPO*
:param xml.etree.ElementTree xml: the xml *WHYPO* element
:param string encoding: encoding of the xml
### Response:
def from_whypo(cls, xml, encoding='utf-8'):
"""Constructor from xml element *WHYPO*
:param xml.etree.ElementTree xml: the xml *WHYPO* element
:param string encoding: encoding of the xml
"""
word = unicode(xml.get('WORD'), encoding)
confidence = float(xml.get('CM'))
return cls(word, confidence) |
def from_iso_time(timestring, use_dateutil=True):
"""Parse an ISO8601-formatted datetime string and return a datetime.time
object.
"""
if not _iso8601_time_re.match(timestring):
raise ValueError('Not a valid ISO8601-formatted time string')
if dateutil_available and use_dateutil:
return parser.parse(timestring).time()
else:
if len(timestring) > 8: # has microseconds
fmt = '%H:%M:%S.%f'
else:
fmt = '%H:%M:%S'
return datetime.datetime.strptime(timestring, fmt).time() | Parse an ISO8601-formatted datetime string and return a datetime.time
object. | Below is the the instruction that describes the task:
### Input:
Parse an ISO8601-formatted datetime string and return a datetime.time
object.
### Response:
def from_iso_time(timestring, use_dateutil=True):
"""Parse an ISO8601-formatted datetime string and return a datetime.time
object.
"""
if not _iso8601_time_re.match(timestring):
raise ValueError('Not a valid ISO8601-formatted time string')
if dateutil_available and use_dateutil:
return parser.parse(timestring).time()
else:
if len(timestring) > 8: # has microseconds
fmt = '%H:%M:%S.%f'
else:
fmt = '%H:%M:%S'
return datetime.datetime.strptime(timestring, fmt).time() |
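The strptime fallback path above, shown standalone without dateutil:
import datetime
for timestring in ("13:45:30", "13:45:30.123456"):
    fmt = "%H:%M:%S.%f" if len(timestring) > 8 else "%H:%M:%S"
    print(datetime.datetime.strptime(timestring, fmt).time())
# 13:45:30
# 13:45:30.123456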
def joint_probabilities_nn(
neighbors,
distances,
perplexities,
symmetrize=True,
normalization="pair-wise",
n_reference_samples=None,
n_jobs=1,
):
"""Compute the conditional probability matrix P_{j|i}.
This method computes an approximation to P using the nearest neighbors.
Parameters
----------
neighbors: np.ndarray
A `n_samples * k_neighbors` matrix containing the indices to each
points" nearest neighbors in descending order.
distances: np.ndarray
A `n_samples * k_neighbors` matrix containing the distances to the
neighbors at indices defined in the neighbors parameter.
perplexities: double
The desired perplexity of the probability distribution.
symmetrize: bool
Whether to symmetrize the probability matrix or not. Symmetrizing is
used for typical t-SNE, but does not make sense when embedding new data
into an existing embedding.
normalization: str
The normalization scheme to use for the affinities. Standard t-SNE
considers interactions between all the data points, therefore the entire
affinity matrix is regarded as a probability distribution, and must sum
to 1. When embedding new points, we only consider interactions to
existing points, and treat each point separately. In this case, we
row-normalize the affinity matrix, meaning each point gets its own
probability distribution.
n_reference_samples: int
The number of samples in the existing (reference) embedding. Needed to
properly construct the sparse P matrix.
n_jobs: int
Number of threads.
Returns
-------
csr_matrix
A `n_samples * n_reference_samples` matrix containing the probabilities
that a new sample would appear as a neighbor of a reference point.
"""
assert normalization in (
"pair-wise",
"point-wise",
), f"Unrecognized normalization scheme `{normalization}`."
n_samples, k_neighbors = distances.shape
if n_reference_samples is None:
n_reference_samples = n_samples
# Compute asymmetric pairwise input similarities
conditional_P = _tsne.compute_gaussian_perplexity(
distances, np.array(perplexities, dtype=float), num_threads=n_jobs
)
conditional_P = np.asarray(conditional_P)
P = sp.csr_matrix(
(
conditional_P.ravel(),
neighbors.ravel(),
range(0, n_samples * k_neighbors + 1, k_neighbors),
),
shape=(n_samples, n_reference_samples),
)
# Symmetrize the probability matrix
if symmetrize:
P = (P + P.T) / 2
if normalization == "pair-wise":
P /= np.sum(P)
elif normalization == "point-wise":
P = sp.diags(np.asarray(1 / P.sum(axis=1)).ravel()) @ P
return P | Compute the conditional probability matrix P_{j|i}.
This method computes an approximation to P using the nearest neighbors.
Parameters
----------
neighbors: np.ndarray
A `n_samples * k_neighbors` matrix containing the indices to each
points" nearest neighbors in descending order.
distances: np.ndarray
A `n_samples * k_neighbors` matrix containing the distances to the
neighbors at indices defined in the neighbors parameter.
perplexities: double
The desired perplexity of the probability distribution.
symmetrize: bool
Whether to symmetrize the probability matrix or not. Symmetrizing is
used for typical t-SNE, but does not make sense when embedding new data
into an existing embedding.
normalization: str
The normalization scheme to use for the affinities. Standard t-SNE
considers interactions between all the data points, therefore the entire
affinity matrix is regarded as a probability distribution, and must sum
to 1. When embedding new points, we only consider interactions to
existing points, and treat each point separately. In this case, we
row-normalize the affinity matrix, meaning each point gets its own
probability distribution.
n_reference_samples: int
The number of samples in the existing (reference) embedding. Needed to
properly construct the sparse P matrix.
n_jobs: int
Number of threads.
Returns
-------
csr_matrix
A `n_samples * n_reference_samples` matrix containing the probabilities
that a new sample would appear as a neighbor of a reference point. | Below is the the instruction that describes the task:
### Input:
Compute the conditional probability matrix P_{j|i}.
This method computes an approximation to P using the nearest neighbors.
Parameters
----------
neighbors: np.ndarray
A `n_samples * k_neighbors` matrix containing the indices to each
points" nearest neighbors in descending order.
distances: np.ndarray
A `n_samples * k_neighbors` matrix containing the distances to the
neighbors at indices defined in the neighbors parameter.
perplexities: double
The desired perplexity of the probability distribution.
symmetrize: bool
Whether to symmetrize the probability matrix or not. Symmetrizing is
used for typical t-SNE, but does not make sense when embedding new data
into an existing embedding.
normalization: str
The normalization scheme to use for the affinities. Standard t-SNE
considers interactions between all the data points, therefore the entire
affinity matrix is regarded as a probability distribution, and must sum
to 1. When embedding new points, we only consider interactions to
existing points, and treat each point separately. In this case, we
row-normalize the affinity matrix, meaning each point gets its own
probability distribution.
n_reference_samples: int
The number of samples in the existing (reference) embedding. Needed to
properly construct the sparse P matrix.
n_jobs: int
Number of threads.
Returns
-------
csr_matrix
A `n_samples * n_reference_samples` matrix containing the probabilities
that a new sample would appear as a neighbor of a reference point.
### Response:
def joint_probabilities_nn(
neighbors,
distances,
perplexities,
symmetrize=True,
normalization="pair-wise",
n_reference_samples=None,
n_jobs=1,
):
"""Compute the conditional probability matrix P_{j|i}.
This method computes an approximation to P using the nearest neighbors.
Parameters
----------
neighbors: np.ndarray
A `n_samples * k_neighbors` matrix containing the indices to each
points" nearest neighbors in descending order.
distances: np.ndarray
A `n_samples * k_neighbors` matrix containing the distances to the
neighbors at indices defined in the neighbors parameter.
perplexities: double
The desired perplexity of the probability distribution.
symmetrize: bool
Whether to symmetrize the probability matrix or not. Symmetrizing is
used for typical t-SNE, but does not make sense when embedding new data
into an existing embedding.
normalization: str
The normalization scheme to use for the affinities. Standard t-SNE
considers interactions between all the data points, therefore the entire
affinity matrix is regarded as a probability distribution, and must sum
to 1. When embedding new points, we only consider interactions to
existing points, and treat each point separately. In this case, we
row-normalize the affinity matrix, meaning each point gets its own
probability distribution.
n_reference_samples: int
The number of samples in the existing (reference) embedding. Needed to
properly construct the sparse P matrix.
n_jobs: int
Number of threads.
Returns
-------
csr_matrix
A `n_samples * n_reference_samples` matrix containing the probabilities
that a new sample would appear as a neighbor of a reference point.
"""
assert normalization in (
"pair-wise",
"point-wise",
), f"Unrecognized normalization scheme `{normalization}`."
n_samples, k_neighbors = distances.shape
if n_reference_samples is None:
n_reference_samples = n_samples
# Compute asymmetric pairwise input similarities
conditional_P = _tsne.compute_gaussian_perplexity(
distances, np.array(perplexities, dtype=float), num_threads=n_jobs
)
conditional_P = np.asarray(conditional_P)
P = sp.csr_matrix(
(
conditional_P.ravel(),
neighbors.ravel(),
range(0, n_samples * k_neighbors + 1, k_neighbors),
),
shape=(n_samples, n_reference_samples),
)
# Symmetrize the probability matrix
if symmetrize:
P = (P + P.T) / 2
if normalization == "pair-wise":
P /= np.sum(P)
elif normalization == "point-wise":
P = sp.diags(np.asarray(1 / P.sum(axis=1)).ravel()) @ P
return P |
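A sketch of the sparse P construction and the two normalization schemes above; the conditional probabilities below are placeholders rather than perplexity-calibrated values:
import numpy as np
import scipy.sparse as sp
n_samples, k = 4, 2
neighbors = np.array([[1, 2], [0, 3], [0, 1], [1, 2]])
conditional_P = np.full((n_samples, k), 0.5)                    # placeholder values
P = sp.csr_matrix(
    (conditional_P.ravel(), neighbors.ravel(),
     range(0, n_samples * k + 1, k)),
    shape=(n_samples, n_samples),
)
P = (P + P.T) / 2                                                # symmetrize
P_pair = P / P.sum()                                             # pair-wise: sums to 1 overall
P_point = sp.diags(np.asarray(1 / P.sum(axis=1)).ravel()) @ P    # point-wise: row-stochastic
print(P_pair.sum(), np.asarray(P_point.sum(axis=1)).ravel())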
def sam_iter(handle, start_line=None, headers=False):
"""Iterate over SAM file and return SAM entries
Args:
handle (file): SAM file handle, can be any iterator so long as it
returns subsequent "lines" of a SAM entry
start_line (str): Next SAM entry, if 'handle' has been partially read
and you want to start iterating at the next entry, read the next
SAM entry and pass it to this variable when calling sam_iter.
See 'Examples.'
headers (bool): Yields headers if True, else skips lines starting with
"@"
Yields:
SamEntry: class containing all SAM data, yields str for headers if
headers option is True, then yields SamEntry for entries
Examples:
The following two examples demonstrate how to use sam_iter.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> for entry in sam_iter(open('test.sam')):
... print(entry.qname) # Print query sequence name
... print(entry.flag) # Print flag number of alignment
... print(entry.rname) # Print reference sequence name
... print(entry.pos) # Print starting position of alignment
... print(entry.mapq) # Print mapping confidence of alignment
... print(entry.cigar) # Print CIGAR string of alignment
... print(entry.rnext) # Print paired read name
... print(entry.pnext) # Print position of paired read
... print(entry.tlen) # Print alignment length of all paired reads
... print(entry.seq) # Print query sequence
... print(entry.qual) # Print query quality scores
... print(entry.write()) # Print whole SAM entry
>>> sam_handle = open('test.gff3')
>>> next(sam_handle) # Skip first line/entry
>>> next_line = next(sam_handle) # Store next entry
>>> for entry in sam_iter(open('test.sam')):
... print(entry.qname) # Print query sequence name
... print(entry.flag) # Print flag number of alignment
... print(entry.rname) # Print reference sequence name
... print(entry.pos) # Print starting position of alignment
... print(entry.mapq) # Print mapping confidence of alignment
... print(entry.cigar) # Print CIGAR string of alignment
... print(entry.rnext) # Print paired read name
... print(entry.pnext) # Print position of paired read
... print(entry.tlen) # Print alignment length of all paired reads
... print(entry.seq) # Print query sequence
... print(entry.qual) # Print query quality scores
... print(entry.write()) # Print whole SAM entry
"""
# Speed tricks: reduces function calls
split = str.split
strip = str.strip
next_line = next
if start_line is None:
line = next_line(handle) # Read first SAM entry
else:
line = start_line # Set header to given header
# Check if input is text or bytestream
if (isinstance(line, bytes)):
def next_line(i):
return next(i).decode('utf-8')
line = strip(line.decode('utf-8'))
else:
line = strip(line)
# A manual 'for' loop isn't needed to read the file properly and quickly,
# unlike fasta_iter and fastq_iter, but it is necessary to begin iterating
# partway through a file when the user gives a starting line.
try: # Manually construct a for loop to improve speed by using 'next'
while True: # Loop until StopIteration Exception raised
split_line = split(line, '\t')
if line.startswith('@') and not headers:
line = strip(next_line(handle))
continue
elif line.startswith('@') and headers:
yield line
line = strip(next_line(handle))
continue
data = SamEntry()
data.qname = split_line[0]
try: # Differentiate between int and hex bit flags
data.flag = int(split_line[1])
except ValueError:
data.flag = split_line[1]
data.rname = split_line[2]
data.pos = int(split_line[3])
data.mapq = int(split_line[4])
data.cigar = split_line[5]
data.rnext = split_line[6]
data.pnext = int(split_line[7])
data.tlen = int(split_line[8])
data.seq = split_line[9]
data.qual = split_line[10]
line = strip(next_line(handle)) # Raises StopIteration at EOF
yield data
except StopIteration: # Yield last SAM entry
yield data | Iterate over SAM file and return SAM entries
Args:
handle (file): SAM file handle, can be any iterator so long as it
returns subsequent "lines" of a SAM entry
start_line (str): Next SAM entry, if 'handle' has been partially read
and you want to start iterating at the next entry, read the next
SAM entry and pass it to this variable when calling sam_iter.
See 'Examples.'
headers (bool): Yields headers if True, else skips lines starting with
"@"
Yields:
SamEntry: class containing all SAM data, yields str for headers if
headers option is True, then yields SamEntry for entries
Examples:
The following two examples demonstrate how to use sam_iter.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> for entry in sam_iter(open('test.sam')):
... print(entry.qname) # Print query sequence name
... print(entry.flag) # Print flag number of alignment
... print(entry.rname) # Print reference sequence name
... print(entry.pos) # Print starting position of alignment
... print(entry.mapq) # Print mapping confidence of alignment
... print(entry.cigar) # Print CIGAR string of alignment
... print(entry.rnext) # Print paired read name
... print(entry.pnext) # Print position of paired read
... print(entry.tlen) # Print alignment length of all paired reads
... print(entry.seq) # Print query sequence
... print(entry.qual) # Print query quality scores
... print(entry.write()) # Print whole SAM entry
>>> sam_handle = open('test.gff3')
>>> next(sam_handle) # Skip first line/entry
>>> next_line = next(sam_handle) # Store next entry
>>> for entry in sam_iter(open('test.sam')):
... print(entry.qname) # Print query sequence name
... print(entry.flag) # Print flag number of alignment
... print(entry.rname) # Print reference sequence name
... print(entry.pos) # Print starting position of alignment
... print(entry.mapq) # Print mapping confidence of alignment
... print(entry.cigar) # Print CIGAR string of alignment
... print(entry.rnext) # Print paired read name
... print(entry.pnext) # Print position of paired read
... print(entry.tlen) # Print alignment length of all paired reads
... print(entry.seq) # Print query sequence
... print(entry.qual) # Print query quality scores
... print(entry.write()) # Print whole SAM entry | Below is the the instruction that describes the task:
### Input:
Iterate over SAM file and return SAM entries
Args:
handle (file): SAM file handle, can be any iterator so long as it
returns subsequent "lines" of a SAM entry
start_line (str): Next SAM entry, if 'handle' has been partially read
and you want to start iterating at the next entry, read the next
SAM entry and pass it to this variable when calling sam_iter.
See 'Examples.'
headers (bool): Yields headers if True, else skips lines starting with
"@"
Yields:
SamEntry: class containing all SAM data, yields str for headers if
headers option is True, then yields SamEntry for entries
Examples:
The following two examples demonstrate how to use sam_iter.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> for entry in sam_iter(open('test.sam')):
... print(entry.qname) # Print query sequence name
... print(entry.flag) # Print flag number of alignment
... print(entry.rname) # Print reference sequence name
... print(entry.pos) # Print starting position of alignment
... print(entry.mapq) # Print mapping confidence of alignment
... print(entry.cigar) # Print CIGAR string of alignment
... print(entry.rnext) # Print paired read name
... print(entry.pnext) # Print position of paired read
... print(entry.tlen) # Print alignment length of all paired reads
... print(entry.seq) # Print query sequence
... print(entry.qual) # Print query quality scores
... print(entry.write()) # Print whole SAM entry
>>> sam_handle = open('test.gff3')
>>> next(sam_handle) # Skip first line/entry
>>> next_line = next(sam_handle) # Store next entry
>>> for entry in sam_iter(open('test.sam')):
... print(entry.qname) # Print query sequence name
... print(entry.flag) # Print flag number of alignment
... print(entry.rname) # Print reference sequence name
... print(entry.pos) # Print starting position of alignment
... print(entry.mapq) # Print mapping confidence of alignment
... print(entry.cigar) # Print CIGAR string of alignment
... print(entry.rnext) # Print paired read name
... print(entry.pnext) # Print position of paired read
... print(entry.tlen) # Print alignment length of all paired reads
... print(entry.seq) # Print query sequence
... print(entry.qual) # Print query quality scores
... print(entry.write()) # Print whole SAM entry
### Response:
def sam_iter(handle, start_line=None, headers=False):
"""Iterate over SAM file and return SAM entries
Args:
handle (file): SAM file handle, can be any iterator so long as it
returns subsequent "lines" of a SAM entry
start_line (str): Next SAM entry, if 'handle' has been partially read
and you want to start iterating at the next entry, read the next
SAM entry and pass it to this variable when calling sam_iter.
See 'Examples.'
headers (bool): Yields headers if True, else skips lines starting with
"@"
Yields:
SamEntry: class containing all SAM data, yields str for headers if
headers option is True, then yields SamEntry for entries
Examples:
The following two examples demonstrate how to use sam_iter.
Note: These doctests will not pass, examples are only in doctest
format as per convention. bio_utils uses pytests for testing.
>>> for entry in sam_iter(open('test.sam')):
... print(entry.qname) # Print query sequence name
... print(entry.flag) # Print flag number of alignment
... print(entry.rname) # Print reference sequence name
... print(entry.pos) # Print starting position of alignment
... print(entry.mapq) # Print mapping confidence of alignment
... print(entry.cigar) # Print CIGAR string of alignment
... print(entry.rnext) # Print paired read name
... print(entry.pnext) # Print position of paired read
... print(entry.tlen) # Print alignment length of all paired reads
... print(entry.seq) # Print query sequence
... print(entry.qual) # Print query quality scores
... print(entry.write()) # Print whole SAM entry
>>> sam_handle = open('test.gff3')
>>> next(sam_handle) # Skip first line/entry
>>> next_line = next(sam_handle) # Store next entry
>>> for entry in sam_iter(open('test.sam')):
... print(entry.qname) # Print query sequence name
... print(entry.flag) # Print flag number of alignment
... print(entry.rname) # Print reference sequence name
... print(entry.pos) # Print starting position of alignment
... print(entry.mapq) # Print mapping confidence of alignment
... print(entry.cigar) # Print CIGAR string of alignment
... print(entry.rnext) # Print paired read name
... print(entry.pnext) # Print position of paired read
... print(entry.tlen) # Print alignment length of all paired reads
... print(entry.seq) # Print query sequence
... print(entry.qual) # Print query quality scores
... print(entry.write()) # Print whole SAM entry
"""
# Speed tricks: reduces function calls
split = str.split
strip = str.strip
next_line = next
if start_line is None:
line = next_line(handle) # Read first SAM entry
else:
line = start_line # Set header to given header
# Check if input is text or bytestream
if (isinstance(line, bytes)):
def next_line(i):
return next(i).decode('utf-8')
line = strip(line.decode('utf-8'))
else:
line = strip(line)
# A manual 'for' loop isn't needed to read the file properly and quickly,
# unlike fasta_iter and fastq_iter, but it is necessary to begin iterating
# partway through a file when the user gives a starting line.
try: # Manually construct a for loop to improve speed by using 'next'
while True: # Loop until StopIteration Exception raised
split_line = split(line, '\t')
if line.startswith('@') and not headers:
line = strip(next_line(handle))
continue
elif line.startswith('@') and headers:
yield line
line = strip(next_line(handle))
continue
data = SamEntry()
data.qname = split_line[0]
try: # Differentiate between int and hex bit flags
data.flag = int(split_line[1])
except ValueError:
data.flag = split_line[1]
data.rname = split_line[2]
data.pos = int(split_line[3])
data.mapq = int(split_line[4])
data.cigar = split_line[5]
data.rnext = split_line[6]
data.pnext = int(split_line[7])
data.tlen = int(split_line[8])
data.seq = split_line[9]
data.qual = split_line[10]
line = strip(next_line(handle)) # Raises StopIteration at EOF
yield data
except StopIteration: # Yield last SAM entry
yield data |
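A tiny sketch of splitting one SAM alignment line into the eleven mandatory fields parsed above; the record itself is invented:
line = "read1\t0\tchr1\t100\t60\t8M\t*\t0\t0\tACGTACGT\tIIIIIIII"
fields = line.split("\t")
qname, rname, pos = fields[0], fields[2], int(fields[3])
mapq, cigar, seq, qual = int(fields[4]), fields[5], fields[9], fields[10]
print(qname, rname, pos, cigar, seq)  # read1 chr1 100 8M ACGTACGT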
def get_request_mock():
"""Build a ``request`` mock up for tests"""
from django.test.client import RequestFactory
from django.core.handlers.base import BaseHandler
factory = RequestFactory()
request = factory.get('/')
class FakeUser():
is_authenticated = False
is_staff = False
request.user = FakeUser()
request.session = {}
return request | Build a ``request`` mock up for tests | Below is the the instruction that describes the task:
### Input:
Build a ``request`` mock up for tests
### Response:
def get_request_mock():
"""Build a ``request`` mock up for tests"""
from django.test.client import RequestFactory
from django.core.handlers.base import BaseHandler
factory = RequestFactory()
request = factory.get('/')
class FakeUser():
is_authenticated = False
is_staff = False
request.user = FakeUser()
request.session = {}
return request |
def coding_sequence(self):
"""
cDNA coding sequence (from start codon to stop codon, without
any introns)
"""
if self.sequence is None:
return None
start = self.first_start_codon_spliced_offset
end = self.last_stop_codon_spliced_offset
# If start codon is at nucleotide offsets [3,4,5] and
# stop codon is at nucleotide offsets [20,21,22]
# then start = 3 and end = 22.
#
# Adding 1 to end since Python uses non-inclusive ends in slices/ranges.
# pylint: disable=invalid-slice-index
# TODO(tavi) Figure out pylint is not happy with this slice
return self.sequence[start:end + 1] | cDNA coding sequence (from start codon to stop codon, without
any introns) | Below is the the instruction that describes the task:
### Input:
cDNA coding sequence (from start codon to stop codon, without
any introns)
### Response:
def coding_sequence(self):
"""
cDNA coding sequence (from start codon to stop codon, without
any introns)
"""
if self.sequence is None:
return None
start = self.first_start_codon_spliced_offset
end = self.last_stop_codon_spliced_offset
# If start codon is at nucleotide offsets [3,4,5] and
# stop codon is at nucleotide offsets [20,21,22]
# then start = 3 and end = 22.
#
# Adding 1 to end since Python uses non-inclusive ends in slices/ranges.
# pylint: disable=invalid-slice-index
# TODO(tavi) Figure out why pylint is not happy with this slice
return self.sequence[start:end + 1] |
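A tiny sketch of the inclusive-end slicing described in the comment above, with an invented sequence whose start codon begins at offset 3 and whose stop codon ends at offset 23:
sequence = "NNNATGAAACCCGGGTTTCCCTAGNN"
start, end = 3, 23                    # first base of ATG, last base of TAG
print(sequence[start:end + 1])        # ATGAAACCCGGGTTTCCCTAG -> 21 nt, 7 codons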
def receive_trial_result(self, parameter_id, parameters, value):
'''
Record an observation of the objective function
parameter_id : int
parameters : dict of parameters
value: final metrics of the trial, including reward
'''
logger.debug('acquiring lock for param {}'.format(parameter_id))
self.thread_lock.acquire()
logger.debug('lock for current acquired')
reward = extract_scalar_reward(value)
if self.optimize_mode is OptimizeMode.Minimize:
reward = -reward
logger.debug('receive trial result is:\n')
logger.debug(str(parameters))
logger.debug(str(reward))
indiv = Individual(indiv_id=int(os.path.split(parameters['save_dir'])[1]),
graph_cfg=graph_loads(parameters['graph']), result=reward)
self.population.append(indiv)
logger.debug('releasing lock')
self.thread_lock.release()
self.events[indiv.indiv_id].set() | Record an observation of the objective function
parameter_id : int
parameters : dict of parameters
value: final metrics of the trial, including reward | Below is the the instruction that describes the task:
### Input:
Record an observation of the objective function
parameter_id : int
parameters : dict of parameters
value: final metrics of the trial, including reward
### Response:
def receive_trial_result(self, parameter_id, parameters, value):
'''
Record an observation of the objective function
parameter_id : int
parameters : dict of parameters
value: final metrics of the trial, including reward
'''
logger.debug('acquiring lock for param {}'.format(parameter_id))
self.thread_lock.acquire()
logger.debug('lock for current acquired')
reward = extract_scalar_reward(value)
if self.optimize_mode is OptimizeMode.Minimize:
reward = -reward
logger.debug('receive trial result is:\n')
logger.debug(str(parameters))
logger.debug(str(reward))
indiv = Individual(indiv_id=int(os.path.split(parameters['save_dir'])[1]),
graph_cfg=graph_loads(parameters['graph']), result=reward)
self.population.append(indiv)
logger.debug('releasing lock')
self.thread_lock.release()
self.events[indiv.indiv_id].set() |
def enable_gui(gui, kernel=None):
"""Enable integration with a given GUI"""
if gui not in loop_map:
raise ValueError("GUI %r not supported" % gui)
if kernel is None:
if Application.initialized():
kernel = getattr(Application.instance(), 'kernel', None)
if kernel is None:
raise RuntimeError("You didn't specify a kernel,"
" and no IPython Application with a kernel appears to be running."
)
loop = loop_map[gui]
if kernel.eventloop is not None and kernel.eventloop is not loop:
raise RuntimeError("Cannot activate multiple GUI eventloops")
kernel.eventloop = loop | Enable integration with a given GUI | Below is the the instruction that describes the task:
### Input:
Enable integration with a given GUI
### Response:
def enable_gui(gui, kernel=None):
"""Enable integration with a given GUI"""
if gui not in loop_map:
raise ValueError("GUI %r not supported" % gui)
if kernel is None:
if Application.initialized():
kernel = getattr(Application.instance(), 'kernel', None)
if kernel is None:
raise RuntimeError("You didn't specify a kernel,"
" and no IPython Application with a kernel appears to be running."
)
loop = loop_map[gui]
if kernel.eventloop is not None and kernel.eventloop is not loop:
raise RuntimeError("Cannot activate multiple GUI eventloops")
kernel.eventloop = loop |
def SVD_sim(sp, lowcut, highcut, samp_rate,
amp_range=np.arange(-10, 10, 0.01)):
"""
Generate basis vectors of a set of simulated seismograms.
Inputs should have a range of S-P amplitude ratios, in theory to simulate \
a range of focal mechanisms.
:type sp: int
:param sp: S-P time in seconds - will be converted to samples according \
to samp_rate.
:type lowcut: float
:param lowcut: Low-cut for bandpass filter in Hz
:type highcut: float
:param highcut: High-cut for bandpass filter in Hz
:type samp_rate: float
:param samp_rate: Sampling rate in Hz
:type amp_range: numpy.ndarray
:param amp_range: Amplitude ratio range to generate synthetics for.
:returns: set of output basis vectors
:rtype: :class:`numpy.ndarray`
"""
# Convert SP to samples
sp = int(sp * samp_rate)
# Scan through a range of amplitude ratios
synthetics = [Stream(Trace(seis_sim(sp, a))) for a in amp_range]
for st in synthetics:
for tr in st:
tr.stats.station = 'SYNTH'
tr.stats.channel = 'SH1'
tr.stats.sampling_rate = samp_rate
tr.filter('bandpass', freqmin=lowcut, freqmax=highcut)
# We have a list of obspy Trace objects, we can pass this to EQcorrscan's
# SVD functions
U, s, V, stachans = clustering.svd(synthetics)
return U, s, V, stachans | Generate basis vectors of a set of simulated seismograms.
Inputs should have a range of S-P amplitude ratios, in theory to simulate \
a range of focal mechanisms.
:type sp: int
:param sp: S-P time in seconds - will be converted to samples according \
to samp_rate.
:type lowcut: float
:param lowcut: Low-cut for bandpass filter in Hz
:type highcut: float
:param highcut: High-cut for bandpass filter in Hz
:type samp_rate: float
:param samp_rate: Sampling rate in Hz
:type amp_range: numpy.ndarray
:param amp_range: Amplitude ratio range to generate synthetics for.
:returns: set of output basis vectors
:rtype: :class:`numpy.ndarray` | Below is the instruction that describes the task:
### Input:
Generate basis vectors of a set of simulated seismograms.
Inputs should have a range of S-P amplitude ratios, in theory to simulate \
a range of focal mechanisms.
:type sp: int
:param sp: S-P time in seconds - will be converted to samples according \
to samp_rate.
:type lowcut: float
:param lowcut: Low-cut for bandpass filter in Hz
:type highcut: float
:param highcut: High-cut for bandpass filter in Hz
:type samp_rate: float
:param samp_rate: Sampling rate in Hz
:type amp_range: numpy.ndarray
:param amp_range: Amplitude ratio range to generate synthetics for.
:returns: set of output basis vectors
:rtype: :class:`numpy.ndarray`
### Response:
def SVD_sim(sp, lowcut, highcut, samp_rate,
amp_range=np.arange(-10, 10, 0.01)):
"""
Generate basis vectors of a set of simulated seismograms.
Inputs should have a range of S-P amplitude ratios, in theory to simulate \
a range of focal mechanisms.
:type sp: int
:param sp: S-P time in seconds - will be converted to samples according \
to samp_rate.
:type lowcut: float
:param lowcut: Low-cut for bandpass filter in Hz
:type highcut: float
:param highcut: High-cut for bandpass filter in Hz
:type samp_rate: float
:param samp_rate: Sampling rate in Hz
:type amp_range: numpy.ndarray
:param amp_range: Amplitude ratio range to generate synthetics for.
:returns: set of output basis vectors
:rtype: :class:`numpy.ndarray`
"""
# Convert SP to samples
sp = int(sp * samp_rate)
# Scan through a range of amplitude ratios
synthetics = [Stream(Trace(seis_sim(sp, a))) for a in amp_range]
for st in synthetics:
for tr in st:
tr.stats.station = 'SYNTH'
tr.stats.channel = 'SH1'
tr.stats.sampling_rate = samp_rate
tr.filter('bandpass', freqmin=lowcut, freqmax=highcut)
# We have a list of obspy Trace objects, we can pass this to EQcorrscan's
# SVD functions
U, s, V, stachans = clustering.svd(synthetics)
return U, s, V, stachans |
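A hedged usage sketch for SVD_sim; the values simply follow the parameter descriptions above, and the call assumes ObsPy and EQcorrscan are importable since the function builds Trace objects and delegates to clustering.svd.
import numpy as np
# 2 s S-P time, 2-8 Hz bandpass, 100 Hz sampling, and a coarse amplitude-ratio grid
U, s, V, stachans = SVD_sim(sp=2, lowcut=2.0, highcut=8.0, samp_rate=100.0,
                            amp_range=np.arange(-2, 2, 0.5))
print(stachans)   # the station-channel pairs the basis vectors belong to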
def _init_ui(self):
"""Initial the first UI page.
- load html from '/' endpoint
- if <title> is defined, use as windows title
"""
(content, mimetype) = make_response(self._url_map_to_function('/'))
try:
beautifulsoup = BeautifulSoup(content)
self.window.set_title(beautifulsoup.find('title').string)
except:
pass
if self.debug is True:
print self.app_dir
# Use load_string instead of load_uri because it shows warning.
self.webkit_web_view.load_string(
content,
mime_type=mimetype,
encoding='utf-8',
base_uri='/',
) | Initial the first UI page.
- load html from '/' endpoint
- if <title> is defined, use as windows title | Below is the instruction that describes the task:
### Input:
Initial the first UI page.
- load html from '/' endpoint
- if <title> is defined, use as windows title
### Response:
def _init_ui(self):
"""Initial the first UI page.
- load html from '/' endpoint
- if <title> is defined, use as windows title
"""
(content, mimetype) = make_response(self._url_map_to_function('/'))
try:
beautifulsoup = BeautifulSoup(content)
self.window.set_title(beautifulsoup.find('title').string)
except:
pass
if self.debug is True:
print self.app_dir
# Use load_string instead of load_uri because it shows warning.
self.webkit_web_view.load_string(
content,
mime_type=mimetype,
encoding='utf-8',
base_uri='/',
) |
def generate_xhtml(path, dirs, files):
"""Return a XHTML document listing the directories and files."""
# Prepare the path to display.
if path != '/':
dirs.insert(0, '..')
if not path.endswith('/'):
path += '/'
def itemize(item):
return '<a href="%s">%s</a>' % (item, path+item)
dirs = [d + '/' for d in dirs]
return """
<html>
<body>
<h1>%s</h1>
<pre>%s\n%s</pre>
</body>
</html>
""" % (path, '\n'.join(itemize(dir) for dir in dirs), '\n'.join(itemize(file) for file in files)) | Return a XHTML document listing the directories and files. | Below is the the instruction that describes the task:
### Input:
Return a XHTML document listing the directories and files.
### Response:
def generate_xhtml(path, dirs, files):
"""Return a XHTML document listing the directories and files."""
# Prepare the path to display.
if path != '/':
dirs.insert(0, '..')
if not path.endswith('/'):
path += '/'
def itemize(item):
return '<a href="%s">%s</a>' % (item, path+item)
dirs = [d + '/' for d in dirs]
return """
<html>
<body>
<h1>%s</h1>
<pre>%s\n%s</pre>
</body>
</html>
""" % (path, '\n'.join(itemize(dir) for dir in dirs), '\n'.join(itemize(file) for file in files)) |
def metadata(self, name):
"""Return value and metadata associated with the named value
Parameters
----------
name : str
name to retrieve. If the name contains '.'s it will be retrieved recursively
Raises
------
KeyError
if name is not defined in the ConfigTree
"""
if name in self._children:
return self._children[name].metadata()
else:
head, _, tail = name.partition('.')
if head in self._children:
return self._children[head].metadata(key=tail)
else:
raise KeyError(name) | Return value and metadata associated with the named value
Parameters
----------
name : str
name to retrieve. If the name contains '.'s it will be retrieved recursively
Raises
------
KeyError
if name is not defined in the ConfigTree | Below is the instruction that describes the task:
### Input:
Return value and metadata associated with the named value
Parameters
----------
name : str
name to retrieve. If the name contains '.'s it will be retrieved recursively
Raises
------
KeyError
if name is not defined in the ConfigTree
### Response:
def metadata(self, name):
"""Return value and metadata associated with the named value
Parameters
----------
name : str
name to retrieve. If the name contains '.'s it will be retrieved recursively
Raises
------
KeyError
if name is not defined in the ConfigTree
"""
if name in self._children:
return self._children[name].metadata()
else:
head, _, tail = name.partition('.')
if head in self._children:
return self._children[head].metadata(key=tail)
else:
raise KeyError(name) |
def as_proximal_lang_operator(op, norm_bound=None):
"""Wrap ``op`` as a ``proximal.BlackBox``.
This is intended to be used with the `ProxImaL language solvers.
<https://github.com/comp-imaging/proximal>`_
For documentation on the proximal language (ProxImaL) see [Hei+2016].
Parameters
----------
op : `Operator`
Linear operator to be wrapped. Its domain and range must implement
``shape``, and elements in these need to implement ``asarray``.
norm_bound : float, optional
An upper bound on the spectral norm of the operator. Note that this is
the norm as defined by ProxImaL, and hence use the unweighted spaces.
Returns
-------
``proximal.BlackBox`` : proximal_lang_operator
The wrapped operator.
Notes
-----
If the data representation of ``op``'s domain and range is of type
`NumpyTensorSpace` this incurs no significant overhead. If the data
space is implemented with CUDA or some other non-local representation,
the overhead is significant.
References
----------
[Hei+2016] Heide, F et al. *ProxImaL: Efficient Image Optimization using
Proximal Algorithms*. ACM Transactions on Graphics (TOG), 2016.
"""
# TODO: use out parameter once "as editable array" is added
def forward(inp, out):
out[:] = op(inp).asarray()
def adjoint(inp, out):
out[:] = op.adjoint(inp).asarray()
import proximal
return proximal.LinOpFactory(input_shape=op.domain.shape,
output_shape=op.range.shape,
forward=forward,
adjoint=adjoint,
norm_bound=norm_bound) | Wrap ``op`` as a ``proximal.BlackBox``.
This is intended to be used with the `ProxImaL language solvers.
<https://github.com/comp-imaging/proximal>`_
For documentation on the proximal language (ProxImaL) see [Hei+2016].
Parameters
----------
op : `Operator`
Linear operator to be wrapped. Its domain and range must implement
``shape``, and elements in these need to implement ``asarray``.
norm_bound : float, optional
An upper bound on the spectral norm of the operator. Note that this is
the norm as defined by ProxImaL, and hence use the unweighted spaces.
Returns
-------
``proximal.BlackBox`` : proximal_lang_operator
The wrapped operator.
Notes
-----
If the data representation of ``op``'s domain and range is of type
`NumpyTensorSpace` this incurs no significant overhead. If the data
space is implemented with CUDA or some other non-local representation,
the overhead is significant.
References
----------
[Hei+2016] Heide, F et al. *ProxImaL: Efficient Image Optimization using
Proximal Algorithms*. ACM Transactions on Graphics (TOG), 2016. | Below is the instruction that describes the task:
### Input:
Wrap ``op`` as a ``proximal.BlackBox``.
This is intended to be used with the `ProxImaL language solvers.
<https://github.com/comp-imaging/proximal>`_
For documentation on the proximal language (ProxImaL) see [Hei+2016].
Parameters
----------
op : `Operator`
Linear operator to be wrapped. Its domain and range must implement
``shape``, and elements in these need to implement ``asarray``.
norm_bound : float, optional
An upper bound on the spectral norm of the operator. Note that this is
the norm as defined by ProxImaL, and hence use the unweighted spaces.
Returns
-------
``proximal.BlackBox`` : proximal_lang_operator
The wrapped operator.
Notes
-----
If the data representation of ``op``'s domain and range is of type
`NumpyTensorSpace` this incurs no significant overhead. If the data
space is implemented with CUDA or some other non-local representation,
the overhead is significant.
References
----------
[Hei+2016] Heide, F et al. *ProxImaL: Efficient Image Optimization using
Proximal Algorithms*. ACM Transactions on Graphics (TOG), 2016.
### Response:
def as_proximal_lang_operator(op, norm_bound=None):
"""Wrap ``op`` as a ``proximal.BlackBox``.
This is intended to be used with the `ProxImaL language solvers.
<https://github.com/comp-imaging/proximal>`_
For documentation on the proximal language (ProxImaL) see [Hei+2016].
Parameters
----------
op : `Operator`
Linear operator to be wrapped. Its domain and range must implement
``shape``, and elements in these need to implement ``asarray``.
norm_bound : float, optional
An upper bound on the spectral norm of the operator. Note that this is
the norm as defined by ProxImaL, and hence use the unweighted spaces.
Returns
-------
``proximal.BlackBox`` : proximal_lang_operator
The wrapped operator.
Notes
-----
If the data representation of ``op``'s domain and range is of type
`NumpyTensorSpace` this incurs no significant overhead. If the data
space is implemented with CUDA or some other non-local representation,
the overhead is significant.
References
----------
[Hei+2016] Heide, F et al. *ProxImaL: Efficient Image Optimization using
Proximal Algorithms*. ACM Transactions on Graphics (TOG), 2016.
"""
# TODO: use out parameter once "as editable array" is added
def forward(inp, out):
out[:] = op(inp).asarray()
def adjoint(inp, out):
out[:] = op.adjoint(inp).asarray()
import proximal
return proximal.LinOpFactory(input_shape=op.domain.shape,
output_shape=op.range.shape,
forward=forward,
adjoint=adjoint,
norm_bound=norm_bound) |
def get_config(network, data_shape, **kwargs):
"""Configuration factory for various networks
Parameters
----------
network : str
base network name, such as vgg_reduced, inceptionv3, resnet...
data_shape : int
input data dimension
kwargs : dict
extra arguments
"""
if network == 'vgg16_reduced':
if data_shape >= 448:
from_layers = ['relu4_3', 'relu7', '', '', '', '', '']
num_filters = [512, -1, 512, 256, 256, 256, 256]
strides = [-1, -1, 2, 2, 2, 2, 1]
pads = [-1, -1, 1, 1, 1, 1, 1]
sizes = [[.07, .1025], [.15,.2121], [.3, .3674], [.45, .5196], [.6, .6708], \
[.75, .8216], [.9, .9721]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5,3,1./3], [1,2,.5], [1,2,.5]]
normalizations = [20, -1, -1, -1, -1, -1, -1]
steps = [] if data_shape != 512 else [x / 512.0 for x in
[8, 16, 32, 64, 128, 256, 512]]
else:
from_layers = ['relu4_3', 'relu7', '', '', '', '']
num_filters = [512, -1, 512, 256, 256, 256]
strides = [-1, -1, 2, 2, 1, 1]
pads = [-1, -1, 1, 1, 0, 0]
sizes = [[.1, .141], [.2,.272], [.37, .447], [.54, .619], [.71, .79], [.88, .961]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5], [1,2,.5]]
normalizations = [20, -1, -1, -1, -1, -1]
steps = [] if data_shape != 300 else [x / 300.0 for x in [8, 16, 32, 64, 100, 300]]
if not (data_shape == 300 or data_shape == 512):
logging.warn('data_shape %d was not tested, use with caucious.' % data_shape)
return locals()
elif network == 'inceptionv3':
from_layers = ['ch_concat_mixed_7_chconcat', 'ch_concat_mixed_10_chconcat', '', '', '', '']
num_filters = [-1, -1, 512, 256, 256, 128]
strides = [-1, -1, 2, 2, 2, 2]
pads = [-1, -1, 1, 1, 1, 1]
sizes = [[.1, .141], [.2,.272], [.37, .447], [.54, .619], [.71, .79], [.88, .961]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5], [1,2,.5]]
normalizations = -1
steps = []
return locals()
elif network == 'resnet50':
num_layers = 50
image_shape = '3,224,224' # resnet require it as shape check
network = 'resnet'
from_layers = ['_plus12', '_plus15', '', '', '', '']
num_filters = [-1, -1, 512, 256, 256, 128]
strides = [-1, -1, 2, 2, 2, 2]
pads = [-1, -1, 1, 1, 1, 1]
sizes = [[.1, .141], [.2,.272], [.37, .447], [.54, .619], [.71, .79], [.88, .961]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5], [1,2,.5]]
normalizations = -1
steps = []
return locals()
elif network == 'resnet101':
num_layers = 101
image_shape = '3,224,224'
network = 'resnet'
from_layers = ['_plus29', '_plus32', '', '', '', '']
num_filters = [-1, -1, 512, 256, 256, 128]
strides = [-1, -1, 2, 2, 2, 2]
pads = [-1, -1, 1, 1, 1, 1]
sizes = [[.1, .141], [.2,.272], [.37, .447], [.54, .619], [.71, .79], [.88, .961]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5], [1,2,.5]]
normalizations = -1
steps = []
return locals()
else:
msg = 'No configuration found for %s with data_shape %d' % (network, data_shape)
raise NotImplementedError(msg) | Configuration factory for various networks
Parameters
----------
network : str
base network name, such as vgg_reduced, inceptionv3, resnet...
data_shape : int
input data dimension
kwargs : dict
extra arguments | Below is the instruction that describes the task:
### Input:
Configuration factory for various networks
Parameters
----------
network : str
base network name, such as vgg_reduced, inceptionv3, resnet...
data_shape : int
input data dimension
kwargs : dict
extra arguments
### Response:
def get_config(network, data_shape, **kwargs):
"""Configuration factory for various networks
Parameters
----------
network : str
base network name, such as vgg_reduced, inceptionv3, resnet...
data_shape : int
input data dimension
kwargs : dict
extra arguments
"""
if network == 'vgg16_reduced':
if data_shape >= 448:
from_layers = ['relu4_3', 'relu7', '', '', '', '', '']
num_filters = [512, -1, 512, 256, 256, 256, 256]
strides = [-1, -1, 2, 2, 2, 2, 1]
pads = [-1, -1, 1, 1, 1, 1, 1]
sizes = [[.07, .1025], [.15,.2121], [.3, .3674], [.45, .5196], [.6, .6708], \
[.75, .8216], [.9, .9721]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5,3,1./3], [1,2,.5], [1,2,.5]]
normalizations = [20, -1, -1, -1, -1, -1, -1]
steps = [] if data_shape != 512 else [x / 512.0 for x in
[8, 16, 32, 64, 128, 256, 512]]
else:
from_layers = ['relu4_3', 'relu7', '', '', '', '']
num_filters = [512, -1, 512, 256, 256, 256]
strides = [-1, -1, 2, 2, 1, 1]
pads = [-1, -1, 1, 1, 0, 0]
sizes = [[.1, .141], [.2,.272], [.37, .447], [.54, .619], [.71, .79], [.88, .961]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5], [1,2,.5]]
normalizations = [20, -1, -1, -1, -1, -1]
steps = [] if data_shape != 300 else [x / 300.0 for x in [8, 16, 32, 64, 100, 300]]
if not (data_shape == 300 or data_shape == 512):
logging.warn('data_shape %d was not tested, use with caucious.' % data_shape)
return locals()
elif network == 'inceptionv3':
from_layers = ['ch_concat_mixed_7_chconcat', 'ch_concat_mixed_10_chconcat', '', '', '', '']
num_filters = [-1, -1, 512, 256, 256, 128]
strides = [-1, -1, 2, 2, 2, 2]
pads = [-1, -1, 1, 1, 1, 1]
sizes = [[.1, .141], [.2,.272], [.37, .447], [.54, .619], [.71, .79], [.88, .961]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5], [1,2,.5]]
normalizations = -1
steps = []
return locals()
elif network == 'resnet50':
num_layers = 50
image_shape = '3,224,224' # resnet require it as shape check
network = 'resnet'
from_layers = ['_plus12', '_plus15', '', '', '', '']
num_filters = [-1, -1, 512, 256, 256, 128]
strides = [-1, -1, 2, 2, 2, 2]
pads = [-1, -1, 1, 1, 1, 1]
sizes = [[.1, .141], [.2,.272], [.37, .447], [.54, .619], [.71, .79], [.88, .961]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5], [1,2,.5]]
normalizations = -1
steps = []
return locals()
elif network == 'resnet101':
num_layers = 101
image_shape = '3,224,224'
network = 'resnet'
from_layers = ['_plus29', '_plus32', '', '', '', '']
num_filters = [-1, -1, 512, 256, 256, 128]
strides = [-1, -1, 2, 2, 2, 2]
pads = [-1, -1, 1, 1, 1, 1]
sizes = [[.1, .141], [.2,.272], [.37, .447], [.54, .619], [.71, .79], [.88, .961]]
ratios = [[1,2,.5], [1,2,.5,3,1./3], [1,2,.5,3,1./3], [1,2,.5,3,1./3], \
[1,2,.5], [1,2,.5]]
normalizations = -1
steps = []
return locals()
else:
msg = 'No configuration found for %s with data_shape %d' % (network, data_shape)
raise NotImplementedError(msg) |
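A short usage sketch for get_config; because the function returns locals(), the result is a plain dict keyed by the variable names assigned in the selected branch.
cfg = get_config('vgg16_reduced', 300)
print(cfg['from_layers'])   # ['relu4_3', 'relu7', '', '', '', '']
print(cfg['num_filters'])   # [512, -1, 512, 256, 256, 256]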
def load_python_bindings(python_input):
"""
Custom key bindings.
"""
bindings = KeyBindings()
sidebar_visible = Condition(lambda: python_input.show_sidebar)
handle = bindings.add
@handle('c-l')
def _(event):
"""
Clear whole screen and render again -- also when the sidebar is visible.
"""
event.app.renderer.clear()
@handle('c-z')
def _(event):
"""
Suspend.
"""
if python_input.enable_system_bindings:
event.app.suspend_to_background()
@handle('f2')
def _(event):
"""
Show/hide sidebar.
"""
python_input.show_sidebar = not python_input.show_sidebar
if python_input.show_sidebar:
event.app.layout.focus(python_input.ptpython_layout.sidebar)
else:
event.app.layout.focus_last()
@handle('f3')
def _(event):
"""
Select from the history.
"""
python_input.enter_history()
@handle('f4')
def _(event):
"""
Toggle between Vi and Emacs mode.
"""
python_input.vi_mode = not python_input.vi_mode
@handle('f6')
def _(event):
"""
Enable/Disable paste mode.
"""
python_input.paste_mode = not python_input.paste_mode
@handle('tab', filter= ~sidebar_visible & ~has_selection & tab_should_insert_whitespace)
def _(event):
"""
When tab should insert whitespace, do that instead of completion.
"""
event.app.current_buffer.insert_text(' ')
@Condition
def is_multiline():
return document_is_multiline_python(python_input.default_buffer.document)
@handle('enter', filter= ~sidebar_visible & ~has_selection &
(vi_insert_mode | emacs_insert_mode) &
has_focus(DEFAULT_BUFFER) & ~is_multiline)
@handle(Keys.Escape, Keys.Enter, filter= ~sidebar_visible & emacs_mode)
def _(event):
"""
Accept input (for single line input).
"""
b = event.current_buffer
if b.validate():
# When the cursor is at the end, and we have an empty line:
# drop the empty lines, but return the value.
b.document = Document(
text=b.text.rstrip(),
cursor_position=len(b.text.rstrip()))
b.validate_and_handle()
@handle('enter', filter= ~sidebar_visible & ~has_selection &
(vi_insert_mode | emacs_insert_mode) &
has_focus(DEFAULT_BUFFER) & is_multiline)
def _(event):
"""
Behaviour of the Enter key.
Auto indent after newline/Enter.
(When not in Vi navigaton mode, and when multiline is enabled.)
"""
b = event.current_buffer
empty_lines_required = python_input.accept_input_on_enter or 10000
def at_the_end(b):
""" we consider the cursor at the end when there is no text after
the cursor, or only whitespace. """
text = b.document.text_after_cursor
return text == '' or (text.isspace() and not '\n' in text)
if python_input.paste_mode:
# In paste mode, always insert text.
b.insert_text('\n')
elif at_the_end(b) and b.document.text.replace(' ', '').endswith(
'\n' * (empty_lines_required - 1)):
# When the cursor is at the end, and we have an empty line:
# drop the empty lines, but return the value.
if b.validate():
b.document = Document(
text=b.text.rstrip(),
cursor_position=len(b.text.rstrip()))
b.validate_and_handle()
else:
auto_newline(b)
@handle('c-d', filter=~sidebar_visible &
has_focus(python_input.default_buffer) &
Condition(lambda:
# The current buffer is empty.
not get_app().current_buffer.text))
def _(event):
"""
Override Control-D exit, to ask for confirmation.
"""
if python_input.confirm_exit:
python_input.show_exit_confirmation = True
else:
event.app.exit(exception=EOFError)
@handle('c-c', filter=has_focus(python_input.default_buffer))
def _(event):
" Abort when Control-C has been pressed. "
event.app.exit(exception=KeyboardInterrupt, style='class:aborting')
return bindings | Custom key bindings. | Below is the instruction that describes the task:
### Input:
Custom key bindings.
### Response:
def load_python_bindings(python_input):
"""
Custom key bindings.
"""
bindings = KeyBindings()
sidebar_visible = Condition(lambda: python_input.show_sidebar)
handle = bindings.add
@handle('c-l')
def _(event):
"""
Clear whole screen and render again -- also when the sidebar is visible.
"""
event.app.renderer.clear()
@handle('c-z')
def _(event):
"""
Suspend.
"""
if python_input.enable_system_bindings:
event.app.suspend_to_background()
@handle('f2')
def _(event):
"""
Show/hide sidebar.
"""
python_input.show_sidebar = not python_input.show_sidebar
if python_input.show_sidebar:
event.app.layout.focus(python_input.ptpython_layout.sidebar)
else:
event.app.layout.focus_last()
@handle('f3')
def _(event):
"""
Select from the history.
"""
python_input.enter_history()
@handle('f4')
def _(event):
"""
Toggle between Vi and Emacs mode.
"""
python_input.vi_mode = not python_input.vi_mode
@handle('f6')
def _(event):
"""
Enable/Disable paste mode.
"""
python_input.paste_mode = not python_input.paste_mode
@handle('tab', filter= ~sidebar_visible & ~has_selection & tab_should_insert_whitespace)
def _(event):
"""
When tab should insert whitespace, do that instead of completion.
"""
event.app.current_buffer.insert_text(' ')
@Condition
def is_multiline():
return document_is_multiline_python(python_input.default_buffer.document)
@handle('enter', filter= ~sidebar_visible & ~has_selection &
(vi_insert_mode | emacs_insert_mode) &
has_focus(DEFAULT_BUFFER) & ~is_multiline)
@handle(Keys.Escape, Keys.Enter, filter= ~sidebar_visible & emacs_mode)
def _(event):
"""
Accept input (for single line input).
"""
b = event.current_buffer
if b.validate():
# When the cursor is at the end, and we have an empty line:
# drop the empty lines, but return the value.
b.document = Document(
text=b.text.rstrip(),
cursor_position=len(b.text.rstrip()))
b.validate_and_handle()
@handle('enter', filter= ~sidebar_visible & ~has_selection &
(vi_insert_mode | emacs_insert_mode) &
has_focus(DEFAULT_BUFFER) & is_multiline)
def _(event):
"""
Behaviour of the Enter key.
Auto indent after newline/Enter.
(When not in Vi navigaton mode, and when multiline is enabled.)
"""
b = event.current_buffer
empty_lines_required = python_input.accept_input_on_enter or 10000
def at_the_end(b):
""" we consider the cursor at the end when there is no text after
the cursor, or only whitespace. """
text = b.document.text_after_cursor
return text == '' or (text.isspace() and not '\n' in text)
if python_input.paste_mode:
# In paste mode, always insert text.
b.insert_text('\n')
elif at_the_end(b) and b.document.text.replace(' ', '').endswith(
'\n' * (empty_lines_required - 1)):
# When the cursor is at the end, and we have an empty line:
# drop the empty lines, but return the value.
if b.validate():
b.document = Document(
text=b.text.rstrip(),
cursor_position=len(b.text.rstrip()))
b.validate_and_handle()
else:
auto_newline(b)
@handle('c-d', filter=~sidebar_visible &
has_focus(python_input.default_buffer) &
Condition(lambda:
# The current buffer is empty.
not get_app().current_buffer.text))
def _(event):
"""
Override Control-D exit, to ask for confirmation.
"""
if python_input.confirm_exit:
python_input.show_exit_confirmation = True
else:
event.app.exit(exception=EOFError)
@handle('c-c', filter=has_focus(python_input.default_buffer))
def _(event):
" Abort when Control-C has been pressed. "
event.app.exit(exception=KeyboardInterrupt, style='class:aborting')
return bindings |
def create_host(factories, value):
"""Use the factories to create a host object.
:param factories: a list of functions that return host objects
(Hostname, IPv4Address, IPv6Address) for valid arguments
:param value: a value to be passed as argument to factories
:returns: an object representing the value, created by one of
the factories.
It is a return value of the first factory that could create it for
the given argument.
:raises InvalidHostError: if the value is not a valid input for any
factory used by this function
"""
data = [value]
for func in factories:
try:
return func(value)
except InvalidHostError as ex:
data.append(str(ex))
msg_tpl = (
"Failed to create a host object for '{}', raising the following errors"
" in the process:" + "\n".join(data)
)
raise InvalidHostError(msg_tpl.format(value)) | Use the factories to create a host object.
:param factories: a list of functions that return host objects
(Hostname, IPv4Address, IPv6Address) for valid arguments
:param value: a value to be passed as argument to factories
:returns: an object representing the value, created by one of
the factories.
It is a return value of the first factory that could create it for
the given argument.
:raises InvalidHostError: if the value is not a valid input for any
factory used by this function | Below is the instruction that describes the task:
### Input:
Use the factories to create a host object.
:param factories: a list of functions that return host objects
(Hostname, IPv4Address, IPv6Address) for valid arguments
:param value: a value to be passed as argument to factories
:returns: an object representing the value, created by one of
the factories.
It is a return value of the first factory that could create it for
the given argument.
:raises InvalidHostError: if the value is not a valid input for any
factory used by this function
### Response:
def create_host(factories, value):
"""Use the factories to create a host object.
:param factories: a list of functions that return host objects
(Hostname, IPv4Address, IPv6Address) for valid arguments
:param value: a value to be passed as argument to factories
:returns: an object representing the value, created by one of
the factories.
It is a return value of the first factory that could create it for
the given argument.
:raises InvalidHostError: if the value is not a valid input for any
factory used by this function
"""
data = [value]
for func in factories:
try:
return func(value)
except InvalidHostError as ex:
data.append(str(ex))
msg_tpl = (
"Failed to create a host object for '{}', raising the following errors"
" in the process:" + "\n".join(data)
)
raise InvalidHostError(msg_tpl.format(value)) |
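A hedged usage sketch for create_host; Hostname, IPv4Address and IPv6Address are the factory callables named in the docstring and are assumed to be importable from the same package.
factories = [IPv4Address, IPv6Address, Hostname]   # order decides which factory is tried first
addr = create_host(factories, '203.0.113.7')       # satisfied by IPv4Address
host = create_host(factories, 'example.com')       # falls through to Hostname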
def get_sorted_series_files(self, startpath="", series_number=None, return_files_with_info=False,
sort_keys="SliceLocation", return_files=True, remove_doubled_slice_locations=True):
"""
Function returns sorted list of dicom files. File paths are organized
by SeriesUID, StudyUID and FrameUID
:param startpath: path prefix. E.g. "~/data"
:param series_number: ID of series used for filtering the data
:param return_files_with_info: return more complex information about sorted files
:param return_files: return simple list of sorted files
:type sort_keys: One key or list of keys used for sorting method by the order of keys.
"""
dcmdir = self.files_with_info[:]
# select sublist with SeriesNumber
if series_number is not None:
dcmdir = [
line for line in dcmdir if line['SeriesNumber'] == series_number
]
dcmdir = sort_list_of_dicts(dcmdir, keys=sort_keys)
logger.debug('SeriesNumber: ' + str(series_number))
if remove_doubled_slice_locations:
dcmdir = self._remove_doubled_slice_locations(dcmdir)
filelist = []
for onefile in dcmdir:
filelist.append(os.path.join(startpath,
self.dirpath, onefile['filename']))
# head, tail = os.path.split(onefile['filename'])
retval = []
if return_files:
retval.append(filelist)
if return_files_with_info:
retval.append(dcmdir)
if len(retval) == 0:
retval = None
elif len(retval) == 1:
retval = retval[0]
else:
retval = tuple(retval)
return retval | Function returns sorted list of dicom files. File paths are organized
by SeriesUID, StudyUID and FrameUID
:param startpath: path prefix. E.g. "~/data"
:param series_number: ID of series used for filtering the data
:param return_files_with_info: return more complex information about sorted files
:param return_files: return simple list of sorted files
:type sort_keys: One key or list of keys used for sorting method by the order of keys. | Below is the instruction that describes the task:
### Input:
Function returns sorted list of dicom files. File paths are organized
by SeriesUID, StudyUID and FrameUID
:param startpath: path prefix. E.g. "~/data"
:param series_number: ID of series used for filtering the data
:param return_files_with_info: return more complex information about sorted files
:param return_files: return simple list of sorted files
:type sort_keys: One key or list of keys used for sorting method by the order of keys.
### Response:
def get_sorted_series_files(self, startpath="", series_number=None, return_files_with_info=False,
sort_keys="SliceLocation", return_files=True, remove_doubled_slice_locations=True):
"""
Function returns sorted list of dicom files. File paths are organized
by SeriesUID, StudyUID and FrameUID
:param startpath: path prefix. E.g. "~/data"
:param series_number: ID of series used for filtering the data
:param return_files_with_info: return more complex information about sorted files
:param return_files: return simple list of sorted files
:type sort_keys: One key or list of keys used for sorting method by the order of keys.
"""
dcmdir = self.files_with_info[:]
# select sublist with SeriesNumber
if series_number is not None:
dcmdir = [
line for line in dcmdir if line['SeriesNumber'] == series_number
]
dcmdir = sort_list_of_dicts(dcmdir, keys=sort_keys)
logger.debug('SeriesNumber: ' + str(series_number))
if remove_doubled_slice_locations:
dcmdir = self._remove_doubled_slice_locations(dcmdir)
filelist = []
for onefile in dcmdir:
filelist.append(os.path.join(startpath,
self.dirpath, onefile['filename']))
# head, tail = os.path.split(onefile['filename'])
retval = []
if return_files:
retval.append(filelist)
if return_files_with_info:
retval.append(dcmdir)
if len(retval) == 0:
retval = None
elif len(retval) == 1:
retval = retval[0]
else:
retval = tuple(retval)
return retval |
def text(cls, text, *, resize=None, single_use=None, selective=None):
"""
Creates a new button with the given text.
Args:
resize (`bool`):
If present, the entire keyboard will be reconfigured to
be resized and be smaller if there are not many buttons.
single_use (`bool`):
If present, the entire keyboard will be reconfigured to
be usable only once before it hides itself.
selective (`bool`):
If present, the entire keyboard will be reconfigured to
be "selective". The keyboard will be shown only to specific
users. It will target users that are @mentioned in the text
of the message or to the sender of the message you reply to.
"""
return cls(types.KeyboardButton(text),
resize=resize, single_use=single_use, selective=selective) | Creates a new button with the given text.
Args:
resize (`bool`):
If present, the entire keyboard will be reconfigured to
be resized and be smaller if there are not many buttons.
single_use (`bool`):
If present, the entire keyboard will be reconfigured to
be usable only once before it hides itself.
selective (`bool`):
If present, the entire keyboard will be reconfigured to
be "selective". The keyboard will be shown only to specific
users. It will target users that are @mentioned in the text
of the message or to the sender of the message you reply to. | Below is the instruction that describes the task:
### Input:
Creates a new button with the given text.
Args:
resize (`bool`):
If present, the entire keyboard will be reconfigured to
be resized and be smaller if there are not many buttons.
single_use (`bool`):
If present, the entire keyboard will be reconfigured to
be usable only once before it hides itself.
selective (`bool`):
If present, the entire keyboard will be reconfigured to
be "selective". The keyboard will be shown only to specific
users. It will target users that are @mentioned in the text
of the message or to the sender of the message you reply to.
### Response:
def text(cls, text, *, resize=None, single_use=None, selective=None):
"""
Creates a new button with the given text.
Args:
resize (`bool`):
If present, the entire keyboard will be reconfigured to
be resized and be smaller if there are not many buttons.
single_use (`bool`):
If present, the entire keyboard will be reconfigured to
be usable only once before it hides itself.
selective (`bool`):
If present, the entire keyboard will be reconfigured to
be "selective". The keyboard will be shown only to specific
users. It will target users that are @mentioned in the text
of the message or to the sender of the message you reply to.
"""
return cls(types.KeyboardButton(text),
resize=resize, single_use=single_use, selective=selective) |
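A hedged usage sketch: this classmethod looks like Telethon's custom Button helper, in which case a reply keyboard is usually built as nested lists of buttons and handed to send_message (the client and chat objects below are assumptions).
buttons = [[Button.text('Yes', resize=True, single_use=True)],
           [Button.text('No')]]
await client.send_message(chat, 'Proceed?', buttons=buttons)   # inside an async handler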
def uniq(args):
"""
%prog uniq gffile cdsfasta
Remove overlapping gene models. Similar to formats.gff.uniq(), overlapping
'piles' are processed, one by one.
Here, we use a different algorithm, that retains the best non-overlapping
subset witin each pile, rather than single best model. Scoring function is
also different, rather than based on score or span, we optimize for the
subset that show the best combined score. Score is defined by:
score = (1 - AED) * length
"""
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gffile, cdsfasta = args
gff = Gff(gffile)
sizes = Sizes(cdsfasta).mapping
gene_register = {}
for g in gff:
if g.type != "mRNA":
continue
aed = float(g.attributes["_AED"][0])
gene_register[g.parent] = (1 - aed) * sizes[g.accn]
allgenes = import_feats(gffile)
g = get_piles(allgenes)
bestids = set()
for group in g:
ranges = [to_range(x, score=gene_register[x.accn], id=x.accn) \
for x in group]
selected_chain, score = range_chain(ranges)
bestids |= set(x.id for x in selected_chain)
removed = set(x.accn for x in allgenes) - bestids
fw = open("removed.ids", "w")
print("\n".join(sorted(removed)), file=fw)
fw.close()
populate_children(opts.outfile, bestids, gffile, "gene") | %prog uniq gffile cdsfasta
Remove overlapping gene models. Similar to formats.gff.uniq(), overlapping
'piles' are processed, one by one.
Here, we use a different algorithm, that retains the best non-overlapping
subset witin each pile, rather than single best model. Scoring function is
also different, rather than based on score or span, we optimize for the
subset that show the best combined score. Score is defined by:
score = (1 - AED) * length | Below is the instruction that describes the task:
### Input:
%prog uniq gffile cdsfasta
Remove overlapping gene models. Similar to formats.gff.uniq(), overlapping
'piles' are processed, one by one.
Here, we use a different algorithm, that retains the best non-overlapping
subset witin each pile, rather than single best model. Scoring function is
also different, rather than based on score or span, we optimize for the
subset that show the best combined score. Score is defined by:
score = (1 - AED) * length
### Response:
def uniq(args):
"""
%prog uniq gffile cdsfasta
Remove overlapping gene models. Similar to formats.gff.uniq(), overlapping
'piles' are processed, one by one.
Here, we use a different algorithm, that retains the best non-overlapping
subset witin each pile, rather than single best model. Scoring function is
also different, rather than based on score or span, we optimize for the
subset that show the best combined score. Score is defined by:
score = (1 - AED) * length
"""
p = OptionParser(uniq.__doc__)
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 2:
sys.exit(not p.print_help())
gffile, cdsfasta = args
gff = Gff(gffile)
sizes = Sizes(cdsfasta).mapping
gene_register = {}
for g in gff:
if g.type != "mRNA":
continue
aed = float(g.attributes["_AED"][0])
gene_register[g.parent] = (1 - aed) * sizes[g.accn]
allgenes = import_feats(gffile)
g = get_piles(allgenes)
bestids = set()
for group in g:
ranges = [to_range(x, score=gene_register[x.accn], id=x.accn) \
for x in group]
selected_chain, score = range_chain(ranges)
bestids |= set(x.id for x in selected_chain)
removed = set(x.accn for x in allgenes) - bestids
fw = open("removed.ids", "w")
print("\n".join(sorted(removed)), file=fw)
fw.close()
populate_children(opts.outfile, bestids, gffile, "gene") |
def export(self):
"""
Run the export process
Returns:
(list of str): The path of the exported disks.
"""
if self._with_threads:
utils.invoke_different_funcs_in_parallel(
*map(lambda mgr: mgr.export, self._get_export_mgr())
)
else:
for mgr in self._get_export_mgr():
mgr.export()
return self.exported_disks_paths() | Run the export process
Returns:
(list of str): The path of the exported disks. | Below is the instruction that describes the task:
### Input:
Run the export process
Returns:
(list of str): The path of the exported disks.
### Response:
def export(self):
"""
Run the export process
Returns:
(list of str): The path of the exported disks.
"""
if self._with_threads:
utils.invoke_different_funcs_in_parallel(
*map(lambda mgr: mgr.export, self._get_export_mgr())
)
else:
for mgr in self._get_export_mgr():
mgr.export()
return self.exported_disks_paths() |
def hpo_genes(context, hpo_term):
"""Export a list of genes based on hpo terms"""
LOG.info("Running scout export hpo_genes")
adapter = context.obj['adapter']
header = ["#Gene_id\tCount"]
if not hpo_term:
LOG.warning("Please use at least one hpo term")
context.abort()
for line in header:
click.echo(line)
for term in adapter.generate_hpo_gene_list(*hpo_term):
click.echo("{0}\t{1}".format(term[0], term[1])) | Export a list of genes based on hpo terms | Below is the the instruction that describes the task:
### Input:
Export a list of genes based on hpo terms
### Response:
def hpo_genes(context, hpo_term):
"""Export a list of genes based on hpo terms"""
LOG.info("Running scout export hpo_genes")
adapter = context.obj['adapter']
header = ["#Gene_id\tCount"]
if not hpo_term:
LOG.warning("Please use at least one hpo term")
context.abort()
for line in header:
click.echo(line)
for term in adapter.generate_hpo_gene_list(*hpo_term):
click.echo("{0}\t{1}".format(term[0], term[1])) |
def report_score_vs_rmsd_funnels(designs, path):
"""
Create a PDF showing the score vs. RMSD funnels for all the reasonable
designs. This method was copied from an old version of this script, and
does not currently work.
"""
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
print "Reporting score vs RMSD funnels..."
pdf = PdfPages(path)
designs = sorted(designs, key=lambda x: x.fancy_path)
for index, design in enumerate(designs):
plt.figure(figsize=(8.5, 11))
plt.suptitle(design.fancy_path)
axes = plt.subplot(2, 1, 1)
plot_score_vs_dist(axes, design, metric="Max COOH Distance")
axes = plt.subplot(2, 1, 2)
plot_score_vs_dist(axes, design, metric="Loop RMSD")
pdf.savefig(orientation='portrait')
plt.close()
pdf.close() | Create a PDF showing the score vs. RMSD funnels for all the reasonable
designs. This method was copied from an old version of this script, and
does not currently work. | Below is the instruction that describes the task:
### Input:
Create a PDF showing the score vs. RMSD funnels for all the reasonable
designs. This method was copied from an old version of this script, and
does not currently work.
### Response:
def report_score_vs_rmsd_funnels(designs, path):
"""
Create a PDF showing the score vs. RMSD funnels for all the reasonable
designs. This method was copied from an old version of this script, and
does not currently work.
"""
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
print "Reporting score vs RMSD funnels..."
pdf = PdfPages(path)
designs = sorted(designs, key=lambda x: x.fancy_path)
for index, design in enumerate(designs):
plt.figure(figsize=(8.5, 11))
plt.suptitle(design.fancy_path)
axes = plt.subplot(2, 1, 1)
plot_score_vs_dist(axes, design, metric="Max COOH Distance")
axes = plt.subplot(2, 1, 2)
plot_score_vs_dist(axes, design, metric="Loop RMSD")
pdf.savefig(orientation='portrait')
plt.close()
pdf.close() |
def run_python_module(modulename, args):
"""Run a python module, as though with ``python -m name args...``.
`modulename` is the name of the module, possibly a dot-separated name.
`args` is the argument array to present as sys.argv, including the first
element naming the module being executed.
"""
openfile = None
glo, loc = globals(), locals()
try:
try:
# Search for the module - inside its parent package, if any - using
# standard import mechanics.
if '.' in modulename:
packagename, name = rsplit1(modulename, '.')
package = __import__(packagename, glo, loc, ['__path__'])
searchpath = package.__path__
else:
packagename, name = None, modulename
searchpath = None # "top-level search" in imp.find_module()
openfile, pathname, _ = imp.find_module(name, searchpath)
# Complain if this is a magic non-file module.
if openfile is None and pathname is None:
raise NoSource(
"module does not live in a file: %r" % modulename
)
# If `modulename` is actually a package, not a mere module, then we
# pretend to be Python 2.7 and try running its __main__.py script.
if openfile is None:
packagename = modulename
name = '__main__'
package = __import__(packagename, glo, loc, ['__path__'])
searchpath = package.__path__
openfile, pathname, _ = imp.find_module(name, searchpath)
except ImportError:
_, err, _ = sys.exc_info()
raise NoSource(str(err))
finally:
if openfile:
openfile.close()
# Finally, hand the file off to run_python_file for execution.
pathname = os.path.abspath(pathname)
args[0] = pathname
run_python_file(pathname, args, package=packagename) | Run a python module, as though with ``python -m name args...``.
`modulename` is the name of the module, possibly a dot-separated name.
`args` is the argument array to present as sys.argv, including the first
element naming the module being executed. | Below is the instruction that describes the task:
### Input:
Run a python module, as though with ``python -m name args...``.
`modulename` is the name of the module, possibly a dot-separated name.
`args` is the argument array to present as sys.argv, including the first
element naming the module being executed.
### Response:
def run_python_module(modulename, args):
"""Run a python module, as though with ``python -m name args...``.
`modulename` is the name of the module, possibly a dot-separated name.
`args` is the argument array to present as sys.argv, including the first
element naming the module being executed.
"""
openfile = None
glo, loc = globals(), locals()
try:
try:
# Search for the module - inside its parent package, if any - using
# standard import mechanics.
if '.' in modulename:
packagename, name = rsplit1(modulename, '.')
package = __import__(packagename, glo, loc, ['__path__'])
searchpath = package.__path__
else:
packagename, name = None, modulename
searchpath = None # "top-level search" in imp.find_module()
openfile, pathname, _ = imp.find_module(name, searchpath)
# Complain if this is a magic non-file module.
if openfile is None and pathname is None:
raise NoSource(
"module does not live in a file: %r" % modulename
)
# If `modulename` is actually a package, not a mere module, then we
# pretend to be Python 2.7 and try running its __main__.py script.
if openfile is None:
packagename = modulename
name = '__main__'
package = __import__(packagename, glo, loc, ['__path__'])
searchpath = package.__path__
openfile, pathname, _ = imp.find_module(name, searchpath)
except ImportError:
_, err, _ = sys.exc_info()
raise NoSource(str(err))
finally:
if openfile:
openfile.close()
# Finally, hand the file off to run_python_file for execution.
pathname = os.path.abspath(pathname)
args[0] = pathname
run_python_file(pathname, args, package=packagename) |
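A minimal usage sketch for run_python_module; args[0] is conventionally the program name and is overwritten with the resolved file path, so the call below roughly mirrors python -m json.tool data.json.
run_python_module('json.tool', ['json.tool', 'data.json'])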
def predict(self, X):
"""
Assign classes to test data.
Parameters
----------
X : array
Test data, of dimension N times d (rows are examples, columns
are data dimensions)
Returns
-------
y_predicted : array
A vector of length N containing assigned classes. If no inlier
classes were specified during training, then 0 denotes an inlier
and 1 denotes an outlier. If multiple inlier classes were
specified, then each element of y_predicted is either on of
those inlier classes, or an outlier class (denoted by the
maximum inlier class ID plus 1).
"""
predictions_proba = self.predict_proba(X)
predictions = []
allclasses = copy.copy(self.classes)
allclasses.append('anomaly')
for i in range(X.shape[0]):
predictions.append(allclasses[predictions_proba[i, :].argmax()])
return predictions | Assign classes to test data.
Parameters
----------
X : array
Test data, of dimension N times d (rows are examples, columns
are data dimensions)
Returns
-------
y_predicted : array
A vector of length N containing assigned classes. If no inlier
classes were specified during training, then 0 denotes an inlier
and 1 denotes an outlier. If multiple inlier classes were
specified, then each element of y_predicted is either on of
those inlier classes, or an outlier class (denoted by the
maximum inlier class ID plus 1). | Below is the instruction that describes the task:
### Input:
Assign classes to test data.
Parameters
----------
X : array
Test data, of dimension N times d (rows are examples, columns
are data dimensions)
Returns
-------
y_predicted : array
A vector of length N containing assigned classes. If no inlier
classes were specified during training, then 0 denotes an inlier
and 1 denotes an outlier. If multiple inlier classes were
specified, then each element of y_predicted is either on of
those inlier classes, or an outlier class (denoted by the
maximum inlier class ID plus 1).
### Response:
def predict(self, X):
"""
Assign classes to test data.
Parameters
----------
X : array
Test data, of dimension N times d (rows are examples, columns
are data dimensions)
Returns
-------
y_predicted : array
A vector of length N containing assigned classes. If no inlier
classes were specified during training, then 0 denotes an inlier
and 1 denotes an outlier. If multiple inlier classes were
specified, then each element of y_predicted is either on of
those inlier classes, or an outlier class (denoted by the
maximum inlier class ID plus 1).
"""
predictions_proba = self.predict_proba(X)
predictions = []
allclasses = copy.copy(self.classes)
allclasses.append('anomaly')
for i in range(X.shape[0]):
predictions.append(allclasses[predictions_proba[i, :].argmax()])
return predictions |
def apply_Mueller(I,Q,U,V, gain_offsets, phase_offsets, chan_per_coarse, feedtype='l'):
'''
Returns calibrated Stokes parameters for an observation given an array
of differential gains and phase differences.
'''
#Find shape of data arrays and calculate number of coarse channels
shape = I.shape
ax0 = I.shape[0]
ax1 = I.shape[1]
nchans = I.shape[2]
ncoarse = nchans/chan_per_coarse
#Reshape data arrays to separate coarse channels
I = np.reshape(I,(ax0,ax1,ncoarse,chan_per_coarse))
Q = np.reshape(Q,(ax0,ax1,ncoarse,chan_per_coarse))
U = np.reshape(U,(ax0,ax1,ncoarse,chan_per_coarse))
V = np.reshape(V,(ax0,ax1,ncoarse,chan_per_coarse))
#Swap axes 2 and 3 to in order for broadcasting to work correctly
I = np.swapaxes(I,2,3)
Q = np.swapaxes(Q,2,3)
U = np.swapaxes(U,2,3)
V = np.swapaxes(V,2,3)
#Apply top left corner of electronics chain inverse Mueller matrix
a = 1/(1-gain_offsets**2)
if feedtype=='l':
Icorr = a*(I-gain_offsets*Q)
Qcorr = a*(-1*gain_offsets*I+Q)
I = None
Q = None
if feedtype=='c':
Icorr = a*(I-gain_offsets*V)
Vcorr = a*(-1*gain_offsets*I+V)
I = None
V = None
#Apply bottom right corner of electronics chain inverse Mueller matrix
if feedtype=='l':
Ucorr = U*np.cos(phase_offsets)-V*np.sin(phase_offsets)
Vcorr = U*np.sin(phase_offsets)+V*np.cos(phase_offsets)
U = None
V = None
if feedtype=='c':
Qcorr = Q*np.cos(phase_offsets)+U*np.sin(phase_offsets)
Ucorr = -1*Q*np.sin(phase_offsets)+U*np.cos(phase_offsets)
Q = None
U = None
#Reshape arrays to original shape
Icorr = np.reshape(np.swapaxes(Icorr,2,3),shape)
Qcorr = np.reshape(np.swapaxes(Qcorr,2,3),shape)
Ucorr = np.reshape(np.swapaxes(Ucorr,2,3),shape)
Vcorr = np.reshape(np.swapaxes(Vcorr,2,3),shape)
#Return corrected data arrays
return Icorr,Qcorr,Ucorr,Vcorr | Returns calibrated Stokes parameters for an observation given an array
of differential gains and phase differences. | Below is the instruction that describes the task:
### Input:
Returns calibrated Stokes parameters for an observation given an array
of differential gains and phase differences.
### Response:
def apply_Mueller(I,Q,U,V, gain_offsets, phase_offsets, chan_per_coarse, feedtype='l'):
'''
Returns calibrated Stokes parameters for an observation given an array
of differential gains and phase differences.
'''
#Find shape of data arrays and calculate number of coarse channels
shape = I.shape
ax0 = I.shape[0]
ax1 = I.shape[1]
nchans = I.shape[2]
ncoarse = nchans/chan_per_coarse
#Reshape data arrays to separate coarse channels
I = np.reshape(I,(ax0,ax1,ncoarse,chan_per_coarse))
Q = np.reshape(Q,(ax0,ax1,ncoarse,chan_per_coarse))
U = np.reshape(U,(ax0,ax1,ncoarse,chan_per_coarse))
V = np.reshape(V,(ax0,ax1,ncoarse,chan_per_coarse))
#Swap axes 2 and 3 to in order for broadcasting to work correctly
I = np.swapaxes(I,2,3)
Q = np.swapaxes(Q,2,3)
U = np.swapaxes(U,2,3)
V = np.swapaxes(V,2,3)
#Apply top left corner of electronics chain inverse Mueller matrix
a = 1/(1-gain_offsets**2)
if feedtype=='l':
Icorr = a*(I-gain_offsets*Q)
Qcorr = a*(-1*gain_offsets*I+Q)
I = None
Q = None
if feedtype=='c':
Icorr = a*(I-gain_offsets*V)
Vcorr = a*(-1*gain_offsets*I+V)
I = None
V = None
#Apply bottom right corner of electronics chain inverse Mueller matrix
if feedtype=='l':
Ucorr = U*np.cos(phase_offsets)-V*np.sin(phase_offsets)
Vcorr = U*np.sin(phase_offsets)+V*np.cos(phase_offsets)
U = None
V = None
if feedtype=='c':
Qcorr = Q*np.cos(phase_offsets)+U*np.sin(phase_offsets)
Ucorr = -1*Q*np.sin(phase_offsets)+U*np.cos(phase_offsets)
Q = None
U = None
#Reshape arrays to original shape
Icorr = np.reshape(np.swapaxes(Icorr,2,3),shape)
Qcorr = np.reshape(np.swapaxes(Qcorr,2,3),shape)
Ucorr = np.reshape(np.swapaxes(Ucorr,2,3),shape)
Vcorr = np.reshape(np.swapaxes(Vcorr,2,3),shape)
#Return corrected data arrays
return Icorr,Qcorr,Ucorr,Vcorr |
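A hedged shape sketch for apply_Mueller: the four Stokes arrays share one (time, pol, channel) shape, while the gain and phase offsets are supplied per coarse channel so they broadcast after the reshape/swapaxes steps above (the function itself relies on Python 2 integer division for nchans/chan_per_coarse).
import numpy as np
nchans, chan_per_coarse = 64, 16                 # 4 coarse channels
I = np.random.rand(32, 1, nchans)
Q, U, V = (np.zeros_like(I) for _ in range(3))
g = np.zeros(nchans // chan_per_coarse)          # differential gain per coarse channel
p = np.zeros(nchans // chan_per_coarse)          # phase offset per coarse channel (radians)
Icorr, Qcorr, Ucorr, Vcorr = apply_Mueller(I, Q, U, V, g, p, chan_per_coarse, feedtype='l')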
def yesno(question, default=None):
"""Asks a yes/no question
Args:
question: string **without** the question mark and without the options.
Example: 'Create links'
default: default option. Accepted values are 'Y', 'YES', 'N', 'NO' or lowercase versions of
these valus (this argument is case-insensitive)
Returns:
bool: True if user answered Yes, False otherwise
"""
if default is not None:
if isinstance(default, bool):
pass
else:
default_ = default.upper()
if default_ not in ('Y', 'YES', 'N', 'NO'):
raise RuntimeError("Invalid default value: '{}'".format(default))
default = default_ in ('Y', 'YES')
while True:
ans = input("{} ({}/{})? ".format(question, "Y" if default == True else "y",
"N" if default == False else "n")).upper()
if ans == "" and default is not None:
ret = default
break
elif ans in ("N", "NO"):
ret = False
break
elif ans in ("Y", "YES"):
ret = True
break
return ret | Asks a yes/no question
Args:
question: string **without** the question mark and without the options.
Example: 'Create links'
default: default option. Accepted values are 'Y', 'YES', 'N', 'NO' or lowercase versions of
these valus (this argument is case-insensitive)
Returns:
bool: True if user answered Yes, False otherwise | Below is the instruction that describes the task:
### Input:
Asks a yes/no question
Args:
question: string **without** the question mark and without the options.
Example: 'Create links'
default: default option. Accepted values are 'Y', 'YES', 'N', 'NO' or lowercase versions of
these valus (this argument is case-insensitive)
Returns:
bool: True if user answered Yes, False otherwise
### Response:
def yesno(question, default=None):
"""Asks a yes/no question
Args:
question: string **without** the question mark and without the options.
Example: 'Create links'
default: default option. Accepted values are 'Y', 'YES', 'N', 'NO' or lowercase versions of
these valus (this argument is case-insensitive)
Returns:
bool: True if user answered Yes, False otherwise
"""
if default is not None:
if isinstance(default, bool):
pass
else:
default_ = default.upper()
if default_ not in ('Y', 'YES', 'N', 'NO'):
raise RuntimeError("Invalid default value: '{}'".format(default))
default = default_ in ('Y', 'YES')
while True:
ans = input("{} ({}/{})? ".format(question, "Y" if default == True else "y",
"N" if default == False else "n")).upper()
if ans == "" and default is not None:
ret = default
break
elif ans in ("N", "NO"):
ret = False
break
elif ans in ("Y", "YES"):
ret = True
break
return ret |
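A usage sketch for yesno; the call blocks on input() until the user answers or accepts the default.
if yesno('Create links', default='y'):
    print('creating links...')
else:
    print('skipped')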
def copy_any(src, dst, only_missing=False): # pragma: no cover
"""Copy a file or a directory tree, deleting the destination before processing"""
if not only_missing:
remove_if_exist(dst)
if os.path.exists(src):
if os.path.isdir(src):
if not only_missing:
shutil.copytree(src, dst, symlinks=False, ignore=None)
else:
for dirpath, filepath in recwalk(src):
srcfile = os.path.join(dirpath, filepath)
relpath = os.path.relpath(srcfile, src)
dstfile = os.path.join(dst, relpath)
if not os.path.exists(dstfile):
create_dir_if_not_exist(os.path.dirname(dstfile))
shutil.copyfile(srcfile, dstfile)
shutil.copystat(srcfile, dstfile)
return True
elif os.path.isfile(src) and (not only_missing or not os.path.exists(dst)):
shutil.copyfile(src, dst)
shutil.copystat(src, dst)
return True
return False | Copy a file or a directory tree, deleting the destination before processing | Below is the instruction that describes the task:
### Input:
Copy a file or a directory tree, deleting the destination before processing
### Response:
def copy_any(src, dst, only_missing=False): # pragma: no cover
"""Copy a file or a directory tree, deleting the destination before processing"""
if not only_missing:
remove_if_exist(dst)
if os.path.exists(src):
if os.path.isdir(src):
if not only_missing:
shutil.copytree(src, dst, symlinks=False, ignore=None)
else:
for dirpath, filepath in recwalk(src):
srcfile = os.path.join(dirpath, filepath)
relpath = os.path.relpath(srcfile, src)
dstfile = os.path.join(dst, relpath)
if not os.path.exists(dstfile):
create_dir_if_not_exist(os.path.dirname(dstfile))
shutil.copyfile(srcfile, dstfile)
shutil.copystat(srcfile, dstfile)
return True
elif os.path.isfile(src) and (not only_missing or not os.path.exists(dst)):
shutil.copyfile(src, dst)
shutil.copystat(src, dst)
return True
return False |
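A usage sketch for copy_any, assuming the helpers it relies on (remove_if_exist, recwalk, create_dir_if_not_exist) are available from the same module.
copy_any('config/defaults.yml', 'backup/defaults.yml')      # replace the destination file
copy_any('assets', 'public/assets', only_missing=True)      # only copy files missing from dst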
def _clear_queue(to_clear):
"""Clear all items from a queue safely."""
while not to_clear.empty():
try:
to_clear.get(False)
to_clear.task_done()
except queue.Empty:
continue | Clear all items from a queue safely. | Below is the instruction that describes the task:
### Input:
Clear all items from a queue safely.
### Response:
def _clear_queue(to_clear):
"""Clear all items from a queue safely."""
while not to_clear.empty():
try:
to_clear.get(False)
to_clear.task_done()
except queue.Empty:
continue |
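A small usage sketch for _clear_queue with a standard-library queue.
import queue
q = queue.Queue()
for item in ('a', 'b', 'c'):
    q.put(item)
_clear_queue(q)
assert q.empty()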
def load(path_or_file, validate=True, strict=True, fmt='auto'):
r"""Load a JAMS Annotation from a file.
Parameters
----------
path_or_file : str or file-like
Path to the JAMS file to load
OR
An open file handle to load from.
validate : bool
Attempt to validate the JAMS object
strict : bool
if `validate == True`, enforce strict schema validation
fmt : str ['auto', 'jams', 'jamz']
The encoding format of the input
If `auto`, encoding is inferred from the file name.
If the input is an open file handle, `jams` encoding
is used.
Returns
-------
jam : JAMS
The loaded JAMS object
Raises
------
SchemaError
if `validate == True`, `strict==True`, and validation fails
See also
--------
JAMS.validate
JAMS.save
Examples
--------
>>> # Load a jams object from a file name
>>> J = jams.load('data.jams')
>>> # Or from an open file descriptor
>>> with open('data.jams', 'r') as fdesc:
... J = jams.load(fdesc)
>>> # Non-strict validation
>>> J = jams.load('data.jams', strict=False)
>>> # No validation at all
>>> J = jams.load('data.jams', validate=False)
"""
with _open(path_or_file, mode='r', fmt=fmt) as fdesc:
jam = JAMS(**json.load(fdesc))
if validate:
jam.validate(strict=strict)
return jam | r"""Load a JAMS Annotation from a file.
Parameters
----------
path_or_file : str or file-like
Path to the JAMS file to load
OR
An open file handle to load from.
validate : bool
Attempt to validate the JAMS object
strict : bool
if `validate == True`, enforce strict schema validation
fmt : str ['auto', 'jams', 'jamz']
The encoding format of the input
If `auto`, encoding is inferred from the file name.
If the input is an open file handle, `jams` encoding
is used.
Returns
-------
jam : JAMS
The loaded JAMS object
Raises
------
SchemaError
if `validate == True`, `strict==True`, and validation fails
See also
--------
JAMS.validate
JAMS.save
Examples
--------
>>> # Load a jams object from a file name
>>> J = jams.load('data.jams')
>>> # Or from an open file descriptor
>>> with open('data.jams', 'r') as fdesc:
... J = jams.load(fdesc)
>>> # Non-strict validation
>>> J = jams.load('data.jams', strict=False)
>>> # No validation at all
>>> J = jams.load('data.jams', validate=False) | Below is the the instruction that describes the task:
### Input:
r"""Load a JAMS Annotation from a file.
Parameters
----------
path_or_file : str or file-like
Path to the JAMS file to load
OR
An open file handle to load from.
validate : bool
Attempt to validate the JAMS object
strict : bool
if `validate == True`, enforce strict schema validation
fmt : str ['auto', 'jams', 'jamz']
The encoding format of the input
If `auto`, encoding is inferred from the file name.
If the input is an open file handle, `jams` encoding
is used.
Returns
-------
jam : JAMS
The loaded JAMS object
Raises
------
SchemaError
if `validate == True`, `strict==True`, and validation fails
See also
--------
JAMS.validate
JAMS.save
Examples
--------
>>> # Load a jams object from a file name
>>> J = jams.load('data.jams')
>>> # Or from an open file descriptor
>>> with open('data.jams', 'r') as fdesc:
... J = jams.load(fdesc)
>>> # Non-strict validation
>>> J = jams.load('data.jams', strict=False)
>>> # No validation at all
>>> J = jams.load('data.jams', validate=False)
### Response:
def load(path_or_file, validate=True, strict=True, fmt='auto'):
r"""Load a JAMS Annotation from a file.
Parameters
----------
path_or_file : str or file-like
Path to the JAMS file to load
OR
An open file handle to load from.
validate : bool
Attempt to validate the JAMS object
strict : bool
if `validate == True`, enforce strict schema validation
fmt : str ['auto', 'jams', 'jamz']
The encoding format of the input
If `auto`, encoding is inferred from the file name.
If the input is an open file handle, `jams` encoding
is used.
Returns
-------
jam : JAMS
The loaded JAMS object
Raises
------
SchemaError
if `validate == True`, `strict==True`, and validation fails
See also
--------
JAMS.validate
JAMS.save
Examples
--------
>>> # Load a jams object from a file name
>>> J = jams.load('data.jams')
>>> # Or from an open file descriptor
>>> with open('data.jams', 'r') as fdesc:
... J = jams.load(fdesc)
>>> # Non-strict validation
>>> J = jams.load('data.jams', strict=False)
>>> # No validation at all
>>> J = jams.load('data.jams', validate=False)
"""
with _open(path_or_file, mode='r', fmt=fmt) as fdesc:
jam = JAMS(**json.load(fdesc))
if validate:
jam.validate(strict=strict)
return jam |
def slugs_navigation_encode(self, u_m, phi_c, theta_c, psiDot_c, ay_body, totalDist, dist2Go, fromWP, toWP, h_c):
'''
Data used in the navigation algorithm.
u_m : Measured Airspeed prior to the nav filter in m/s (float)
phi_c : Commanded Roll (float)
theta_c : Commanded Pitch (float)
psiDot_c : Commanded Turn rate (float)
ay_body : Y component of the body acceleration (float)
totalDist : Total Distance to Run on this leg of Navigation (float)
dist2Go : Remaining distance to Run on this leg of Navigation (float)
fromWP : Origin WP (uint8_t)
toWP : Destination WP (uint8_t)
h_c : Commanded altitude in 0.1 m (uint16_t)
'''
return MAVLink_slugs_navigation_message(u_m, phi_c, theta_c, psiDot_c, ay_body, totalDist, dist2Go, fromWP, toWP, h_c) | Data used in the navigation algorithm.
u_m : Measured Airspeed prior to the nav filter in m/s (float)
phi_c : Commanded Roll (float)
theta_c : Commanded Pitch (float)
psiDot_c : Commanded Turn rate (float)
ay_body : Y component of the body acceleration (float)
totalDist : Total Distance to Run on this leg of Navigation (float)
dist2Go : Remaining distance to Run on this leg of Navigation (float)
fromWP : Origin WP (uint8_t)
toWP : Destination WP (uint8_t)
h_c : Commanded altitude in 0.1 m (uint16_t) | Below is the the instruction that describes the task:
### Input:
Data used in the navigation algorithm.
u_m : Measured Airspeed prior to the nav filter in m/s (float)
phi_c : Commanded Roll (float)
theta_c : Commanded Pitch (float)
psiDot_c : Commanded Turn rate (float)
ay_body : Y component of the body acceleration (float)
totalDist : Total Distance to Run on this leg of Navigation (float)
dist2Go : Remaining distance to Run on this leg of Navigation (float)
fromWP : Origin WP (uint8_t)
toWP : Destination WP (uint8_t)
h_c : Commanded altitude in 0.1 m (uint16_t)
### Response:
def slugs_navigation_encode(self, u_m, phi_c, theta_c, psiDot_c, ay_body, totalDist, dist2Go, fromWP, toWP, h_c):
'''
Data used in the navigation algorithm.
u_m : Measured Airspeed prior to the nav filter in m/s (float)
phi_c : Commanded Roll (float)
theta_c : Commanded Pitch (float)
psiDot_c : Commanded Turn rate (float)
ay_body : Y component of the body acceleration (float)
totalDist : Total Distance to Run on this leg of Navigation (float)
dist2Go : Remaining distance to Run on this leg of Navigation (float)
fromWP : Origin WP (uint8_t)
toWP : Destination WP (uint8_t)
h_c : Commanded altitude in 0.1 m (uint16_t)
'''
return MAVLink_slugs_navigation_message(u_m, phi_c, theta_c, psiDot_c, ay_body, totalDist, dist2Go, fromWP, toWP, h_c) |
def ne(name, value):
'''
Only succeed if the value in the given register location is not equal to
the given value
USAGE:
.. code-block:: yaml
foo:
check.ne:
- value: 42
run_remote_ex:
local.cmd:
- tgt: '*'
- func: test.ping
- require:
- check: foo
'''
ret = {'name': name,
'result': False,
'comment': '',
'changes': {}}
if name not in __reg__:
ret['result'] = False
ret['comment'] = 'Value {0} not in register'.format(name)
return ret
if __reg__[name]['val'] != value:
ret['result'] = True
return ret | Only succeed if the value in the given register location is not equal to
the given value
USAGE:
.. code-block:: yaml
foo:
check.ne:
- value: 42
run_remote_ex:
local.cmd:
- tgt: '*'
- func: test.ping
- require:
- check: foo | Below is the the instruction that describes the task:
### Input:
Only succeed if the value in the given register location is not equal to
the given value
USAGE:
.. code-block:: yaml
foo:
check.ne:
- value: 42
run_remote_ex:
local.cmd:
- tgt: '*'
- func: test.ping
- require:
- check: foo
### Response:
def ne(name, value):
'''
Only succeed if the value in the given register location is not equal to
the given value
USAGE:
.. code-block:: yaml
foo:
check.ne:
- value: 42
run_remote_ex:
local.cmd:
- tgt: '*'
- func: test.ping
- require:
- check: foo
'''
ret = {'name': name,
'result': False,
'comment': '',
'changes': {}}
if name not in __reg__:
ret['result'] = False
ret['comment'] = 'Value {0} not in register'.format(name)
return ret
if __reg__[name]['val'] != value:
ret['result'] = True
return ret |
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists | Check to see if the given key (or an alias) is in the config file. | Below is the the instruction that describes the task:
### Input:
Check to see if the given key (or an alias) is in the config file.
### Response:
def in_config(self, key):
"""Check to see if the given key (or an alias) is in the config file.
"""
# if the requested key is an alias, then return the proper key
key = self._real_key(key)
exists = self._config.get(key)
return exists |
def disable_svc_event_handler(self, service):
"""Disable event handlers for a service
Format of the line that triggers function call::
DISABLE_SVC_EVENT_HANDLER;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
if service.event_handler_enabled:
service.modified_attributes |= \
DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
service.event_handler_enabled = False
self.send_an_element(service.get_update_status_brok()) | Disable event handlers for a service
Format of the line that triggers function call::
DISABLE_SVC_EVENT_HANDLER;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None | Below is the the instruction that describes the task:
### Input:
Disable event handlers for a service
Format of the line that triggers function call::
DISABLE_SVC_EVENT_HANDLER;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
### Response:
def disable_svc_event_handler(self, service):
"""Disable event handlers for a service
Format of the line that triggers function call::
DISABLE_SVC_EVENT_HANDLER;<host_name>;<service_description>
:param service: service to edit
:type service: alignak.objects.service.Service
:return: None
"""
if service.event_handler_enabled:
service.modified_attributes |= \
DICT_MODATTR["MODATTR_EVENT_HANDLER_ENABLED"].value
service.event_handler_enabled = False
self.send_an_element(service.get_update_status_brok()) |
def get_vcs_details_output_vcs_details_principal_switch_wwn(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vcs_details = ET.Element("get_vcs_details")
config = get_vcs_details
output = ET.SubElement(get_vcs_details, "output")
vcs_details = ET.SubElement(output, "vcs-details")
principal_switch_wwn = ET.SubElement(vcs_details, "principal-switch-wwn")
principal_switch_wwn.text = kwargs.pop('principal_switch_wwn')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_vcs_details_output_vcs_details_principal_switch_wwn(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_vcs_details = ET.Element("get_vcs_details")
config = get_vcs_details
output = ET.SubElement(get_vcs_details, "output")
vcs_details = ET.SubElement(output, "vcs-details")
principal_switch_wwn = ET.SubElement(vcs_details, "principal-switch-wwn")
principal_switch_wwn.text = kwargs.pop('principal_switch_wwn')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def download_segmentation_image_file(self, mapobject_type_name,
plate_name, well_name, well_pos_y, well_pos_x, tpoint, zplane, align,
directory):
'''Downloads a segmentation image and writes it to a *PNG* file on disk.
Parameters
----------
mapobject_type_name: str
name of the segmented objects
plate_name: str
name of the plate
well_name: str
name of the well in which the image is located
well_pos_y: int
y-position of the site relative to the well grid
well_pos_x: int
x-position of the site relative to the well grid
tpoint: int
zero-based time point index
zplane: int
zero-based z-plane index
align: bool
option to apply alignment to download
directory: str
absolute path to the directory on disk where the file should be saved
Warning
-------
Due to the *PNG* file format the approach is limited to images which
contain less than 65536 objects.
See also
--------
:meth:`tmclient.api.TmClient.download_segmentation_image`
'''
response = self._download_segmentation_image(
mapobject_type_name, plate_name, well_name, well_pos_y, well_pos_x,
tpoint, zplane, align
)
image = np.array(response, np.int32)
if np.max(image) >= 2**16:
raise ValueError(
'Cannot store segmentation image as PNG file because it '
'contains more than 65536 objects.'
)
filename = '{0}_{1}_{2}_y{3:03d}_x{4:03d}_z{5:03d}_t{6:03d}_{7}.png'.format(
self.experiment_name, plate_name, well_name, well_pos_y,
well_pos_x, zplane, tpoint, mapobject_type_name
)
data = cv2.imencode(filename, image.astype(np.uint16))[1]
self._write_file(directory, filename, data) | Downloads a segmentation image and writes it to a *PNG* file on disk.
Parameters
----------
mapobject_type_name: str
name of the segmented objects
plate_name: str
name of the plate
well_name: str
name of the well in which the image is located
well_pos_y: int
y-position of the site relative to the well grid
well_pos_x: int
x-position of the site relative to the well grid
tpoint: int
zero-based time point index
zplane: int
zero-based z-plane index
align: bool
option to apply alignment to download
directory: str
absolute path to the directory on disk where the file should be saved
Warning
-------
Due to the *PNG* file format the approach is limited to images which
contain less than 65536 objects.
See also
--------
:meth:`tmclient.api.TmClient.download_segmentation_image` | Below is the the instruction that describes the task:
### Input:
Downloads a segmentation image and writes it to a *PNG* file on disk.
Parameters
----------
mapobject_type_name: str
name of the segmented objects
plate_name: str
name of the plate
well_name: str
name of the well in which the image is located
well_pos_y: int
y-position of the site relative to the well grid
well_pos_x: int
x-position of the site relative to the well grid
tpoint: int
zero-based time point index
zplane: int
zero-based z-plane index
align: bool
option to apply alignment to download
directory: str
absolute path to the directory on disk where the file should be saved
Warning
-------
Due to the *PNG* file format the approach is limited to images which
contain less than 65536 objects.
See also
--------
:meth:`tmclient.api.TmClient.download_segmentation_image`
### Response:
def download_segmentation_image_file(self, mapobject_type_name,
plate_name, well_name, well_pos_y, well_pos_x, tpoint, zplane, align,
directory):
'''Downloads a segmentation image and writes it to a *PNG* file on disk.
Parameters
----------
mapobject_type_name: str
name of the segmented objects
plate_name: str
name of the plate
well_name: str
name of the well in which the image is located
well_pos_y: int
y-position of the site relative to the well grid
well_pos_x: int
x-position of the site relative to the well grid
tpoint: int
zero-based time point index
zplane: int
zero-based z-plane index
align: bool
option to apply alignment to download
directory: str
absolute path to the directory on disk where the file should be saved
Warning
-------
Due to the *PNG* file format the approach is limited to images which
contain less than 65536 objects.
See also
--------
:meth:`tmclient.api.TmClient.download_segmentation_image`
'''
response = self._download_segmentation_image(
mapobject_type_name, plate_name, well_name, well_pos_y, well_pos_x,
tpoint, zplane, align
)
image = np.array(response, np.int32)
if np.max(image) >= 2**16:
raise ValueError(
'Cannot store segmentation image as PNG file because it '
'contains more than 65536 objects.'
)
filename = '{0}_{1}_{2}_y{3:03d}_x{4:03d}_z{5:03d}_t{6:03d}_{7}.png'.format(
self.experiment_name, plate_name, well_name, well_pos_y,
well_pos_x, zplane, tpoint, mapobject_type_name
)
data = cv2.imencode(filename, image.astype(np.uint16))[1]
self._write_file(directory, filename, data) |
def blend_rect(self, x0, y0, x1, y1, dx, dy, destination, alpha=0xff):
"""Blend a rectangle onto the image"""
x0, y0, x1, y1 = self.rect_helper(x0, y0, x1, y1)
for x in range(x0, x1 + 1):
for y in range(y0, y1 + 1):
o = self._offset(x, y)
rgba = self.canvas[o:o + 4]
rgba[3] = alpha
destination.point(dx + x - x0, dy + y - y0, rgba) | Blend a rectangle onto the image | Below is the the instruction that describes the task:
### Input:
Blend a rectangle onto the image
### Response:
def blend_rect(self, x0, y0, x1, y1, dx, dy, destination, alpha=0xff):
"""Blend a rectangle onto the image"""
x0, y0, x1, y1 = self.rect_helper(x0, y0, x1, y1)
for x in range(x0, x1 + 1):
for y in range(y0, y1 + 1):
o = self._offset(x, y)
rgba = self.canvas[o:o + 4]
rgba[3] = alpha
destination.point(dx + x - x0, dy + y - y0, rgba) |
def add_member(self, container_id, member_id):
"""AddMember.
[Preview API]
:param str container_id:
:param str member_id:
:rtype: bool
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
response = self._send(http_method='PUT',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='5.0-preview.1',
route_values=route_values)
return self._deserialize('bool', response) | AddMember.
[Preview API]
:param str container_id:
:param str member_id:
:rtype: bool | Below is the the instruction that describes the task:
### Input:
AddMember.
[Preview API]
:param str container_id:
:param str member_id:
:rtype: bool
### Response:
def add_member(self, container_id, member_id):
"""AddMember.
[Preview API]
:param str container_id:
:param str member_id:
:rtype: bool
"""
route_values = {}
if container_id is not None:
route_values['containerId'] = self._serialize.url('container_id', container_id, 'str')
if member_id is not None:
route_values['memberId'] = self._serialize.url('member_id', member_id, 'str')
response = self._send(http_method='PUT',
location_id='8ba35978-138e-41f8-8963-7b1ea2c5f775',
version='5.0-preview.1',
route_values=route_values)
return self._deserialize('bool', response) |
def get_choice(value):
"""Gets a key to choose a choice from any value."""
if value is None:
return 'null'
for attr in ['__name__', 'name']:
if hasattr(value, attr):
return getattr(value, attr)
return str(value) | Gets a key to choose a choice from any value. | Below is the the instruction that describes the task:
### Input:
Gets a key to choose a choice from any value.
### Response:
def get_choice(value):
"""Gets a key to choose a choice from any value."""
if value is None:
return 'null'
for attr in ['__name__', 'name']:
if hasattr(value, attr):
return getattr(value, attr)
return str(value) |
def add_item_to_basket(self, item, variant=VARIANT.MEDIUM, quantity=1):
'''
Add an item to the current basket.
:param Item item: Item from menu.
:param int variant: Item SKU id. Ignored if the item is a side.
:param int quantity: The quantity of item to be added.
:return: A response having added an item to the current basket.
:rtype: requests.Response
'''
item_type = item.type
if item_type == 'Pizza':
return self.add_pizza_to_basket(item, variant, quantity)
elif item_type == 'Side':
return self.add_side_to_basket(item, quantity)
return None | Add an item to the current basket.
:param Item item: Item from menu.
:param int variant: Item SKU id. Ignored if the item is a side.
:param int quantity: The quantity of item to be added.
:return: A response having added an item to the current basket.
:rtype: requests.Response | Below is the the instruction that describes the task:
### Input:
Add an item to the current basket.
:param Item item: Item from menu.
:param int variant: Item SKU id. Ignored if the item is a side.
:param int quantity: The quantity of item to be added.
:return: A response having added an item to the current basket.
:rtype: requests.Response
### Response:
def add_item_to_basket(self, item, variant=VARIANT.MEDIUM, quantity=1):
'''
Add an item to the current basket.
:param Item item: Item from menu.
:param int variant: Item SKU id. Ignored if the item is a side.
:param int quantity: The quantity of item to be added.
:return: A response having added an item to the current basket.
:rtype: requests.Response
'''
item_type = item.type
if item_type == 'Pizza':
return self.add_pizza_to_basket(item, variant, quantity)
elif item_type == 'Side':
return self.add_side_to_basket(item, quantity)
return None |
def p_if_expr_header(p):
""" if_header : IF expr
"""
global ENABLED
IFDEFS.append((ENABLED, p.lineno(1)))
ENABLED = bool(int(p[2])) if p[2].isdigit() else ID_TABLE.defined(p[2]) | if_header : IF expr | Below is the the instruction that describes the task:
### Input:
if_header : IF expr
### Response:
def p_if_expr_header(p):
""" if_header : IF expr
"""
global ENABLED
IFDEFS.append((ENABLED, p.lineno(1)))
ENABLED = bool(int(p[2])) if p[2].isdigit() else ID_TABLE.defined(p[2]) |
def deleteThreads(self, thread_ids):
"""
Deletes threads
:param thread_ids: Thread IDs to delete. See :ref:`intro_threads`
:return: Whether the request was successful
:raises: FBchatException if request failed
"""
thread_ids = require_list(thread_ids)
data_unpin = dict()
data_delete = dict()
for i, thread_id in enumerate(thread_ids):
data_unpin["ids[{}]".format(thread_id)] = "false"
data_delete["ids[{}]".format(i)] = thread_id
r_unpin = self._post(self.req_url.PINNED_STATUS, data_unpin)
r_delete = self._post(self.req_url.DELETE_THREAD, data_delete)
return r_unpin.ok and r_delete.ok | Deletes threads
:param thread_ids: Thread IDs to delete. See :ref:`intro_threads`
:return: Whether the request was successful
:raises: FBchatException if request failed | Below is the the instruction that describes the task:
### Input:
Deletes threads
:param thread_ids: Thread IDs to delete. See :ref:`intro_threads`
:return: Whether the request was successful
:raises: FBchatException if request failed
### Response:
def deleteThreads(self, thread_ids):
"""
Deletes threads
:param thread_ids: Thread IDs to delete. See :ref:`intro_threads`
:return: Whether the request was successful
:raises: FBchatException if request failed
"""
thread_ids = require_list(thread_ids)
data_unpin = dict()
data_delete = dict()
for i, thread_id in enumerate(thread_ids):
data_unpin["ids[{}]".format(thread_id)] = "false"
data_delete["ids[{}]".format(i)] = thread_id
r_unpin = self._post(self.req_url.PINNED_STATUS, data_unpin)
r_delete = self._post(self.req_url.DELETE_THREAD, data_delete)
return r_unpin.ok and r_delete.ok |
def getThirdpartyLibs(self, libs, configuration = 'Development', includePlatformDefaults = True):
"""
Retrieves the ThirdPartyLibraryDetails instance for Unreal-bundled versions of the specified third-party libraries
"""
if includePlatformDefaults == True:
libs = self._defaultThirdpartyLibs() + libs
interrogator = self._getUE4BuildInterrogator()
return interrogator.interrogate(self.getPlatformIdentifier(), configuration, libs, self._getLibraryOverrides()) | Retrieves the ThirdPartyLibraryDetails instance for Unreal-bundled versions of the specified third-party libraries | Below is the the instruction that describes the task:
### Input:
Retrieves the ThirdPartyLibraryDetails instance for Unreal-bundled versions of the specified third-party libraries
### Response:
def getThirdpartyLibs(self, libs, configuration = 'Development', includePlatformDefaults = True):
"""
Retrieves the ThirdPartyLibraryDetails instance for Unreal-bundled versions of the specified third-party libraries
"""
if includePlatformDefaults == True:
libs = self._defaultThirdpartyLibs() + libs
interrogator = self._getUE4BuildInterrogator()
return interrogator.interrogate(self.getPlatformIdentifier(), configuration, libs, self._getLibraryOverrides()) |
def update_database(self, server_name, name, new_database_name=None,
service_objective_id=None, edition=None,
max_size_bytes=None):
'''
Updates existing database details.
server_name:
Name of the server to contain the new database.
name:
Required. The name for the new database. See Naming Requirements
in Azure SQL Database General Guidelines and Limitations and
Database Identifiers for more information.
new_database_name:
Optional. The new name for the new database.
service_objective_id:
Optional. The new service level to apply to the database. For more
information about service levels, see Azure SQL Database Service
Tiers and Performance Levels. Use List Service Level Objectives to
get the correct ID for the desired service objective.
edition:
Optional. The new edition for the new database.
max_size_bytes:
Optional. The new size of the database in bytes. For information on
available sizes for each edition, see Azure SQL Database Service
Tiers (Editions).
'''
_validate_not_none('server_name', server_name)
_validate_not_none('name', name)
return self._perform_put(
self._get_databases_path(server_name, name),
_SqlManagementXmlSerializer.update_database_to_xml(
new_database_name, service_objective_id, edition,
max_size_bytes
)
) | Updates existing database details.
server_name:
Name of the server to contain the new database.
name:
Required. The name for the new database. See Naming Requirements
in Azure SQL Database General Guidelines and Limitations and
Database Identifiers for more information.
new_database_name:
Optional. The new name for the new database.
service_objective_id:
Optional. The new service level to apply to the database. For more
information about service levels, see Azure SQL Database Service
Tiers and Performance Levels. Use List Service Level Objectives to
get the correct ID for the desired service objective.
edition:
Optional. The new edition for the new database.
max_size_bytes:
Optional. The new size of the database in bytes. For information on
available sizes for each edition, see Azure SQL Database Service
Tiers (Editions). | Below is the the instruction that describes the task:
### Input:
Updates existing database details.
server_name:
Name of the server to contain the new database.
name:
Required. The name for the new database. See Naming Requirements
in Azure SQL Database General Guidelines and Limitations and
Database Identifiers for more information.
new_database_name:
Optional. The new name for the new database.
service_objective_id:
Optional. The new service level to apply to the database. For more
information about service levels, see Azure SQL Database Service
Tiers and Performance Levels. Use List Service Level Objectives to
get the correct ID for the desired service objective.
edition:
Optional. The new edition for the new database.
max_size_bytes:
Optional. The new size of the database in bytes. For information on
available sizes for each edition, see Azure SQL Database Service
Tiers (Editions).
### Response:
def update_database(self, server_name, name, new_database_name=None,
service_objective_id=None, edition=None,
max_size_bytes=None):
'''
Updates existing database details.
server_name:
Name of the server to contain the new database.
name:
Required. The name for the new database. See Naming Requirements
in Azure SQL Database General Guidelines and Limitations and
Database Identifiers for more information.
new_database_name:
Optional. The new name for the new database.
service_objective_id:
Optional. The new service level to apply to the database. For more
information about service levels, see Azure SQL Database Service
Tiers and Performance Levels. Use List Service Level Objectives to
get the correct ID for the desired service objective.
edition:
Optional. The new edition for the new database.
max_size_bytes:
Optional. The new size of the database in bytes. For information on
available sizes for each edition, see Azure SQL Database Service
Tiers (Editions).
'''
_validate_not_none('server_name', server_name)
_validate_not_none('name', name)
return self._perform_put(
self._get_databases_path(server_name, name),
_SqlManagementXmlSerializer.update_database_to_xml(
new_database_name, service_objective_id, edition,
max_size_bytes
)
) |
def all(self, order_by=None, limit=0):
"""
Fetch all items.
:param limit: How many rows to fetch.
:param order_by: column on which to order the results. \
To change the sort, prepend with < or >.
"""
with rconnect() as conn:
try:
query = self._base()
if order_by is not None:
query = self._order_by(query, order_by)
if limit > 0:
query = self._limit(query, limit)
log.debug(query)
rv = query.run(conn)
except Exception as e:
log.warn(e)
raise
else:
data = [self._model(_) for _ in rv]
return data | Fetch all items.
:param limit: How many rows to fetch.
:param order_by: column on which to order the results. \
To change the sort, prepend with < or >. | Below is the the instruction that describes the task:
### Input:
Fetch all items.
:param limit: How many rows to fetch.
:param order_by: column on which to order the results. \
To change the sort, prepend with < or >.
### Response:
def all(self, order_by=None, limit=0):
"""
Fetch all items.
:param limit: How many rows to fetch.
:param order_by: column on which to order the results. \
To change the sort, prepend with < or >.
"""
with rconnect() as conn:
try:
query = self._base()
if order_by is not None:
query = self._order_by(query, order_by)
if limit > 0:
query = self._limit(query, limit)
log.debug(query)
rv = query.run(conn)
except Exception as e:
log.warn(e)
raise
else:
data = [self._model(_) for _ in rv]
return data |
def head_orifice(Diam, RatioVCOrifice, FlowRate):
"""Return the head of the orifice."""
#Checking input validity
ut.check_range([Diam, ">0", "Diameter"], [FlowRate, ">0", "Flow rate"],
[RatioVCOrifice, "0-1", "VC orifice ratio"])
return ((FlowRate
/ (RatioVCOrifice * area_circle(Diam).magnitude)
)**2
/ (2*gravity.magnitude)
) | Return the head of the orifice. | Below is the the instruction that describes the task:
### Input:
Return the head of the orifice.
### Response:
def head_orifice(Diam, RatioVCOrifice, FlowRate):
"""Return the head of the orifice."""
#Checking input validity
ut.check_range([Diam, ">0", "Diameter"], [FlowRate, ">0", "Flow rate"],
[RatioVCOrifice, "0-1", "VC orifice ratio"])
return ((FlowRate
/ (RatioVCOrifice * area_circle(Diam).magnitude)
)**2
/ (2*gravity.magnitude)
) |
def offset(self, heading_initial, distance, ellipse = 'WGS84'):
'''
Offset a LatLon object by a heading (in degrees) and distance (in km)
to return a new LatLon object
'''
lat1, lon1 = self.lat.decimal_degree, self.lon.decimal_degree
g = pyproj.Geod(ellps = ellipse)
distance = distance * 1000 # Convert km to meters
lon2, lat2, back_bearing = g.fwd(lon1, lat1, heading_initial, distance, radians = False)
return LatLon(Latitude(lat2), Longitude(lon2)) | Offset a LatLon object by a heading (in degrees) and distance (in km)
to return a new LatLon object | Below is the the instruction that describes the task:
### Input:
Offset a LatLon object by a heading (in degrees) and distance (in km)
to return a new LatLon object
### Response:
def offset(self, heading_initial, distance, ellipse = 'WGS84'):
'''
Offset a LatLon object by a heading (in degrees) and distance (in km)
to return a new LatLon object
'''
lat1, lon1 = self.lat.decimal_degree, self.lon.decimal_degree
g = pyproj.Geod(ellps = ellipse)
distance = distance * 1000 # Convert km to meters
lon2, lat2, back_bearing = g.fwd(lon1, lat1, heading_initial, distance, radians = False)
return LatLon(Latitude(lat2), Longitude(lon2)) |
def generate_all_deps(self, target: Target):
"""Generate all dependencies of `target` (the target nodes)."""
yield from (self.targets[dep_name]
for dep_name in self.generate_dep_names(target)) | Generate all dependencies of `target` (the target nodes). | Below is the the instruction that describes the task:
### Input:
Generate all dependencies of `target` (the target nodes).
### Response:
def generate_all_deps(self, target: Target):
"""Generate all dependencies of `target` (the target nodes)."""
yield from (self.targets[dep_name]
for dep_name in self.generate_dep_names(target)) |
def find_ent_endurance_tier_price(package, tier_level):
"""Find the price in the given package with the specified tier level
:param package: The Enterprise (Endurance) product package
:param tier_level: The endurance tier for which a price is desired
:return: Returns the price for the given tier, or an error if not found
"""
for item in package['items']:
for attribute in item.get('attributes', []):
if int(attribute['value']) == ENDURANCE_TIERS.get(tier_level):
break
else:
continue
price_id = _find_price_id(item['prices'], 'storage_tier_level')
if price_id:
return price_id
raise ValueError("Could not find price for endurance tier level") | Find the price in the given package with the specified tier level
:param package: The Enterprise (Endurance) product package
:param tier_level: The endurance tier for which a price is desired
:return: Returns the price for the given tier, or an error if not found | Below is the the instruction that describes the task:
### Input:
Find the price in the given package with the specified tier level
:param package: The Enterprise (Endurance) product package
:param tier_level: The endurance tier for which a price is desired
:return: Returns the price for the given tier, or an error if not found
### Response:
def find_ent_endurance_tier_price(package, tier_level):
"""Find the price in the given package with the specified tier level
:param package: The Enterprise (Endurance) product package
:param tier_level: The endurance tier for which a price is desired
:return: Returns the price for the given tier, or an error if not found
"""
for item in package['items']:
for attribute in item.get('attributes', []):
if int(attribute['value']) == ENDURANCE_TIERS.get(tier_level):
break
else:
continue
price_id = _find_price_id(item['prices'], 'storage_tier_level')
if price_id:
return price_id
raise ValueError("Could not find price for endurance tier level") |
def is_parent_of(self, parent, child):
"""Return whether ``child`` is a branch descended from ``parent`` at
any remove.
"""
if parent == 'trunk':
return True
if child == 'trunk':
return False
if child not in self._branches:
raise ValueError(
"The branch {} seems not to have ever been created".format(
child
)
)
if self._branches[child][0] == parent:
return True
return self.is_parent_of(parent, self._branches[child][0]) | Return whether ``child`` is a branch descended from ``parent`` at
any remove. | Below is the the instruction that describes the task:
### Input:
Return whether ``child`` is a branch descended from ``parent`` at
any remove.
### Response:
def is_parent_of(self, parent, child):
"""Return whether ``child`` is a branch descended from ``parent`` at
any remove.
"""
if parent == 'trunk':
return True
if child == 'trunk':
return False
if child not in self._branches:
raise ValueError(
"The branch {} seems not to have ever been created".format(
child
)
)
if self._branches[child][0] == parent:
return True
return self.is_parent_of(parent, self._branches[child][0]) |
def getOverlayMouseScale(self, ulOverlayHandle):
"""
Gets the mouse scaling factor that is used for mouse events. The actual texture may be a different size, but this is
typically the size of the underlying UI in pixels.
"""
fn = self.function_table.getOverlayMouseScale
pvecMouseScale = HmdVector2_t()
result = fn(ulOverlayHandle, byref(pvecMouseScale))
return result, pvecMouseScale | Gets the mouse scaling factor that is used for mouse events. The actual texture may be a different size, but this is
typically the size of the underlying UI in pixels. | Below is the the instruction that describes the task:
### Input:
Gets the mouse scaling factor that is used for mouse events. The actual texture may be a different size, but this is
typically the size of the underlying UI in pixels.
### Response:
def getOverlayMouseScale(self, ulOverlayHandle):
"""
Gets the mouse scaling factor that is used for mouse events. The actual texture may be a different size, but this is
typically the size of the underlying UI in pixels.
"""
fn = self.function_table.getOverlayMouseScale
pvecMouseScale = HmdVector2_t()
result = fn(ulOverlayHandle, byref(pvecMouseScale))
return result, pvecMouseScale |
def _getPayload(self, record):
"""
The data that will be sent to the RESTful API
"""
try:
# top level payload items
d = record.__dict__
pid = d.pop('process', 'nopid')
tid = d.pop('thread', 'notid')
payload = {
k: v for (k, v) in d.items()
if k in TOP_KEYS
}
# logging meta attributes
payload['meta'] = {
k: v for (k, v) in d.items()
if k in META_KEYS
}
# everything else goes in details
payload['details'] = {
k: simple_json(v) for (k, v) in d.items()
if k not in self.detail_ignore_set
}
payload['log'] = payload.pop('name', 'n/a')
payload['level'] = payload.pop('levelname', 'n/a')
payload['meta']['line'] = payload['meta'].pop('lineno', 'n/a')
payload['message'] = record.getMessage()
tb = self._getTraceback(record)
if tb:
payload['traceback'] = tb
except Exception as e:
payload = {
'level': 'ERROR',
'message': 'could not format',
'exception': repr(e),
}
payload['pid'] = 'p-{}'.format(pid)
payload['tid'] = 't-{}'.format(tid)
return payload | The data that will be sent to the RESTful API | Below is the the instruction that describes the task:
### Input:
The data that will be sent to the RESTful API
### Response:
def _getPayload(self, record):
"""
The data that will be sent to the RESTful API
"""
try:
# top level payload items
d = record.__dict__
pid = d.pop('process', 'nopid')
tid = d.pop('thread', 'notid')
payload = {
k: v for (k, v) in d.items()
if k in TOP_KEYS
}
# logging meta attributes
payload['meta'] = {
k: v for (k, v) in d.items()
if k in META_KEYS
}
# everything else goes in details
payload['details'] = {
k: simple_json(v) for (k, v) in d.items()
if k not in self.detail_ignore_set
}
payload['log'] = payload.pop('name', 'n/a')
payload['level'] = payload.pop('levelname', 'n/a')
payload['meta']['line'] = payload['meta'].pop('lineno', 'n/a')
payload['message'] = record.getMessage()
tb = self._getTraceback(record)
if tb:
payload['traceback'] = tb
except Exception as e:
payload = {
'level': 'ERROR',
'message': 'could not format',
'exception': repr(e),
}
payload['pid'] = 'p-{}'.format(pid)
payload['tid'] = 't-{}'.format(tid)
return payload |
def searchResults(self, REQUEST=None, used=None, **kw):
"""Search the catalog
Search terms can be passed in the REQUEST or as keyword
arguments.
The used argument is now deprecated and ignored
"""
if REQUEST and REQUEST.get('getRequestUID') \
and self.id == CATALOG_ANALYSIS_LISTING:
# Fetch all analyses that have the request UID passed in as an ancestor,
# cause we want Primary ARs to always display the analyses from their
# derived ARs (if result is not empty)
request = REQUEST.copy()
orig_uid = request.get('getRequestUID')
# If a list of request uid, retrieve them sequentially to make the
# masking process easier
if isinstance(orig_uid, list):
results = list()
for uid in orig_uid:
request['getRequestUID'] = [uid]
results += self.searchResults(REQUEST=request, used=used, **kw)
return results
# Get all analyses, those from descendant ARs included
del request['getRequestUID']
request['getAncestorsUIDs'] = orig_uid
results = self.searchResults(REQUEST=request, used=used, **kw)
# Masking
primary = filter(lambda an: an.getParentUID == orig_uid, results)
derived = filter(lambda an: an.getParentUID != orig_uid, results)
derived_keys = map(lambda an: an.getKeyword, derived)
results = filter(lambda an: an.getKeyword not in derived_keys, primary)
return results + derived
# Normal search
return self._catalog.searchResults(REQUEST, used, **kw) | Search the catalog
Search terms can be passed in the REQUEST or as keyword
arguments.
The used argument is now deprecated and ignored | Below is the the instruction that describes the task:
### Input:
Search the catalog
Search terms can be passed in the REQUEST or as keyword
arguments.
The used argument is now deprecated and ignored
### Response:
def searchResults(self, REQUEST=None, used=None, **kw):
"""Search the catalog
Search terms can be passed in the REQUEST or as keyword
arguments.
The used argument is now deprecated and ignored
"""
if REQUEST and REQUEST.get('getRequestUID') \
and self.id == CATALOG_ANALYSIS_LISTING:
# Fetch all analyses that have the request UID passed in as an ancestor,
# cause we want Primary ARs to always display the analyses from their
# derived ARs (if result is not empty)
request = REQUEST.copy()
orig_uid = request.get('getRequestUID')
# If a list of request uid, retrieve them sequentially to make the
# masking process easier
if isinstance(orig_uid, list):
results = list()
for uid in orig_uid:
request['getRequestUID'] = [uid]
results += self.searchResults(REQUEST=request, used=used, **kw)
return results
# Get all analyses, those from descendant ARs included
del request['getRequestUID']
request['getAncestorsUIDs'] = orig_uid
results = self.searchResults(REQUEST=request, used=used, **kw)
# Masking
primary = filter(lambda an: an.getParentUID == orig_uid, results)
derived = filter(lambda an: an.getParentUID != orig_uid, results)
derived_keys = map(lambda an: an.getKeyword, derived)
results = filter(lambda an: an.getKeyword not in derived_keys, primary)
return results + derived
# Normal search
return self._catalog.searchResults(REQUEST, used, **kw) |
def chunk_list(l, n):
"""Return `n` size lists from a given list `l`"""
return [l[i:i + n] for i in range(0, len(l), n)] | Return `n` size lists from a given list `l` | Below is the the instruction that describes the task:
### Input:
Return `n` size lists from a given list `l`
### Response:
def chunk_list(l, n):
"""Return `n` size lists from a given list `l`"""
return [l[i:i + n] for i in range(0, len(l), n)] |
def push(self):
""" create a github repo and push the local repo into it
"""
self.github_repo.create_and_push()
self._repo = self.github_repo.repo
return self._repo | create a github repo and push the local repo into it | Below is the the instruction that describes the task:
### Input:
create a github repo and push the local repo into it
### Response:
def push(self):
""" create a github repo and push the local repo into it
"""
self.github_repo.create_and_push()
self._repo = self.github_repo.repo
return self._repo |
def add_section(self, section_name):
"""Add an empty section.
"""
if section_name == "DEFAULT":
raise Exception("'DEFAULT' is reserved section name.")
if section_name in self._sections:
raise Exception(
"Error! %s is already one of the sections" % section_name)
else:
self._sections[section_name] = Section(section_name) | Add an empty section. | Below is the the instruction that describes the task:
### Input:
Add an empty section.
### Response:
def add_section(self, section_name):
"""Add an empty section.
"""
if section_name == "DEFAULT":
raise Exception("'DEFAULT' is reserved section name.")
if section_name in self._sections:
raise Exception(
"Error! %s is already one of the sections" % section_name)
else:
self._sections[section_name] = Section(section_name) |
def get_season(self, season_key, card_type="micro_card"):
"""
Calling Season API.
Arg:
season_key: key of the season
card_type: optional, default to micro_card. Accepted values are
micro_card & summary_card
Return:
json data
"""
season_url = self.api_path + "season/" + season_key + "/"
params = {}
params["card_type"] = card_type
response = self.get_response(season_url, params)
return response | Calling Season API.
Arg:
season_key: key of the season
card_type: optional, default to micro_card. Accepted values are
micro_card & summary_card
Return:
json data | Below is the the instruction that describes the task:
### Input:
Calling Season API.
Arg:
season_key: key of the season
card_type: optional, default to micro_card. Accepted values are
micro_card & summary_card
Return:
json data
### Response:
def get_season(self, season_key, card_type="micro_card"):
"""
Calling Season API.
Arg:
season_key: key of the season
card_type: optional, default to micro_card. Accepted values are
micro_card & summary_card
Return:
json data
"""
season_url = self.api_path + "season/" + season_key + "/"
params = {}
params["card_type"] = card_type
response = self.get_response(season_url, params)
return response |
def add_request_init_listener(self, fn, *args, **kwargs):
"""
Adds a callback with arguments to be called when any request is created.
It will be invoked as `fn(response_future, *args, **kwargs)` after each client request is created,
and before the request is sent\*. This can be used to create extensions by adding result callbacks to the
response future.
\* where `response_future` is the :class:`.ResponseFuture` for the request.
Note that the init callback is done on the client thread creating the request, so you may need to consider
synchronization if you have multiple threads. Any callbacks added to the response future will be executed
on the event loop thread, so the normal advice about minimizing cycles and avoiding blocking apply (see Note in
:meth:`.ResponseFuture.add_callbacks`.
See `this example <https://github.com/datastax/python-driver/blob/master/examples/request_init_listener.py>`_ in the
source tree for an example.
"""
self._request_init_callbacks.append((fn, args, kwargs)) | Adds a callback with arguments to be called when any request is created.
It will be invoked as `fn(response_future, *args, **kwargs)` after each client request is created,
and before the request is sent\*. This can be used to create extensions by adding result callbacks to the
response future.
\* where `response_future` is the :class:`.ResponseFuture` for the request.
Note that the init callback is done on the client thread creating the request, so you may need to consider
synchronization if you have multiple threads. Any callbacks added to the response future will be executed
on the event loop thread, so the normal advice about minimizing cycles and avoiding blocking apply (see Note in
:meth:`.ResponseFuture.add_callbacks`.
See `this example <https://github.com/datastax/python-driver/blob/master/examples/request_init_listener.py>`_ in the
source tree for an example. | Below is the the instruction that describes the task:
### Input:
Adds a callback with arguments to be called when any request is created.
It will be invoked as `fn(response_future, *args, **kwargs)` after each client request is created,
and before the request is sent\*. This can be used to create extensions by adding result callbacks to the
response future.
\* where `response_future` is the :class:`.ResponseFuture` for the request.
Note that the init callback is done on the client thread creating the request, so you may need to consider
synchronization if you have multiple threads. Any callbacks added to the response future will be executed
on the event loop thread, so the normal advice about minimizing cycles and avoiding blocking apply (see Note in
:meth:`.ResponseFuture.add_callbacks`.
See `this example <https://github.com/datastax/python-driver/blob/master/examples/request_init_listener.py>`_ in the
source tree for an example.
### Response:
def add_request_init_listener(self, fn, *args, **kwargs):
"""
Adds a callback with arguments to be called when any request is created.
It will be invoked as `fn(response_future, *args, **kwargs)` after each client request is created,
and before the request is sent\*. This can be used to create extensions by adding result callbacks to the
response future.
\* where `response_future` is the :class:`.ResponseFuture` for the request.
Note that the init callback is done on the client thread creating the request, so you may need to consider
synchronization if you have multiple threads. Any callbacks added to the response future will be executed
on the event loop thread, so the normal advice about minimizing cycles and avoiding blocking apply (see Note in
:meth:`.ResponseFuture.add_callbacks`.
See `this example <https://github.com/datastax/python-driver/blob/master/examples/request_init_listener.py>`_ in the
source tree for an example.
"""
self._request_init_callbacks.append((fn, args, kwargs)) |
def complete_multipart_upload(self, bucket, object_name, upload_id,
parts_list, content_type=None, metadata={}):
"""
Complete a multipart upload.
N.B. This can possibly be a slow operation.
@param bucket: The bucket name
@param object_name: The object name
@param upload_id: The multipart upload id
@param parts_list: A List of all the parts
(2-tuples of part sequence number and etag)
@param content_type: The Content-Type of the object
@param metadata: C{dict} containing additional metadata
@return: a C{Deferred} that fires after request is complete
"""
data = self._build_complete_multipart_upload_xml(parts_list)
objectname_plus = '%s?uploadId=%s' % (object_name, upload_id)
details = self._details(
method=b"POST",
url_context=self._url_context(bucket=bucket, object_name=objectname_plus),
headers=self._headers(content_type),
metadata=metadata,
body=data,
)
d = self._submit(self._query_factory(details))
# TODO - handle error responses
d.addCallback(
lambda (response, body): MultipartCompletionResponse.from_xml(body)
)
return d | Complete a multipart upload.
N.B. This can possibly be a slow operation.
@param bucket: The bucket name
@param object_name: The object name
@param upload_id: The multipart upload id
@param parts_list: A List of all the parts
(2-tuples of part sequence number and etag)
@param content_type: The Content-Type of the object
@param metadata: C{dict} containing additional metadata
@return: a C{Deferred} that fires after request is complete | Below is the the instruction that describes the task:
### Input:
Complete a multipart upload.
N.B. This can possibly be a slow operation.
@param bucket: The bucket name
@param object_name: The object name
@param upload_id: The multipart upload id
@param parts_list: A List of all the parts
(2-tuples of part sequence number and etag)
@param content_type: The Content-Type of the object
@param metadata: C{dict} containing additional metadata
@return: a C{Deferred} that fires after request is complete
### Response:
def complete_multipart_upload(self, bucket, object_name, upload_id,
parts_list, content_type=None, metadata={}):
"""
Complete a multipart upload.
N.B. This can possibly be a slow operation.
@param bucket: The bucket name
@param object_name: The object name
@param upload_id: The multipart upload id
@param parts_list: A List of all the parts
(2-tuples of part sequence number and etag)
@param content_type: The Content-Type of the object
@param metadata: C{dict} containing additional metadata
@return: a C{Deferred} that fires after request is complete
"""
data = self._build_complete_multipart_upload_xml(parts_list)
objectname_plus = '%s?uploadId=%s' % (object_name, upload_id)
details = self._details(
method=b"POST",
url_context=self._url_context(bucket=bucket, object_name=objectname_plus),
headers=self._headers(content_type),
metadata=metadata,
body=data,
)
d = self._submit(self._query_factory(details))
# TODO - handle error responses
d.addCallback(
lambda (response, body): MultipartCompletionResponse.from_xml(body)
)
return d |
def filter_traceback(error, tb, ignore_pkg=CURRENT_PACKAGE):
"""Filtered out all parent stacktraces starting with the given stacktrace that has
a given variable name in its globals.
"""
if not isinstance(tb, types.TracebackType):
return tb
def in_namespace(n):
return n and (n.startswith(ignore_pkg + '.') or n == ignore_pkg)
# Skip test runner traceback levels
while tb and in_namespace(tb.tb_frame.f_globals['__package__']):
tb = tb.tb_next
starting_tb = tb
limit = 0
while tb and not in_namespace(tb.tb_frame.f_globals['__package__']):
tb = tb.tb_next
limit += 1
return ''.join(traceback.format_exception(error.__class__, error, starting_tb, limit)) | Filtered out all parent stacktraces starting with the given stacktrace that has
a given variable name in its globals. | Below is the the instruction that describes the task:
### Input:
Filtered out all parent stacktraces starting with the given stacktrace that has
a given variable name in its globals.
### Response:
def filter_traceback(error, tb, ignore_pkg=CURRENT_PACKAGE):
"""Filtered out all parent stacktraces starting with the given stacktrace that has
a given variable name in its globals.
"""
if not isinstance(tb, types.TracebackType):
return tb
def in_namespace(n):
return n and (n.startswith(ignore_pkg + '.') or n == ignore_pkg)
# Skip test runner traceback levels
while tb and in_namespace(tb.tb_frame.f_globals['__package__']):
tb = tb.tb_next
starting_tb = tb
limit = 0
while tb and not in_namespace(tb.tb_frame.f_globals['__package__']):
tb = tb.tb_next
limit += 1
return ''.join(traceback.format_exception(error.__class__, error, starting_tb, limit)) |
def fcoe_get_login_output_fcoe_login_list_fcoe_login_connected_peer_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_login = ET.Element("fcoe_get_login")
config = fcoe_get_login
output = ET.SubElement(fcoe_get_login, "output")
fcoe_login_list = ET.SubElement(output, "fcoe-login-list")
fcoe_login_session_mac_key = ET.SubElement(fcoe_login_list, "fcoe-login-session-mac")
fcoe_login_session_mac_key.text = kwargs.pop('fcoe_login_session_mac')
fcoe_login_connected_peer_type = ET.SubElement(fcoe_login_list, "fcoe-login-connected-peer-type")
fcoe_login_connected_peer_type.text = kwargs.pop('fcoe_login_connected_peer_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def fcoe_get_login_output_fcoe_login_list_fcoe_login_connected_peer_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcoe_get_login = ET.Element("fcoe_get_login")
config = fcoe_get_login
output = ET.SubElement(fcoe_get_login, "output")
fcoe_login_list = ET.SubElement(output, "fcoe-login-list")
fcoe_login_session_mac_key = ET.SubElement(fcoe_login_list, "fcoe-login-session-mac")
fcoe_login_session_mac_key.text = kwargs.pop('fcoe_login_session_mac')
fcoe_login_connected_peer_type = ET.SubElement(fcoe_login_list, "fcoe-login-connected-peer-type")
fcoe_login_connected_peer_type.text = kwargs.pop('fcoe_login_connected_peer_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def load_at2_file(cls, filename):
"""Read an AT2 formatted time series.
Parameters
----------
filename: str
Filename to open.
"""
with open(filename) as fp:
next(fp)
description = next(fp).strip()
next(fp)
parts = next(fp).split()
time_step = float(parts[1])
accels = [float(p) for l in fp for p in l.split()]
return cls(filename, description, time_step, accels) | Read an AT2 formatted time series.
Parameters
----------
filename: str
Filename to open. | Below is the the instruction that describes the task:
### Input:
Read an AT2 formatted time series.
Parameters
----------
filename: str
Filename to open.
### Response:
def load_at2_file(cls, filename):
"""Read an AT2 formatted time series.
Parameters
----------
filename: str
Filename to open.
"""
with open(filename) as fp:
next(fp)
description = next(fp).strip()
next(fp)
parts = next(fp).split()
time_step = float(parts[1])
accels = [float(p) for l in fp for p in l.split()]
return cls(filename, description, time_step, accels) |
def save(self, filename, encoding ='ISO-8859-1', standalone='no'):
"""
Stores any element in a svg file (including header).
Calling this method only makes sense if the root element is an svg element
"""
f = codecs.open(filename, 'w', encoding)
s = self.wrap_xml(self.getXML(), encoding, standalone)
#s = s.replace("&", "&amp;")
f.write(s)
f.close() | Stores any element in a svg file (including header).
Calling this method only makes sense if the root element is an svg element | Below is the the instruction that describes the task:
### Input:
Stores any element in a svg file (including header).
Calling this method only makes sense if the root element is an svg element
### Response:
def save(self, filename, encoding ='ISO-8859-1', standalone='no'):
"""
Stores any element in a svg file (including header).
Calling this method only makes sense if the root element is an svg element
"""
f = codecs.open(filename, 'w', encoding)
s = self.wrap_xml(self.getXML(), encoding, standalone)
#s = s.replace("&", "&amp;")
f.write(s)
f.close() |
def _search_env(keys):
"""
Search the environment for the supplied keys, returning the first
one found or None if none was found.
"""
matches = (os.environ[key] for key in keys if key in os.environ)
return next(matches, None) | Search the environment for the supplied keys, returning the first
one found or None if none was found. | Below is the the instruction that describes the task:
### Input:
Search the environment for the supplied keys, returning the first
one found or None if none was found.
### Response:
def _search_env(keys):
"""
Search the environment for the supplied keys, returning the first
one found or None if none was found.
"""
matches = (os.environ[key] for key in keys if key in os.environ)
return next(matches, None) |