code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) | text (string, lengths 164–112k) |
---|---|---|
def processAndSetDefaults(self):
"""
The heart of the 'Instruction' object. This method will make sure that
all fields not entered will be defaulted to a correct value. Also
checks for incongruities in the data entered, if it was by the user.
"""
# INPUT, OUTPUT, GIVEN + BUILDABLE DEPS
if not self.input:
raise ValueError(NO_INPUT_FILE)
if not self.output:
# Build directory must exist, right?
if not self.build_directory:
File()
pass # Can it be built? / reference self.output_format for this
else:
pass # if it is not congruent with other info provided
if not self.build_directory:
pass # Initialize it
for dependency in self.given_dependencies:
            pass # Check if the dependency exists
if self.output_format != self.output.getType():
raise ValueError("")
# Given dependencies must actually exist!
        # output_name must be at a lower extension level than input_name
# The build directory
return | The heart of the 'Instruction' object. This method will make sure that
all fields not entered will be defaulted to a correct value. Also
checks for incongruities in the data entered, if it was by the user. | Below is the instruction that describes the task:
### Input:
The heart of the 'Instruction' object. This method will make sure that
all fields not entered will be defaulted to a correct value. Also
checks for incongruities in the data entered, if it was by the user.
### Response:
def processAndSetDefaults(self):
"""
The heart of the 'Instruction' object. This method will make sure that
all fields not entered will be defaulted to a correct value. Also
checks for incongruities in the data entered, if it was by the user.
"""
# INPUT, OUTPUT, GIVEN + BUILDABLE DEPS
if not self.input:
raise ValueError(NO_INPUT_FILE)
if not self.output:
# Build directory must exist, right?
if not self.build_directory:
File()
pass # Can it be built? / reference self.output_format for this
else:
pass # if it is not congruent with other info provided
if not self.build_directory:
pass # Initialize it
for dependency in self.given_dependencies:
            pass # Check if the dependency exists
if self.output_format != self.output.getType():
raise ValueError("")
# Given dependencies must actually exist!
        # output_name must be at a lower extension level than input_name
# The build directory
return |
def GetATR(self, reader):
"""Return the ATR of the card inserted into the reader."""
atr = "no card inserted"
try:
if not type(reader) is str:
connection = reader.createConnection()
connection.connect()
atr = toHexString(connection.getATR())
connection.disconnect()
except NoCardException:
pass
except CardConnectionException:
pass
        return atr | Return the ATR of the card inserted into the reader. | Below is the instruction that describes the task:
### Input:
Return the ATR of the card inserted into the reader.
### Response:
def GetATR(self, reader):
"""Return the ATR of the card inserted into the reader."""
atr = "no card inserted"
try:
if not type(reader) is str:
connection = reader.createConnection()
connection.connect()
atr = toHexString(connection.getATR())
connection.disconnect()
except NoCardException:
pass
except CardConnectionException:
pass
return atr |
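A minimal usage sketch for the reader-probing row above, assuming the pyscard package that supplies `readers`, `toHexString`, and the two exception classes; `CardProbe` is a hypothetical name for whatever class hosts `GetATR`.

```python
# Hedged sketch: CardProbe is a stand-in for the class that defines GetATR.
from smartcard.System import readers

probe = CardProbe()
for reader in readers():              # pyscard reader objects, not plain strings
    print(reader, "->", probe.GetATR(reader))   # ATR hex string or "no card inserted"
```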
def process_dataset(dataset, models, **kargs):
""" Convert ``dataset`` to processed data using ``models``.
:class:`gvar.dataset.Dataset` (or similar dictionary) object
``dataset`` is processed by each model in list ``models``,
and the results collected into a new dictionary ``pdata`` for use in
:meth:`MultiFitter.lsqfit` and :meth:`MultiFitter.chained_lsqft`.
Assumes that the models have defined method
:meth:`MultiFitterModel.builddataset`. Keyword arguments
``kargs`` are passed on to :func:`gvar.dataset.avg_data` when
averaging the data.
"""
dset = collections.OrderedDict()
for m in MultiFitter.flatten_models(models):
dset[m.datatag] = (
m.builddataset(dataset) if m.ncg <= 1 else
MultiFitter.coarse_grain(m.builddataset(dataset), ncg=m.ncg)
)
return gvar.dataset.avg_data(dset, **kargs) | Convert ``dataset`` to processed data using ``models``.
:class:`gvar.dataset.Dataset` (or similar dictionary) object
``dataset`` is processed by each model in list ``models``,
and the results collected into a new dictionary ``pdata`` for use in
:meth:`MultiFitter.lsqfit` and :meth:`MultiFitter.chained_lsqft`.
Assumes that the models have defined method
:meth:`MultiFitterModel.builddataset`. Keyword arguments
``kargs`` are passed on to :func:`gvar.dataset.avg_data` when
averaging the data. | Below is the instruction that describes the task:
### Input:
Convert ``dataset`` to processed data using ``models``.
:class:`gvar.dataset.Dataset` (or similar dictionary) object
``dataset`` is processed by each model in list ``models``,
and the results collected into a new dictionary ``pdata`` for use in
:meth:`MultiFitter.lsqfit` and :meth:`MultiFitter.chained_lsqft`.
Assumes that the models have defined method
:meth:`MultiFitterModel.builddataset`. Keyword arguments
``kargs`` are passed on to :func:`gvar.dataset.avg_data` when
averaging the data.
### Response:
def process_dataset(dataset, models, **kargs):
""" Convert ``dataset`` to processed data using ``models``.
:class:`gvar.dataset.Dataset` (or similar dictionary) object
``dataset`` is processed by each model in list ``models``,
and the results collected into a new dictionary ``pdata`` for use in
:meth:`MultiFitter.lsqfit` and :meth:`MultiFitter.chained_lsqft`.
Assumes that the models have defined method
:meth:`MultiFitterModel.builddataset`. Keyword arguments
``kargs`` are passed on to :func:`gvar.dataset.avg_data` when
averaging the data.
"""
dset = collections.OrderedDict()
for m in MultiFitter.flatten_models(models):
dset[m.datatag] = (
m.builddataset(dataset) if m.ncg <= 1 else
MultiFitter.coarse_grain(m.builddataset(dataset), ncg=m.ncg)
)
return gvar.dataset.avg_data(dset, **kargs) |
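A hedged sketch of how `process_dataset` is typically driven, assuming the gvar and lsqfit packages; the data file name, `models`, and `prior` are placeholders that must be built elsewhere.

```python
import gvar
import lsqfit

dataset = gvar.dataset.Dataset("corr_data.txt")   # hypothetical measurement file
pdata = process_dataset(dataset, models)          # extra kwargs go to gvar.dataset.avg_data
fitter = lsqfit.MultiFitter(models=models)
fit = fitter.lsqfit(pdata=pdata, prior=prior)     # processed data feeds the fit
print(fit.format())
```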
def _add_open_file(self, file_obj):
"""Add file_obj to the list of open files on the filesystem.
Used internally to manage open files.
The position in the open_files array is the file descriptor number.
Args:
file_obj: File object to be added to open files list.
Returns:
File descriptor number for the file object.
"""
if self._free_fd_heap:
open_fd = heapq.heappop(self._free_fd_heap)
self.open_files[open_fd] = [file_obj]
return open_fd
self.open_files.append([file_obj])
return len(self.open_files) - 1 | Add file_obj to the list of open files on the filesystem.
Used internally to manage open files.
The position in the open_files array is the file descriptor number.
Args:
file_obj: File object to be added to open files list.
Returns:
    File descriptor number for the file object. | Below is the instruction that describes the task:
### Input:
Add file_obj to the list of open files on the filesystem.
Used internally to manage open files.
The position in the open_files array is the file descriptor number.
Args:
file_obj: File object to be added to open files list.
Returns:
File descriptor number for the file object.
### Response:
def _add_open_file(self, file_obj):
"""Add file_obj to the list of open files on the filesystem.
Used internally to manage open files.
The position in the open_files array is the file descriptor number.
Args:
file_obj: File object to be added to open files list.
Returns:
File descriptor number for the file object.
"""
if self._free_fd_heap:
open_fd = heapq.heappop(self._free_fd_heap)
self.open_files[open_fd] = [file_obj]
return open_fd
self.open_files.append([file_obj])
return len(self.open_files) - 1 |
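The descriptor-reuse logic above can be shown in isolation: freed descriptor numbers sit in a min-heap, so the lowest free slot is handed out before the list grows, mirroring POSIX file-descriptor allocation. The toy class below is a sketch of the same bookkeeping, not pyfakefs itself.

```python
import heapq

class FdTable:
    """Toy re-implementation of the open-file bookkeeping shown above."""
    def __init__(self):
        self.open_files = []
        self._free_fd_heap = []

    def add_open_file(self, file_obj):
        if self._free_fd_heap:                       # reuse the lowest freed fd
            open_fd = heapq.heappop(self._free_fd_heap)
            self.open_files[open_fd] = [file_obj]
            return open_fd
        self.open_files.append([file_obj])
        return len(self.open_files) - 1

    def close_fd(self, fd):                          # companion operation
        self.open_files[fd] = None
        heapq.heappush(self._free_fd_heap, fd)

table = FdTable()
fd_a, fd_b = table.add_open_file("a"), table.add_open_file("b")   # -> 0, 1
table.close_fd(fd_a)
assert table.add_open_file("c") == 0   # the freed slot 0 is handed out again
```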
def Handle(self, unused_args, token=None):
"""Build the data structure representing the config."""
sections = {}
for descriptor in config.CONFIG.type_infos:
if descriptor.section in sections:
continue
section_data = {}
for parameter in self._ListParametersInSection(descriptor.section):
section_data[parameter] = ApiConfigOption().InitFromConfigOption(
parameter)
sections[descriptor.section] = section_data
result = ApiGetConfigResult()
for section_name in sorted(sections):
section = sections[section_name]
api_section = ApiConfigSection(name=section_name)
api_section.options = []
for param_name in sorted(section):
api_section.options.append(section[param_name])
result.sections.append(api_section)
    return result | Build the data structure representing the config. | Below is the instruction that describes the task:
### Input:
Build the data structure representing the config.
### Response:
def Handle(self, unused_args, token=None):
"""Build the data structure representing the config."""
sections = {}
for descriptor in config.CONFIG.type_infos:
if descriptor.section in sections:
continue
section_data = {}
for parameter in self._ListParametersInSection(descriptor.section):
section_data[parameter] = ApiConfigOption().InitFromConfigOption(
parameter)
sections[descriptor.section] = section_data
result = ApiGetConfigResult()
for section_name in sorted(sections):
section = sections[section_name]
api_section = ApiConfigSection(name=section_name)
api_section.options = []
for param_name in sorted(section):
api_section.options.append(section[param_name])
result.sections.append(api_section)
return result |
def update_range(self, share_name, directory_name, file_name, data,
start_range, end_range, validate_content=False, timeout=None):
'''
Writes the bytes specified by the request body into the specified range.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param bytes data:
Content of the range.
:param int start_range:
Start of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool validate_content:
If true, calculates an MD5 hash of the page content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
file.
:param int timeout:
The timeout parameter is expressed in seconds.
'''
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
_validate_not_none('data', data)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(share_name, directory_name, file_name)
request.query = {
'comp': 'range',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-write': 'update',
}
_validate_and_format_range_headers(
request, start_range, end_range)
request.body = _get_data_bytes_only('data', data)
if validate_content:
computed_md5 = _get_content_md5(request.body)
request.headers['Content-MD5'] = _to_str(computed_md5)
self._perform_request(request) | Writes the bytes specified by the request body into the specified range.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param bytes data:
Content of the range.
:param int start_range:
Start of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool validate_content:
If true, calculates an MD5 hash of the page content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
file.
:param int timeout:
    The timeout parameter is expressed in seconds. | Below is the instruction that describes the task:
### Input:
Writes the bytes specified by the request body into the specified range.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param bytes data:
Content of the range.
:param int start_range:
Start of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool validate_content:
If true, calculates an MD5 hash of the page content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
file.
:param int timeout:
The timeout parameter is expressed in seconds.
### Response:
def update_range(self, share_name, directory_name, file_name, data,
start_range, end_range, validate_content=False, timeout=None):
'''
Writes the bytes specified by the request body into the specified range.
:param str share_name:
Name of existing share.
:param str directory_name:
The path to the directory.
:param str file_name:
Name of existing file.
:param bytes data:
Content of the range.
:param int start_range:
Start of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param int end_range:
End of byte range to use for updating a section of the file.
The range can be up to 4 MB in size.
The start_range and end_range params are inclusive.
Ex: start_range=0, end_range=511 will download first 512 bytes of file.
:param bool validate_content:
If true, calculates an MD5 hash of the page content. The storage
service checks the hash of the content that has arrived
with the hash that was sent. This is primarily valuable for detecting
bitflips on the wire if using http instead of https as https (the default)
will already validate. Note that this MD5 hash is not stored with the
file.
:param int timeout:
The timeout parameter is expressed in seconds.
'''
_validate_not_none('share_name', share_name)
_validate_not_none('file_name', file_name)
_validate_not_none('data', data)
request = HTTPRequest()
request.method = 'PUT'
request.host_locations = self._get_host_locations()
request.path = _get_path(share_name, directory_name, file_name)
request.query = {
'comp': 'range',
'timeout': _int_to_str(timeout),
}
request.headers = {
'x-ms-write': 'update',
}
_validate_and_format_range_headers(
request, start_range, end_range)
request.body = _get_data_bytes_only('data', data)
if validate_content:
computed_md5 = _get_content_md5(request.body)
request.headers['Content-MD5'] = _to_str(computed_md5)
self._perform_request(request) |
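A hedged usage sketch, assuming the legacy azure-storage-file SDK whose `FileService` exposes `update_range` with the signature shown above; the account credentials, share, and file names are placeholders.

```python
from azure.storage.file import FileService

service = FileService(account_name="myaccount", account_key="<key>")
payload = b"\x00" * 512
# Overwrite bytes 0..511 of an existing file; start/end of the range are inclusive.
service.update_range("myshare", "mydir", "report.bin", payload,
                     start_range=0, end_range=len(payload) - 1,
                     validate_content=True)
```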
def config(*args, **attrs):
"""Override configuration"""
attrs.setdefault("metavar", "KEY=VALUE")
attrs.setdefault("multiple", True)
    return option(config, *args, **attrs) | Override configuration | Below is the instruction that describes the task:
### Input:
Override configuration
### Response:
def config(*args, **attrs):
"""Override configuration"""
attrs.setdefault("metavar", "KEY=VALUE")
attrs.setdefault("multiple", True)
return option(config, *args, **attrs) |
def descr2tabledef(descr, table_type='binary', write_bitcols=False):
"""
Create a FITS table def from the input numpy descriptor.
parameters
----------
descr: list
A numpy recarray type descriptor array.dtype.descr
returns
-------
names, formats, dims: tuple of lists
        These are the ttype, tform and tdim header entries
for each field. dim entries may be None
"""
names = []
formats = []
dims = []
for d in descr:
"""
npy_dtype = d[1][1:]
if is_ascii and npy_dtype in ['u1','i1']:
raise ValueError("1-byte integers are not supported for "
"ascii tables")
"""
if d[1][1] == 'O':
raise ValueError(
'cannot automatically declare a var column without '
'some data to determine max len')
name, form, dim = _npy2fits(
d, table_type=table_type, write_bitcols=write_bitcols)
if name == '':
raise ValueError("field name is an empty string")
"""
if is_ascii:
if dim is not None:
raise ValueError("array columns are not supported "
"for ascii tables")
"""
names.append(name)
formats.append(form)
dims.append(dim)
return names, formats, dims | Create a FITS table def from the input numpy descriptor.
parameters
----------
descr: list
A numpy recarray type descriptor array.dtype.descr
returns
-------
names, formats, dims: tuple of lists
    These are the ttype, tform and tdim header entries
    for each field. dim entries may be None | Below is the instruction that describes the task:
### Input:
Create a FITS table def from the input numpy descriptor.
parameters
----------
descr: list
A numpy recarray type descriptor array.dtype.descr
returns
-------
names, formats, dims: tuple of lists
    These are the ttype, tform and tdim header entries
for each field. dim entries may be None
### Response:
def descr2tabledef(descr, table_type='binary', write_bitcols=False):
"""
Create a FITS table def from the input numpy descriptor.
parameters
----------
descr: list
A numpy recarray type descriptor array.dtype.descr
returns
-------
names, formats, dims: tuple of lists
        These are the ttype, tform and tdim header entries
for each field. dim entries may be None
"""
names = []
formats = []
dims = []
for d in descr:
"""
npy_dtype = d[1][1:]
if is_ascii and npy_dtype in ['u1','i1']:
raise ValueError("1-byte integers are not supported for "
"ascii tables")
"""
if d[1][1] == 'O':
raise ValueError(
'cannot automatically declare a var column without '
'some data to determine max len')
name, form, dim = _npy2fits(
d, table_type=table_type, write_bitcols=write_bitcols)
if name == '':
raise ValueError("field name is an empty string")
"""
if is_ascii:
if dim is not None:
raise ValueError("array columns are not supported "
"for ascii tables")
"""
names.append(name)
formats.append(form)
dims.append(dim)
return names, formats, dims |
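A short sketch of how `descr2tabledef` maps a numpy dtype descriptor onto FITS column definitions, assuming the surrounding fitsio-style module (including `_npy2fits`) is importable; the exact TFORM strings come from that helper, so the printed values are indicative only.

```python
import numpy as np

arr = np.zeros(3, dtype=[("x", "f8"), ("flux", "f4", 2), ("name", "S10")])
names, formats, dims = descr2tabledef(arr.dtype.descr)
print(names)     # ['x', 'flux', 'name']
print(formats)   # e.g. ['1D', '2E', '10A'], depending on _npy2fits
print(dims)      # TDIM entries; None for scalar columns
```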
def parse_firefox (url_data):
"""Parse a Firefox3 bookmark file."""
filename = url_data.get_os_filename()
for url, name in firefox.parse_bookmark_file(filename):
        url_data.add_url(url, name=name) | Parse a Firefox3 bookmark file. | Below is the instruction that describes the task:
### Input:
Parse a Firefox3 bookmark file.
### Response:
def parse_firefox (url_data):
"""Parse a Firefox3 bookmark file."""
filename = url_data.get_os_filename()
for url, name in firefox.parse_bookmark_file(filename):
url_data.add_url(url, name=name) |
def reset(self, addresses):
"""
Remove all PTR records from the given address
:type addresses: List[str]
:param addresses: (List[str]) The IP Address to reset
:return: (bool) True in case of success, False in case of failure
"""
request = self._call(SetEnqueueResetReverseDns.SetEnqueueResetReverseDns, IPs=addresses)
response = request.commit()
return response['Success'] | Remove all PTR records from the given address
:type addresses: List[str]
:param addresses: (List[str]) The IP Address to reset
:return: (bool) True in case of success, False in case of failure | Below is the instruction that describes the task:
### Input:
Remove all PTR records from the given address
:type addresses: List[str]
:param addresses: (List[str]) The IP Address to reset
:return: (bool) True in case of success, False in case of failure
### Response:
def reset(self, addresses):
"""
Remove all PTR records from the given address
:type addresses: List[str]
:param addresses: (List[str]) The IP Address to reset
:return: (bool) True in case of success, False in case of failure
"""
request = self._call(SetEnqueueResetReverseDns.SetEnqueueResetReverseDns, IPs=addresses)
response = request.commit()
return response['Success'] |
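A hedged usage sketch for the PTR-reset row above; `dns_api` is a stand-in for whatever client object actually exposes `reset`, since only the call shape is taken from the code shown.

```python
# dns_api is hypothetical; only reset()'s argument and return value follow the code above.
ok = dns_api.reset(["203.0.113.10", "203.0.113.11"])
if not ok:
    raise RuntimeError("PTR reset request was not accepted")
```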
def _yield_spatial_table(patch, div, spp_col, count_col, x_col, y_col):
"""
Calculates an empirical spatial table
Yields
-------
DataFrame
Spatial table for each division. See Notes.
Notes
-----
The spatial table is the precursor to the SAR, EAR, and grid-based
commonality metrics. Each row in the table corresponds to a cell created by
a given division. Columns are cell_loc (within the grid defined by the
division), spp_set, n_spp, and n_individs.
"""
# Catch error if you don't use ; after divs in comm_grid in MacroecoDesktop
try:
div_split_list = div.replace(';','').split(',')
except AttributeError:
div_split_list = str(div).strip("()").split(',')
div_split = (x_col + ':' + div_split_list[0] + ';' +
y_col + ':' + div_split_list[1])
# Get cell_locs
# Requires _parse_splits and _product functions to go y inside of x
x_starts, x_ends = _col_starts_ends(patch, x_col, div_split_list[0])
x_offset = (x_ends[0] - x_starts[0]) / 2
x_locs = x_starts + x_offset
y_starts, y_ends = _col_starts_ends(patch, y_col, div_split_list[1])
y_offset = (y_ends[0] - y_starts[0]) / 2
y_locs = y_starts + y_offset
cell_locs = _product(x_locs, y_locs)
# Get spp set and count for all cells
n_spp_list = [] # Number of species in cell
n_individs_list = []
spp_set_list = [] # Set object giving unique species IDs in cell
for cellstring, cellpatch in _yield_subpatches(patch,div_split,name='div'):
spp_set = set(np.unique(cellpatch.table[spp_col]))
spp_set_list.append(spp_set)
n_spp_list.append(len(spp_set))
n_individs_list.append(np.sum(cellpatch.table[count_col]))
# Create and return dataframe
df = pd.DataFrame({'cell_loc': cell_locs, 'spp_set': spp_set_list,
'n_spp': n_spp_list, 'n_individs': n_individs_list})
return df | Calculates an empirical spatial table
Yields
-------
DataFrame
Spatial table for each division. See Notes.
Notes
-----
The spatial table is the precursor to the SAR, EAR, and grid-based
commonality metrics. Each row in the table corresponds to a cell created by
a given division. Columns are cell_loc (within the grid defined by the
division), spp_set, n_spp, and n_individs. | Below is the instruction that describes the task:
### Input:
Calculates an empirical spatial table
Yields
-------
DataFrame
Spatial table for each division. See Notes.
Notes
-----
The spatial table is the precursor to the SAR, EAR, and grid-based
commonality metrics. Each row in the table corresponds to a cell created by
a given division. Columns are cell_loc (within the grid defined by the
division), spp_set, n_spp, and n_individs.
### Response:
def _yield_spatial_table(patch, div, spp_col, count_col, x_col, y_col):
"""
Calculates an empirical spatial table
Yields
-------
DataFrame
Spatial table for each division. See Notes.
Notes
-----
The spatial table is the precursor to the SAR, EAR, and grid-based
commonality metrics. Each row in the table corresponds to a cell created by
a given division. Columns are cell_loc (within the grid defined by the
division), spp_set, n_spp, and n_individs.
"""
# Catch error if you don't use ; after divs in comm_grid in MacroecoDesktop
try:
div_split_list = div.replace(';','').split(',')
except AttributeError:
div_split_list = str(div).strip("()").split(',')
div_split = (x_col + ':' + div_split_list[0] + ';' +
y_col + ':' + div_split_list[1])
# Get cell_locs
# Requires _parse_splits and _product functions to go y inside of x
x_starts, x_ends = _col_starts_ends(patch, x_col, div_split_list[0])
x_offset = (x_ends[0] - x_starts[0]) / 2
x_locs = x_starts + x_offset
y_starts, y_ends = _col_starts_ends(patch, y_col, div_split_list[1])
y_offset = (y_ends[0] - y_starts[0]) / 2
y_locs = y_starts + y_offset
cell_locs = _product(x_locs, y_locs)
# Get spp set and count for all cells
n_spp_list = [] # Number of species in cell
n_individs_list = []
spp_set_list = [] # Set object giving unique species IDs in cell
for cellstring, cellpatch in _yield_subpatches(patch,div_split,name='div'):
spp_set = set(np.unique(cellpatch.table[spp_col]))
spp_set_list.append(spp_set)
n_spp_list.append(len(spp_set))
n_individs_list.append(np.sum(cellpatch.table[count_col]))
# Create and return dataframe
df = pd.DataFrame({'cell_loc': cell_locs, 'spp_set': spp_set_list,
'n_spp': n_spp_list, 'n_individs': n_individs_list})
return df |
def checkJobGraphAcylic(self):
"""
:raises toil.job.JobGraphDeadlockException: if the connected component \
of jobs containing this job contains any cycles of child/followOn dependencies \
in the *augmented job graph* (see below). Such cycles are not allowed \
in valid job graphs.
A follow-on edge (A, B) between two jobs A and B is equivalent \
to adding a child edge to B from (1) A, (2) from each child of A, \
and (3) from the successors of each child of A. We call each such edge \
an edge an "implied" edge. The augmented job graph is a job graph including \
all the implied edges.
For a job graph G = (V, E) the algorithm is ``O(|V|^2)``. It is ``O(|V| + |E|)`` for \
a graph with no follow-ons. The former follow-on case could be improved!
"""
#Get the root jobs
roots = self.getRootJobs()
if len(roots) == 0:
raise JobGraphDeadlockException("Graph contains no root jobs due to cycles")
#Get implied edges
extraEdges = self._getImpliedEdges(roots)
#Check for directed cycles in the augmented graph
visited = set()
for root in roots:
root._checkJobGraphAcylicDFS([], visited, extraEdges) | :raises toil.job.JobGraphDeadlockException: if the connected component \
of jobs containing this job contains any cycles of child/followOn dependencies \
in the *augmented job graph* (see below). Such cycles are not allowed \
in valid job graphs.
A follow-on edge (A, B) between two jobs A and B is equivalent \
to adding a child edge to B from (1) A, (2) from each child of A, \
and (3) from the successors of each child of A. We call each such edge \
an edge an "implied" edge. The augmented job graph is a job graph including \
all the implied edges.
For a job graph G = (V, E) the algorithm is ``O(|V|^2)``. It is ``O(|V| + |E|)`` for \
a graph with no follow-ons. The former follow-on case could be improved! | Below is the instruction that describes the task:
### Input:
:raises toil.job.JobGraphDeadlockException: if the connected component \
of jobs containing this job contains any cycles of child/followOn dependencies \
in the *augmented job graph* (see below). Such cycles are not allowed \
in valid job graphs.
A follow-on edge (A, B) between two jobs A and B is equivalent \
to adding a child edge to B from (1) A, (2) from each child of A, \
and (3) from the successors of each child of A. We call each such edge \
an edge an "implied" edge. The augmented job graph is a job graph including \
all the implied edges.
For a job graph G = (V, E) the algorithm is ``O(|V|^2)``. It is ``O(|V| + |E|)`` for \
a graph with no follow-ons. The former follow-on case could be improved!
### Response:
def checkJobGraphAcylic(self):
"""
:raises toil.job.JobGraphDeadlockException: if the connected component \
of jobs containing this job contains any cycles of child/followOn dependencies \
in the *augmented job graph* (see below). Such cycles are not allowed \
in valid job graphs.
A follow-on edge (A, B) between two jobs A and B is equivalent \
to adding a child edge to B from (1) A, (2) from each child of A, \
and (3) from the successors of each child of A. We call each such edge \
an edge an "implied" edge. The augmented job graph is a job graph including \
all the implied edges.
For a job graph G = (V, E) the algorithm is ``O(|V|^2)``. It is ``O(|V| + |E|)`` for \
a graph with no follow-ons. The former follow-on case could be improved!
"""
#Get the root jobs
roots = self.getRootJobs()
if len(roots) == 0:
raise JobGraphDeadlockException("Graph contains no root jobs due to cycles")
#Get implied edges
extraEdges = self._getImpliedEdges(roots)
#Check for directed cycles in the augmented graph
visited = set()
for root in roots:
root._checkJobGraphAcylicDFS([], visited, extraEdges) |
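A minimal sketch of the cycle check above, assuming Toil's `Job` API: a follow-on edge that points back at an ancestor produces an implied child cycle, so the validator raises.

```python
from toil.job import Job, JobGraphDeadlockException

a, b = Job(), Job()
a.addChild(b)
b.addFollowOn(a)              # follow-on back to the parent -> implied cycle
try:
    a.checkJobGraphAcylic()
except JobGraphDeadlockException as exc:
    print("deadlock detected:", exc)
```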
def CreateMenuItem(self, MenuItemId, PluginContext, CaptionText, HintText=u'', IconPath='', Enabled=True,
ContactType=pluginContactTypeAll, MultipleContacts=False):
"""Creates custom menu item in Skype client's "Do More" menus.
:Parameters:
MenuItemId : unicode
Unique identifier for the menu item.
PluginContext : `enums`.pluginContext*
Menu item context. Allows to choose in which client windows will the menu item appear.
CaptionText : unicode
Caption text.
HintText : unicode
            Hint text (optional). Shown when mouse hovers over the menu item.
IconPath : unicode
Path to the icon (optional).
Enabled : bool
Initial state of the menu item. True by default.
ContactType : `enums`.pluginContactType*
In case of `enums.pluginContextContact` tells which contacts the menu item should appear
for. Defaults to `enums.pluginContactTypeAll`.
MultipleContacts : bool
Set to True if multiple contacts should be allowed (defaults to False).
:return: Menu item object.
:rtype: `PluginMenuItem`
"""
cmd = 'CREATE MENU_ITEM %s CONTEXT %s CAPTION %s ENABLED %s' % (tounicode(MenuItemId), PluginContext,
quote(tounicode(CaptionText)), cndexp(Enabled, 'true', 'false'))
if HintText:
cmd += ' HINT %s' % quote(tounicode(HintText))
if IconPath:
cmd += ' ICON %s' % quote(path2unicode(IconPath))
if MultipleContacts:
cmd += ' ENABLE_MULTIPLE_CONTACTS true'
if PluginContext == pluginContextContact:
cmd += ' CONTACT_TYPE_FILTER %s' % ContactType
self._Skype._DoCommand(cmd)
return PluginMenuItem(self._Skype, MenuItemId, CaptionText, HintText, Enabled) | Creates custom menu item in Skype client's "Do More" menus.
:Parameters:
MenuItemId : unicode
Unique identifier for the menu item.
PluginContext : `enums`.pluginContext*
Menu item context. Allows to choose in which client windows will the menu item appear.
CaptionText : unicode
Caption text.
HintText : unicode
    Hint text (optional). Shown when mouse hovers over the menu item.
IconPath : unicode
Path to the icon (optional).
Enabled : bool
Initial state of the menu item. True by default.
ContactType : `enums`.pluginContactType*
In case of `enums.pluginContextContact` tells which contacts the menu item should appear
for. Defaults to `enums.pluginContactTypeAll`.
MultipleContacts : bool
Set to True if multiple contacts should be allowed (defaults to False).
:return: Menu item object.
:rtype: `PluginMenuItem` | Below is the instruction that describes the task:
### Input:
Creates custom menu item in Skype client's "Do More" menus.
:Parameters:
MenuItemId : unicode
Unique identifier for the menu item.
PluginContext : `enums`.pluginContext*
Menu item context. Allows to choose in which client windows will the menu item appear.
CaptionText : unicode
Caption text.
HintText : unicode
    Hint text (optional). Shown when mouse hovers over the menu item.
IconPath : unicode
Path to the icon (optional).
Enabled : bool
Initial state of the menu item. True by default.
ContactType : `enums`.pluginContactType*
In case of `enums.pluginContextContact` tells which contacts the menu item should appear
for. Defaults to `enums.pluginContactTypeAll`.
MultipleContacts : bool
Set to True if multiple contacts should be allowed (defaults to False).
:return: Menu item object.
:rtype: `PluginMenuItem`
### Response:
def CreateMenuItem(self, MenuItemId, PluginContext, CaptionText, HintText=u'', IconPath='', Enabled=True,
ContactType=pluginContactTypeAll, MultipleContacts=False):
"""Creates custom menu item in Skype client's "Do More" menus.
:Parameters:
MenuItemId : unicode
Unique identifier for the menu item.
PluginContext : `enums`.pluginContext*
Menu item context. Allows to choose in which client windows will the menu item appear.
CaptionText : unicode
Caption text.
HintText : unicode
            Hint text (optional). Shown when mouse hovers over the menu item.
IconPath : unicode
Path to the icon (optional).
Enabled : bool
Initial state of the menu item. True by default.
ContactType : `enums`.pluginContactType*
In case of `enums.pluginContextContact` tells which contacts the menu item should appear
for. Defaults to `enums.pluginContactTypeAll`.
MultipleContacts : bool
Set to True if multiple contacts should be allowed (defaults to False).
:return: Menu item object.
:rtype: `PluginMenuItem`
"""
cmd = 'CREATE MENU_ITEM %s CONTEXT %s CAPTION %s ENABLED %s' % (tounicode(MenuItemId), PluginContext,
quote(tounicode(CaptionText)), cndexp(Enabled, 'true', 'false'))
if HintText:
cmd += ' HINT %s' % quote(tounicode(HintText))
if IconPath:
cmd += ' ICON %s' % quote(path2unicode(IconPath))
if MultipleContacts:
cmd += ' ENABLE_MULTIPLE_CONTACTS true'
if PluginContext == pluginContextContact:
cmd += ' CONTACT_TYPE_FILTER %s' % ContactType
self._Skype._DoCommand(cmd)
return PluginMenuItem(self._Skype, MenuItemId, CaptionText, HintText, Enabled) |
def network_security_groups_list(resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
List all network security groups within a resource group.
:param resource_group: The resource group name to list network security \
groups within.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.network_security_groups_list testgroup
'''
result = {}
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
secgroups = __utils__['azurearm.paged_object_to_list'](
netconn.network_security_groups.list(
resource_group_name=resource_group
)
)
for secgroup in secgroups:
result[secgroup['name']] = secgroup
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result | .. versionadded:: 2019.2.0
List all network security groups within a resource group.
:param resource_group: The resource group name to list network security \
groups within.
CLI Example:
.. code-block:: bash
    salt-call azurearm_network.network_security_groups_list testgroup | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
List all network security groups within a resource group.
:param resource_group: The resource group name to list network security \
groups within.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.network_security_groups_list testgroup
### Response:
def network_security_groups_list(resource_group, **kwargs):
'''
.. versionadded:: 2019.2.0
List all network security groups within a resource group.
:param resource_group: The resource group name to list network security \
groups within.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.network_security_groups_list testgroup
'''
result = {}
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
secgroups = __utils__['azurearm.paged_object_to_list'](
netconn.network_security_groups.list(
resource_group_name=resource_group
)
)
for secgroup in secgroups:
result[secgroup['name']] = secgroup
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
return result |
def ip_geoloc(ip, hit_api=True):
"""
Get IP geolocation.
Args:
ip (str): IP address to use if no data provided.
hit_api (bool): whether to hit api if info not found.
Returns:
str: latitude and longitude, comma-separated.
"""
from ..logs.models import IPInfoCheck
try:
obj = IPInfoCheck.objects.get(ip_address=ip).ip_info
except IPInfoCheck.DoesNotExist:
if hit_api:
try:
obj = IPInfoCheck.check_ip(ip)
except RateExceededError:
return None
else:
return None
return obj.latitude, obj.longitude | Get IP geolocation.
Args:
ip (str): IP address to use if no data provided.
hit_api (bool): whether to hit api if info not found.
Returns:
    str: latitude and longitude, comma-separated. | Below is the instruction that describes the task:
### Input:
Get IP geolocation.
Args:
ip (str): IP address to use if no data provided.
hit_api (bool): whether to hit api if info not found.
Returns:
str: latitude and longitude, comma-separated.
### Response:
def ip_geoloc(ip, hit_api=True):
"""
Get IP geolocation.
Args:
ip (str): IP address to use if no data provided.
hit_api (bool): whether to hit api if info not found.
Returns:
str: latitude and longitude, comma-separated.
"""
from ..logs.models import IPInfoCheck
try:
obj = IPInfoCheck.objects.get(ip_address=ip).ip_info
except IPInfoCheck.DoesNotExist:
if hit_api:
try:
obj = IPInfoCheck.check_ip(ip)
except RateExceededError:
return None
else:
return None
return obj.latitude, obj.longitude |
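A hedged usage sketch; the helper only works inside the Django project that provides the logs app and its `IPInfoCheck` model. Note that, despite the docstring's wording, the code returns a `(latitude, longitude)` tuple rather than a comma-separated string.

```python
coords = ip_geoloc("8.8.8.8")
if coords is None:
    print("no geolocation available (rate limit hit, or lookup skipped)")
else:
    lat, lon = coords          # tuple unpacking matches the return statement above
    print(f"{lat}, {lon}")
```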
def process_url(self, url, retrieve=False):
"""Evaluate a URL as a possible download, and maybe retrieve it"""
if url in self.scanned_urls and not retrieve:
return
self.scanned_urls[url] = True
if not URL_SCHEME(url):
self.process_filename(url)
return
else:
dists = list(distros_for_url(url))
if dists:
if not self.url_ok(url):
return
self.debug("Found link: %s", url)
if dists or not retrieve or url in self.fetched_urls:
list(map(self.add, dists))
return # don't need the actual page
if not self.url_ok(url):
self.fetched_urls[url] = True
return
self.info("Reading %s", url)
self.fetched_urls[url] = True # prevent multiple fetch attempts
f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
if f is None: return
self.fetched_urls[f.url] = True
if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
return
base = f.url # handle redirects
page = f.read()
if not isinstance(page, str): # We are in Python 3 and got bytes. We want str.
if isinstance(f, HTTPError):
# Errors have no charset, assume latin1:
charset = 'latin-1'
else:
charset = f.headers.get_param('charset') or 'latin-1'
page = page.decode(charset, "ignore")
f.close()
for match in HREF.finditer(page):
link = urljoin(base, htmldecode(match.group(1)))
self.process_url(link)
if url.startswith(self.index_url) and getattr(f,'code',None)!=404:
            page = self.process_index(url, page) | Evaluate a URL as a possible download, and maybe retrieve it | Below is the instruction that describes the task:
### Input:
Evaluate a URL as a possible download, and maybe retrieve it
### Response:
def process_url(self, url, retrieve=False):
"""Evaluate a URL as a possible download, and maybe retrieve it"""
if url in self.scanned_urls and not retrieve:
return
self.scanned_urls[url] = True
if not URL_SCHEME(url):
self.process_filename(url)
return
else:
dists = list(distros_for_url(url))
if dists:
if not self.url_ok(url):
return
self.debug("Found link: %s", url)
if dists or not retrieve or url in self.fetched_urls:
list(map(self.add, dists))
return # don't need the actual page
if not self.url_ok(url):
self.fetched_urls[url] = True
return
self.info("Reading %s", url)
self.fetched_urls[url] = True # prevent multiple fetch attempts
f = self.open_url(url, "Download error on %s: %%s -- Some packages may not be found!" % url)
if f is None: return
self.fetched_urls[f.url] = True
if 'html' not in f.headers.get('content-type', '').lower():
f.close() # not html, we can't process it
return
base = f.url # handle redirects
page = f.read()
if not isinstance(page, str): # We are in Python 3 and got bytes. We want str.
if isinstance(f, HTTPError):
# Errors have no charset, assume latin1:
charset = 'latin-1'
else:
charset = f.headers.get_param('charset') or 'latin-1'
page = page.decode(charset, "ignore")
f.close()
for match in HREF.finditer(page):
link = urljoin(base, htmldecode(match.group(1)))
self.process_url(link)
if url.startswith(self.index_url) and getattr(f,'code',None)!=404:
page = self.process_index(url, page) |
def sink_path(cls, project, sink):
"""Return a fully-qualified sink string."""
return google.api_core.path_template.expand(
"projects/{project}/sinks/{sink}", project=project, sink=sink
        ) | Return a fully-qualified sink string. | Below is the instruction that describes the task:
### Input:
Return a fully-qualified sink string.
### Response:
def sink_path(cls, project, sink):
"""Return a fully-qualified sink string."""
return google.api_core.path_template.expand(
"projects/{project}/sinks/{sink}", project=project, sink=sink
) |
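The expansion above is a plain template fill; a hedged example, with `SomeLoggingClient` standing in for whichever generated client class defines the method.

```python
path = SomeLoggingClient.sink_path("my-project", "my-sink")
print(path)    # projects/my-project/sinks/my-sink
```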
def set_logger(self):
"""Method to build the base logging system. By default, logging level
is set to INFO."""
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
logger_file = os.path.join(self.logs_path, 'dingtalk_sdk.logs')
logger_handler = logging.FileHandler(logger_file)
logger_handler.setLevel(logging.INFO)
logger_formatter = logging.Formatter(
'[%(asctime)s | %(name)s | %(levelname)s] %(message)s'
)
logger_handler.setFormatter(logger_formatter)
logger.addHandler(logger_handler)
return logger | Method to build the base logging system. By default, logging level
is set to INFO. | Below is the instruction that describes the task:
### Input:
Method to build the base logging system. By default, logging level
is set to INFO.
### Response:
def set_logger(self):
"""Method to build the base logging system. By default, logging level
is set to INFO."""
logger = logging.getLogger(__name__)
logger.setLevel(level=logging.INFO)
logger_file = os.path.join(self.logs_path, 'dingtalk_sdk.logs')
logger_handler = logging.FileHandler(logger_file)
logger_handler.setLevel(logging.INFO)
logger_formatter = logging.Formatter(
'[%(asctime)s | %(name)s | %(levelname)s] %(message)s'
)
logger_handler.setFormatter(logger_formatter)
logger.addHandler(logger_handler)
return logger |
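A hedged usage sketch; `DingTalkSDK` is a hypothetical stand-in for the class that owns `logs_path` and the `set_logger` method above.

```python
client = DingTalkSDK(logs_path="/var/log/dingtalk")   # hypothetical constructor
log = client.set_logger()
log.info("access token refreshed")   # appended to /var/log/dingtalk/dingtalk_sdk.logs
```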
def send(self, data):
"""
Send encoded instructions to Guacamole guacd server.
"""
self.logger.debug('Sending data: %s' % data)
        self.client.sendall(data.encode()) | Send encoded instructions to Guacamole guacd server. | Below is the instruction that describes the task:
### Input:
Send encoded instructions to Guacamole guacd server.
### Response:
def send(self, data):
"""
Send encoded instructions to Guacamole guacd server.
"""
self.logger.debug('Sending data: %s' % data)
self.client.sendall(data.encode()) |
def _stripStrList(self, raw_str, stop_strs):
'''
        Remove all occurrences of the specified strings from a string.
        args:
            raw_str    the source string
            stop_strs  a list of strings to remove
        return
            str    the filtered string
'''
if type(stop_strs) == list:
for word in stop_strs:
raw_str = self._stripStr(raw_str, word)
return raw_str
else:
            raise Exception('stop_words must be list!') | Remove all occurrences of the specified strings from a string.
args:
    raw_str    the source string
    stop_strs  a list of strings to remove
return
    str    the filtered string | Below is the instruction that describes the task:
### Input:
Remove all occurrences of the specified strings from a string.
args:
    raw_str    the source string
    stop_strs  a list of strings to remove
return
    str    the filtered string
### Response:
def _stripStrList(self, raw_str, stop_strs):
'''
        Remove all occurrences of the specified strings from a string.
        args:
            raw_str    the source string
            stop_strs  a list of strings to remove
        return
            str    the filtered string
'''
if type(stop_strs) == list:
for word in stop_strs:
raw_str = self._stripStr(raw_str, word)
return raw_str
else:
raise Exception('stop_words must be list!') |
def generate_random_string(length=6):
'''
Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
    Test randomness. Try N times and observe no duplication
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True
'''
n = int(length / 2 + 1)
x = binascii.hexlify(os.urandom(n))
s = x[:length]
return s.decode('utf-8') | Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
Test randomness. Try N times and observe no duplication
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True | Below is the instruction that describes the task:
### Input:
Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
Test randomness. Try N times and observe no duplication
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True
### Response:
def generate_random_string(length=6):
'''
Returns a random string of a specified length.
>>> len(generate_random_string(length=25))
25
    Test randomness. Try N times and observe no duplication
>>> N = 100
>>> len(set(generate_random_string(10) for i in range(N))) == N
True
'''
n = int(length / 2 + 1)
x = binascii.hexlify(os.urandom(n))
s = x[:length]
return s.decode('utf-8') |
def getCountGT(self, item):
"""Return number of elements greater than *item*."""
index = bisect.bisect_right(self._list, item)
        return len(self._list) - index | Return number of elements greater than *item*. | Below is the instruction that describes the task:
### Input:
Return number of elements greater than *item*.
### Response:
def getCountGT(self, item):
"""Return number of elements greater than *item*."""
index = bisect.bisect_right(self._list, item)
return len(self._list) - index |
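A toy illustration of the bisect-based count above, wrapped in a minimal sorted container so the snippet runs on its own; the real class is assumed to keep `self._list` sorted.

```python
import bisect

class SortedBag:
    def __init__(self, items):
        self._list = sorted(items)

    def getCountGT(self, item):
        index = bisect.bisect_right(self._list, item)
        return len(self._list) - index

bag = SortedBag([1, 3, 3, 7, 9])
print(bag.getCountGT(3))   # 2 -> only 7 and 9 are strictly greater than 3
```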
def user_set_avatar(self, action=None, quick_key=None, url=None):
"""user/set_avatar
http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar
"""
return self.request("user/set_avatar", QueryParams({
"action": action,
"quick_key": quick_key,
"url": url
})) | user/set_avatar
http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar | Below is the instruction that describes the task:
### Input:
user/set_avatar
http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar
### Response:
def user_set_avatar(self, action=None, quick_key=None, url=None):
"""user/set_avatar
http://www.mediafire.com/developers/core_api/1.3/user/#set_avatar
"""
return self.request("user/set_avatar", QueryParams({
"action": action,
"quick_key": quick_key,
"url": url
})) |
def elements(compounds):
"""
Determine the set of elements present in a list of chemical compounds.
The list of elements is sorted alphabetically.
:param compounds: List of compound formulas and phases, e.g.
['Fe2O3[S1]', 'Al2O3[S1]'].
:returns: List of elements.
"""
elementlist = [parse_compound(compound).count().keys()
for compound in compounds]
return set().union(*elementlist) | Determine the set of elements present in a list of chemical compounds.
The list of elements is sorted alphabetically.
:param compounds: List of compound formulas and phases, e.g.
['Fe2O3[S1]', 'Al2O3[S1]'].
:returns: List of elements. | Below is the instruction that describes the task:
### Input:
Determine the set of elements present in a list of chemical compounds.
The list of elements is sorted alphabetically.
:param compounds: List of compound formulas and phases, e.g.
['Fe2O3[S1]', 'Al2O3[S1]'].
:returns: List of elements.
### Response:
def elements(compounds):
"""
Determine the set of elements present in a list of chemical compounds.
The list of elements is sorted alphabetically.
:param compounds: List of compound formulas and phases, e.g.
['Fe2O3[S1]', 'Al2O3[S1]'].
:returns: List of elements.
"""
elementlist = [parse_compound(compound).count().keys()
for compound in compounds]
return set().union(*elementlist) |
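A hedged usage sketch, assuming the module's `parse_compound` helper is available. Since the function returns a set, callers sort it themselves when an alphabetical list is wanted; the docstring's "sorted" wording describes that convention rather than the return type.

```python
present = elements(['Fe2O3[S1]', 'Al2O3[S1]'])
print(sorted(present))    # ['Al', 'Fe', 'O']
```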
def find_external_links(url, page):
"""Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
for match in REL.finditer(page):
tag, rel = match.groups()
rels = set(map(str.strip, rel.lower().split(',')))
if 'homepage' in rels or 'download' in rels:
for match in HREF.finditer(tag):
yield urljoin(url, htmldecode(match.group(1)))
for tag in ("<th>Home Page", "<th>Download URL"):
pos = page.find(tag)
if pos!=-1:
match = HREF.search(page,pos)
if match:
                yield urljoin(url, htmldecode(match.group(1))) | Find rel="homepage" and rel="download" links in `page`, yielding URLs | Below is the instruction that describes the task:
### Input:
Find rel="homepage" and rel="download" links in `page`, yielding URLs
### Response:
def find_external_links(url, page):
"""Find rel="homepage" and rel="download" links in `page`, yielding URLs"""
for match in REL.finditer(page):
tag, rel = match.groups()
rels = set(map(str.strip, rel.lower().split(',')))
if 'homepage' in rels or 'download' in rels:
for match in HREF.finditer(tag):
yield urljoin(url, htmldecode(match.group(1)))
for tag in ("<th>Home Page", "<th>Download URL"):
pos = page.find(tag)
if pos!=-1:
match = HREF.search(page,pos)
if match:
yield urljoin(url, htmldecode(match.group(1))) |
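A small driving sketch for the link scraper above; the `REL`/`HREF` regexes and `htmldecode` are assumed to come from setuptools' package_index module, and the HTML snippet is made up.

```python
page = '<a href="/project/demo/" rel="homepage">home</a>'
for link in find_external_links("https://example.org/simple/demo/", page):
    print(link)    # expected: https://example.org/project/demo/
```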
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: OrderedDict of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The penultimate output layer of the graph.
module_name: The name of the image module being used.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(
sess, image_lists, label_name, index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
tf.logging.info(
str(how_many_bottlenecks) + ' bottleneck files created.') | Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: OrderedDict of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The penultimate output layer of the graph.
module_name: The name of the image module being used.
Returns:
  Nothing. | Below is the instruction that describes the task:
### Input:
Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: OrderedDict of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The penultimate output layer of the graph.
module_name: The name of the image module being used.
Returns:
Nothing.
### Response:
def cache_bottlenecks(sess, image_lists, image_dir, bottleneck_dir,
jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name):
"""Ensures all the training, testing, and validation bottlenecks are cached.
Because we're likely to read the same image multiple times (if there are no
distortions applied during training) it can speed things up a lot if we
calculate the bottleneck layer values once for each image during
preprocessing, and then just read those cached values repeatedly during
training. Here we go through all the images we've found, calculate those
values, and save them off.
Args:
sess: The current active TensorFlow Session.
image_lists: OrderedDict of training images for each label.
image_dir: Root folder string of the subfolders containing the training
images.
bottleneck_dir: Folder string holding cached files of bottleneck values.
jpeg_data_tensor: Input tensor for jpeg data from file.
decoded_image_tensor: The output of decoding and resizing the image.
resized_input_tensor: The input node of the recognition graph.
bottleneck_tensor: The penultimate output layer of the graph.
module_name: The name of the image module being used.
Returns:
Nothing.
"""
how_many_bottlenecks = 0
ensure_dir_exists(bottleneck_dir)
for label_name, label_lists in image_lists.items():
for category in ['training', 'testing', 'validation']:
category_list = label_lists[category]
for index, unused_base_name in enumerate(category_list):
get_or_create_bottleneck(
sess, image_lists, label_name, index, image_dir, category,
bottleneck_dir, jpeg_data_tensor, decoded_image_tensor,
resized_input_tensor, bottleneck_tensor, module_name)
how_many_bottlenecks += 1
if how_many_bottlenecks % 100 == 0:
tf.logging.info(
str(how_many_bottlenecks) + ' bottleneck files created.') |
def fit(self,
data: List[str],
return_tokenized_data: bool = False) -> Union[None, List[List[str]]]:
"""
TODO: update docs
        Apply cleaner and tokenizer to raw data and build vocabulary.
Parameters
----------
data : List[str]
These are raw documents, which are a list of strings. ex:
[["The quick brown fox"], ["jumps over the lazy dog"]]
return_tokenized_data : bool
            Return the tokenized strings. This is primarily used for debugging
purposes.
Returns
-------
None or List[List[str]]
if return_tokenized_data=True then will return tokenized documents,
otherwise will not return anything.
"""
self.__clear_data()
now = get_time()
logging.warning(f'....tokenizing data')
tokenized_data = self.parallel_process_text(data)
if not self.padding_maxlen:
# its not worth the overhead to parallelize document length counts
length_counts = map(count_len, tokenized_data)
self.document_length_histogram = Counter(length_counts)
self.generate_doc_length_stats()
# Learn corpus on single thread
logging.warning(f'(1/2) done. {time_diff(now)} sec')
logging.warning(f'....building corpus')
now = get_time()
self.indexer = custom_Indexer(num_words=self.keep_n)
self.indexer.fit_on_tokenized_texts(tokenized_data)
# Build Dictionary accounting For 0 padding, and reserve 1 for unknown and rare Words
self.token2id = self.indexer.word_index
self.id2token = {v: k for k, v in self.token2id.items()}
self.n_tokens = max(self.indexer.word_index.values())
# logging
logging.warning(f'(2/2) done. {time_diff(now)} sec')
logging.warning(f'Finished parsing {self.indexer.document_count:,} documents.')
if return_tokenized_data:
return tokenized_data | TODO: update docs
Apply cleaner and tokenizer to raw data and build vocabulary.
Parameters
----------
data : List[str]
These are raw documents, which are a list of strings. ex:
[["The quick brown fox"], ["jumps over the lazy dog"]]
return_tokenized_data : bool
    Return the tokenized strings. This is primarily used for debugging
purposes.
Returns
-------
None or List[List[str]]
if return_tokenized_data=True then will return tokenized documents,
    otherwise will not return anything. | Below is the instruction that describes the task:
### Input:
TODO: update docs
Apply cleaner and tokenizer to raw data and build vocabulary.
Parameters
----------
data : List[str]
These are raw documents, which are a list of strings. ex:
[["The quick brown fox"], ["jumps over the lazy dog"]]
return_tokenized_data : bool
    Return the tokenized strings. This is primarily used for debugging
purposes.
Returns
-------
None or List[List[str]]
if return_tokenized_data=True then will return tokenized documents,
otherwise will not return anything.
### Response:
def fit(self,
data: List[str],
return_tokenized_data: bool = False) -> Union[None, List[List[str]]]:
"""
TODO: update docs
Apply cleaner and tokenizer to raw data and build vocabulary.
Parameters
----------
data : List[str]
These are raw documents, which are a list of strings. ex:
[["The quick brown fox"], ["jumps over the lazy dog"]]
return_tokenized_data : bool
Return the tokenized strings. This is primarily used for debugging
purposes.
Returns
-------
None or List[List[str]]
if return_tokenized_data=True then will return tokenized documents,
otherwise will not return anything.
"""
self.__clear_data()
now = get_time()
logging.warning(f'....tokenizing data')
tokenized_data = self.parallel_process_text(data)
if not self.padding_maxlen:
# it's not worth the overhead to parallelize document length counts
length_counts = map(count_len, tokenized_data)
self.document_length_histogram = Counter(length_counts)
self.generate_doc_length_stats()
# Learn corpus on single thread
logging.warning(f'(1/2) done. {time_diff(now)} sec')
logging.warning(f'....building corpus')
now = get_time()
self.indexer = custom_Indexer(num_words=self.keep_n)
self.indexer.fit_on_tokenized_texts(tokenized_data)
# Build Dictionary accounting For 0 padding, and reserve 1 for unknown and rare Words
self.token2id = self.indexer.word_index
self.id2token = {v: k for k, v in self.token2id.items()}
self.n_tokens = max(self.indexer.word_index.values())
# logging
logging.warning(f'(2/2) done. {time_diff(now)} sec')
logging.warning(f'Finished parsing {self.indexer.document_count:,} documents.')
if return_tokenized_data:
return tokenized_data |
def json_file_response(obj=None, pid=None, record=None, status=None):
"""JSON Files/File serializer.
:param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance or
a :class:`invenio_records_files.api.FilesIterator` if it's a list of
files.
:param pid: PID value. (not used)
:param record: The record metadata. (not used)
:param status: The HTTP status code.
:returns: A Flask response with JSON data.
:rtype: :py:class:`flask.Response`.
"""
from invenio_records_files.api import FilesIterator
if isinstance(obj, FilesIterator):
return json_files_serializer(obj, status=status)
else:
return json_file_serializer(obj, status=status) | JSON Files/File serializer.
:param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance or
a :class:`invenio_records_files.api.FilesIterator` if it's a list of
files.
:param pid: PID value. (not used)
:param record: The record metadata. (not used)
:param status: The HTTP status code.
:returns: A Flask response with JSON data.
:rtype: :py:class:`flask.Response`. | Below is the instruction that describes the task:
### Input:
JSON Files/File serializer.
:param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance or
a :class:`invenio_records_files.api.FilesIterator` if it's a list of
files.
:param pid: PID value. (not used)
:param record: The record metadata. (not used)
:param status: The HTTP status code.
:returns: A Flask response with JSON data.
:rtype: :py:class:`flask.Response`.
### Response:
def json_file_response(obj=None, pid=None, record=None, status=None):
"""JSON Files/File serializer.
:param obj: A :class:`invenio_files_rest.models.ObjectVersion` instance or
a :class:`invenio_records_files.api.FilesIterator` if it's a list of
files.
:param pid: PID value. (not used)
:param record: The record metadata. (not used)
:param status: The HTTP status code.
:returns: A Flask response with JSON data.
:rtype: :py:class:`flask.Response`.
"""
from invenio_records_files.api import FilesIterator
if isinstance(obj, FilesIterator):
return json_files_serializer(obj, status=status)
else:
return json_file_serializer(obj, status=status) |
def play_sound(self, subject='Find My iPhone Alert'):
""" Send a request to the device to play a sound.
It's possible to pass a custom message by changing the `subject`.
"""
data = json.dumps({
'device': self.content['id'],
'subject': subject,
'clientContext': {
'fmly': True
}
})
self.session.post(
self.sound_url,
params=self.params,
data=data
) | Send a request to the device to play a sound.
It's possible to pass a custom message by changing the `subject`. | Below is the instruction that describes the task:
### Input:
Send a request to the device to play a sound.
It's possible to pass a custom message by changing the `subject`.
### Response:
def play_sound(self, subject='Find My iPhone Alert'):
""" Send a request to the device to play a sound.
It's possible to pass a custom message by changing the `subject`.
"""
data = json.dumps({
'device': self.content['id'],
'subject': subject,
'clientContext': {
'fmly': True
}
})
self.session.post(
self.sound_url,
params=self.params,
data=data
) |
def infer_unaryop(self, context=None):
"""Infer what an UnaryOp should return when evaluated."""
yield from _filter_operation_errors(
self, _infer_unaryop, context, util.BadUnaryOperationMessage
)
return dict(node=self, context=context) | Infer what an UnaryOp should return when evaluated. | Below is the instruction that describes the task:
### Input:
Infer what an UnaryOp should return when evaluated.
### Response:
def infer_unaryop(self, context=None):
"""Infer what an UnaryOp should return when evaluated."""
yield from _filter_operation_errors(
self, _infer_unaryop, context, util.BadUnaryOperationMessage
)
return dict(node=self, context=context) |
def parseEntityRef(self):
"""parse ENTITY references declarations [68] EntityRef ::=
'&' Name ';' [ WFC: Entity Declared ] In a document
without any DTD, a document with only an internal DTD
subset which contains no parameter entity references, or a
document with "standalone='yes'", the Name given in the
entity reference must match that in an entity declaration,
except that well-formed documents need not declare any of
the following entities: amp, lt, gt, apos, quot. The
declaration of a parameter entity must precede any
reference to it. Similarly, the declaration of a general
entity must precede any reference to it which appears in a
default value in an attribute-list declaration. Note that
if entities are declared in the external subset or in
external parameter entities, a non-validating processor is
not obligated to read and process their declarations; for
such documents, the rule that an entity must be declared is
a well-formedness constraint only if standalone='yes'. [
WFC: Parsed Entity ] An entity reference must not contain
the name of an unparsed entity """
ret = libxml2mod.xmlParseEntityRef(self._o)
if ret is None:raise parserError('xmlParseEntityRef() failed')
__tmp = xmlEntity(_obj=ret)
return __tmp | parse ENTITY references declarations [68] EntityRef ::=
'&' Name ';' [ WFC: Entity Declared ] In a document
without any DTD, a document with only an internal DTD
subset which contains no parameter entity references, or a
document with "standalone='yes'", the Name given in the
entity reference must match that in an entity declaration,
except that well-formed documents need not declare any of
the following entities: amp, lt, gt, apos, quot. The
declaration of a parameter entity must precede any
reference to it. Similarly, the declaration of a general
entity must precede any reference to it which appears in a
default value in an attribute-list declaration. Note that
if entities are declared in the external subset or in
external parameter entities, a non-validating processor is
not obligated to read and process their declarations; for
such documents, the rule that an entity must be declared is
a well-formedness constraint only if standalone='yes'. [
WFC: Parsed Entity ] An entity reference must not contain
the name of an unparsed entity | Below is the instruction that describes the task:
### Input:
parse ENTITY references declarations [68] EntityRef ::=
'&' Name ';' [ WFC: Entity Declared ] In a document
without any DTD, a document with only an internal DTD
subset which contains no parameter entity references, or a
document with "standalone='yes'", the Name given in the
entity reference must match that in an entity declaration,
except that well-formed documents need not declare any of
the following entities: amp, lt, gt, apos, quot. The
declaration of a parameter entity must precede any
reference to it. Similarly, the declaration of a general
entity must precede any reference to it which appears in a
default value in an attribute-list declaration. Note that
if entities are declared in the external subset or in
external parameter entities, a non-validating processor is
not obligated to read and process their declarations; for
such documents, the rule that an entity must be declared is
a well-formedness constraint only if standalone='yes'. [
WFC: Parsed Entity ] An entity reference must not contain
the name of an unparsed entity
### Response:
def parseEntityRef(self):
"""parse ENTITY references declarations [68] EntityRef ::=
'&' Name ';' [ WFC: Entity Declared ] In a document
without any DTD, a document with only an internal DTD
subset which contains no parameter entity references, or a
document with "standalone='yes'", the Name given in the
entity reference must match that in an entity declaration,
except that well-formed documents need not declare any of
the following entities: amp, lt, gt, apos, quot. The
declaration of a parameter entity must precede any
reference to it. Similarly, the declaration of a general
entity must precede any reference to it which appears in a
default value in an attribute-list declaration. Note that
if entities are declared in the external subset or in
external parameter entities, a non-validating processor is
not obligated to read and process their declarations; for
such documents, the rule that an entity must be declared is
a well-formedness constraint only if standalone='yes'. [
WFC: Parsed Entity ] An entity reference must not contain
the name of an unparsed entity """
ret = libxml2mod.xmlParseEntityRef(self._o)
if ret is None:raise parserError('xmlParseEntityRef() failed')
__tmp = xmlEntity(_obj=ret)
return __tmp |
def resize(self, size=None):
"""
Resize the container's PTY.
If `size` is not None, it must be a tuple of (height,width), otherwise
it will be determined by the size of the current TTY.
"""
if not self.israw():
return
size = size or tty.size(self.stdout)
if size is not None:
rows, cols = size
try:
self.client.resize(self.container, height=rows, width=cols)
except IOError: # Container already exited
pass | Resize the container's PTY.
If `size` is not None, it must be a tuple of (height,width), otherwise
it will be determined by the size of the current TTY. | Below is the instruction that describes the task:
### Input:
Resize the container's PTY.
If `size` is not None, it must be a tuple of (height,width), otherwise
it will be determined by the size of the current TTY.
### Response:
def resize(self, size=None):
"""
Resize the container's PTY.
If `size` is not None, it must be a tuple of (height,width), otherwise
it will be determined by the size of the current TTY.
"""
if not self.israw():
return
size = size or tty.size(self.stdout)
if size is not None:
rows, cols = size
try:
self.client.resize(self.container, height=rows, width=cols)
except IOError: # Container already exited
pass |
def _fix_squeeze(self, inputs, new_attr):
"""
MXNet doesn't have a squeeze operator.
Using "split" to perform a similar operation.
"split" can be slower compared to "reshape".
This can have a performance impact.
TODO: Remove this implementation once mxnet adds the support.
"""
axes = new_attr.get('axis')
op = mx.sym.split(inputs[0], axis=axes[0], num_outputs=1, squeeze_axis=1)
for i in axes[1:]:
op = mx.sym.split(op, axis=i-1, num_outputs=1, squeeze_axis=1)
return op | MXNet doesn't have a squeeze operator.
Using "split" to perform a similar operation.
"split" can be slower compared to "reshape".
This can have a performance impact.
TODO: Remove this implementation once mxnet adds the support. | Below is the instruction that describes the task:
### Input:
MXNet doesn't have a squeeze operator.
Using "split" to perform a similar operation.
"split" can be slower compared to "reshape".
This can have a performance impact.
TODO: Remove this implementation once mxnet adds the support.
### Response:
def _fix_squeeze(self, inputs, new_attr):
"""
MXNet doesn't have a squeeze operator.
Using "split" to perform a similar operation.
"split" can be slower compared to "reshape".
This can have a performance impact.
TODO: Remove this implementation once mxnet adds the support.
"""
axes = new_attr.get('axis')
op = mx.sym.split(inputs[0], axis=axes[0], num_outputs=1, squeeze_axis=1)
for i in axes[1:]:
op = mx.sym.split(op, axis=i-1, num_outputs=1, squeeze_axis=1)
return op |
def parse_assertion(self, keys=None):
""" Parse the assertions for a saml response.
:param keys: A string representing a RSA key or a list of strings
containing RSA keys.
:return: True if the assertions are parsed otherwise False.
"""
if self.context == "AuthnQuery":
# can contain one or more assertions
pass
else:
# This is a saml2int limitation
try:
assert (
len(self.response.assertion) == 1
or len(self.response.encrypted_assertion) == 1
or self.assertion is not None
)
except AssertionError:
raise Exception("No assertion part")
if self.response.assertion:
logger.debug("***Unencrypted assertion***")
for assertion in self.response.assertion:
if not self._assertion(assertion, False):
return False
if self.find_encrypt_data(self.response):
logger.debug("***Encrypted assertion/-s***")
_enc_assertions = []
resp = self.response
decr_text = str(self.response)
decr_text_old = None
while self.find_encrypt_data(resp) and decr_text_old != decr_text:
decr_text_old = decr_text
try:
decr_text = self.sec.decrypt_keys(decr_text, keys)
except DecryptError as e:
continue
else:
resp = samlp.response_from_string(decr_text)
# check and prepare for comparison between str and unicode
if type(decr_text_old) != type(decr_text):
if isinstance(decr_text_old, six.binary_type):
decr_text_old = decr_text_old.decode("utf-8")
else:
decr_text_old = decr_text_old.encode("utf-8")
_enc_assertions = self.decrypt_assertions(
resp.encrypted_assertion, decr_text
)
decr_text_old = None
while (
self.find_encrypt_data(resp)
or self.find_encrypt_data_assertion_list(_enc_assertions)
) and decr_text_old != decr_text:
decr_text_old = decr_text
try:
decr_text = self.sec.decrypt_keys(decr_text, keys)
except DecryptError as e:
continue
else:
resp = samlp.response_from_string(decr_text)
_enc_assertions = self.decrypt_assertions(
resp.encrypted_assertion, decr_text, verified=True
)
# check and prepare for comparison between str and unicode
if type(decr_text_old) != type(decr_text):
if isinstance(decr_text_old, six.binary_type):
decr_text_old = decr_text_old.decode("utf-8")
else:
decr_text_old = decr_text_old.encode("utf-8")
all_assertions = _enc_assertions
if resp.assertion:
all_assertions = all_assertions + resp.assertion
if len(all_assertions) > 0:
for tmp_ass in all_assertions:
if tmp_ass.advice and tmp_ass.advice.encrypted_assertion:
advice_res = self.decrypt_assertions(
tmp_ass.advice.encrypted_assertion,
decr_text,
tmp_ass.issuer)
if tmp_ass.advice.assertion:
tmp_ass.advice.assertion.extend(advice_res)
else:
tmp_ass.advice.assertion = advice_res
if len(advice_res) > 0:
tmp_ass.advice.encrypted_assertion = []
self.response.assertion = resp.assertion
for assertion in _enc_assertions:
if not self._assertion(assertion, True):
return False
else:
self.assertions.append(assertion)
self.xmlstr = decr_text
if len(_enc_assertions) > 0:
self.response.encrypted_assertion = []
if self.response.assertion:
for assertion in self.response.assertion:
self.assertions.append(assertion)
if self.assertions and len(self.assertions) > 0:
self.assertion = self.assertions[0]
if self.context == "AuthnReq" or self.context == "AttrQuery":
self.ava = self.get_identity()
logger.debug("--- AVA: %s", self.ava)
return True | Parse the assertions for a saml response.
:param keys: A string representing a RSA key or a list of strings
containing RSA keys.
:return: True if the assertions are parsed otherwise False. | Below is the instruction that describes the task:
### Input:
Parse the assertions for a saml response.
:param keys: A string representing a RSA key or a list of strings
containing RSA keys.
:return: True if the assertions are parsed otherwise False.
### Response:
def parse_assertion(self, keys=None):
""" Parse the assertions for a saml response.
:param keys: A string representing a RSA key or a list of strings
containing RSA keys.
:return: True if the assertions are parsed otherwise False.
"""
if self.context == "AuthnQuery":
# can contain one or more assertions
pass
else:
# This is a saml2int limitation
try:
assert (
len(self.response.assertion) == 1
or len(self.response.encrypted_assertion) == 1
or self.assertion is not None
)
except AssertionError:
raise Exception("No assertion part")
if self.response.assertion:
logger.debug("***Unencrypted assertion***")
for assertion in self.response.assertion:
if not self._assertion(assertion, False):
return False
if self.find_encrypt_data(self.response):
logger.debug("***Encrypted assertion/-s***")
_enc_assertions = []
resp = self.response
decr_text = str(self.response)
decr_text_old = None
while self.find_encrypt_data(resp) and decr_text_old != decr_text:
decr_text_old = decr_text
try:
decr_text = self.sec.decrypt_keys(decr_text, keys)
except DecryptError as e:
continue
else:
resp = samlp.response_from_string(decr_text)
# check and prepare for comparison between str and unicode
if type(decr_text_old) != type(decr_text):
if isinstance(decr_text_old, six.binary_type):
decr_text_old = decr_text_old.decode("utf-8")
else:
decr_text_old = decr_text_old.encode("utf-8")
_enc_assertions = self.decrypt_assertions(
resp.encrypted_assertion, decr_text
)
decr_text_old = None
while (
self.find_encrypt_data(resp)
or self.find_encrypt_data_assertion_list(_enc_assertions)
) and decr_text_old != decr_text:
decr_text_old = decr_text
try:
decr_text = self.sec.decrypt_keys(decr_text, keys)
except DecryptError as e:
continue
else:
resp = samlp.response_from_string(decr_text)
_enc_assertions = self.decrypt_assertions(
resp.encrypted_assertion, decr_text, verified=True
)
# check and prepare for comparison between str and unicode
if type(decr_text_old) != type(decr_text):
if isinstance(decr_text_old, six.binary_type):
decr_text_old = decr_text_old.decode("utf-8")
else:
decr_text_old = decr_text_old.encode("utf-8")
all_assertions = _enc_assertions
if resp.assertion:
all_assertions = all_assertions + resp.assertion
if len(all_assertions) > 0:
for tmp_ass in all_assertions:
if tmp_ass.advice and tmp_ass.advice.encrypted_assertion:
advice_res = self.decrypt_assertions(
tmp_ass.advice.encrypted_assertion,
decr_text,
tmp_ass.issuer)
if tmp_ass.advice.assertion:
tmp_ass.advice.assertion.extend(advice_res)
else:
tmp_ass.advice.assertion = advice_res
if len(advice_res) > 0:
tmp_ass.advice.encrypted_assertion = []
self.response.assertion = resp.assertion
for assertion in _enc_assertions:
if not self._assertion(assertion, True):
return False
else:
self.assertions.append(assertion)
self.xmlstr = decr_text
if len(_enc_assertions) > 0:
self.response.encrypted_assertion = []
if self.response.assertion:
for assertion in self.response.assertion:
self.assertions.append(assertion)
if self.assertions and len(self.assertions) > 0:
self.assertion = self.assertions[0]
if self.context == "AuthnReq" or self.context == "AttrQuery":
self.ava = self.get_identity()
logger.debug("--- AVA: %s", self.ava)
return True |
def deprecated(func, *args, **kwargs):
''' Marks a function as deprecated. '''
warnings.warn(
'{} is deprecated and should no longer be used.'.format(func),
DeprecationWarning,
stacklevel=3
)
return func(*args, **kwargs) | Marks a function as deprecated. | Below is the instruction that describes the task:
### Input:
Marks a function as deprecated.
### Response:
def deprecated(func, *args, **kwargs):
''' Marks a function as deprecated. '''
warnings.warn(
'{} is deprecated and should no longer be used.'.format(func),
DeprecationWarning,
stacklevel=3
)
return func(*args, **kwargs) |
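A minimal usage sketch for the `deprecated` wrapper above; `old_sum` is an illustrative placeholder rather than part of the original source:

import warnings

def old_sum(a, b):
    return a + b

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    result = deprecated(old_sum, 2, 3)  # warns, then calls old_sum(2, 3)

assert result == 5
assert caught[0].category is DeprecationWarning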
def AgregarCalidad(self, analisis_muestra=None, nro_boletin=None,
cod_grado=None, valor_grado=None,
valor_contenido_proteico=None, valor_factor=None,
**kwargs):
"Agrega la información sobre la calidad, al autorizar o posteriormente"
self.certificacion['primaria']['calidad'] = dict(
analisisMuestra=analisis_muestra,
nroBoletin=nro_boletin,
codGrado=cod_grado, # G1 G2 G3 F1 F2 F3
valorGrado=valor_grado or None, # optional
valorContProteico=valor_contenido_proteico,
valorFactor=valor_factor,
detalleMuestraAnalisis=[], # <!--1 or more repetitions:-->
)
return True | Adds the quality information, when authorizing or afterwards | Below is the instruction that describes the task:
### Input:
Adds the quality information, when authorizing or afterwards
### Response:
def AgregarCalidad(self, analisis_muestra=None, nro_boletin=None,
cod_grado=None, valor_grado=None,
valor_contenido_proteico=None, valor_factor=None,
**kwargs):
"Agrega la información sobre la calidad, al autorizar o posteriormente"
self.certificacion['primaria']['calidad'] = dict(
analisisMuestra=analisis_muestra,
nroBoletin=nro_boletin,
codGrado=cod_grado, # G1 G2 G3 F1 F2 F3
valorGrado=valor_grado or None, # optional
valorContProteico=valor_contenido_proteico,
valorFactor=valor_factor,
detalleMuestraAnalisis=[], # <!--1 or more repetitions:-->
)
return True |
def _impl(lexer):
"""Return an Implies expression."""
p = _sumterm(lexer)
tok = next(lexer)
# SUMTERM '=>' IMPL
if isinstance(tok, OP_rarrow):
q = _impl(lexer)
return ('implies', p, q)
# SUMTERM '<=>' IMPL
elif isinstance(tok, OP_lrarrow):
q = _impl(lexer)
return ('equal', p, q)
# SUMTERM
else:
lexer.unpop_token(tok)
return p | Return an Implies expression. | Below is the instruction that describes the task:
### Input:
Return an Implies expression.
### Response:
def _impl(lexer):
"""Return an Implies expression."""
p = _sumterm(lexer)
tok = next(lexer)
# SUMTERM '=>' IMPL
if isinstance(tok, OP_rarrow):
q = _impl(lexer)
return ('implies', p, q)
# SUMTERM '<=>' IMPL
elif isinstance(tok, OP_lrarrow):
q = _impl(lexer)
return ('equal', p, q)
# SUMTERM
else:
lexer.unpop_token(tok)
return p |
def has_api_scopes(self, *api_scopes):
"""
Test if all given API scopes are authorized.
:type api_scopes: list[str]
:param api_scopes: The API scopes to test
:rtype: bool|None
:return:
True or False, if the API Token has the API scopes field set,
otherwise None
"""
if self._authorized_api_scopes is None:
return None
return all((x in self._authorized_api_scopes) for x in api_scopes) | Test if all given API scopes are authorized.
:type api_scopes: list[str]
:param api_scopes: The API scopes to test
:rtype: bool|None
:return:
True or False, if the API Token has the API scopes field set,
otherwise None | Below is the instruction that describes the task:
### Input:
Test if all given API scopes are authorized.
:type api_scopes: list[str]
:param api_scopes: The API scopes to test
:rtype: bool|None
:return:
True or False, if the API Token has the API scopes field set,
otherwise None
### Response:
def has_api_scopes(self, *api_scopes):
"""
Test if all given API scopes are authorized.
:type api_scopes: list[str]
:param api_scopes: The API scopes to test
:rtype: bool|None
:return:
True or False, if the API Token has the API scopes field set,
otherwise None
"""
if self._authorized_api_scopes is None:
return None
return all((x in self._authorized_api_scopes) for x in api_scopes) |
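A behaviour sketch for `has_api_scopes`; the `_Token` holder below is a made-up stand-in for whatever object carries the `_authorized_api_scopes` field:

class _Token:
    def __init__(self, scopes):
        self._authorized_api_scopes = scopes

_Token.has_api_scopes = has_api_scopes  # attach the method above for the demo

print(_Token(['read', 'write']).has_api_scopes('read'))   # True
print(_Token(['read']).has_api_scopes('read', 'write'))   # False
print(_Token(None).has_api_scopes('read'))                # None: scopes field not set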
def dumps(obj, preserve=False):
"""Stringifies a dict as toml
:param obj: the object to be dumped into toml
:param preserve: optional flag to preserve the inline table in result
"""
f = StringIO()
dump(obj, f, preserve)
return f.getvalue() | Stringifies a dict as toml
:param obj: the object to be dumped into toml
:param preserve: optional flag to preserve the inline table in result | Below is the instruction that describes the task:
### Input:
Stringifies a dict as toml
:param obj: the object to be dumped into toml
:param preserve: optional flag to preserve the inline table in result
### Response:
def dumps(obj, preserve=False):
"""Stringifies a dict as toml
:param obj: the object to be dumped into toml
:param preserve: optional flag to preserve the inline table in result
"""
f = StringIO()
dump(obj, f, preserve)
return f.getvalue() |
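A short usage sketch, assuming `dumps` lives in the toml module alongside the `dump` and `StringIO` helpers it relies on:

config = {"title": "example", "server": {"host": "127.0.0.1", "port": 8001}}
text = dumps(config)
print(text)  # the dict rendered as a TOML document string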
def get_entry_point(key, value):
"""Check if registered entry point is available for a given name and
load it. Otherwise, return None.
key (unicode): Entry point name.
value (unicode): Name of entry point to load.
RETURNS: The loaded entry point or None.
"""
for entry_point in pkg_resources.iter_entry_points(key):
if entry_point.name == value:
return entry_point.load() | Check if registered entry point is available for a given name and
load it. Otherwise, return None.
key (unicode): Entry point name.
value (unicode): Name of entry point to load.
RETURNS: The loaded entry point or None. | Below is the instruction that describes the task:
### Input:
Check if registered entry point is available for a given name and
load it. Otherwise, return None.
key (unicode): Entry point name.
value (unicode): Name of entry point to load.
RETURNS: The loaded entry point or None.
### Response:
def get_entry_point(key, value):
"""Check if registered entry point is available for a given name and
load it. Otherwise, return None.
key (unicode): Entry point name.
value (unicode): Name of entry point to load.
RETURNS: The loaded entry point or None.
"""
for entry_point in pkg_resources.iter_entry_points(key):
if entry_point.name == value:
return entry_point.load() |
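An illustrative lookup with `get_entry_point`; the group and entry-point names below are placeholders and are not guaranteed to exist in any installed package:

factory = get_entry_point("myproject_plugins", "json_exporter")
if factory is None:
    print("no such entry point registered")
else:
    exporter = factory()  # instantiate whatever the entry point exposes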
def open_file(filepath):
"""
Open file with the default system app.
Copied from https://stackoverflow.com/a/435669/1224456
"""
if sys.platform.startswith('darwin'):
subprocess.Popen(('open', filepath))
elif os.name == 'nt':
os.startfile(filepath)
elif os.name == 'posix':
subprocess.Popen(('xdg-open', filepath)) | Open file with the default system app.
Copied from https://stackoverflow.com/a/435669/1224456 | Below is the instruction that describes the task:
### Input:
Open file with the default system app.
Copied from https://stackoverflow.com/a/435669/1224456
### Response:
def open_file(filepath):
"""
Open file with the default system app.
Copied from https://stackoverflow.com/a/435669/1224456
"""
if sys.platform.startswith('darwin'):
subprocess.Popen(('open', filepath))
elif os.name == 'nt':
os.startfile(filepath)
elif os.name == 'posix':
subprocess.Popen(('xdg-open', filepath)) |
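Example call (the path is illustrative):

open_file("/tmp/report.pdf")  # hands the file to the platform's default application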
def clean(self, value):
"""Cleans and returns the given value, or raises a ParameterNotValidError exception"""
if isinstance(value, six.string_types) and value.lower() == 'false':
return False
return bool(value) | Cleans and returns the given value, or raises a ParameterNotValidError exception | Below is the instruction that describes the task:
### Input:
Cleans and returns the given value, or raises a ParameterNotValidError exception
### Response:
def clean(self, value):
"""Cleans and returns the given value, or raises a ParameterNotValidError exception"""
if isinstance(value, six.string_types) and value.lower() == 'false':
return False
return bool(value) |
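The coercion rules above in short; `_BoolParam` is a made-up host class, and the snippet assumes `six` is imported in the module that defines `clean`:

class _BoolParam:
    pass

_BoolParam.clean = clean  # reuse the method above

p = _BoolParam()
print(p.clean('false'))  # False: the literal string 'false' is special-cased
print(p.clean('no'))     # True: any other non-empty string is truthy
print(p.clean(0))        # False: falls through to bool(0)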
def create(self, unique_name, friendly_name=values.unset, actions=values.unset,
actions_url=values.unset):
"""
Create a new TaskInstance
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:param unicode friendly_name: descriptive string that you create to describe the new resource
:param dict actions: The JSON string that specifies the actions that instruct the Assistant on how to perform the task
:param unicode actions_url: The URL from which the Assistant can fetch actions
:returns: Newly created TaskInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.TaskInstance
"""
data = values.of({
'UniqueName': unique_name,
'FriendlyName': friendly_name,
'Actions': serialize.object(actions),
'ActionsUrl': actions_url,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return TaskInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], ) | Create a new TaskInstance
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:param unicode friendly_name: descriptive string that you create to describe the new resource
:param dict actions: The JSON string that specifies the actions that instruct the Assistant on how to perform the task
:param unicode actions_url: The URL from which the Assistant can fetch actions
:returns: Newly created TaskInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.TaskInstance | Below is the instruction that describes the task:
### Input:
Create a new TaskInstance
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:param unicode friendly_name: descriptive string that you create to describe the new resource
:param dict actions: The JSON string that specifies the actions that instruct the Assistant on how to perform the task
:param unicode actions_url: The URL from which the Assistant can fetch actions
:returns: Newly created TaskInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.TaskInstance
### Response:
def create(self, unique_name, friendly_name=values.unset, actions=values.unset,
actions_url=values.unset):
"""
Create a new TaskInstance
:param unicode unique_name: An application-defined string that uniquely identifies the resource
:param unicode friendly_name: descriptive string that you create to describe the new resource
:param dict actions: The JSON string that specifies the actions that instruct the Assistant on how to perform the task
:param unicode actions_url: The URL from which the Assistant can fetch actions
:returns: Newly created TaskInstance
:rtype: twilio.rest.autopilot.v1.assistant.task.TaskInstance
"""
data = values.of({
'UniqueName': unique_name,
'FriendlyName': friendly_name,
'Actions': serialize.object(actions),
'ActionsUrl': actions_url,
})
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return TaskInstance(self._version, payload, assistant_sid=self._solution['assistant_sid'], ) |
def order_assimilation(args):
"""
Internal helper method for BorgQueen to process assimilation
"""
(path, drone, data, status) = args
newdata = drone.assimilate(path)
if newdata:
data.append(json.dumps(newdata, cls=MontyEncoder))
status['count'] += 1
count = status['count']
total = status['total']
logger.info('{}/{} ({:.2f}%) done'.format(count, total,
count / total * 100)) | Internal helper method for BorgQueen to process assimilation | Below is the instruction that describes the task:
### Input:
Internal helper method for BorgQueen to process assimilation
### Response:
def order_assimilation(args):
"""
Internal helper method for BorgQueen to process assimilation
"""
(path, drone, data, status) = args
newdata = drone.assimilate(path)
if newdata:
data.append(json.dumps(newdata, cls=MontyEncoder))
status['count'] += 1
count = status['count']
total = status['total']
logger.info('{}/{} ({:.2f}%) done'.format(count, total,
count / total * 100)) |
def order(self, order):
"""Returns name order key.
Returns tuple with two strings that can be compared to other such
tuple obtained from different name. Note that if you want
locale-dependent ordering then you need to compare strings using
locale-aware method (e.g. ``locale.strxfrm``).
:param order: One of the ORDER_* constants.
:returns: tuple of two strings
"""
given = self.given
surname = self.surname
if order in (ORDER_MAIDEN_GIVEN, ORDER_GIVEN_MAIDEN):
surname = self.maiden or self.surname
# We are collating empty names to come after non-empty,
# so instead of empty we return "2" and add "1" as prefix to others
given = ("1" + given) if given else "2"
surname = ("1" + surname) if surname else "2"
if order in (ORDER_SURNAME_GIVEN, ORDER_MAIDEN_GIVEN):
return (surname, given)
elif order in (ORDER_GIVEN_SURNAME, ORDER_GIVEN_MAIDEN):
return (given, surname)
else:
raise ValueError("unexpected order: {}".format(order)) | Returns name order key.
Returns tuple with two strings that can be compared to other such
tuple obtained from different name. Note that if you want
locale-dependent ordering then you need to compare strings using
locale-aware method (e.g. ``locale.strxfrm``).
:param order: One of the ORDER_* constants.
:returns: tuple of two strings | Below is the instruction that describes the task:
### Input:
Returns name order key.
Returns tuple with two strings that can be compared to other such
tuple obtained from different name. Note that if you want
locale-dependent ordering then you need to compare strings using
locale-aware method (e.g. ``locale.strxfrm``).
:param order: One of the ORDER_* constants.
:returns: tuple of two strings
### Response:
def order(self, order):
"""Returns name order key.
Returns tuple with two strings that can be compared to other such
tuple obtained from different name. Note that if you want
locale-dependent ordering then you need to compare strings using
locale-aware method (e.g. ``locale.strxfrm``).
:param order: One of the ORDER_* constants.
:returns: tuple of two strings
"""
given = self.given
surname = self.surname
if order in (ORDER_MAIDEN_GIVEN, ORDER_GIVEN_MAIDEN):
surname = self.maiden or self.surname
# We are collating empty names to come after non-empty,
# so instead of empty we return "2" and add "1" as prefix to others
given = ("1" + given) if given else "2"
surname = ("1" + surname) if surname else "2"
if order in (ORDER_SURNAME_GIVEN, ORDER_MAIDEN_GIVEN):
return (surname, given)
elif order in (ORDER_GIVEN_SURNAME, ORDER_GIVEN_MAIDEN):
return (given, surname)
else:
raise ValueError("unexpected order: {}".format(order)) |
def logged_delete(self, user):
"""Delete the document and log the event in the change log"""
self.delete()
# Log the change
entry = ChangeLogEntry({
'type': 'DELETED',
'documents': [self],
'user': user
})
entry.insert()
return entry | Delete the document and log the event in the change log | Below is the instruction that describes the task:
### Input:
Delete the document and log the event in the change log
### Response:
def logged_delete(self, user):
"""Delete the document and log the event in the change log"""
self.delete()
# Log the change
entry = ChangeLogEntry({
'type': 'DELETED',
'documents': [self],
'user': user
})
entry.insert()
return entry |
def sign(self, signer: Signer):
"""This method signs twice:
- the `non_closing_signature` for the balance proof update
- the `reward_proof_signature` for the monitoring request
"""
self.non_closing_signature = self.balance_proof._sign(signer)
message_data = self._data_to_sign()
self.signature = signer.sign(data=message_data) | This method signs twice:
- the `non_closing_signature` for the balance proof update
- the `reward_proof_signature` for the monitoring request | Below is the instruction that describes the task:
### Input:
This method signs twice:
- the `non_closing_signature` for the balance proof update
- the `reward_proof_signature` for the monitoring request
### Response:
def sign(self, signer: Signer):
"""This method signs twice:
- the `non_closing_signature` for the balance proof update
- the `reward_proof_signature` for the monitoring request
"""
self.non_closing_signature = self.balance_proof._sign(signer)
message_data = self._data_to_sign()
self.signature = signer.sign(data=message_data) |
def get_source_id(self):
"""Gets the ``Resource Id`` of the source of this asset.
The source is the original owner of the copyright of this asset
and may differ from the creator of this asset. The source for a
published book written by Margaret Mitchell would be Macmillan.
The source for an unpublished painting by Arthur Goodwin would
be Arthur Goodwin.
An ``Asset`` is ``Sourceable`` and also contains a provider
identity. The provider is the entity that makes this digital
asset available in this repository but may or may not be the
publisher of the contents depicted in the asset. For example, a
map published by Ticknor and Fields in 1848 may have a provider
of Library of Congress and a source of Ticknor and Fields. If
copied from a repository at Middlebury College, the provider
would be Middlebury College and a source of Ticknor and Fields.
return: (osid.id.Id) - the source ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_id_template
if not bool(self._my_map['sourceId']):
raise errors.IllegalState('this Asset has no source')
else:
return Id(self._my_map['sourceId']) | Gets the ``Resource Id`` of the source of this asset.
The source is the original owner of the copyright of this asset
and may differ from the creator of this asset. The source for a
published book written by Margaret Mitchell would be Macmillan.
The source for an unpublished painting by Arthur Goodwin would
be Arthur Goodwin.
An ``Asset`` is ``Sourceable`` and also contains a provider
identity. The provider is the entity that makes this digital
asset available in this repository but may or may not be the
publisher of the contents depicted in the asset. For example, a
map published by Ticknor and Fields in 1848 may have a provider
of Library of Congress and a source of Ticknor and Fields. If
copied from a repository at Middlebury College, the provider
would be Middlebury College and a source of Ticknor and Fields.
return: (osid.id.Id) - the source ``Id``
*compliance: mandatory -- This method must be implemented.* | Below is the instruction that describes the task:
### Input:
Gets the ``Resource Id`` of the source of this asset.
The source is the original owner of the copyright of this asset
and may differ from the creator of this asset. The source for a
published book written by Margaret Mitchell would be Macmillan.
The source for an unpublished painting by Arthur Goodwin would
be Arthur Goodwin.
An ``Asset`` is ``Sourceable`` and also contains a provider
identity. The provider is the entity that makes this digital
asset available in this repository but may or may not be the
publisher of the contents depicted in the asset. For example, a
map published by Ticknor and Fields in 1848 may have a provider
of Library of Congress and a source of Ticknor and Fields. If
copied from a repository at Middlebury College, the provider
would be Middlebury College and a source of Ticknor and Fields.
return: (osid.id.Id) - the source ``Id``
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_source_id(self):
"""Gets the ``Resource Id`` of the source of this asset.
The source is the original owner of the copyright of this asset
and may differ from the creator of this asset. The source for a
published book written by Margaret Mitchell would be Macmillan.
The source for an unpublished painting by Arthur Goodwin would
be Arthur Goodwin.
An ``Asset`` is ``Sourceable`` and also contains a provider
identity. The provider is the entity that makes this digital
asset available in this repository but may or may not be the
publisher of the contents depicted in the asset. For example, a
map published by Ticknor and Fields in 1848 may have a provider
of Library of Congress and a source of Ticknor and Fields. If
copied from a repository at Middlebury College, the provider
would be Middlebury College and a source of Ticknor and Fields.
return: (osid.id.Id) - the source ``Id``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.Resource.get_avatar_id_template
if not bool(self._my_map['sourceId']):
raise errors.IllegalState('this Asset has no source')
else:
return Id(self._my_map['sourceId']) |
def predicates_overlap(tags1: List[str], tags2: List[str]) -> bool:
"""
Tests whether the predicates in BIO tags1 overlap
with those of tags2.
"""
# Get predicate word indices from both predictions
pred_ind1 = get_predicate_indices(tags1)
pred_ind2 = get_predicate_indices(tags2)
# Return if pred_ind1 pred_ind2 overlap
return any(set.intersection(set(pred_ind1), set(pred_ind2))) | Tests whether the predicates in BIO tags1 overlap
with those of tags2. | Below is the instruction that describes the task:
### Input:
Tests whether the predicates in BIO tags1 overlap
with those of tags2.
### Response:
def predicates_overlap(tags1: List[str], tags2: List[str]) -> bool:
"""
Tests whether the predicates in BIO tags1 overlap
with those of tags2.
"""
# Get predicate word indices from both predictions
pred_ind1 = get_predicate_indices(tags1)
pred_ind2 = get_predicate_indices(tags2)
# Return if pred_ind1 pred_ind2 overlap
return any(set.intersection(set(pred_ind1), set(pred_ind2))) |
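An illustrative call, assuming the module's `get_predicate_indices` helper returns the positions of the verb ("B-V") tags in a BIO sequence:

tags_a = ["B-ARG0", "I-ARG0", "B-V", "O"]
tags_b = ["O", "O", "B-V", "B-ARG1"]
print(predicates_overlap(tags_a, tags_b))  # True: both sequences mark index 2 as the predicate

tags_c = ["B-V", "O", "O", "O"]
print(predicates_overlap(tags_a, tags_c))  # False: predicates sit at different positions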
def email(value, whitelist=None):
"""
Validate an email address.
This validator is based on `Django's email validator`_. Returns
``True`` on success and :class:`~validators.utils.ValidationFailure`
when validation fails.
Examples::
>>> email('[email protected]')
True
>>> email('bogus@@')
ValidationFailure(func=email, ...)
.. _Django's email validator:
https://github.com/django/django/blob/master/django/core/validators.py
.. versionadded:: 0.1
:param value: value to validate
:param whitelist: domain names to whitelist
:copyright: (c) Django Software Foundation and individual contributors.
:license: BSD
"""
if whitelist is None:
whitelist = domain_whitelist
if not value or '@' not in value:
return False
user_part, domain_part = value.rsplit('@', 1)
if not user_regex.match(user_part):
return False
if domain_part not in whitelist and not domain_regex.match(domain_part):
# Try for possible IDN domain-part
try:
domain_part = domain_part.encode('idna').decode('ascii')
return domain_regex.match(domain_part)
except UnicodeError:
return False
return True | Validate an email address.
This validator is based on `Django's email validator`_. Returns
``True`` on success and :class:`~validators.utils.ValidationFailure`
when validation fails.
Examples::
>>> email('[email protected]')
True
>>> email('bogus@@')
ValidationFailure(func=email, ...)
.. _Django's email validator:
https://github.com/django/django/blob/master/django/core/validators.py
.. versionadded:: 0.1
:param value: value to validate
:param whitelist: domain names to whitelist
:copyright: (c) Django Software Foundation and individual contributors.
:license: BSD | Below is the instruction that describes the task:
### Input:
Validate an email address.
This validator is based on `Django's email validator`_. Returns
``True`` on success and :class:`~validators.utils.ValidationFailure`
when validation fails.
Examples::
>>> email('[email protected]')
True
>>> email('bogus@@')
ValidationFailure(func=email, ...)
.. _Django's email validator:
https://github.com/django/django/blob/master/django/core/validators.py
.. versionadded:: 0.1
:param value: value to validate
:param whitelist: domain names to whitelist
:copyright: (c) Django Software Foundation and individual contributors.
:license: BSD
### Response:
def email(value, whitelist=None):
"""
Validate an email address.
This validator is based on `Django's email validator`_. Returns
``True`` on success and :class:`~validators.utils.ValidationFailure`
when validation fails.
Examples::
>>> email('[email protected]')
True
>>> email('bogus@@')
ValidationFailure(func=email, ...)
.. _Django's email validator:
https://github.com/django/django/blob/master/django/core/validators.py
.. versionadded:: 0.1
:param value: value to validate
:param whitelist: domain names to whitelist
:copyright: (c) Django Software Foundation and individual contributors.
:license: BSD
"""
if whitelist is None:
whitelist = domain_whitelist
if not value or '@' not in value:
return False
user_part, domain_part = value.rsplit('@', 1)
if not user_regex.match(user_part):
return False
if domain_part not in whitelist and not domain_regex.match(domain_part):
# Try for possible IDN domain-part
try:
domain_part = domain_part.encode('idna').decode('ascii')
return domain_regex.match(domain_part)
except UnicodeError:
return False
return True |
def screenshots_done(self, jobid):
"""
Return true if the screenshots job is done
"""
resp = self.session.get(os.path.join(self.api_url, '{0}.json'.format(jobid)))
resp = self._process_response(resp)
return True if resp.json()['state'] == 'done' else False | Return true if the screenshots job is done | Below is the instruction that describes the task:
### Input:
Return true if the screenshots job is done
### Response:
def screenshots_done(self, jobid):
"""
Return true if the screenshots job is done
"""
resp = self.session.get(os.path.join(self.api_url, '{0}.json'.format(jobid)))
resp = self._process_response(resp)
return True if resp.json()['state'] == 'done' else False |
def _strip_scope(msg):
'''
Strip unnecessary message about running the command with --scope from
stderr so that we can raise an exception with the remaining stderr text.
'''
ret = []
for line in msg.splitlines():
if not line.endswith('.scope'):
ret.append(line)
return '\n'.join(ret).strip() | Strip unnecessary message about running the command with --scope from
stderr so that we can raise an exception with the remaining stderr text. | Below is the instruction that describes the task:
### Input:
Strip unnecessary message about running the command with --scope from
stderr so that we can raise an exception with the remaining stderr text.
### Response:
def _strip_scope(msg):
'''
Strip unnecessary message about running the command with --scope from
stderr so that we can raise an exception with the remaining stderr text.
'''
ret = []
for line in msg.splitlines():
if not line.endswith('.scope'):
ret.append(line)
return '\n'.join(ret).strip() |
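A small example of the clean-up `_strip_scope` performs on systemd output (the messages are illustrative):

msg = ("Running scope as unit: run-r1b2c3.scope\n"
       "Job for nginx.service failed because the control process exited with error code.")
print(_strip_scope(msg))
# -> "Job for nginx.service failed because the control process exited with error code."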
def add_user(self, uid, nodes, weights):
"""Add a user."""
for i, node in enumerate(nodes):
self.file.write("{},{},{}\n".format(uid, node, weights[i])) | Add a user. | Below is the instruction that describes the task:
### Input:
Add a user.
### Response:
def add_user(self, uid, nodes, weights):
"""Add a user."""
for i, node in enumerate(nodes):
self.file.write("{},{},{}\n".format(uid, node, weights[i])) |
def dehydrate_time(value):
""" Dehydrator for `time` values.
:param value:
:type value: Time
:return:
"""
if isinstance(value, Time):
nanoseconds = int(value.ticks * 1000000000)
elif isinstance(value, time):
nanoseconds = (3600000000000 * value.hour + 60000000000 * value.minute +
1000000000 * value.second + 1000 * value.microsecond)
else:
raise TypeError("Value must be a neotime.Time or a datetime.time")
if value.tzinfo:
return Structure(b"T", nanoseconds, value.tzinfo.utcoffset(value).seconds)
else:
return Structure(b"t", nanoseconds) | Dehydrator for `time` values.
:param value:
:type value: Time
:return: | Below is the instruction that describes the task:
### Input:
Dehydrator for `time` values.
:param value:
:type value: Time
:return:
### Response:
def dehydrate_time(value):
""" Dehydrator for `time` values.
:param value:
:type value: Time
:return:
"""
if isinstance(value, Time):
nanoseconds = int(value.ticks * 1000000000)
elif isinstance(value, time):
nanoseconds = (3600000000000 * value.hour + 60000000000 * value.minute +
1000000000 * value.second + 1000 * value.microsecond)
else:
raise TypeError("Value must be a neotime.Time or a datetime.time")
if value.tzinfo:
return Structure(b"T", nanoseconds, value.tzinfo.utcoffset(value).seconds)
else:
return Structure(b"t", nanoseconds) |
def plot_heatmap(fig, ax, data,
xaxislabel=None, yaxislabel=None,
xticklabels=None, yticklabels=None,
title=None, grid=True,
values_in_cells=True, round_values_in_cells=2,
legend=False,
fontsize_axislabel=None,
fontsize_axisticks=None,
fontsize_cell_values=None):
""""
helper function to plot a heatmap for a 2D matrix `data` using matplotlib's "matshow" function
"""
if not isinstance(data, np.ndarray):
data = np.array(data)
if data.ndim != 2:
raise ValueError('`data` must be a 2D matrix/array')
# draw basic heatmap
cax = ax.matshow(data)
# draw legend
if legend:
fig.colorbar(cax)
# set title
if title:
ax.set_title(title, y=1.25)
n_rows, n_cols = data.shape
# draw values in cells
if values_in_cells:
textcol_thresh = data.min() + (data.max() - data.min()) / 2
x_indices, y_indices = np.meshgrid(np.arange(n_cols), np.arange(n_rows))
for x, y in zip(x_indices.flatten(), y_indices.flatten()):
val = data[y, x]
# lower values get white text color for better visibility
textcol = 'white' if val < textcol_thresh else 'black'
disp_val = round(val, round_values_in_cells) if round_values_in_cells is not None else val
ax.text(x, y, disp_val, va='center', ha='center', color=textcol, fontsize=fontsize_cell_values)
# customize axes
if xaxislabel:
ax.set_xlabel(xaxislabel)
if yaxislabel:
ax.set_ylabel(yaxislabel)
if fontsize_axislabel:
for item in (ax.xaxis.label, ax.yaxis.label):
item.set_fontsize(fontsize_axislabel)
ax.set_xticks(np.arange(0, n_cols))
ax.set_yticks(np.arange(0, n_rows))
if xticklabels is not None:
ax.set_xticklabels(xticklabels, rotation=45, ha='left')
if yticklabels is not None:
ax.set_yticklabels(yticklabels)
if fontsize_axisticks:
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(fontsize_axisticks)
# gridlines based on minor ticks
if grid:
ax.set_xticks(np.arange(-.5, n_cols), minor=True)
ax.set_yticks(np.arange(-.5, n_rows), minor=True)
ax.grid(which='minor', color='w', linestyle='-', linewidth=1)
return fig, ax | helper function to plot a heatmap for a 2D matrix `data` using matplotlib's "matshow" function | Below is the instruction that describes the task:
### Input:
helper function to plot a heatmap for a 2D matrix `data` using matplotlib's "matshow" function
### Response:
def plot_heatmap(fig, ax, data,
xaxislabel=None, yaxislabel=None,
xticklabels=None, yticklabels=None,
title=None, grid=True,
values_in_cells=True, round_values_in_cells=2,
legend=False,
fontsize_axislabel=None,
fontsize_axisticks=None,
fontsize_cell_values=None):
""""
helper function to plot a heatmap for a 2D matrix `data` using matplotlib's "matshow" function
"""
if not isinstance(data, np.ndarray):
data = np.array(data)
if data.ndim != 2:
raise ValueError('`data` must be a 2D matrix/array')
# draw basic heatmap
cax = ax.matshow(data)
# draw legend
if legend:
fig.colorbar(cax)
# set title
if title:
ax.set_title(title, y=1.25)
n_rows, n_cols = data.shape
# draw values in cells
if values_in_cells:
textcol_thresh = data.min() + (data.max() - data.min()) / 2
x_indices, y_indices = np.meshgrid(np.arange(n_cols), np.arange(n_rows))
for x, y in zip(x_indices.flatten(), y_indices.flatten()):
val = data[y, x]
# lower values get white text color for better visibility
textcol = 'white' if val < textcol_thresh else 'black'
disp_val = round(val, round_values_in_cells) if round_values_in_cells is not None else val
ax.text(x, y, disp_val, va='center', ha='center', color=textcol, fontsize=fontsize_cell_values)
# customize axes
if xaxislabel:
ax.set_xlabel(xaxislabel)
if yaxislabel:
ax.set_ylabel(yaxislabel)
if fontsize_axislabel:
for item in (ax.xaxis.label, ax.yaxis.label):
item.set_fontsize(fontsize_axislabel)
ax.set_xticks(np.arange(0, n_cols))
ax.set_yticks(np.arange(0, n_rows))
if xticklabels is not None:
ax.set_xticklabels(xticklabels, rotation=45, ha='left')
if yticklabels is not None:
ax.set_yticklabels(yticklabels)
if fontsize_axisticks:
for label in (ax.get_xticklabels() + ax.get_yticklabels()):
label.set_fontsize(fontsize_axisticks)
# gridlines based on minor ticks
if grid:
ax.set_xticks(np.arange(-.5, n_cols), minor=True)
ax.set_yticks(np.arange(-.5, n_rows), minor=True)
ax.grid(which='minor', color='w', linestyle='-', linewidth=1)
return fig, ax |
def help(self, error=None, topic=None, parser=None):
"""Display an error message, or the named topic."""
assert error or topic or parser
if error:
print(error)
print("Use 'coverage help' for help.")
elif parser:
print(parser.format_help().strip())
else:
help_msg = HELP_TOPICS.get(topic, '').strip()
if help_msg:
print(help_msg % self.covpkg.__dict__)
else:
print("Don't know topic %r" % topic) | Display an error message, or the named topic. | Below is the the instruction that describes the task:
### Input:
Display an error message, or the named topic.
### Response:
def help(self, error=None, topic=None, parser=None):
"""Display an error message, or the named topic."""
assert error or topic or parser
if error:
print(error)
print("Use 'coverage help' for help.")
elif parser:
print(parser.format_help().strip())
else:
help_msg = HELP_TOPICS.get(topic, '').strip()
if help_msg:
print(help_msg % self.covpkg.__dict__)
else:
print("Don't know topic %r" % topic) |
def setHandler(self, event_name, callback):
"""Set an handler for given event."""
if event_name not in self.handlers:
raise ValueError('{} is not a valid event'.format(event_name))
if not callable(callback):
raise TypeError('{} is not callable'.format(callback))
self.handlers[event_name] = callback | Set a handler for given event. | Below is the instruction that describes the task:
### Input:
Set a handler for given event.
### Response:
def setHandler(self, event_name, callback):
"""Set an handler for given event."""
if event_name not in self.handlers:
raise ValueError('{} is not a valid event'.format(event_name))
if not callable(callback):
raise TypeError('{} is not callable'.format(callback))
self.handlers[event_name] = callback |
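A behaviour sketch for `setHandler`; the `_Events` host with its fixed `handlers` dict is a made-up stand-in:

class _Events:
    def __init__(self):
        self.handlers = {'on_connect': None, 'on_message': None}

_Events.setHandler = setHandler  # attach the method above for the demo

ev = _Events()
ev.setHandler('on_message', lambda msg: print(msg))  # registered
ev.setHandler('on_close', lambda: None)              # raises ValueError: not a valid event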
def _del(self, command, *args, **kwargs):
"""
Shortcut for commands that remove all values of the field.
All will be deindexed.
"""
if self.indexable:
self.deindex()
return self._traverse_command(command, *args, **kwargs) | Shortcut for commands that remove all values of the field.
All will be deindexed. | Below is the instruction that describes the task:
### Input:
Shortcut for commands that remove all values of the field.
All will be deindexed.
### Response:
def _del(self, command, *args, **kwargs):
"""
Shortcut for commands that remove all values of the field.
All will be deindexed.
"""
if self.indexable:
self.deindex()
return self._traverse_command(command, *args, **kwargs) |
def run(host: Optional[str] = None, port: Optional[int] = None,
*args, **kwargs) -> None:
"""Run the NoneBot instance."""
get_bot().run(host=host, port=port, *args, **kwargs) | Run the NoneBot instance. | Below is the instruction that describes the task:
### Input:
Run the NoneBot instance.
### Response:
def run(host: Optional[str] = None, port: Optional[int] = None,
*args, **kwargs) -> None:
"""Run the NoneBot instance."""
get_bot().run(host=host, port=port, *args, **kwargs) |
def _take_action(self):
"""Determines whether to perform the action or not.
Checks whether or not an action should be taken. This is determined by
the truthy value for the unless parameter. If unless is a callback
method, it will be invoked with no parameters in order to determine
whether or not the action should be taken. Otherwise, the truthy value
of the unless attribute will determine if the action should be
performed.
"""
# Do the action if there isn't an unless override.
if self.unless is None:
return True
# Invoke the callback if there is one.
if hasattr(self.unless, '__call__'):
return not self.unless()
return not self.unless | Determines whether to perform the action or not.
Checks whether or not an action should be taken. This is determined by
the truthy value for the unless parameter. If unless is a callback
method, it will be invoked with no parameters in order to determine
whether or not the action should be taken. Otherwise, the truthy value
of the unless attribute will determine if the action should be
performed. | Below is the instruction that describes the task:
### Input:
Determines whether to perform the action or not.
Checks whether or not an action should be taken. This is determined by
the truthy value for the unless parameter. If unless is a callback
method, it will be invoked with no parameters in order to determine
whether or not the action should be taken. Otherwise, the truthy value
of the unless attribute will determine if the action should be
performed.
### Response:
def _take_action(self):
"""Determines whether to perform the action or not.
Checks whether or not an action should be taken. This is determined by
the truthy value for the unless parameter. If unless is a callback
method, it will be invoked with no parameters in order to determine
whether or not the action should be taken. Otherwise, the truthy value
of the unless attribute will determine if the action should be
performed.
"""
# Do the action if there isn't an unless override.
if self.unless is None:
return True
# Invoke the callback if there is one.
if hasattr(self.unless, '__call__'):
return not self.unless()
return not self.unless |
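A tiny illustration of the `unless` logic; `_Action` is a hypothetical host object:

class _Action:
    def __init__(self, unless=None):
        self.unless = unless

_Action._take_action = _take_action  # attach the method above for the demo

print(_Action()._take_action())                      # True: no override given
print(_Action(unless=True)._take_action())           # False: truthy override skips the action
print(_Action(unless=lambda: False)._take_action())  # True: callback says the action should run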
def is_valid_input_array(x, ndim=None):
"""Test if ``x`` is a correctly shaped point array in R^d."""
x = np.asarray(x)
if ndim is None or ndim == 1:
return x.ndim == 1 and x.size > 1 or x.ndim == 2 and x.shape[0] == 1
else:
        return x.ndim == 2 and x.shape[0] == ndim | Test if ``x`` is a correctly shaped point array in R^d. | Below is the instruction that describes the task:
### Input:
Test if ``x`` is a correctly shaped point array in R^d.
### Response:
def is_valid_input_array(x, ndim=None):
"""Test if ``x`` is a correctly shaped point array in R^d."""
x = np.asarray(x)
if ndim is None or ndim == 1:
return x.ndim == 1 and x.size > 1 or x.ndim == 2 and x.shape[0] == 1
else:
return x.ndim == 2 and x.shape[0] == ndim |
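A short usage sketch of the shape check above, assuming numpy is available and the function is in scope:
import numpy as np

print(is_valid_input_array(np.zeros(5)))          # True: flat array of points in R^1
print(is_valid_input_array(np.zeros((1, 5))))     # True: single-row 2-d array
print(is_valid_input_array(np.zeros((3, 7)), 3))  # True: 3 x n array of points in R^3
print(is_valid_input_array(np.zeros((2, 7)), 3))  # False: leading dimension does not match ndim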
def add_put(self, *args, **kwargs):
"""
Shortcut for add_route with method PUT
"""
    return self.add_route(hdrs.METH_PUT, *args, **kwargs) | Shortcut for add_route with method PUT | Below is the instruction that describes the task:
### Input:
Shortcut for add_route with method PUT
### Response:
def add_put(self, *args, **kwargs):
"""
Shortcut for add_route with method PUT
"""
return self.add_route(hdrs.METH_PUT, *args, **kwargs) |
def do_levmarq_all_particle_groups(s, region_size=40, max_iter=2, damping=1.0,
decrease_damp_factor=10., run_length=4, collect_stats=False, **kwargs):
"""
Levenberg-Marquardt optimization for every particle in the state.
Convenience wrapper for LMParticleGroupCollection. Same keyword args,
but I've set the defaults to what I've found to be useful values for
optimizing particles. See LMParticleGroupCollection for documentation.
See Also
--------
do_levmarq_particles : Levenberg-Marquardt optimization of a
specified set of particles.
do_levmarq : Levenberg-Marquardt optimization of the entire state;
useful for optimizing global parameters.
LMParticleGroupCollection : The workhorse of do_levmarq.
LMEngine : Engine superclass for all the optimizers.
"""
lp = LMParticleGroupCollection(s, region_size=region_size, damping=damping,
run_length=run_length, decrease_damp_factor=decrease_damp_factor,
get_cos=collect_stats, max_iter=max_iter, **kwargs)
lp.do_run_2()
if collect_stats:
return lp.stats | Levenberg-Marquardt optimization for every particle in the state.
Convenience wrapper for LMParticleGroupCollection. Same keyword args,
but I've set the defaults to what I've found to be useful values for
optimizing particles. See LMParticleGroupCollection for documentation.
See Also
--------
do_levmarq_particles : Levenberg-Marquardt optimization of a
specified set of particles.
do_levmarq : Levenberg-Marquardt optimization of the entire state;
useful for optimizing global parameters.
LMParticleGroupCollection : The workhorse of do_levmarq.
LMEngine : Engine superclass for all the optimizers. | Below is the instruction that describes the task:
### Input:
Levenberg-Marquardt optimization for every particle in the state.
Convenience wrapper for LMParticleGroupCollection. Same keyword args,
but I've set the defaults to what I've found to be useful values for
optimizing particles. See LMParticleGroupCollection for documentation.
See Also
--------
do_levmarq_particles : Levenberg-Marquardt optimization of a
specified set of particles.
do_levmarq : Levenberg-Marquardt optimization of the entire state;
useful for optimizing global parameters.
LMParticleGroupCollection : The workhorse of do_levmarq.
LMEngine : Engine superclass for all the optimizers.
### Response:
def do_levmarq_all_particle_groups(s, region_size=40, max_iter=2, damping=1.0,
decrease_damp_factor=10., run_length=4, collect_stats=False, **kwargs):
"""
Levenberg-Marquardt optimization for every particle in the state.
Convenience wrapper for LMParticleGroupCollection. Same keyword args,
but I've set the defaults to what I've found to be useful values for
optimizing particles. See LMParticleGroupCollection for documentation.
See Also
--------
do_levmarq_particles : Levenberg-Marquardt optimization of a
specified set of particles.
do_levmarq : Levenberg-Marquardt optimization of the entire state;
useful for optimizing global parameters.
LMParticleGroupCollection : The workhorse of do_levmarq.
LMEngine : Engine superclass for all the optimizers.
"""
lp = LMParticleGroupCollection(s, region_size=region_size, damping=damping,
run_length=run_length, decrease_damp_factor=decrease_damp_factor,
get_cos=collect_stats, max_iter=max_iter, **kwargs)
lp.do_run_2()
if collect_stats:
return lp.stats |
def get_page_id(name, space):
"""Return id of a page based on passed page name and space.
Parameters:
- name: name of a Confluence page.
- space: space the Confluence page is in.
"""
data = _json.loads(_api.rest("?title=" + name.replace(" ", "%20") + "&"
"spaceKey=" + space + "&expand=history"))
try:
return data["results"][0]["id"]
except:
return ("Page not found!") | Return id of a page based on passed page name and space.
Parameters:
- name: name of a Confluence page.
- space: space the Confluence page is in. | Below is the instruction that describes the task:
### Input:
Return id of a page based on passed page name and space.
Parameters:
- name: name of a Confluence page.
- space: space the Confluence page is in.
### Response:
def get_page_id(name, space):
"""Return id of a page based on passed page name and space.
Parameters:
- name: name of a Confluence page.
- space: space the Confluence page is in.
"""
data = _json.loads(_api.rest("?title=" + name.replace(" ", "%20") + "&"
"spaceKey=" + space + "&expand=history"))
try:
return data["results"][0]["id"]
except:
return ("Page not found!") |
def snapshot(self, target=None, defer=None, autonumber=False):
'''Save the contents of current surface into a file or cairo surface/context
:param filename: Can be a filename or a Cairo surface.
:param defer: If true, buffering/threading may be employed however output will not be immediate.
:param autonumber: If true then a number will be appended to the filename.
'''
if autonumber:
file_number = self._frame
else:
file_number = None
if isinstance(target, cairo.Surface):
# snapshot to Cairo surface
if defer is None:
            defer = False
ctx = cairo.Context(target)
# this used to be self._canvas.snapshot, but I couldn't make it work.
# self._canvas.snapshot(target, defer)
# TODO: check if this breaks when taking more than 1 snapshot
self._canvas._drawqueue.render(ctx)
return
elif target is None:
# If nothing specified, use a default filename from the script name
script_file = self._namespace.get('__file__')
if script_file:
target = os.path.splitext(script_file)[0] + '.svg'
file_number = True
if target:
# snapshot to file, target is a filename
if defer is None:
defer = True
self._canvas.snapshot(target, defer=defer, file_number=file_number)
else:
raise ShoebotError('No image saved') | Save the contents of current surface into a file or cairo surface/context
:param filename: Can be a filename or a Cairo surface.
:param defer: If true, buffering/threading may be employed however output will not be immediate.
:param autonumber: If true then a number will be appended to the filename. | Below is the instruction that describes the task:
### Input:
Save the contents of current surface into a file or cairo surface/context
:param filename: Can be a filename or a Cairo surface.
:param defer: If true, buffering/threading may be employed however output will not be immediate.
:param autonumber: If true then a number will be appended to the filename.
### Response:
def snapshot(self, target=None, defer=None, autonumber=False):
'''Save the contents of current surface into a file or cairo surface/context
:param filename: Can be a filename or a Cairo surface.
:param defer: If true, buffering/threading may be employed however output will not be immediate.
:param autonumber: If true then a number will be appended to the filename.
'''
if autonumber:
file_number = self._frame
else:
file_number = None
if isinstance(target, cairo.Surface):
# snapshot to Cairo surface
if defer is None:
            defer = False
ctx = cairo.Context(target)
# this used to be self._canvas.snapshot, but I couldn't make it work.
# self._canvas.snapshot(target, defer)
# TODO: check if this breaks when taking more than 1 snapshot
self._canvas._drawqueue.render(ctx)
return
elif target is None:
# If nothing specified, use a default filename from the script name
script_file = self._namespace.get('__file__')
if script_file:
target = os.path.splitext(script_file)[0] + '.svg'
file_number = True
if target:
# snapshot to file, target is a filename
if defer is None:
defer = True
self._canvas.snapshot(target, defer=defer, file_number=file_number)
else:
raise ShoebotError('No image saved') |
def read_accpro20(infile):
"""Read the accpro20 output (.acc20) and return the parsed FASTA records.
Keeps the spaces between the accessibility numbers.
Args:
infile: Path to .acc20 file
Returns:
dict: Dictionary of accessibilities with keys as the ID
"""
with open(infile) as f:
records = f.read().splitlines()
accpro20_dict = {}
for i, r in enumerate(records):
if i % 2 == 0:
# TODO: Double check how to parse FASTA IDs (can they have a space because that is what i split by)
# Key was originally records[i][1:]
accpro20_dict[records[i].split(' ')[0][1:]] = [int(x) for x in records[i + 1].split(' ')]
return accpro20_dict | Read the accpro20 output (.acc20) and return the parsed FASTA records.
Keeps the spaces between the accessibility numbers.
Args:
infile: Path to .acc20 file
Returns:
dict: Dictionary of accessibilities with keys as the ID | Below is the instruction that describes the task:
### Input:
Read the accpro20 output (.acc20) and return the parsed FASTA records.
Keeps the spaces between the accessibility numbers.
Args:
infile: Path to .acc20 file
Returns:
dict: Dictionary of accessibilities with keys as the ID
### Response:
def read_accpro20(infile):
"""Read the accpro20 output (.acc20) and return the parsed FASTA records.
Keeps the spaces between the accessibility numbers.
Args:
infile: Path to .acc20 file
Returns:
dict: Dictionary of accessibilities with keys as the ID
"""
with open(infile) as f:
records = f.read().splitlines()
accpro20_dict = {}
for i, r in enumerate(records):
if i % 2 == 0:
# TODO: Double check how to parse FASTA IDs (can they have a space because that is what i split by)
# Key was originally records[i][1:]
accpro20_dict[records[i].split(' ')[0][1:]] = [int(x) for x in records[i + 1].split(' ')]
return accpro20_dict |
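For context, a sketch of the .acc20 layout the parser above expects; the file name and IDs here are made up:
# example.acc20 (hypothetical contents):
#   >P12345
#   4 12 0 33 95
#   >Q67890
#   7 7 18 64 2
acc = read_accpro20('example.acc20')
# acc -> {'P12345': [4, 12, 0, 33, 95], 'Q67890': [7, 7, 18, 64, 2]}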
def session_path(cls, project, instance, database, session):
"""Return a fully-qualified session string."""
return google.api_core.path_template.expand(
"projects/{project}/instances/{instance}/databases/{database}/sessions/{session}",
project=project,
instance=instance,
database=database,
session=session,
    ) | Return a fully-qualified session string. | Below is the instruction that describes the task:
### Input:
Return a fully-qualified session string.
### Response:
def session_path(cls, project, instance, database, session):
"""Return a fully-qualified session string."""
return google.api_core.path_template.expand(
"projects/{project}/instances/{instance}/databases/{database}/sessions/{session}",
project=project,
instance=instance,
database=database,
session=session,
) |
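Since the classmethod only expands a template, the same result can be reproduced directly with the path-template helper it calls; the values below are placeholders:
import google.api_core.path_template

print(google.api_core.path_template.expand(
    "projects/{project}/instances/{instance}/databases/{database}/sessions/{session}",
    project="my-project", instance="my-instance",
    database="my-db", session="session-123"))
# projects/my-project/instances/my-instance/databases/my-db/sessions/session-123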
def get_resource(self, service_name, resource_name, base_class=None):
"""
Returns a ``Resource`` **class** for a given service.
:param service_name: A string that specifies the name of the desired
service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc.
:type service_name: string
:param resource_name: A string that specifies the name of the desired
class. Ex. ``Queue``, ``Notification``, ``Table``, etc.
:type resource_name: string
:param base_class: (Optional) The base class of the object. Prevents
"magically" loading the wrong class (one with a different base).
:type base_class: class
:rtype: <kotocore.resources.Resource subclass>
"""
try:
return self.cache.get_resource(
service_name,
resource_name,
base_class=base_class
)
except NotCached:
pass
# We didn't find it. Construct it.
new_class = self.resource_factory.construct_for(
service_name,
resource_name,
base_class=base_class
)
self.cache.set_resource(service_name, resource_name, new_class)
return new_class | Returns a ``Resource`` **class** for a given service.
:param service_name: A string that specifies the name of the desired
service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc.
:type service_name: string
:param resource_name: A string that specifies the name of the desired
class. Ex. ``Queue``, ``Notification``, ``Table``, etc.
:type resource_name: string
:param base_class: (Optional) The base class of the object. Prevents
"magically" loading the wrong class (one with a different base).
:type base_class: class
:rtype: <kotocore.resources.Resource subclass> | Below is the instruction that describes the task:
### Input:
Returns a ``Resource`` **class** for a given service.
:param service_name: A string that specifies the name of the desired
service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc.
:type service_name: string
:param resource_name: A string that specifies the name of the desired
class. Ex. ``Queue``, ``Notification``, ``Table``, etc.
:type resource_name: string
:param base_class: (Optional) The base class of the object. Prevents
"magically" loading the wrong class (one with a different base).
:type base_class: class
:rtype: <kotocore.resources.Resource subclass>
### Response:
def get_resource(self, service_name, resource_name, base_class=None):
"""
Returns a ``Resource`` **class** for a given service.
:param service_name: A string that specifies the name of the desired
service. Ex. ``sqs``, ``sns``, ``dynamodb``, etc.
:type service_name: string
:param resource_name: A string that specifies the name of the desired
class. Ex. ``Queue``, ``Notification``, ``Table``, etc.
:type resource_name: string
:param base_class: (Optional) The base class of the object. Prevents
"magically" loading the wrong class (one with a different base).
:type base_class: class
:rtype: <kotocore.resources.Resource subclass>
"""
try:
return self.cache.get_resource(
service_name,
resource_name,
base_class=base_class
)
except NotCached:
pass
# We didn't find it. Construct it.
new_class = self.resource_factory.construct_for(
service_name,
resource_name,
base_class=base_class
)
self.cache.set_resource(service_name, resource_name, new_class)
return new_class |
def index_labels(labels, case_sensitive=False):
"""Convert a list of string identifiers into numerical indices.
Parameters
----------
labels : list of strings, shape=(n,)
A list of annotations, e.g., segment or chord labels from an
annotation file.
case_sensitive : bool
Set to True to enable case-sensitive label indexing
(Default value = False)
Returns
-------
indices : list, shape=(n,)
Numerical representation of ``labels``
index_to_label : dict
Mapping to convert numerical indices back to labels.
``labels[i] == index_to_label[indices[i]]``
"""
label_to_index = {}
index_to_label = {}
# If we're not case-sensitive,
if not case_sensitive:
labels = [str(s).lower() for s in labels]
# First, build the unique label mapping
for index, s in enumerate(sorted(set(labels))):
label_to_index[s] = index
index_to_label[index] = s
# Remap the labels to indices
indices = [label_to_index[s] for s in labels]
# Return the converted labels, and the inverse mapping
return indices, index_to_label | Convert a list of string identifiers into numerical indices.
Parameters
----------
labels : list of strings, shape=(n,)
A list of annotations, e.g., segment or chord labels from an
annotation file.
case_sensitive : bool
Set to True to enable case-sensitive label indexing
(Default value = False)
Returns
-------
indices : list, shape=(n,)
Numerical representation of ``labels``
index_to_label : dict
Mapping to convert numerical indices back to labels.
``labels[i] == index_to_label[indices[i]]`` | Below is the instruction that describes the task:
### Input:
Convert a list of string identifiers into numerical indices.
Parameters
----------
labels : list of strings, shape=(n,)
A list of annotations, e.g., segment or chord labels from an
annotation file.
case_sensitive : bool
Set to True to enable case-sensitive label indexing
(Default value = False)
Returns
-------
indices : list, shape=(n,)
Numerical representation of ``labels``
index_to_label : dict
Mapping to convert numerical indices back to labels.
``labels[i] == index_to_label[indices[i]]``
### Response:
def index_labels(labels, case_sensitive=False):
"""Convert a list of string identifiers into numerical indices.
Parameters
----------
labels : list of strings, shape=(n,)
A list of annotations, e.g., segment or chord labels from an
annotation file.
case_sensitive : bool
Set to True to enable case-sensitive label indexing
(Default value = False)
Returns
-------
indices : list, shape=(n,)
Numerical representation of ``labels``
index_to_label : dict
Mapping to convert numerical indices back to labels.
``labels[i] == index_to_label[indices[i]]``
"""
label_to_index = {}
index_to_label = {}
# If we're not case-sensitive,
if not case_sensitive:
labels = [str(s).lower() for s in labels]
# First, build the unique label mapping
for index, s in enumerate(sorted(set(labels))):
label_to_index[s] = index
index_to_label[index] = s
# Remap the labels to indices
indices = [label_to_index[s] for s in labels]
# Return the converted labels, and the inverse mapping
return indices, index_to_label |
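A small worked example of the mapping; matching is case-insensitive by default, so 'Verse' and 'verse' collapse to one index:
indices, index_to_label = index_labels(['Verse', 'chorus', 'verse', 'bridge'])
print(indices)         # [2, 1, 2, 0] -- sorted unique labels are bridge, chorus, verse
print(index_to_label)  # {0: 'bridge', 1: 'chorus', 2: 'verse'}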
def _indent(stream, indent, *msgs):
""" write a message to a text stream, with indentation. Also ensures that
the output encoding of the messages is safe for writing. """
for x in range(0, indent):
stream.write(" ")
for x in msgs:
# Any nicer way? In Py2 x can be 'str' or 'unicode'.
stream.write(x.encode("ascii", "backslashreplace").decode("ascii"))
stream.write("\n") | write a message to a text stream, with indentation. Also ensures that
the output encoding of the messages is safe for writing. | Below is the instruction that describes the task:
### Input:
write a message to a text stream, with indentation. Also ensures that
the output encoding of the messages is safe for writing.
### Response:
def _indent(stream, indent, *msgs):
""" write a message to a text stream, with indentation. Also ensures that
the output encoding of the messages is safe for writing. """
for x in range(0, indent):
stream.write(" ")
for x in msgs:
# Any nicer way? In Py2 x can be 'str' or 'unicode'.
stream.write(x.encode("ascii", "backslashreplace").decode("ascii"))
stream.write("\n") |
def set_widgets(self):
"""Set widgets on the Classify tab."""
purpose = self.parent.step_kw_purpose.selected_purpose()
subcategory = self.parent.step_kw_subcategory.selected_subcategory()
classification = self.parent.step_kw_classification.\
selected_classification()
classification_name = classification['name']
if is_raster_layer(self.parent.layer):
self.lblClassify.setText(classify_raster_question % (
subcategory['name'], purpose['name'], classification_name))
dataset = gdal.Open(self.parent.layer.source(), GA_ReadOnly)
active_band = self.parent.step_kw_band_selector.selected_band()
unique_values = numpy.unique(numpy.array(
dataset.GetRasterBand(active_band).ReadAsArray()))
field_type = 0
# Convert datatype to a json serializable type
if numpy.issubdtype(unique_values.dtype, float):
unique_values = [float(i) for i in unique_values]
else:
unique_values = [int(i) for i in unique_values]
else:
field = self.parent.step_kw_field.selected_fields()
field_index = self.parent.layer.fields().indexFromName(field)
field_type = self.parent.layer.fields()[field_index].type()
self.lblClassify.setText(classify_vector_question % (
subcategory['name'], purpose['name'],
classification_name, field.upper()))
unique_values = self.parent.layer.uniqueValues(field_index)
clean_unique_values = []
for unique_value in unique_values:
if (unique_value is None
or (hasattr(unique_value, 'isNull')
and unique_value.isNull())):
# Don't classify features with NULL value
continue
clean_unique_values.append(unique_value)
# get default classes
default_classes = deepcopy(classification['classes'])
if classification['key'] == data_driven_classes['key']:
for unique_value in clean_unique_values:
name = str(unique_value).upper().replace('_', ' ')
default_class = {'key': unique_value,
'name': name,
# 'description': tr('Settlement'),
'string_defaults': [name]}
default_classes.append(default_class)
# Assign unique values to classes (according to default)
unassigned_values = list()
assigned_values = dict()
for default_class in default_classes:
assigned_values[default_class['key']] = list()
for unique_value in clean_unique_values:
# Capitalization of the value and removing '_' (raw OSM data).
value_as_string = str(unique_value).upper().replace('_', ' ')
assigned = False
for default_class in default_classes:
if 'string_defaults' in default_class:
# To make it case insensitive
upper_string_defaults = [
c.upper() for c in default_class['string_defaults']]
in_string_default = (
value_as_string in upper_string_defaults)
condition_1 = field_type > 9 and in_string_default
else:
condition_1 = False
condition_2 = (
field_type < 10
and 'numeric_default_min' in default_class
and 'numeric_default_max' in default_class
and (default_class['numeric_default_min']
<= unique_value
<= default_class['numeric_default_max']))
if condition_1 or condition_2:
assigned_values[default_class['key']] += [unique_value]
assigned = True
if not assigned:
# add to unassigned values list otherwise
unassigned_values += [unique_value]
self.populate_classified_values(
unassigned_values, assigned_values, default_classes)
# Overwrite assigned values according to existing keyword (if present).
# Note the default_classes and unique_values are already loaded!
value_map = self.parent.get_existing_keyword('value_map')
value_map_classification_name = self.parent.get_existing_keyword(
'classification')
# Do not continue if there is no value_map in existing keywords
if (value_map is None
or value_map_classification_name != classification['key']):
return
# Do not continue if user selected different field
field_keyword = self.parent.field_keyword_for_the_layer()
field = self.parent.get_existing_keyword('inasafe_fields').get(
field_keyword)
if (not is_raster_layer(self.parent.layer)
and field != self.parent.step_kw_field.selected_fields()):
return
unassigned_values = list()
assigned_values = dict()
for default_class in default_classes:
assigned_values[default_class['key']] = list()
if isinstance(value_map, str):
try:
value_map = json.loads(value_map)
except ValueError:
return
for unique_value in clean_unique_values:
# check in value map
assigned = False
for key, value_list in list(value_map.items()):
if unique_value in value_list and key in assigned_values:
assigned_values[key] += [unique_value]
assigned = True
if not assigned:
unassigned_values += [unique_value]
self.populate_classified_values(
        unassigned_values, assigned_values, default_classes) | Set widgets on the Classify tab. | Below is the instruction that describes the task:
### Input:
Set widgets on the Classify tab.
### Response:
def set_widgets(self):
"""Set widgets on the Classify tab."""
purpose = self.parent.step_kw_purpose.selected_purpose()
subcategory = self.parent.step_kw_subcategory.selected_subcategory()
classification = self.parent.step_kw_classification.\
selected_classification()
classification_name = classification['name']
if is_raster_layer(self.parent.layer):
self.lblClassify.setText(classify_raster_question % (
subcategory['name'], purpose['name'], classification_name))
dataset = gdal.Open(self.parent.layer.source(), GA_ReadOnly)
active_band = self.parent.step_kw_band_selector.selected_band()
unique_values = numpy.unique(numpy.array(
dataset.GetRasterBand(active_band).ReadAsArray()))
field_type = 0
# Convert datatype to a json serializable type
if numpy.issubdtype(unique_values.dtype, float):
unique_values = [float(i) for i in unique_values]
else:
unique_values = [int(i) for i in unique_values]
else:
field = self.parent.step_kw_field.selected_fields()
field_index = self.parent.layer.fields().indexFromName(field)
field_type = self.parent.layer.fields()[field_index].type()
self.lblClassify.setText(classify_vector_question % (
subcategory['name'], purpose['name'],
classification_name, field.upper()))
unique_values = self.parent.layer.uniqueValues(field_index)
clean_unique_values = []
for unique_value in unique_values:
if (unique_value is None
or (hasattr(unique_value, 'isNull')
and unique_value.isNull())):
# Don't classify features with NULL value
continue
clean_unique_values.append(unique_value)
# get default classes
default_classes = deepcopy(classification['classes'])
if classification['key'] == data_driven_classes['key']:
for unique_value in clean_unique_values:
name = str(unique_value).upper().replace('_', ' ')
default_class = {'key': unique_value,
'name': name,
# 'description': tr('Settlement'),
'string_defaults': [name]}
default_classes.append(default_class)
# Assign unique values to classes (according to default)
unassigned_values = list()
assigned_values = dict()
for default_class in default_classes:
assigned_values[default_class['key']] = list()
for unique_value in clean_unique_values:
# Capitalization of the value and removing '_' (raw OSM data).
value_as_string = str(unique_value).upper().replace('_', ' ')
assigned = False
for default_class in default_classes:
if 'string_defaults' in default_class:
# To make it case insensitive
upper_string_defaults = [
c.upper() for c in default_class['string_defaults']]
in_string_default = (
value_as_string in upper_string_defaults)
condition_1 = field_type > 9 and in_string_default
else:
condition_1 = False
condition_2 = (
field_type < 10
and 'numeric_default_min' in default_class
and 'numeric_default_max' in default_class
and (default_class['numeric_default_min']
<= unique_value
<= default_class['numeric_default_max']))
if condition_1 or condition_2:
assigned_values[default_class['key']] += [unique_value]
assigned = True
if not assigned:
# add to unassigned values list otherwise
unassigned_values += [unique_value]
self.populate_classified_values(
unassigned_values, assigned_values, default_classes)
# Overwrite assigned values according to existing keyword (if present).
# Note the default_classes and unique_values are already loaded!
value_map = self.parent.get_existing_keyword('value_map')
value_map_classification_name = self.parent.get_existing_keyword(
'classification')
# Do not continue if there is no value_map in existing keywords
if (value_map is None
or value_map_classification_name != classification['key']):
return
# Do not continue if user selected different field
field_keyword = self.parent.field_keyword_for_the_layer()
field = self.parent.get_existing_keyword('inasafe_fields').get(
field_keyword)
if (not is_raster_layer(self.parent.layer)
and field != self.parent.step_kw_field.selected_fields()):
return
unassigned_values = list()
assigned_values = dict()
for default_class in default_classes:
assigned_values[default_class['key']] = list()
if isinstance(value_map, str):
try:
value_map = json.loads(value_map)
except ValueError:
return
for unique_value in clean_unique_values:
# check in value map
assigned = False
for key, value_list in list(value_map.items()):
if unique_value in value_list and key in assigned_values:
assigned_values[key] += [unique_value]
assigned = True
if not assigned:
unassigned_values += [unique_value]
self.populate_classified_values(
unassigned_values, assigned_values, default_classes) |
def get_clear_pin(pinblock, account_number):
"""
Calculate the clear PIN from provided PIN block and account_number, which is the 12 right-most digits of card account number, excluding check digit
"""
raw_pinblock = bytes.fromhex(pinblock.decode('utf-8'))
raw_acct_num = bytes.fromhex((b'0000' + account_number).decode('utf-8'))
pin_str = xor(raw2B(raw_pinblock), raw2B(raw_acct_num)).decode('utf-8')
pin_length = int(pin_str[:2], 16)
if pin_length >= 4 and pin_length < 9:
pin = pin_str[2:2+pin_length]
try:
int(pin)
except ValueError:
raise ValueError('PIN contains non-numeric characters')
return bytes(pin, 'utf-8')
else:
        raise ValueError('Incorrect PIN length: {}'.format(pin_length)) | Calculate the clear PIN from provided PIN block and account_number, which is the 12 right-most digits of card account number, excluding check digit | Below is the instruction that describes the task:
### Input:
Calculate the clear PIN from provided PIN block and account_number, which is the 12 right-most digits of card account number, excluding check digit
### Response:
def get_clear_pin(pinblock, account_number):
"""
Calculate the clear PIN from provided PIN block and account_number, which is the 12 right-most digits of card account number, excluding check digit
"""
raw_pinblock = bytes.fromhex(pinblock.decode('utf-8'))
raw_acct_num = bytes.fromhex((b'0000' + account_number).decode('utf-8'))
pin_str = xor(raw2B(raw_pinblock), raw2B(raw_acct_num)).decode('utf-8')
pin_length = int(pin_str[:2], 16)
if pin_length >= 4 and pin_length < 9:
pin = pin_str[2:2+pin_length]
try:
int(pin)
except ValueError:
raise ValueError('PIN contains non-numeric characters')
return bytes(pin, 'utf-8')
else:
raise ValueError('Incorrect PIN length: {}'.format(pin_length)) |
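An illustrative ISO format-0 round trip with fabricated values, assuming the module's raw2B/xor helpers perform hex-string conversion and character-wise XOR as used above: a clear PIN of 1234 with account digits 123456789012 gives PIN field 041234FFFFFFFFFF, and therefore the PIN block shown below.
print(get_clear_pin(b'041226CBA9876FED', b'123456789012'))  # b'1234'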
def process_raw(self, raw: dict) -> None:
"""Pre-process raw dict.
Prepare parameters to work with APIItems.
"""
raw_ports = {}
for param in raw:
port_index = REGEX_PORT_INDEX.search(param).group(0)
if port_index not in raw_ports:
raw_ports[port_index] = {}
name = param.replace(IOPORT + '.I' + port_index + '.', '')
raw_ports[port_index][name] = raw[param]
super().process_raw(raw_ports) | Pre-process raw dict.
Prepare parameters to work with APIItems. | Below is the instruction that describes the task:
### Input:
Pre-process raw dict.
Prepare parameters to work with APIItems.
### Response:
def process_raw(self, raw: dict) -> None:
"""Pre-process raw dict.
Prepare parameters to work with APIItems.
"""
raw_ports = {}
for param in raw:
port_index = REGEX_PORT_INDEX.search(param).group(0)
if port_index not in raw_ports:
raw_ports[port_index] = {}
name = param.replace(IOPORT + '.I' + port_index + '.', '')
raw_ports[port_index][name] = raw[param]
super().process_raw(raw_ports) |
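A sketch of the reshaping performed above, assuming IOPORT resolves to 'root.IOPort' and REGEX_PORT_INDEX captures the port number; the parameter names are illustrative:
raw = {
    'root.IOPort.I0.Configurable': 'yes',
    'root.IOPort.I0.Direction': 'input',
    'root.IOPort.I1.Direction': 'output',
}
# grouped per port index before being handed to the parent class:
# {'0': {'Configurable': 'yes', 'Direction': 'input'},
#  '1': {'Direction': 'output'}}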
def create_api_ipv4(self):
"""Get an instance of Api IPv4 services facade."""
return ApiIPv4(
self.networkapi_url,
self.user,
self.password,
        self.user_ldap) | Get an instance of Api IPv4 services facade. | Below is the instruction that describes the task:
### Input:
Get an instance of Api IPv4 services facade.
### Response:
def create_api_ipv4(self):
"""Get an instance of Api IPv4 services facade."""
return ApiIPv4(
self.networkapi_url,
self.user,
self.password,
self.user_ldap) |
def wait_for_exit(
log,
debug=False):
"""wait_for_exit
Sleep to allow the thread to pick up final messages
before exiting and stopping the Splunk HTTP publisher.
You can decrease this delay (in seconds) by reducing
the splunk_sleep_interval or by exporting the env var:
export SPLUNK_SLEEP_INTERVAL=0.5
If you set the timer to 0 then it will be a blocking HTTP POST sent
to Splunk for each log message. This creates a blocking logger in
your application that will wait until each log's HTTP POST
was received before continuing.
Note: Reducing this Splunk sleep timer could result in losing
messages that were stuck in the queue when the
parent process exits. The multiprocessing
Splunk Publisher was built to do this, but will
not work in certain frameworks like Celery
as it requires access to spawn daemon processes to
prevent this 'message loss' case during exiting.
Applications using this library should ensure
there's no critical log messages stuck in a queue
when stopping a long-running process.
:param log: created logger
:param debug: bool to debug with prints
"""
debug = SPLUNK_DEBUG
for i in log.root.handlers:
handler_class_name = i.__class__.__name__.lower()
if debug:
print((
' - wait_for_exit handler={}').format(
handler_class_name))
if ('splunkpublisher' == handler_class_name
or 'mpsplunkpublisher' == handler_class_name):
if hasattr(i, 'sleep_interval'):
total_sleep = i.sleep_interval + 2.0
if os.getenv(
'PUBLISHER_EXIT_DELAY',
False):
total_sleep = float(os.getenv(
'PUBLISHER_EXIT_DELAY',
total_sleep))
if debug:
print((
' - wait_for_exit '
'handler={} wait={}s').format(
handler_class_name,
total_sleep))
time.sleep(total_sleep)
if debug:
print((
'done waiting for exit'))
return
else:
print((
                    ' - wait_for_exit handler={} has no '
'sleep_interval').format(
handler_class_name)) | wait_for_exit
Sleep to allow the thread to pick up final messages
before exiting and stopping the Splunk HTTP publisher.
You can decrease this delay (in seconds) by reducing
the splunk_sleep_interval or by exporting the env var:
export SPLUNK_SLEEP_INTERVAL=0.5
If you set the timer to 0 then it will be a blocking HTTP POST sent
to Splunk for each log message. This creates a blocking logger in
your application that will wait until each log's HTTP POST
was received before continuing.
Note: Reducing this Splunk sleep timer could result in losing
messages that were stuck in the queue when the
parent process exits. The multiprocessing
Splunk Publisher was built to do this, but will
not work in certain frameworks like Celery
as it requires access to spawn daemon processes to
prevent this 'message loss' case during exiting.
Applications using this library should ensure
there's no critical log messages stuck in a queue
when stopping a long-running process.
:param log: created logger
:param debug: bool to debug with prints | Below is the instruction that describes the task:
### Input:
wait_for_exit
Sleep to allow the thread to pick up final messages
before exiting and stopping the Splunk HTTP publisher.
You can decrease this delay (in seconds) by reducing
the splunk_sleep_interval or by exporting the env var:
export SPLUNK_SLEEP_INTERVAL=0.5
If you set the timer to 0 then it will be a blocking HTTP POST sent
to Splunk for each log message. This creates a blocking logger in
your application that will wait until each log's HTTP POST
was received before continuing.
Note: Reducing this Splunk sleep timer could result in losing
messages that were stuck in the queue when the
parent process exits. The multiprocessing
Splunk Publisher was built to do this, but will
not work in certain frameworks like Celery
as it requires access to spawn daemon processes to
prevent this 'message loss' case during exiting.
Applications using this library should ensure
there's no critical log messages stuck in a queue
when stopping a long-running process.
:param log: created logger
:param debug: bool to debug with prints
### Response:
def wait_for_exit(
log,
debug=False):
"""wait_for_exit
Sleep to allow the thread to pick up final messages
before exiting and stopping the Splunk HTTP publisher.
You can decrease this delay (in seconds) by reducing
the splunk_sleep_interval or by exporting the env var:
export SPLUNK_SLEEP_INTERVAL=0.5
If you set the timer to 0 then it will be a blocking HTTP POST sent
to Splunk for each log message. This creates a blocking logger in
your application that will wait until each log's HTTP POST
was received before continuing.
Note: Reducing this Splunk sleep timer could result in losing
messages that were stuck in the queue when the
parent process exits. The multiprocessing
Splunk Publisher was built to do this, but will
not work in certain frameworks like Celery
as it requires access to spawn daemon processes to
prevent this 'message loss' case during exiting.
Applications using this library should ensure
there's no critical log messages stuck in a queue
when stopping a long-running process.
:param log: created logger
:param debug: bool to debug with prints
"""
debug = SPLUNK_DEBUG
for i in log.root.handlers:
handler_class_name = i.__class__.__name__.lower()
if debug:
print((
' - wait_for_exit handler={}').format(
handler_class_name))
if ('splunkpublisher' == handler_class_name
or 'mpsplunkpublisher' == handler_class_name):
if hasattr(i, 'sleep_interval'):
total_sleep = i.sleep_interval + 2.0
if os.getenv(
'PUBLISHER_EXIT_DELAY',
False):
total_sleep = float(os.getenv(
'PUBLISHER_EXIT_DELAY',
total_sleep))
if debug:
print((
' - wait_for_exit '
'handler={} wait={}s').format(
handler_class_name,
total_sleep))
time.sleep(total_sleep)
if debug:
print((
'done waiting for exit'))
return
else:
print((
                    ' - wait_for_exit handler={} has no '
'sleep_interval').format(
handler_class_name)) |
def _role_remove(name, user=None, host=None, port=None, maintenance_db=None,
password=None, runas=None):
'''
Removes a role from the Postgres Server
'''
# check if user exists
if not user_exists(name, user, host, port, maintenance_db,
password=password, runas=runas):
log.info('User \'%s\' does not exist', name)
return False
# user exists, proceed
sub_cmd = 'DROP ROLE "{0}"'.format(name)
_psql_prepare_and_run(
['-c', sub_cmd],
runas=runas, host=host, user=user, port=port,
maintenance_db=maintenance_db, password=password)
if not user_exists(name, user, host, port, maintenance_db,
password=password, runas=runas):
return True
else:
log.info('Failed to delete user \'%s\'.', name)
        return False | Removes a role from the Postgres Server | Below is the instruction that describes the task:
### Input:
Removes a role from the Postgres Server
### Response:
def _role_remove(name, user=None, host=None, port=None, maintenance_db=None,
password=None, runas=None):
'''
Removes a role from the Postgres Server
'''
# check if user exists
if not user_exists(name, user, host, port, maintenance_db,
password=password, runas=runas):
log.info('User \'%s\' does not exist', name)
return False
# user exists, proceed
sub_cmd = 'DROP ROLE "{0}"'.format(name)
_psql_prepare_and_run(
['-c', sub_cmd],
runas=runas, host=host, user=user, port=port,
maintenance_db=maintenance_db, password=password)
if not user_exists(name, user, host, port, maintenance_db,
password=password, runas=runas):
return True
else:
log.info('Failed to delete user \'%s\'.', name)
return False |
def prepare_storage_dir(storage_directory):
"""Prepare the storage directory."""
storage_directory = os.path.expanduser(storage_directory)
if not os.path.exists(storage_directory):
os.mkdir(storage_directory)
    return storage_directory | Prepare the storage directory. | Below is the instruction that describes the task:
### Input:
Prepare the storage directory.
### Response:
def prepare_storage_dir(storage_directory):
"""Prepare the storage directory."""
storage_directory = os.path.expanduser(storage_directory)
if not os.path.exists(storage_directory):
os.mkdir(storage_directory)
return storage_directory |
def rl_cleanspaces(x):
"""
Clean double spaces, trailing spaces, heading spaces,
spaces before punctuations
"""
patterns = (
# arguments for re.sub: pattern and repl
        # remove the space before punctuation marks
(r' +([\.,?!\)]+)', r'\1'),
        # add a space after a punctuation mark, unless it is followed by another one
(r'([\.,?!\)]+)([^\.!,?\)]+)', r'\1 \2'),
        # remove the space after an opening bracket
(r'(\S+)\s*(\()\s*(\S+)', r'\1 (\3'),
)
    # remove double, leading and trailing spaces
return os.linesep.join(
' '.join(part for part in line.split(' ') if part)
for line in _sub_patterns(patterns, x).split(os.linesep)
) | Clean double spaces, trailing spaces, heading spaces,
spaces before punctuations | Below is the instruction that describes the task:
### Input:
Clean double spaces, trailing spaces, heading spaces,
spaces before punctuations
### Response:
def rl_cleanspaces(x):
"""
Clean double spaces, trailing spaces, heading spaces,
spaces before punctuations
"""
patterns = (
# arguments for re.sub: pattern and repl
        # remove the space before punctuation marks
(r' +([\.,?!\)]+)', r'\1'),
        # add a space after a punctuation mark, unless it is followed by another one
(r'([\.,?!\)]+)([^\.!,?\)]+)', r'\1 \2'),
        # remove the space after an opening bracket
(r'(\S+)\s*(\()\s*(\S+)', r'\1 (\3'),
)
    # remove double, leading and trailing spaces
return os.linesep.join(
' '.join(part for part in line.split(' ') if part)
for line in _sub_patterns(patterns, x).split(os.linesep)
) |
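A single-line example of the normalisation; the input string is made up:
print(rl_cleanspaces('Hello ,  world ( again )!'))
# Hello, world (again)!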
def resize(line, factor):
"""
factor: relative length (1->no change, 2-> double, 0.5:half)
"""
a = angle(line)
mx, my = middle(line)
d = length(line) * factor * 0.5
dx = cos(a) * d
dy = sin(a) * d
    return mx - dx, my - dy, mx + dx, my + dy | factor: relative length (1->no change, 2-> double, 0.5:half) | Below is the instruction that describes the task:
### Input:
factor: relative length (1->no change, 2-> double, 0.5:half)
### Response:
def resize(line, factor):
"""
factor: relative length (1->no change, 2-> double, 0.5:half)
"""
a = angle(line)
mx, my = middle(line)
d = length(line) * factor * 0.5
dx = cos(a) * d
dy = sin(a) * d
return mx - dx, my - dy, mx + dx, my + dy |
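A quick geometric check, assuming the helpers angle(), middle() and length() used above take (x1, y1, x2, y2) tuples: scaling a horizontal segment about its midpoint.
print(resize((0.0, 0.0, 10.0, 0.0), 2))    # (-5.0, 0.0, 15.0, 0.0): doubled
print(resize((0.0, 0.0, 10.0, 0.0), 0.5))  # (2.5, 0.0, 7.5, 0.0): halved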
def has_option(self, option):
"""
Return True if the option is included in this key.
Parameters
----------
option : str
The option.
Returns
-------
has : bool
True if the option can be found. Otherwise False will be returned.
"""
if len(self.options) == 0:
return False
for op in self.options:
if (self._sized_op and op[0] == option) or (op == option):
return True
return False | Return True if the option is included in this key.
Parameters
----------
option : str
The option.
Returns
-------
has : bool
True if the option can be found. Otherwise False will be returned. | Below is the instruction that describes the task:
### Input:
Return True if the option is included in this key.
Parameters
----------
option : str
The option.
Returns
-------
has : bool
True if the option can be found. Otherwise False will be returned.
### Response:
def has_option(self, option):
"""
Return True if the option is included in this key.
Parameters
----------
option : str
The option.
Returns
-------
has : bool
True if the option can be found. Otherwise False will be returned.
"""
if len(self.options) == 0:
return False
for op in self.options:
if (self._sized_op and op[0] == option) or (op == option):
return True
return False |
def request(self, api_query, url=None):
"""
e.g. {'action': 'query', 'meta': 'userinfo'}. format=json not required
function returns a python dict that resembles the api's json response
"""
api_query['format'] = 'json'
if url is not None:
api_url = url + "/api.php"
else:
api_url = self.api_url
size = sum([sys.getsizeof(v) for k, v in iteritems(api_query)])
if size > (1024 * 8):
# if request is bigger than 8 kB (the limit is somewhat arbitrary,
# see https://www.mediawiki.org/wiki/API:Edit#Large_texts) then
# transmit as multipart message
req = self._prepare_long_request(url=api_url,
api_query=api_query)
req.send()
if self.return_json:
return req.response.json()
else:
return req.response.text
else:
auth1 = OAuth1(
self.consumer_token.key,
client_secret=self.consumer_token.secret,
resource_owner_key=session['mwoauth_access_token']['key'],
resource_owner_secret=session['mwoauth_access_token']['secret'])
if self.return_json:
return requests.post(api_url, data=api_query, auth=auth1).json()
else:
return requests.post(api_url, data=api_query, auth=auth1).text | e.g. {'action': 'query', 'meta': 'userinfo'}. format=json not required
function returns a python dict that resembles the api's json response | Below is the instruction that describes the task:
### Input:
e.g. {'action': 'query', 'meta': 'userinfo'}. format=json not required
function returns a python dict that resembles the api's json response
### Response:
def request(self, api_query, url=None):
"""
e.g. {'action': 'query', 'meta': 'userinfo'}. format=json not required
function returns a python dict that resembles the api's json response
"""
api_query['format'] = 'json'
if url is not None:
api_url = url + "/api.php"
else:
api_url = self.api_url
size = sum([sys.getsizeof(v) for k, v in iteritems(api_query)])
if size > (1024 * 8):
# if request is bigger than 8 kB (the limit is somewhat arbitrary,
# see https://www.mediawiki.org/wiki/API:Edit#Large_texts) then
# transmit as multipart message
req = self._prepare_long_request(url=api_url,
api_query=api_query)
req.send()
if self.return_json:
return req.response.json()
else:
return req.response.text
else:
auth1 = OAuth1(
self.consumer_token.key,
client_secret=self.consumer_token.secret,
resource_owner_key=session['mwoauth_access_token']['key'],
resource_owner_secret=session['mwoauth_access_token']['secret'])
if self.return_json:
return requests.post(api_url, data=api_query, auth=auth1).json()
else:
return requests.post(api_url, data=api_query, auth=auth1).text |
def get_dbg_brk_linux64():
'''
Return the current brk value in the debugged process (only x86_64 Linux)
'''
# TODO this method is so weird, find a unused address to inject code not
# the base address
debugger = get_debugger()
code = b'\x0f\x05' # syscall
rax = debugger.get_reg("rax")
rdi = debugger.get_reg("rdi")
rip = debugger.get_reg("rip")
efl = debugger.get_reg("efl")
debugger.set_reg("rax", 12) # sys_brk
debugger.set_reg("rdi", 0)
base = debugger.image_base()
inj = base
save = debugger.get_bytes(inj, len(code))
debugger.put_bytes(inj, code)
debugger.set_reg("rip", inj)
debugger.step_into()
debugger.wait_ready()
brk_res = debugger.get_reg("rax")
debugger.set_reg("rax", rax)
debugger.set_reg("rdi", rdi)
debugger.set_reg("rip", rip)
debugger.set_reg("efl", efl)
debugger.put_bytes(inj, save)
    return brk_res | Return the current brk value in the debugged process (only x86_64 Linux) | Below is the instruction that describes the task:
### Input:
Return the current brk value in the debugged process (only x86_64 Linux)
### Response:
def get_dbg_brk_linux64():
'''
Return the current brk value in the debugged process (only x86_64 Linux)
'''
# TODO this method is so weird, find a unused address to inject code not
# the base address
debugger = get_debugger()
code = b'\x0f\x05' # syscall
rax = debugger.get_reg("rax")
rdi = debugger.get_reg("rdi")
rip = debugger.get_reg("rip")
efl = debugger.get_reg("efl")
debugger.set_reg("rax", 12) # sys_brk
debugger.set_reg("rdi", 0)
base = debugger.image_base()
inj = base
save = debugger.get_bytes(inj, len(code))
debugger.put_bytes(inj, code)
debugger.set_reg("rip", inj)
debugger.step_into()
debugger.wait_ready()
brk_res = debugger.get_reg("rax")
debugger.set_reg("rax", rax)
debugger.set_reg("rdi", rdi)
debugger.set_reg("rip", rip)
debugger.set_reg("efl", efl)
debugger.put_bytes(inj, save)
return brk_res |
def psubscribe(self, *args, **kwargs):
"""
Subscribe to channel patterns. Patterns supplied as keyword arguments
expect a pattern name as the key and a callable as the value. A
pattern's callable will be invoked automatically when a message is
received on that pattern rather than producing a message via
``listen()``.
"""
if args:
args = list_or_args(args[0], args[1:])
new_patterns = dict.fromkeys(args)
new_patterns.update(kwargs)
ret_val = self.execute_command('PSUBSCRIBE', *iterkeys(new_patterns))
# update the patterns dict AFTER we send the command. we don't want to
# subscribe twice to these patterns, once for the command and again
# for the reconnection.
new_patterns = self._normalize_keys(new_patterns)
self.patterns.update(new_patterns)
self.pending_unsubscribe_patterns.difference_update(new_patterns)
return ret_val | Subscribe to channel patterns. Patterns supplied as keyword arguments
expect a pattern name as the key and a callable as the value. A
pattern's callable will be invoked automatically when a message is
received on that pattern rather than producing a message via
``listen()``. | Below is the instruction that describes the task:
### Input:
Subscribe to channel patterns. Patterns supplied as keyword arguments
expect a pattern name as the key and a callable as the value. A
pattern's callable will be invoked automatically when a message is
received on that pattern rather than producing a message via
``listen()``.
### Response:
def psubscribe(self, *args, **kwargs):
"""
Subscribe to channel patterns. Patterns supplied as keyword arguments
expect a pattern name as the key and a callable as the value. A
pattern's callable will be invoked automatically when a message is
received on that pattern rather than producing a message via
``listen()``.
"""
if args:
args = list_or_args(args[0], args[1:])
new_patterns = dict.fromkeys(args)
new_patterns.update(kwargs)
ret_val = self.execute_command('PSUBSCRIBE', *iterkeys(new_patterns))
# update the patterns dict AFTER we send the command. we don't want to
# subscribe twice to these patterns, once for the command and again
# for the reconnection.
new_patterns = self._normalize_keys(new_patterns)
self.patterns.update(new_patterns)
self.pending_unsubscribe_patterns.difference_update(new_patterns)
return ret_val |
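Typical redis-py usage of the two calling styles described above; this is a sketch and assumes a reachable Redis server:
import redis

p = redis.Redis().pubsub()

def sports_handler(message):
    print(message['channel'], message['data'])

p.psubscribe('news.*')                        # messages surface via listen()/get_message()
p.psubscribe(**{'sports.*': sports_handler})  # handler is invoked instead of queueing a message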
def get_logger(name, level=None, fmt=':%(lineno)d: %(message)s'):
"""
Return a logger.
Args:
name (str): name to pass to the logging module.
level (int): level of logging.
fmt (str): format string.
Returns:
logging.Logger: logger from ``logging.getLogger``.
"""
if name not in Logger.loggers:
if Logger.level is None and level is None:
Logger.level = level = logging.ERROR
elif Logger.level is None:
Logger.level = level
elif level is None:
level = Logger.level
logger = logging.getLogger(name)
logger_handler = logging.StreamHandler()
logger_handler.setFormatter(LoggingFormatter(fmt=name + fmt))
logger.addHandler(logger_handler)
logger.setLevel(level)
Logger.loggers[name] = logger
return Logger.loggers[name] | Return a logger.
Args:
name (str): name to pass to the logging module.
level (int): level of logging.
fmt (str): format string.
Returns:
logging.Logger: logger from ``logging.getLogger``. | Below is the instruction that describes the task:
### Input:
Return a logger.
Args:
name (str): name to pass to the logging module.
level (int): level of logging.
fmt (str): format string.
Returns:
logging.Logger: logger from ``logging.getLogger``.
### Response:
def get_logger(name, level=None, fmt=':%(lineno)d: %(message)s'):
"""
Return a logger.
Args:
name (str): name to pass to the logging module.
level (int): level of logging.
fmt (str): format string.
Returns:
logging.Logger: logger from ``logging.getLogger``.
"""
if name not in Logger.loggers:
if Logger.level is None and level is None:
Logger.level = level = logging.ERROR
elif Logger.level is None:
Logger.level = level
elif level is None:
level = Logger.level
logger = logging.getLogger(name)
logger_handler = logging.StreamHandler()
logger_handler.setFormatter(LoggingFormatter(fmt=name + fmt))
logger.addHandler(logger_handler)
logger.setLevel(level)
Logger.loggers[name] = logger
return Logger.loggers[name] |
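A brief usage sketch of the cached factory above; the logger name is illustrative:
import logging

log = get_logger('my_tool', level=logging.INFO)
log.info('starting up')              # emitted through the attached StreamHandler
assert get_logger('my_tool') is log  # repeated calls return the cached instance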
def align(self, arr):
"""
Align columns, including column headers
"""
if arr is None:
return arr
c_hdrs = self._get_col_hdrs()
if self.show_col_hdr_in_cell:
for hdr in c_hdrs:
arr[hdr] = map(lambda col: ":".join([hdr, str(col)]), arr[hdr])
if self.show_col_hdrs:
widths = [max(len(str(col))
for col in arr[hdr].tolist() + [hdr]) for hdr in c_hdrs]
else:
widths = [max(len(str(col))
for col in arr[hdr].tolist()) for hdr in c_hdrs]
# align column headers
    c_hdrs = [c_hdr.ljust(width) for c_hdr, width in zip(c_hdrs, widths)]
# align data
for n_row in range(len(arr)):
        arr[n_row] = tuple(col.ljust(width)
                           for col, width in zip(arr[n_row], widths))
    return arr, c_hdrs, widths | Align columns, including column headers | Below is the instruction that describes the task:
### Input:
Align columns, including column headers
### Response:
def align(self, arr):
"""
Align columns, including column headers
"""
if arr is None:
return arr
c_hdrs = self._get_col_hdrs()
if self.show_col_hdr_in_cell:
for hdr in c_hdrs:
arr[hdr] = map(lambda col: ":".join([hdr, str(col)]), arr[hdr])
if self.show_col_hdrs:
widths = [max(len(str(col))
for col in arr[hdr].tolist() + [hdr]) for hdr in c_hdrs]
else:
widths = [max(len(str(col))
for col in arr[hdr].tolist()) for hdr in c_hdrs]
# align column headers
    c_hdrs = [c_hdr.ljust(width) for c_hdr, width in zip(c_hdrs, widths)]
# align data
for n_row in range(len(arr)):
        arr[n_row] = tuple(col.ljust(width)
                           for col, width in zip(arr[n_row], widths))
return arr, c_hdrs, widths |
def translate_identifiers(self, identifiers, language):
"""
Translate a list of identifiers to item ids. Identifier is a string of
the following form:
<model_prefix>/<model_identifier>
where <model_prefix> is any suffix of database table of the given model
which uniquely specifies the table, and <model_identifier> is
identifier of the object.
Args:
identifiers (list[str]): list of identifiers
language (str): language used for further filtering (some objects
for different languages share the same item
Returns:
dict: identifier -> item id
"""
result = {}
identifiers = set(identifiers)
item_types = ItemType.objects.get_all_types()
for item_type_id, type_identifiers in proso.list.group_by(identifiers, by=lambda identifier: self.get_item_type_id_from_identifier(identifier, item_types)).items():
to_find = {}
for identifier in type_identifiers:
identifier_split = identifier.split('/')
to_find[identifier_split[1]] = identifier
kwargs = {'identifier__in': list(to_find.keys())}
item_type = ItemType.objects.get_all_types()[item_type_id]
model = ItemType.objects.get_model(item_type_id)
if 'language' in item_type:
kwargs[item_type['language']] = language
for identifier, item_id in model.objects.filter(**kwargs).values_list('identifier', item_type['foreign_key']):
result[to_find[identifier]] = item_id
if len(result) != len(identifiers):
raise HttpError(404, "Can't translate the following identifiers: {}".format(set(identifiers) - set(result.keys())), 'identifier_not_found')
return result | Translate a list of identifiers to item ids. Identifier is a string of
the following form:
<model_prefix>/<model_identifier>
where <model_prefix> is any suffix of database table of the given model
which uniquely specifies the table, and <model_identifier> is
identifier of the object.
Args:
identifiers (list[str]): list of identifiers
language (str): language used for further filtering (some objects
for different languages share the same item
Returns:
dict: identifier -> item id | Below is the instruction that describes the task:
### Input:
Translate a list of identifiers to item ids. Identifier is a string of
the following form:
<model_prefix>/<model_identifier>
where <model_prefix> is any suffix of database table of the given model
which uniquely specifies the table, and <model_identifier> is
identifier of the object.
Args:
identifiers (list[str]): list of identifiers
language (str): language used for further filtering (some objects
for different languages share the same item
Returns:
dict: identifier -> item id
### Response:
def translate_identifiers(self, identifiers, language):
"""
Translate a list of identifiers to item ids. Identifier is a string of
the following form:
<model_prefix>/<model_identifier>
where <model_prefix> is any suffix of database table of the given model
which uniquely specifies the table, and <model_identifier> is
identifier of the object.
Args:
identifiers (list[str]): list of identifiers
language (str): language used for further filtering (some objects
for different languages share the same item
Returns:
dict: identifier -> item id
"""
result = {}
identifiers = set(identifiers)
item_types = ItemType.objects.get_all_types()
for item_type_id, type_identifiers in proso.list.group_by(identifiers, by=lambda identifier: self.get_item_type_id_from_identifier(identifier, item_types)).items():
to_find = {}
for identifier in type_identifiers:
identifier_split = identifier.split('/')
to_find[identifier_split[1]] = identifier
kwargs = {'identifier__in': list(to_find.keys())}
item_type = ItemType.objects.get_all_types()[item_type_id]
model = ItemType.objects.get_model(item_type_id)
if 'language' in item_type:
kwargs[item_type['language']] = language
for identifier, item_id in model.objects.filter(**kwargs).values_list('identifier', item_type['foreign_key']):
result[to_find[identifier]] = item_id
if len(result) != len(identifiers):
raise HttpError(404, "Can't translate the following identifiers: {}".format(set(identifiers) - set(result.keys())), 'identifier_not_found')
return result |
def _set_sftp(self, v, load=False):
"""
Setter method for sftp, mapped from YANG variable /brocade_firmware_rpc/firmware_download/input/sftp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_sftp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sftp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=sftp.sftp, is_container='container', presence=False, yang_name="sftp", rest_name="sftp", parent=self, choice=(u'protocol-type', u'sftp-protocol'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sftp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=sftp.sftp, is_container='container', presence=False, yang_name="sftp", rest_name="sftp", parent=self, choice=(u'protocol-type', u'sftp-protocol'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)""",
})
self.__sftp = t
if hasattr(self, '_set'):
self._set() | Setter method for sftp, mapped from YANG variable /brocade_firmware_rpc/firmware_download/input/sftp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_sftp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sftp() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for sftp, mapped from YANG variable /brocade_firmware_rpc/firmware_download/input/sftp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_sftp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sftp() directly.
### Response:
def _set_sftp(self, v, load=False):
"""
Setter method for sftp, mapped from YANG variable /brocade_firmware_rpc/firmware_download/input/sftp (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_sftp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_sftp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=sftp.sftp, is_container='container', presence=False, yang_name="sftp", rest_name="sftp", parent=self, choice=(u'protocol-type', u'sftp-protocol'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """sftp must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=sftp.sftp, is_container='container', presence=False, yang_name="sftp", rest_name="sftp", parent=self, choice=(u'protocol-type', u'sftp-protocol'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-firmware', defining_module='brocade-firmware', yang_type='container', is_config=True)""",
})
self.__sftp = t
if hasattr(self, '_set'):
self._set() |
def is_installed(self):
"""Return True if the bundle is installed."""
r = self.library.resolve(self.identity.vid)
return r is not None | Return True if the bundle is installed. | Below is the the instruction that describes the task:
### Input:
Return True if the bundle is installed.
### Response:
def is_installed(self):
"""Return True if the bundle is installed."""
r = self.library.resolve(self.identity.vid)
return r is not None |
def gru_cell(input_layer,
state,
num_units,
bias=tf.zeros_initializer(),
weights=None,
phase=prettytensor.Phase.train,
parameter_modifier=parameters.identity):
"""Gated recurrent unit memory cell (GRU).
Args:
input_layer: The input layer.
state: The current state of the network. For GRUs, this is a list with
one element (tensor) of shape [batch, num_units].
num_units: How big is the hidden state.
bias: An initializer for the bias or a Tensor. No bias if set to None.
weights: An initializer for weights or a Tensor.
phase: The phase of graph construction. See `pt.Phase`.
parameter_modifier: A function to modify parameters that is applied after
creation and before use.
Returns:
A RecurrentResult.
"""
# As a compound op, it needs to respect whether or not this is a sequential
# builder.
if input_layer.is_sequential_builder():
layer = input_layer.as_layer()
else:
layer = input_layer
# We start with bias of 1.0 to not reset and not update.
# NB We compute activation_input and activation_state in two different ops,
# instead of concatenating them, followed by one matrix multiplication. The
# reason is that input has size [batch_size x input_size], while state has
# [ ? x state_size ], where the first dimension is 1 initially and will be
# batch_size only after the first RNN computation. We thus cannot concatenate
# input and state, and instead add the results of two fully connected ops,
# which works thanks to broadcasting, independent of state's batch size.
state = state[0]
state_pt = prettytensor.wrap(state, layer.bookkeeper)
activation_input = layer.fully_connected(
2 * num_units,
bias=None if bias is None else tf.constant_initializer(1.0),
activation_fn=None,
weights=weights,
phase=phase,
parameter_modifier=parameter_modifier)
activation_state = state_pt.fully_connected(
2 * num_units,
bias=None,
activation_fn=None,
weights=weights,
phase=phase,
parameter_modifier=parameter_modifier)
# adds batch_size x (2 * num_units) + ? x (2 * num_units)
activation = activation_input + activation_state
activation = activation.sigmoid()
split = activation.split(1, 2)
r = split[0]
u = split[1]
c = layer.concat(1, [r * state]).fully_connected(
num_units,
bias=bias,
activation_fn=None,
weights=weights,
phase=phase,
parameter_modifier=parameter_modifier).apply(tf.tanh)
new_h = u * state + (1 - u) * c
if input_layer.is_sequential_builder():
new_h = input_layer.set_head(input_layer)
return RecurrentResult(new_h, [new_h]) | Gated recurrent unit memory cell (GRU).
Args:
input_layer: The input layer.
state: The current state of the network. For GRUs, this is a list with
one element (tensor) of shape [batch, num_units].
num_units: How big is the hidden state.
bias: An initializer for the bias or a Tensor. No bias if set to None.
weights: An initializer for weights or a Tensor.
phase: The phase of graph construction. See `pt.Phase`.
parameter_modifier: A function to modify parameters that is applied after
creation and before use.
Returns:
A RecurrentResult. | Below is the the instruction that describes the task:
### Input:
Gated recurrent unit memory cell (GRU).
Args:
input_layer: The input layer.
state: The current state of the network. For GRUs, this is a list with
one element (tensor) of shape [batch, num_units].
num_units: How big is the hidden state.
bias: An initializer for the bias or a Tensor. No bias if set to None.
weights: An initializer for weights or a Tensor.
phase: The phase of graph construction. See `pt.Phase`.
parameter_modifier: A function to modify parameters that is applied after
creation and before use.
Returns:
A RecurrentResult.
### Response:
def gru_cell(input_layer,
state,
num_units,
bias=tf.zeros_initializer(),
weights=None,
phase=prettytensor.Phase.train,
parameter_modifier=parameters.identity):
"""Gated recurrent unit memory cell (GRU).
Args:
input_layer: The input layer.
state: The current state of the network. For GRUs, this is a list with
one element (tensor) of shape [batch, num_units].
num_units: How big is the hidden state.
bias: An initializer for the bias or a Tensor. No bias if set to None.
weights: An initializer for weights or a Tensor.
phase: The phase of graph construction. See `pt.Phase`.
parameter_modifier: A function to modify parameters that is applied after
creation and before use.
Returns:
A RecurrentResult.
"""
# As a compound op, it needs to respect whether or not this is a sequential
# builder.
if input_layer.is_sequential_builder():
layer = input_layer.as_layer()
else:
layer = input_layer
# We start with bias of 1.0 to not reset and not update.
# NB We compute activation_input and activation_state in two different ops,
# instead of concatenating them, followed by one matrix multiplication. The
# reason is that input has size [batch_size x input_size], while state has
# [ ? x state_size ], where the first dimension is 1 initially and will be
# batch_size only after the first RNN computation. We thus cannot concatenate
# input and state, and instead add the results of two fully connected ops,
# which works thanks to broadcasting, independent of state's batch size.
state = state[0]
state_pt = prettytensor.wrap(state, layer.bookkeeper)
activation_input = layer.fully_connected(
2 * num_units,
bias=None if bias is None else tf.constant_initializer(1.0),
activation_fn=None,
weights=weights,
phase=phase,
parameter_modifier=parameter_modifier)
activation_state = state_pt.fully_connected(
2 * num_units,
bias=None,
activation_fn=None,
weights=weights,
phase=phase,
parameter_modifier=parameter_modifier)
# adds batch_size x (2 * num_units) + ? x (2 * num_units)
activation = activation_input + activation_state
activation = activation.sigmoid()
split = activation.split(1, 2)
r = split[0]
u = split[1]
c = layer.concat(1, [r * state]).fully_connected(
num_units,
bias=bias,
activation_fn=None,
weights=weights,
phase=phase,
parameter_modifier=parameter_modifier).apply(tf.tanh)
new_h = u * state + (1 - u) * c
if input_layer.is_sequential_builder():
new_h = input_layer.set_head(input_layer)
return RecurrentResult(new_h, [new_h]) |
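To make the gate arithmetic above easier to follow, here is a hedged NumPy sketch of one GRU step: a sigmoid over two affine maps gives the reset and update gates, a tanh over the input concatenated with the gated state gives the candidate, and the new state is their convex combination. The weight shapes and the omission of biases are simplifying assumptions; this is not the prettytensor API.
import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_step(x, h, w_x, w_h, w_c):
    # x: [batch, input_size], h: [batch, num_units]
    # w_x: [input_size, 2*num_units], w_h: [num_units, 2*num_units]
    # w_c: [input_size + num_units, num_units]
    gates = sigmoid(x @ w_x + h @ w_h)                     # reset/update gates
    r, u = np.split(gates, 2, axis=1)
    c = np.tanh(np.concatenate([x, r * h], axis=1) @ w_c)  # candidate state
    return u * h + (1.0 - u) * c                           # new hidden state

rng = np.random.default_rng(0)
x, h = rng.normal(size=(4, 3)), np.zeros((4, 5))
new_h = gru_step(x, h,
                 rng.normal(size=(3, 10)),   # w_x
                 rng.normal(size=(5, 10)),   # w_h
                 rng.normal(size=(8, 5)))    # w_c
print(new_h.shape)                           # (4, 5)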
def transpose(self, *args, **kwargs):
"""
Returns a DataFrame with the rows/columns switched.
"""
nv.validate_transpose(args, kwargs)
return self._constructor(
self.values.T, index=self.columns, columns=self.index,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self) | Returns a DataFrame with the rows/columns switched. | Below is the the instruction that describes the task:
### Input:
Returns a DataFrame with the rows/columns switched.
### Response:
def transpose(self, *args, **kwargs):
"""
Returns a DataFrame with the rows/columns switched.
"""
nv.validate_transpose(args, kwargs)
return self._constructor(
self.values.T, index=self.columns, columns=self.index,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self) |
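A brief usage sketch; it assumes a pandas release old enough to still ship SparseDataFrame (the class was removed in pandas 1.0), so treat it as illustrative rather than something to run on current pandas.
import pandas as pd   # assumes pandas < 1.0, where SparseDataFrame exists

sdf = pd.SparseDataFrame({'a': [1.0, None, 3.0], 'b': [None, 2.0, None]})
print(sdf.shape)               # (3, 2)
print(sdf.transpose().shape)   # (2, 3): rows and columns switched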
def run_as_coroutine(self, stdin, callbacks):
"""
The input 'event loop'.
"""
assert isinstance(callbacks, EventLoopCallbacks)
# Create reader class.
stdin_reader = PosixStdinReader(stdin.fileno())
if self.closed:
raise Exception('Event loop already closed.')
inputstream = InputStream(callbacks.feed_key)
try:
# Create a new Future every time.
self._stopped_f = asyncio.Future(loop=self.loop)
# Handle input timeouts
def timeout_handler():
"""
When no input has been received for INPUT_TIMEOUT seconds,
flush the input stream and fire the timeout event.
"""
inputstream.flush()
callbacks.input_timeout()
timeout = AsyncioTimeout(INPUT_TIMEOUT, timeout_handler, self.loop)
# Catch sigwinch
def received_winch():
self.call_from_executor(callbacks.terminal_size_changed)
self.loop.add_signal_handler(signal.SIGWINCH, received_winch)
# Read input data.
def stdin_ready():
data = stdin_reader.read()
inputstream.feed(data)
timeout.reset()
# Quit when the input stream was closed.
if stdin_reader.closed:
self.stop()
self.loop.add_reader(stdin.fileno(), stdin_ready)
# Block this coroutine until stop() has been called.
for f in self._stopped_f:
yield f
finally:
# Clean up.
self.loop.remove_reader(stdin.fileno())
self.loop.remove_signal_handler(signal.SIGWINCH)
# Don't trigger any timeout events anymore.
timeout.stop() | The input 'event loop'. | Below is the the instruction that describes the task:
### Input:
The input 'event loop'.
### Response:
def run_as_coroutine(self, stdin, callbacks):
"""
The input 'event loop'.
"""
assert isinstance(callbacks, EventLoopCallbacks)
# Create reader class.
stdin_reader = PosixStdinReader(stdin.fileno())
if self.closed:
raise Exception('Event loop already closed.')
inputstream = InputStream(callbacks.feed_key)
try:
# Create a new Future every time.
self._stopped_f = asyncio.Future(loop=self.loop)
# Handle input timeouts
def timeout_handler():
"""
When no input has been received for INPUT_TIMEOUT seconds,
flush the input stream and fire the timeout event.
"""
inputstream.flush()
callbacks.input_timeout()
timeout = AsyncioTimeout(INPUT_TIMEOUT, timeout_handler, self.loop)
# Catch sigwinch
def received_winch():
self.call_from_executor(callbacks.terminal_size_changed)
self.loop.add_signal_handler(signal.SIGWINCH, received_winch)
# Read input data.
def stdin_ready():
data = stdin_reader.read()
inputstream.feed(data)
timeout.reset()
# Quit when the input stream was closed.
if stdin_reader.closed:
self.stop()
self.loop.add_reader(stdin.fileno(), stdin_ready)
# Block this coroutine until stop() has been called.
for f in self._stopped_f:
yield f
finally:
# Clean up.
self.loop.remove_reader(stdin.fileno())
self.loop.remove_signal_handler(signal.SIGWINCH)
# Don't trigger any timeout events anymore.
timeout.stop() |
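The stdin handling above hinges on loop.add_reader; the following self-contained illustration shows that pattern with a pipe instead of a terminal. It uses plain modern asyncio (Unix-only) and none of prompt_toolkit's own classes, so it is a sketch of the mechanism, not of this library.
import asyncio
import os

async def main():
    loop = asyncio.get_running_loop()
    r_fd, w_fd = os.pipe()
    done = asyncio.Event()

    def on_readable():
        # Called by the event loop whenever r_fd is readable, like stdin_ready above.
        print(os.read(r_fd, 100))   # b'hello'
        loop.remove_reader(r_fd)
        done.set()

    loop.add_reader(r_fd, on_readable)
    os.write(w_fd, b'hello')
    await done.wait()
    os.close(r_fd)
    os.close(w_fd)

asyncio.run(main())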
def pprint_vector(vector, limit=False, width=None, indent=0, eng=False, frac_length=3):
r"""
Format a list of numbers (vector) or a Numpy vector for printing.
If the argument **vector** is :code:`None` the string :code:`'None'` is
returned
:param vector: Vector to pretty print or None
:type vector: list of integers or floats, Numpy vector or None
:param limit: Flag that indicates whether at most 6 vector items are
printed (all vector items if its length is equal or less
than 6, first and last 3 vector items if it is not) (True),
or the entire vector is printed (False)
:type limit: boolean
:param width: Number of available characters per line. If None the vector
is printed in one line
:type width: integer or None
:param indent: Flag that indicates whether all subsequent lines after the
first one are indented (True) or not (False). Only relevant
if **width** is not None
:type indent: boolean
:param eng: Flag that indicates whether engineering notation is used
(True) or not (False)
:type eng: boolean
:param frac_length: Number of digits of fractional part (only applicable
if **eng** is True)
:type frac_length: integer
:raises: ValueError (Argument \`width\` is too small)
:rtype: string
For example:
>>> from __future__ import print_function
>>> import peng
>>> header = 'Vector: '
>>> data = [1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9]
>>> print(
... header+peng.pprint_vector(
... data,
... width=30,
... eng=True,
... frac_length=1,
... limit=True,
... indent=len(header)
... )
... )
Vector: [ 1.0m, 20.0u, 300.0M,
...
700.0 , 8.0 , 9.0 ]
>>> print(
... header+peng.pprint_vector(
... data,
... width=30,
... eng=True,
... frac_length=0,
... indent=len(header)
... )
... )
Vector: [ 1m, 20u, 300M, 4p,
5k, -6n, 700 , 8 ,
9 ]
>>> print(peng.pprint_vector(data, eng=True, frac_length=0))
[ 1m, 20u, 300M, 4p, 5k, -6n, 700 , 8 , 9 ]
>>> print(peng.pprint_vector(data, limit=True))
[ 0.001, 2e-05, 300000000.0, ..., 700, 8, 9 ]
"""
# pylint: disable=R0912,R0913
num_digits = 12
approx = lambda x: float(x) if "." not in x else round(float(x), num_digits)
def limstr(value):
str1 = str(value)
iscomplex = isinstance(value, complex)
str1 = str1.lstrip("(").rstrip(")")
if "." not in str1:
return str1
if iscomplex:
sign = "+" if value.imag >= 0 else "-"
regexp = re.compile(
r"(.*(?:[Ee][\+-]\d+)?)"
+ (r"\+" if sign == "+" else "-")
+ r"(.*(?:[Ee][\+-]\d+)?j)"
)
rvalue, ivalue = regexp.match(str1).groups()
return (
str(complex(approx(rvalue), approx(sign + ivalue.strip("j"))))
.lstrip("(")
.rstrip(")")
)
str2 = str(round(value, num_digits))
return str2 if len(str1) > len(str2) else str1
def _str(*args):
"""
Convert numbers to string, optionally represented in engineering notation.
Numbers may be integers, float or complex
"""
ret = [
(limstr(element) if not eng else peng(element, frac_length, True))
if not isinstance(element, complex)
else (
limstr(element)
if not eng
else "{real}{sign}{imag}j".format(
real=peng(element.real, frac_length, True),
imag=peng(abs(element.imag), frac_length, True),
sign="+" if element.imag >= 0 else "-",
)
)
for element in args
]
return ret[0] if len(ret) == 1 else ret
if vector is None:
return "None"
lvector = len(vector)
if (not limit) or (limit and (lvector < 7)):
items = _str(*vector)
uret = "[ {0} ]".format(", ".join(items))
else:
items = _str(*(vector[:3] + vector[-3:]))
uret = "[ {0}, ..., {1} ]".format(", ".join(items[:3]), ", ".join(items[-3:]))
if (width is None) or (len(uret) < width):
return uret
# -4 comes from the fact that an opening '[ ' and a closing ' ]'
# are added to the multi-line vector string
if any([len(item) > width - 4 for item in items]):
raise ValueError("Argument `width` is too small")
# Text needs to be wrapped in multiple lines
# Figure out how long the first line needs to be
wobj = textwrap.TextWrapper(initial_indent="[ ", width=width)
# uret[2:] -> do not include initial '[ ' as this is specified as
# the initial indent to the text wrapper
rlist = wobj.wrap(uret[2:])
first_line = rlist[0]
first_line_elements = first_line.count(",")
# Reconstruct string representation of vector excluding first line
# Remove ... from text to be wrapped because it is placed in a single
# line centered with the content
uret_left = (",".join(uret.split(",")[first_line_elements:])).replace("...,", "")
wobj = textwrap.TextWrapper(width=width - 2)
wrapped_text = wobj.wrap(uret_left.lstrip())
# Construct candidate wrapped and indented list of vector elements
rlist = [first_line] + [
(" " * (indent + 2)) + item.rstrip() for item in wrapped_text
]
last_line = rlist[-1]
last_line_elements = last_line.count(",") + 1
# "Manually" format limit output so that it is either 3 lines, first and
# last line with 3 elements and the middle with '...' or 7 lines, each with
# 1 element and the middle with '...'
# If numbers are not to be aligned at commas (variable width) then use the
# existing results of the wrap() function
if limit and (lvector > 6):
if (first_line_elements < 3) or (
(first_line_elements == 3) and (last_line_elements < 3)
):
rlist = [
"[ {0},".format(_str(vector[0])),
_str(vector[1]),
_str(vector[2]),
"...",
_str(vector[-3]),
_str(vector[-2]),
"{0} ]".format(_str(vector[-1])),
]
first_line_elements = 1
else:
rlist = [
"[ {0},".format(", ".join(_str(*vector[:3]))),
"...",
"{0} ]".format(", ".join(_str(*vector[-3:]))),
]
first_line = rlist[0]
elif limit:
rlist = [item.lstrip() for item in rlist]
first_comma_index = first_line.find(",")
actual_width = len(first_line) - 2
if not eng:
if not limit:
return "\n".join(rlist)
num_elements = len(rlist)
return "\n".join(
[
"{spaces}{line}{comma}".format(
spaces=(" " * (indent + 2)) if num > 0 else "",
line=(
line.center(actual_width).rstrip()
if line.strip() == "..."
else line
),
comma=(
","
if (
(num < num_elements - 1)
and (not line.endswith(","))
and (line.strip() != "...")
)
else ""
),
)
if num > 0
else line
for num, line in enumerate(rlist)
]
)
# Align elements across multiple lines
if limit:
remainder_list = [line.lstrip() for line in rlist[1:]]
else:
remainder_list = _split_every(
text=uret[len(first_line) :],
sep=",",
count=first_line_elements,
lstrip=True,
)
new_wrapped_lines_list = [first_line]
for line in remainder_list[:-1]:
new_wrapped_lines_list.append(
"{0},".format(line).rjust(actual_width)
if line != "..."
else line.center(actual_width).rstrip()
)
# Align last line on first comma (if it exists) or
# on length of field if it does not
if remainder_list[-1].find(",") == -1:
marker = len(remainder_list[-1]) - 2
else:
marker = remainder_list[-1].find(",")
new_wrapped_lines_list.append(
"{0}{1}".format((first_comma_index - marker - 2) * " ", remainder_list[-1])
)
return "\n".join(
[
"{spaces}{line}".format(spaces=" " * (indent + 2), line=line)
if num > 0
else line
for num, line in enumerate(new_wrapped_lines_list)
]
) | r"""
Format a list of numbers (vector) or a Numpy vector for printing.
If the argument **vector** is :code:`None` the string :code:`'None'` is
returned
:param vector: Vector to pretty print or None
:type vector: list of integers or floats, Numpy vector or None
:param limit: Flag that indicates whether at most 6 vector items are
printed (all vector items if its length is equal or less
than 6, first and last 3 vector items if it is not) (True),
or the entire vector is printed (False)
:type limit: boolean
:param width: Number of available characters per line. If None the vector
is printed in one line
:type width: integer or None
:param indent: Flag that indicates whether all subsequent lines after the
first one are indented (True) or not (False). Only relevant
if **width** is not None
:type indent: boolean
:param eng: Flag that indicates whether engineering notation is used
(True) or not (False)
:type eng: boolean
:param frac_length: Number of digits of fractional part (only applicable
if **eng** is True)
:type frac_length: integer
:raises: ValueError (Argument \`width\` is too small)
:rtype: string
For example:
>>> from __future__ import print_function
>>> import peng
>>> header = 'Vector: '
>>> data = [1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9]
>>> print(
... header+peng.pprint_vector(
... data,
... width=30,
... eng=True,
... frac_length=1,
... limit=True,
... indent=len(header)
... )
... )
Vector: [ 1.0m, 20.0u, 300.0M,
...
700.0 , 8.0 , 9.0 ]
>>> print(
... header+peng.pprint_vector(
... data,
... width=30,
... eng=True,
... frac_length=0,
... indent=len(header)
... )
... )
Vector: [ 1m, 20u, 300M, 4p,
5k, -6n, 700 , 8 ,
9 ]
>>> print(peng.pprint_vector(data, eng=True, frac_length=0))
[ 1m, 20u, 300M, 4p, 5k, -6n, 700 , 8 , 9 ]
>>> print(peng.pprint_vector(data, limit=True))
[ 0.001, 2e-05, 300000000.0, ..., 700, 8, 9 ] | Below is the the instruction that describes the task:
### Input:
r"""
Format a list of numbers (vector) or a Numpy vector for printing.
If the argument **vector** is :code:`None` the string :code:`'None'` is
returned
:param vector: Vector to pretty print or None
:type vector: list of integers or floats, Numpy vector or None
:param limit: Flag that indicates whether at most 6 vector items are
printed (all vector items if its length is equal or less
than 6, first and last 3 vector items if it is not) (True),
or the entire vector is printed (False)
:type limit: boolean
:param width: Number of available characters per line. If None the vector
is printed in one line
:type width: integer or None
:param indent: Flag that indicates whether all subsequent lines after the
first one are indented (True) or not (False). Only relevant
if **width** is not None
:type indent: boolean
:param eng: Flag that indicates whether engineering notation is used
(True) or not (False)
:type eng: boolean
:param frac_length: Number of digits of fractional part (only applicable
if **eng** is True)
:type frac_length: integer
:raises: ValueError (Argument \`width\` is too small)
:rtype: string
For example:
>>> from __future__ import print_function
>>> import peng
>>> header = 'Vector: '
>>> data = [1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9]
>>> print(
... header+peng.pprint_vector(
... data,
... width=30,
... eng=True,
... frac_length=1,
... limit=True,
... indent=len(header)
... )
... )
Vector: [ 1.0m, 20.0u, 300.0M,
...
700.0 , 8.0 , 9.0 ]
>>> print(
... header+peng.pprint_vector(
... data,
... width=30,
... eng=True,
... frac_length=0,
... indent=len(header)
... )
... )
Vector: [ 1m, 20u, 300M, 4p,
5k, -6n, 700 , 8 ,
9 ]
>>> print(peng.pprint_vector(data, eng=True, frac_length=0))
[ 1m, 20u, 300M, 4p, 5k, -6n, 700 , 8 , 9 ]
>>> print(peng.pprint_vector(data, limit=True))
[ 0.001, 2e-05, 300000000.0, ..., 700, 8, 9 ]
### Response:
def pprint_vector(vector, limit=False, width=None, indent=0, eng=False, frac_length=3):
r"""
Format a list of numbers (vector) or a Numpy vector for printing.
If the argument **vector** is :code:`None` the string :code:`'None'` is
returned
:param vector: Vector to pretty print or None
:type vector: list of integers or floats, Numpy vector or None
:param limit: Flag that indicates whether at most 6 vector items are
printed (all vector items if its length is equal or less
than 6, first and last 3 vector items if it is not) (True),
or the entire vector is printed (False)
:type limit: boolean
:param width: Number of available characters per line. If None the vector
is printed in one line
:type width: integer or None
:param indent: Flag that indicates whether all subsequent lines after the
first one are indented (True) or not (False). Only relevant
if **width** is not None
:type indent: boolean
:param eng: Flag that indicates whether engineering notation is used
(True) or not (False)
:type eng: boolean
:param frac_length: Number of digits of fractional part (only applicable
if **eng** is True)
:type frac_length: integer
:raises: ValueError (Argument \`width\` is too small)
:rtype: string
For example:
>>> from __future__ import print_function
>>> import peng
>>> header = 'Vector: '
>>> data = [1e-3, 20e-6, 300e+6, 4e-12, 5.25e3, -6e-9, 700, 8, 9]
>>> print(
... header+peng.pprint_vector(
... data,
... width=30,
... eng=True,
... frac_length=1,
... limit=True,
... indent=len(header)
... )
... )
Vector: [ 1.0m, 20.0u, 300.0M,
...
700.0 , 8.0 , 9.0 ]
>>> print(
... header+peng.pprint_vector(
... data,
... width=30,
... eng=True,
... frac_length=0,
... indent=len(header)
... )
... )
Vector: [ 1m, 20u, 300M, 4p,
5k, -6n, 700 , 8 ,
9 ]
>>> print(peng.pprint_vector(data, eng=True, frac_length=0))
[ 1m, 20u, 300M, 4p, 5k, -6n, 700 , 8 , 9 ]
>>> print(peng.pprint_vector(data, limit=True))
[ 0.001, 2e-05, 300000000.0, ..., 700, 8, 9 ]
"""
# pylint: disable=R0912,R0913
num_digits = 12
approx = lambda x: float(x) if "." not in x else round(float(x), num_digits)
def limstr(value):
str1 = str(value)
iscomplex = isinstance(value, complex)
str1 = str1.lstrip("(").rstrip(")")
if "." not in str1:
return str1
if iscomplex:
sign = "+" if value.imag >= 0 else "-"
regexp = re.compile(
r"(.*(?:[Ee][\+-]\d+)?)"
+ (r"\+" if sign == "+" else "-")
+ r"(.*(?:[Ee][\+-]\d+)?j)"
)
rvalue, ivalue = regexp.match(str1).groups()
return (
str(complex(approx(rvalue), approx(sign + ivalue.strip("j"))))
.lstrip("(")
.rstrip(")")
)
str2 = str(round(value, num_digits))
return str2 if len(str1) > len(str2) else str1
def _str(*args):
"""
Convert numbers to string, optionally represented in engineering notation.
Numbers may be integers, float or complex
"""
ret = [
(limstr(element) if not eng else peng(element, frac_length, True))
if not isinstance(element, complex)
else (
limstr(element)
if not eng
else "{real}{sign}{imag}j".format(
real=peng(element.real, frac_length, True),
imag=peng(abs(element.imag), frac_length, True),
sign="+" if element.imag >= 0 else "-",
)
)
for element in args
]
return ret[0] if len(ret) == 1 else ret
if vector is None:
return "None"
lvector = len(vector)
if (not limit) or (limit and (lvector < 7)):
items = _str(*vector)
uret = "[ {0} ]".format(", ".join(items))
else:
items = _str(*(vector[:3] + vector[-3:]))
uret = "[ {0}, ..., {1} ]".format(", ".join(items[:3]), ", ".join(items[-3:]))
if (width is None) or (len(uret) < width):
return uret
# -4 comes from the fact that an opening '[ ' and a closing ' ]'
# are added to the multi-line vector string
if any([len(item) > width - 4 for item in items]):
raise ValueError("Argument `width` is too small")
# Text needs to be wrapped in multiple lines
# Figure out how long the first line needs to be
wobj = textwrap.TextWrapper(initial_indent="[ ", width=width)
# uret[2:] -> do not include initial '[ ' as this is specified as
# the initial indent to the text wrapper
rlist = wobj.wrap(uret[2:])
first_line = rlist[0]
first_line_elements = first_line.count(",")
# Reconstruct string representation of vector excluding first line
# Remove ... from text to be wrapped because it is placed in a single
# line centered with the content
uret_left = (",".join(uret.split(",")[first_line_elements:])).replace("...,", "")
wobj = textwrap.TextWrapper(width=width - 2)
wrapped_text = wobj.wrap(uret_left.lstrip())
# Construct candidate wrapped and indented list of vector elements
rlist = [first_line] + [
(" " * (indent + 2)) + item.rstrip() for item in wrapped_text
]
last_line = rlist[-1]
last_line_elements = last_line.count(",") + 1
# "Manually" format limit output so that it is either 3 lines, first and
# last line with 3 elements and the middle with '...' or 7 lines, each with
# 1 element and the middle with '...'
# If numbers are not to be aligned at commas (variable width) then use the
# existing results of the wrap() function
if limit and (lvector > 6):
if (first_line_elements < 3) or (
(first_line_elements == 3) and (last_line_elements < 3)
):
rlist = [
"[ {0},".format(_str(vector[0])),
_str(vector[1]),
_str(vector[2]),
"...",
_str(vector[-3]),
_str(vector[-2]),
"{0} ]".format(_str(vector[-1])),
]
first_line_elements = 1
else:
rlist = [
"[ {0},".format(", ".join(_str(*vector[:3]))),
"...",
"{0} ]".format(", ".join(_str(*vector[-3:]))),
]
first_line = rlist[0]
elif limit:
rlist = [item.lstrip() for item in rlist]
first_comma_index = first_line.find(",")
actual_width = len(first_line) - 2
if not eng:
if not limit:
return "\n".join(rlist)
num_elements = len(rlist)
return "\n".join(
[
"{spaces}{line}{comma}".format(
spaces=(" " * (indent + 2)) if num > 0 else "",
line=(
line.center(actual_width).rstrip()
if line.strip() == "..."
else line
),
comma=(
","
if (
(num < num_elements - 1)
and (not line.endswith(","))
and (line.strip() != "...")
)
else ""
),
)
if num > 0
else line
for num, line in enumerate(rlist)
]
)
# Align elements across multiple lines
if limit:
remainder_list = [line.lstrip() for line in rlist[1:]]
else:
remainder_list = _split_every(
text=uret[len(first_line) :],
sep=",",
count=first_line_elements,
lstrip=True,
)
new_wrapped_lines_list = [first_line]
for line in remainder_list[:-1]:
new_wrapped_lines_list.append(
"{0},".format(line).rjust(actual_width)
if line != "..."
else line.center(actual_width).rstrip()
)
# Align last line on first comma (if it exists) or
# on length of field if it does not
if remainder_list[-1].find(",") == -1:
marker = len(remainder_list[-1]) - 2
else:
marker = remainder_list[-1].find(",")
new_wrapped_lines_list.append(
"{0}{1}".format((first_comma_index - marker - 2) * " ", remainder_list[-1])
)
return "\n".join(
[
"{spaces}{line}".format(spaces=" " * (indent + 2), line=line)
if num > 0
else line
for num, line in enumerate(new_wrapped_lines_list)
]
) |
def from_section(cls, stream, section_name='.pic'):
"""Construct a Converter object from the specified section
of the specified binary stream."""
binary = Executable(stream)
section_data = binary.get_section_data(section_name)
return cls(section_data, binary.system) | Construct a Converter object from the specified section
of the specified binary stream. | Below is the the instruction that describes the task:
### Input:
Construct a Converter object from the specified section
of the specified binary stream.
### Response:
def from_section(cls, stream, section_name='.pic'):
"""Construct a Converter object from the specified section
of the specified binary stream."""
binary = Executable(stream)
section_data = binary.get_section_data(section_name)
return cls(section_data, binary.system) |
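A hypothetical call site for the classmethod above; only from_section and its arguments come from the entry, while the file name, the binary open mode, and the assumption that Converter is importable are mine.
# from some_module import Converter   # import path not shown in this entry
with open('firmware.elf', 'rb') as stream:   # file name is a placeholder
    converter = Converter.from_section(stream, section_name='.pic')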
def get_variable(self, name):
"""
Get a variable used in this tower.
The name should not contain the variable scope prefix of the tower.
When the tower has the same variable scope and name scope, this is equivalent to
:meth:`get_tensor`.
"""
name = get_op_tensor_name(name)[1]
if len(self.vs_name):
name_with_vs = self.vs_name + "/" + name
else:
name_with_vs = name
return get_op_or_tensor_by_name(name_with_vs) | Get a variable used in this tower.
The name should not contain the variable scope prefix of the tower.
When the tower has the same variable scope and name scope, this is equivalent to
:meth:`get_tensor`. | Below is the the instruction that describes the task:
### Input:
Get a variable used in this tower.
The name should not contain the variable scope prefix of the tower.
When the tower has the same variable scope and name scope, this is equivalent to
:meth:`get_tensor`.
### Response:
def get_variable(self, name):
"""
Get a variable used in this tower.
The name should not contain the variable scope prefix of the tower.
When the tower has the same variable scope and name scope, this is equivalent to
:meth:`get_tensor`.
"""
name = get_op_tensor_name(name)[1]
if len(self.vs_name):
name_with_vs = self.vs_name + "/" + name
else:
name_with_vs = name
return get_op_or_tensor_by_name(name_with_vs) |
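The only subtle step above is prefixing the tower's variable-scope name; a trivial, self-contained illustration of that join (the scope and variable names are made up, and the get_op_tensor_name normalisation is omitted):
name = 'conv1/W'      # variable name without the tower prefix
vs_name = 'tower0'    # the tower's variable-scope name
full_name = vs_name + '/' + name if len(vs_name) else name
print(full_name)      # tower0/conv1/W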
def run_delete_sm(self, tenant_id, fw_dict, is_fw_virt):
"""Runs the delete State Machine.
Goes through every state function until the end or when one state
returns failure.
"""
# Read the current state from the DB
ret = True
serv_obj = self.get_service_obj(tenant_id)
state = serv_obj.get_state()
# Preserve the ordering of the next lines till while
new_state = serv_obj.fixup_state(fw_const.FW_DEL_OP, state)
serv_obj.store_local_final_result(fw_const.RESULT_FW_DELETE_INIT)
if state != new_state:
state = new_state
serv_obj.store_state(state)
while ret:
try:
ret = self.fabric_fsm[state][1](tenant_id, fw_dict,
is_fw_virt=is_fw_virt)
except Exception as exc:
LOG.error("Exception %(exc)s for state %(state)s",
{'exc': str(exc), 'state':
fw_const.fw_state_fn_del_dict.get(state)})
ret = False
if ret:
LOG.info("State %s return successfully",
fw_const.fw_state_fn_del_dict.get(state))
if state == fw_const.INIT_STATE:
break
state = self.get_next_state(state, ret, fw_const.FW_DEL_OP)
serv_obj.store_state(state)
return ret | Runs the delete State Machine.
Goes through every state function until the end or when one state
returns failure. | Below is the the instruction that describes the task:
### Input:
Runs the delete State Machine.
Goes through every state function until the end or when one state
returns failure.
### Response:
def run_delete_sm(self, tenant_id, fw_dict, is_fw_virt):
"""Runs the delete State Machine.
Goes through every state function until the end or when one state
returns failure.
"""
# Read the current state from the DB
ret = True
serv_obj = self.get_service_obj(tenant_id)
state = serv_obj.get_state()
# Preserve the ordering of the next lines till while
new_state = serv_obj.fixup_state(fw_const.FW_DEL_OP, state)
serv_obj.store_local_final_result(fw_const.RESULT_FW_DELETE_INIT)
if state != new_state:
state = new_state
serv_obj.store_state(state)
while ret:
try:
ret = self.fabric_fsm[state][1](tenant_id, fw_dict,
is_fw_virt=is_fw_virt)
except Exception as exc:
LOG.error("Exception %(exc)s for state %(state)s",
{'exc': str(exc), 'state':
fw_const.fw_state_fn_del_dict.get(state)})
ret = False
if ret:
LOG.info("State %s return successfully",
fw_const.fw_state_fn_del_dict.get(state))
if state == fw_const.INIT_STATE:
break
state = self.get_next_state(state, ret, fw_const.FW_DEL_OP)
serv_obj.store_state(state)
return ret |
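A self-contained sketch of the same drive-the-state-machine pattern: every handler returns a success flag and the loop follows the transition table until it reaches the terminal state or a handler fails. The state names and handlers below are invented for illustration and are not the firewall states used above.
INIT, DELETE_POLICY, CLEANUP = 'INIT', 'DELETE_POLICY', 'CLEANUP'
handlers = {CLEANUP: lambda: True, DELETE_POLICY: lambda: True, INIT: lambda: True}
next_state = {CLEANUP: DELETE_POLICY, DELETE_POLICY: INIT}

state, ok = CLEANUP, True
while ok:
    ok = handlers[state]()          # run the current state's handler
    if ok and state == INIT:
        break                       # reached the terminal state successfully
    if ok:
        state = next_state[state]   # move to the next state in the chain
print(state, ok)                    # INIT True once every handler succeeded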
def leave_moderator(self, subreddit):
"""Abdicate moderator status in a subreddit. Use with care.
:param subreddit: The name of the subreddit to leave `status` from.
:returns: the json response from the server.
"""
self.evict(self.config['my_mod_subreddits'])
return self._leave_status(subreddit, self.config['leavemoderator']) | Abdicate moderator status in a subreddit. Use with care.
:param subreddit: The name of the subreddit to leave `status` from.
:returns: the json response from the server. | Below is the the instruction that describes the task:
### Input:
Abdicate moderator status in a subreddit. Use with care.
:param subreddit: The name of the subreddit to leave `status` from.
:returns: the json response from the server.
### Response:
def leave_moderator(self, subreddit):
"""Abdicate moderator status in a subreddit. Use with care.
:param subreddit: The name of the subreddit to leave `status` from.
:returns: the json response from the server.
"""
self.evict(self.config['my_mod_subreddits'])
return self._leave_status(subreddit, self.config['leavemoderator']) |
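A hypothetical call site, assuming a PRAW 3-era client where these calls exist (current PRAW exposes a different, subreddit-object-based API); the credentials and subreddit name are placeholders.
import praw   # assumption: a PRAW 3.x-era install

r = praw.Reddit(user_agent='example-bot/0.1')
r.login('some_user', 'some_password')   # password login of that era
r.leave_moderator('some_subreddit')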
def sdram_alloc_as_filelike(self, size, tag=0, x=Required, y=Required,
app_id=Required, clear=False):
"""Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like
object <.MemoryIO>` which allows safe reading and writing to the block
that is allocated.
Returns
-------
:py:class:`.MemoryIO`
File-like object which allows accessing the newly allocated region
of memory. For example::
>>> # Read, write and seek through the allocated memory just
>>> # like a file
>>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP
>>> mem.write(b"Hello, world") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(5) # doctest: +SKIP
b"Hello"
>>> mem.read(7) # doctest: +SKIP
b", world"
>>> # Reads and writes are truncated to the allocated region,
>>> # preventing accidental clobbering/access of memory.
>>> mem.seek(0) # doctest: +SKIP
>>> mem.write(b"How are you today?") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(100) # doctest: +SKIP
b"How are you "
See the :py:class:`.MemoryIO` class for details of other features
of these file-like views of SpiNNaker's memory.
Raises
------
rig.machine_control.machine_controller.SpiNNakerMemoryError
If the memory cannot be allocated, or the tag is already taken or
invalid.
"""
# Perform the malloc
start_address = self.sdram_alloc(size, tag, x, y, app_id, clear)
return MemoryIO(self, x, y, start_address, start_address + size) | Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like
object <.MemoryIO>` which allows safe reading and writing to the block
that is allocated.
Returns
-------
:py:class:`.MemoryIO`
File-like object which allows accessing the newly allocated region
of memory. For example::
>>> # Read, write and seek through the allocated memory just
>>> # like a file
>>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP
>>> mem.write(b"Hello, world") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(5) # doctest: +SKIP
b"Hello"
>>> mem.read(7) # doctest: +SKIP
b", world"
>>> # Reads and writes are truncated to the allocated region,
>>> # preventing accidental clobbering/access of memory.
>>> mem.seek(0) # doctest: +SKIP
>>> mem.write(b"How are you today?") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(100) # doctest: +SKIP
b"How are you "
See the :py:class:`.MemoryIO` class for details of other features
of these file-like views of SpiNNaker's memory.
Raises
------
rig.machine_control.machine_controller.SpiNNakerMemoryError
If the memory cannot be allocated, or the tag is already taken or
invalid. | Below is the the instruction that describes the task:
### Input:
Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like
object <.MemoryIO>` which allows safe reading and writing to the block
that is allocated.
Returns
-------
:py:class:`.MemoryIO`
File-like object which allows accessing the newly allocated region
of memory. For example::
>>> # Read, write and seek through the allocated memory just
>>> # like a file
>>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP
>>> mem.write(b"Hello, world") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(5) # doctest: +SKIP
b"Hello"
>>> mem.read(7) # doctest: +SKIP
b", world"
>>> # Reads and writes are truncated to the allocated region,
>>> # preventing accidental clobbering/access of memory.
>>> mem.seek(0) # doctest: +SKIP
>>> mem.write(b"How are you today?") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(100) # doctest: +SKIP
b"How are you "
See the :py:class:`.MemoryIO` class for details of other features
of these file-like views of SpiNNaker's memory.
Raises
------
rig.machine_control.machine_controller.SpiNNakerMemoryError
If the memory cannot be allocated, or the tag is already taken or
invalid.
### Response:
def sdram_alloc_as_filelike(self, size, tag=0, x=Required, y=Required,
app_id=Required, clear=False):
"""Like :py:meth:`.sdram_alloc` but returns a :py:class:`file-like
object <.MemoryIO>` which allows safe reading and writing to the block
that is allocated.
Returns
-------
:py:class:`.MemoryIO`
File-like object which allows accessing the newly allocated region
of memory. For example::
>>> # Read, write and seek through the allocated memory just
>>> # like a file
>>> mem = mc.sdram_alloc_as_filelike(12) # doctest: +SKIP
>>> mem.write(b"Hello, world") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(5) # doctest: +SKIP
b"Hello"
>>> mem.read(7) # doctest: +SKIP
b", world"
>>> # Reads and writes are truncated to the allocated region,
>>> # preventing accidental clobbering/access of memory.
>>> mem.seek(0) # doctest: +SKIP
>>> mem.write(b"How are you today?") # doctest: +SKIP
12
>>> mem.seek(0) # doctest: +SKIP
>>> mem.read(100) # doctest: +SKIP
b"How are you "
See the :py:class:`.MemoryIO` class for details of other features
of these file-like views of SpiNNaker's memory.
Raises
------
rig.machine_control.machine_controller.SpiNNakerMemoryError
If the memory cannot be allocated, or the tag is already taken or
invalid.
"""
# Perform the malloc
start_address = self.sdram_alloc(size, tag, x, y, app_id, clear)
return MemoryIO(self, x, y, start_address, start_address + size) |
def get_coordinator():
"""Creates a coordinator and returns it."""
workflow_queue = Queue.Queue()
complete_queue = Queue.Queue()
coordinator = WorkflowThread(workflow_queue, complete_queue)
coordinator.register(WorkflowItem, workflow_queue)
return coordinator | Creates a coordinator and returns it. | Below is the the instruction that describes the task:
### Input:
Creates a coordinator and returns it.
### Response:
def get_coordinator():
"""Creates a coordinator and returns it."""
workflow_queue = Queue.Queue()
complete_queue = Queue.Queue()
coordinator = WorkflowThread(workflow_queue, complete_queue)
coordinator.register(WorkflowItem, workflow_queue)
return coordinator |
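A minimal, self-contained sketch of the queue-based coordinator pattern set up above: one thread pulls items from a workflow queue and reports results on a completion queue. The names and the item * 2 stand-in are illustrative, not the WorkflowItem machinery of the source project.
import queue
import threading

workflow_queue = queue.Queue()
complete_queue = queue.Queue()

def worker():
    while True:
        item = workflow_queue.get()
        if item is None:               # sentinel: shut the worker down
            break
        complete_queue.put(item * 2)   # stand-in for running a workflow item
        workflow_queue.task_done()

t = threading.Thread(target=worker, daemon=True)
t.start()
for n in range(3):
    workflow_queue.put(n)
workflow_queue.put(None)
t.join()
print([complete_queue.get() for _ in range(3)])   # [0, 2, 4]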
def add_tip_labels_to_axes(self):
"""
Add text offset from tips of tree with correction for orientation,
and fixed_order which is usually used in multitree plotting.
"""
# get tip-coords and replace if using fixed_order
xpos = self.ttree.get_tip_coordinates('x')
ypos = self.ttree.get_tip_coordinates('y')
if self.style.orient in ("up", "down"):
if self.ttree._fixed_order:
xpos = list(range(self.ttree.ntips))
ypos = ypos[self.ttree._fixed_idx]
if self.style.tip_labels_align:
ypos = np.zeros(self.ttree.ntips)
if self.style.orient in ("right", "left"):
if self.ttree._fixed_order:
xpos = xpos[self.ttree._fixed_idx]
ypos = list(range(self.ttree.ntips))
if self.style.tip_labels_align:
xpos = np.zeros(self.ttree.ntips)
# pop fill from color dict if using color
tstyle = deepcopy(self.style.tip_labels_style)
if self.style.tip_labels_colors:
tstyle.pop("fill")
# add tip names to coordinates calculated above
self.axes.text(
xpos,
ypos,
self.tip_labels,
angle=(0 if self.style.orient in ("right", "left") else -90),
style=tstyle,
color=self.style.tip_labels_colors,
)
# get stroke-width for aligned tip-label lines (optional)
# copy stroke-width from the edge_style unless user set it
if not self.style.edge_align_style.get("stroke-width"):
self.style.edge_align_style["stroke-width"] = (
self.style.edge_style["stroke-width"]) | Add text offset from tips of tree with correction for orientation,
and fixed_order which is usually used in multitree plotting. | Below is the the instruction that describes the task:
### Input:
Add text offset from tips of tree with correction for orientation,
and fixed_order which is usually used in multitree plotting.
### Response:
def add_tip_labels_to_axes(self):
"""
Add text offset from tips of tree with correction for orientation,
and fixed_order which is usually used in multitree plotting.
"""
# get tip-coords and replace if using fixed_order
xpos = self.ttree.get_tip_coordinates('x')
ypos = self.ttree.get_tip_coordinates('y')
if self.style.orient in ("up", "down"):
if self.ttree._fixed_order:
xpos = list(range(self.ttree.ntips))
ypos = ypos[self.ttree._fixed_idx]
if self.style.tip_labels_align:
ypos = np.zeros(self.ttree.ntips)
if self.style.orient in ("right", "left"):
if self.ttree._fixed_order:
xpos = xpos[self.ttree._fixed_idx]
ypos = list(range(self.ttree.ntips))
if self.style.tip_labels_align:
xpos = np.zeros(self.ttree.ntips)
# pop fill from color dict if using color
tstyle = deepcopy(self.style.tip_labels_style)
if self.style.tip_labels_colors:
tstyle.pop("fill")
# add tip names to coordinates calculated above
self.axes.text(
xpos,
ypos,
self.tip_labels,
angle=(0 if self.style.orient in ("right", "left") else -90),
style=tstyle,
color=self.style.tip_labels_colors,
)
# get stroke-width for aligned tip-label lines (optional)
# copy stroke-width from the edge_style unless user set it
if not self.style.edge_align_style.get("stroke-width"):
self.style.edge_align_style["stroke-width"] = (
self.style.edge_style["stroke-width"]) |
def update(self, query_name, saved_query_attributes):
"""
Given a dict of attributes to be updated, update only those attributes
in the Saved Query at the resource given by 'query_name'. This will
perform two HTTP requests--one to fetch the query definition, and one
to set the new attributes. This method will intend to preserve any
other properties on the query.
Master key must be set.
"""
query_name_attr_name = "query_name"
refresh_rate_attr_name = "refresh_rate"
query_attr_name = "query"
metadata_attr_name = "metadata"
old_saved_query = self.get(query_name)
# Create a new query def to send back. We cannot send values for attributes like 'urls',
# 'last_modified_date', 'run_information', etc.
new_saved_query = {
query_name_attr_name: old_saved_query[query_name_attr_name], # expected
refresh_rate_attr_name: old_saved_query[refresh_rate_attr_name], # expected
query_attr_name: {}
}
# If metadata was set, preserve it. The Explorer UI currently stores information here.
old_metadata = (old_saved_query[metadata_attr_name]
if metadata_attr_name in old_saved_query
else None)
if old_metadata:
new_saved_query[metadata_attr_name] = old_metadata
# Preserve any non-empty properties of the existing query. We get back values like None
# for 'group_by', 'interval' or 'timezone', but those aren't accepted values when updating.
old_query = old_saved_query[query_attr_name] # expected
# Shallow copy since we want the entire object hierarchy to start with.
for (key, value) in six.iteritems(old_query):
if value:
new_saved_query[query_attr_name][key] = value
# Now, recursively overwrite any attributes passed in.
SavedQueriesInterface._deep_update(new_saved_query, saved_query_attributes)
return self.create(query_name, new_saved_query) | Given a dict of attributes to be updated, update only those attributes
in the Saved Query at the resource given by 'query_name'. This will
perform two HTTP requests--one to fetch the query definition, and one
to set the new attributes. This method will intend to preserve any
other properties on the query.
Master key must be set. | Below is the the instruction that describes the task:
### Input:
Given a dict of attributes to be updated, update only those attributes
in the Saved Query at the resource given by 'query_name'. This will
perform two HTTP requests--one to fetch the query definition, and one
to set the new attributes. This method will intend to preserve any
other properties on the query.
Master key must be set.
### Response:
def update(self, query_name, saved_query_attributes):
"""
Given a dict of attributes to be updated, update only those attributes
in the Saved Query at the resource given by 'query_name'. This will
perform two HTTP requests--one to fetch the query definition, and one
to set the new attributes. This method will intend to preserve any
other properties on the query.
Master key must be set.
"""
query_name_attr_name = "query_name"
refresh_rate_attr_name = "refresh_rate"
query_attr_name = "query"
metadata_attr_name = "metadata"
old_saved_query = self.get(query_name)
# Create a new query def to send back. We cannot send values for attributes like 'urls',
# 'last_modified_date', 'run_information', etc.
new_saved_query = {
query_name_attr_name: old_saved_query[query_name_attr_name], # expected
refresh_rate_attr_name: old_saved_query[refresh_rate_attr_name], # expected
query_attr_name: {}
}
# If metadata was set, preserve it. The Explorer UI currently stores information here.
old_metadata = (old_saved_query[metadata_attr_name]
if metadata_attr_name in old_saved_query
else None)
if old_metadata:
new_saved_query[metadata_attr_name] = old_metadata
# Preserve any non-empty properties of the existing query. We get back values like None
# for 'group_by', 'interval' or 'timezone', but those aren't accepted values when updating.
old_query = old_saved_query[query_attr_name] # expected
# Shallow copy since we want the entire object hierarchy to start with.
for (key, value) in six.iteritems(old_query):
if value:
new_saved_query[query_attr_name][key] = value
# Now, recursively overwrite any attributes passed in.
SavedQueriesInterface._deep_update(new_saved_query, saved_query_attributes)
return self.create(query_name, new_saved_query) |
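A self-contained sketch of the recursive merge the final step depends on; the real _deep_update helper is not shown in this entry, so its exact behaviour is an assumption. The idea: only keys present in the patch overwrite the fetched definition, and nested dicts are merged rather than replaced.
def deep_update(target, patch):
    # Recursively overlay `patch` onto `target`, merging nested dicts.
    for key, value in patch.items():
        if isinstance(value, dict) and isinstance(target.get(key), dict):
            deep_update(target[key], value)
        else:
            target[key] = value
    return target

existing = {'query_name': 'weekly-count', 'refresh_rate': 3600,
            'query': {'analysis_type': 'count', 'timeframe': 'this_7_days'}}
patch = {'refresh_rate': 14400, 'query': {'timeframe': 'this_14_days'}}
print(deep_update(existing, patch))
# {'query_name': 'weekly-count', 'refresh_rate': 14400,
#  'query': {'analysis_type': 'count', 'timeframe': 'this_14_days'}}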