code | docstring | text
---|---|---
def is_gff_db(db_fname):
"""
Return True if the given filename is a GFF database.
For now, rely on .db extension.
"""
if not os.path.isfile(db_fname):
return False
if db_fname.endswith(".db"):
return True
return False | Return True if the given filename is a GFF database.
For now, rely on .db extension. | Below is the instruction that describes the task:
### Input:
Return True if the given filename is a GFF database.
For now, rely on .db extension.
### Response:
def is_gff_db(db_fname):
"""
Return True if the given filename is a GFF database.
For now, rely on .db extension.
"""
if not os.path.isfile(db_fname):
return False
if db_fname.endswith(".db"):
return True
return False |
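A minimal, runnable sketch of the same extension check, shown only to illustrate the behaviour; the sample path is hypothetical and `os` is assumed to be imported as in the original module.

import os

def is_gff_db_sketch(db_fname):
    # Equivalent one-expression form of the check above: the path must be an
    # existing regular file and carry a .db suffix.
    return os.path.isfile(db_fname) and db_fname.endswith(".db")

# Hypothetical path; prints True only if such a file actually exists on disk.
print(is_gff_db_sketch("/tmp/annotations.db"))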
def _fromData(cls, header, tflags, data):
"""Construct this ID3 frame from raw string data.
Raises:
ID3JunkFrameError in case parsing failed
NotImplementedError in case parsing isn't implemented
ID3EncryptionUnsupportedError in case the frame is encrypted.
"""
if header.version >= header._V24:
if tflags & (Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN):
# The data length int is syncsafe in 2.4 (but not 2.3).
# However, we don't actually need the data length int,
# except to work around a QL 0.12 bug, and in that case
# all we need are the raw bytes.
datalen_bytes = data[:4]
data = data[4:]
if tflags & Frame.FLAG24_UNSYNCH or header.f_unsynch:
try:
data = unsynch.decode(data)
except ValueError:
# Some things write synch-unsafe data with either the frame
# or global unsynch flag set. Try to load them as is.
# https://github.com/quodlibet/mutagen/issues/210
# https://github.com/quodlibet/mutagen/issues/223
pass
if tflags & Frame.FLAG24_ENCRYPT:
raise ID3EncryptionUnsupportedError
if tflags & Frame.FLAG24_COMPRESS:
try:
data = zlib.decompress(data)
except zlib.error:
# the initial mutagen that went out with QL 0.12 did not
# write the 4 bytes of uncompressed size. Compensate.
data = datalen_bytes + data
try:
data = zlib.decompress(data)
except zlib.error as err:
raise ID3JunkFrameError(
'zlib: %s: %r' % (err, data))
elif header.version >= header._V23:
if tflags & Frame.FLAG23_COMPRESS:
usize, = unpack('>L', data[:4])
data = data[4:]
if tflags & Frame.FLAG23_ENCRYPT:
raise ID3EncryptionUnsupportedError
if tflags & Frame.FLAG23_COMPRESS:
try:
data = zlib.decompress(data)
except zlib.error as err:
raise ID3JunkFrameError('zlib: %s: %r' % (err, data))
frame = cls()
frame._readData(header, data)
return frame | Construct this ID3 frame from raw string data.
Raises:
ID3JunkFrameError in case parsing failed
NotImplementedError in case parsing isn't implemented
ID3EncryptionUnsupportedError in case the frame is encrypted. | Below is the instruction that describes the task:
### Input:
Construct this ID3 frame from raw string data.
Raises:
ID3JunkFrameError in case parsing failed
NotImplementedError in case parsing isn't implemented
ID3EncryptionUnsupportedError in case the frame is encrypted.
### Response:
def _fromData(cls, header, tflags, data):
"""Construct this ID3 frame from raw string data.
Raises:
ID3JunkFrameError in case parsing failed
NotImplementedError in case parsing isn't implemented
ID3EncryptionUnsupportedError in case the frame is encrypted.
"""
if header.version >= header._V24:
if tflags & (Frame.FLAG24_COMPRESS | Frame.FLAG24_DATALEN):
# The data length int is syncsafe in 2.4 (but not 2.3).
# However, we don't actually need the data length int,
# except to work around a QL 0.12 bug, and in that case
# all we need are the raw bytes.
datalen_bytes = data[:4]
data = data[4:]
if tflags & Frame.FLAG24_UNSYNCH or header.f_unsynch:
try:
data = unsynch.decode(data)
except ValueError:
# Some things write synch-unsafe data with either the frame
# or global unsynch flag set. Try to load them as is.
# https://github.com/quodlibet/mutagen/issues/210
# https://github.com/quodlibet/mutagen/issues/223
pass
if tflags & Frame.FLAG24_ENCRYPT:
raise ID3EncryptionUnsupportedError
if tflags & Frame.FLAG24_COMPRESS:
try:
data = zlib.decompress(data)
except zlib.error:
# the initial mutagen that went out with QL 0.12 did not
# write the 4 bytes of uncompressed size. Compensate.
data = datalen_bytes + data
try:
data = zlib.decompress(data)
except zlib.error as err:
raise ID3JunkFrameError(
'zlib: %s: %r' % (err, data))
elif header.version >= header._V23:
if tflags & Frame.FLAG23_COMPRESS:
usize, = unpack('>L', data[:4])
data = data[4:]
if tflags & Frame.FLAG23_ENCRYPT:
raise ID3EncryptionUnsupportedError
if tflags & Frame.FLAG23_COMPRESS:
try:
data = zlib.decompress(data)
except zlib.error as err:
raise ID3JunkFrameError('zlib: %s: %r' % (err, data))
frame = cls()
frame._readData(header, data)
return frame |
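A self-contained sketch of the zlib fallback used in the v2.4 branch above (the ID3 frame and header objects are not reproduced; only the retry-with-length-prefix pattern is shown):

import zlib

def decompress_with_fallback(datalen_bytes, data):
    # Try the payload as-is; if the writer folded the 4-byte data-length
    # prefix into the compressed blob (the QL 0.12 quirk noted above),
    # re-attach it and retry before giving up.
    try:
        return zlib.decompress(data)
    except zlib.error:
        return zlib.decompress(datalen_bytes + data)

blob = zlib.compress(b"hello frame")
# Normal case: the blob is intact, so the first attempt already succeeds.
print(decompress_with_fallback(len(b"hello frame").to_bytes(4, "big"), blob))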
def write_without_mac(self, data, block):
"""Write a data block without integrity check.
This is the standard write method for a FeliCa Lite. The
16-byte string or bytearray *data* is written to the numbered
*block* in service 0x0009 (NDEF write service). ::
data = bytearray(range(16)) # 0x00, 0x01, ... 0x0F
try: tag.write_without_mac(data, 5) # write block 5
except nfc.tag.TagCommandError:
print("something went wrong")
Tag command errors raise :exc:`~nfc.tag.TagCommandError`.
"""
# Write a single data block without a mac. Write with mac is
# only supported by FeliCa Lite-S.
assert len(data) == 16 and type(block) is int
log.debug("write 1 block without mac".format())
sc_list = [tt3.ServiceCode(0, 0b001001)]
bc_list = [tt3.BlockCode(block)]
self.write_without_encryption(sc_list, bc_list, data) | Write a data block without integrity check.
This is the standard write method for a FeliCa Lite. The
16-byte string or bytearray *data* is written to the numbered
*block* in service 0x0009 (NDEF write service). ::
data = bytearray(range(16)) # 0x00, 0x01, ... 0x0F
try: tag.write_without_mac(data, 5) # write block 5
except nfc.tag.TagCommandError:
print("something went wrong")
Tag command errors raise :exc:`~nfc.tag.TagCommandError`. | Below is the instruction that describes the task:
### Input:
Write a data block without integrity check.
This is the standard write method for a FeliCa Lite. The
16-byte string or bytearray *data* is written to the numbered
*block* in service 0x0009 (NDEF write service). ::
data = bytearray(range(16)) # 0x00, 0x01, ... 0x0F
try: tag.write_without_mac(data, 5) # write block 5
except nfc.tag.TagCommandError:
print("something went wrong")
Tag command errors raise :exc:`~nfc.tag.TagCommandError`.
### Response:
def write_without_mac(self, data, block):
"""Write a data block without integrity check.
This is the standard write method for a FeliCa Lite. The
16-byte string or bytearray *data* is written to the numbered
*block* in service 0x0009 (NDEF write service). ::
data = bytearray(range(16)) # 0x00, 0x01, ... 0x0F
try: tag.write_without_mac(data, 5) # write block 5
except nfc.tag.TagCommandError:
print("something went wrong")
Tag command errors raise :exc:`~nfc.tag.TagCommandError`.
"""
# Write a single data block without a mac. Write with mac is
# only supported by FeliCa Lite-S.
assert len(data) == 16 and type(block) is int
log.debug("write 1 block without mac".format())
sc_list = [tt3.ServiceCode(0, 0b001001)]
bc_list = [tt3.BlockCode(block)]
self.write_without_encryption(sc_list, bc_list, data) |
def truepath(path, real=False):
"""
Normalizes a string representation of a path and does shell-like expansion.
Args:
path (PathLike): string representation of a path
real (bool): if True, all symbolic links are followed. (default: False)
Returns:
PathLike : normalized path
Note:
This function is similar to the composition of expanduser, expandvars,
normpath, and (realpath if `real` else abspath). However, on windows
backslashes are then replaced with forward slashes to offer a
consistent unix-like experience across platforms.
On windows expanduser will expand environment variables formatted as
%name%, whereas on unix, this will not occur.
CommandLine:
python -m ubelt.util_path truepath
Example:
>>> import ubelt as ub
>>> assert ub.truepath('~/foo') == join(ub.userhome(), 'foo')
>>> assert ub.truepath('~/foo') == ub.truepath('~/foo/bar/..')
>>> assert ub.truepath('~/foo', real=True) == ub.truepath('~/foo')
"""
path = expanduser(path)
path = expandvars(path)
if real:
path = realpath(path)
else:
path = abspath(path)
path = normpath(path)
return path | Normalizes a string representation of a path and does shell-like expansion.
Args:
path (PathLike): string representation of a path
real (bool): if True, all symbolic links are followed. (default: False)
Returns:
PathLike : normalized path
Note:
This function is similar to the composition of expanduser, expandvars,
normpath, and (realpath if `real` else abspath). However, on windows
backslashes are then replaced with forward slashes to offer a
consistent unix-like experience across platforms.
On windows expanduser will expand environment variables formatted as
%name%, whereas on unix, this will not occur.
CommandLine:
python -m ubelt.util_path truepath
Example:
>>> import ubelt as ub
>>> assert ub.truepath('~/foo') == join(ub.userhome(), 'foo')
>>> assert ub.truepath('~/foo') == ub.truepath('~/foo/bar/..')
>>> assert ub.truepath('~/foo', real=True) == ub.truepath('~/foo') | Below is the instruction that describes the task:
### Input:
Normalizes a string representation of a path and does shell-like expansion.
Args:
path (PathLike): string representation of a path
real (bool): if True, all symbolic links are followed. (default: False)
Returns:
PathLike : normalized path
Note:
This function is similar to the composition of expanduser, expandvars,
normpath, and (realpath if `real` else abspath). However, on windows
backslashes are then replaced with forward slashes to offer a
consistent unix-like experience across platforms.
On windows expanduser will expand environment variables formatted as
%name%, whereas on unix, this will not occur.
CommandLine:
python -m ubelt.util_path truepath
Example:
>>> import ubelt as ub
>>> assert ub.truepath('~/foo') == join(ub.userhome(), 'foo')
>>> assert ub.truepath('~/foo') == ub.truepath('~/foo/bar/..')
>>> assert ub.truepath('~/foo', real=True) == ub.truepath('~/foo')
### Response:
def truepath(path, real=False):
"""
Normalizes a string representation of a path and does shell-like expansion.
Args:
path (PathLike): string representation of a path
real (bool): if True, all symbolic links are followed. (default: False)
Returns:
PathLike : normalized path
Note:
This function is similar to the composition of expanduser, expandvars,
normpath, and (realpath if `real` else abspath). However, on windows
backslashes are then replaced with forward slashes to offer a
consistent unix-like experience across platforms.
On windows expanduser will expand environment variables formatted as
%name%, whereas on unix, this will not occur.
CommandLine:
python -m ubelt.util_path truepath
Example:
>>> import ubelt as ub
>>> assert ub.truepath('~/foo') == join(ub.userhome(), 'foo')
>>> assert ub.truepath('~/foo') == ub.truepath('~/foo/bar/..')
>>> assert ub.truepath('~/foo', real=True) == ub.truepath('~/foo')
"""
path = expanduser(path)
path = expandvars(path)
if real:
path = realpath(path)
else:
path = abspath(path)
path = normpath(path)
return path |
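A runnable sketch of the same composition using the standard library directly; `expanduser`, `expandvars`, `abspath`, `realpath` and `normpath` are assumed to come from `os.path`, as in the original module.

from os.path import abspath, expanduser, expandvars, normpath, realpath

def truepath_sketch(path, real=False):
    # expanduser -> expandvars -> (realpath | abspath) -> normpath,
    # mirroring the order used in the function above.
    path = expandvars(expanduser(path))
    path = realpath(path) if real else abspath(path)
    return normpath(path)

print(truepath_sketch("~/foo/bar/.."))  # normalized absolute path ending in .../foo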
def _check_stop_iteration_inside_generator(self, node):
"""Check if an exception of type StopIteration is raised inside a generator"""
frame = node.frame()
if not isinstance(frame, astroid.FunctionDef) or not frame.is_generator():
return
if utils.node_ignores_exception(node, StopIteration):
return
if not node.exc:
return
exc = utils.safe_infer(node.exc)
if exc is None or exc is astroid.Uninferable:
return
if self._check_exception_inherit_from_stopiteration(exc):
self.add_message("stop-iteration-return", node=node) | Check if an exception of type StopIteration is raised inside a generator | Below is the instruction that describes the task:
### Input:
Check if an exception of type StopIteration is raised inside a generator
### Response:
def _check_stop_iteration_inside_generator(self, node):
"""Check if an exception of type StopIteration is raised inside a generator"""
frame = node.frame()
if not isinstance(frame, astroid.FunctionDef) or not frame.is_generator():
return
if utils.node_ignores_exception(node, StopIteration):
return
if not node.exc:
return
exc = utils.safe_infer(node.exc)
if exc is None or exc is astroid.Uninferable:
return
if self._check_exception_inherit_from_stopiteration(exc):
self.add_message("stop-iteration-return", node=node) |
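For context, a minimal example of the pattern this checker targets: raising StopIteration inside a generator (which PEP 479 turns into a RuntimeError) versus simply returning.

def bad_gen(items):
    for item in items:
        if item is None:
            raise StopIteration  # flagged: becomes RuntimeError under PEP 479
        yield item

def good_gen(items):
    for item in items:
        if item is None:
            return               # the idiomatic way to end a generator early
        yield item

print(list(good_gen([1, 2, None, 3])))  # [1, 2]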
def create_product(name, abbreviation, **kwargs):
"""
Create a new Product
"""
data = create_product_raw(name, abbreviation, **kwargs)
if data:
return utils.format_json(data) | Create a new Product | Below is the instruction that describes the task:
### Input:
Create a new Product
### Response:
def create_product(name, abbreviation, **kwargs):
"""
Create a new Product
"""
data = create_product_raw(name, abbreviation, **kwargs)
if data:
return utils.format_json(data) |
def decode_packet(packet: str) -> dict:
"""Break packet down into primitives, and do basic interpretation.
>>> decode_packet('20;06;Kaku;ID=41;SWITCH=1;CMD=ON;') == {
... 'node': 'gateway',
... 'protocol': 'kaku',
... 'id': '000041',
... 'switch': '1',
... 'command': 'on',
... }
True
"""
node_id, _, protocol, attrs = packet.split(DELIM, 3)
data = cast(Dict[str, Any], {
'node': PacketHeader(node_id).name,
})
# make exception for version response
data['protocol'] = UNKNOWN
if '=' in protocol:
attrs = protocol + DELIM + attrs
# no attributes but instead the welcome banner
elif 'RFLink Gateway' in protocol:
data.update(parse_banner(protocol))
elif protocol == 'PONG':
data['ping'] = protocol.lower()
# debug response
elif protocol == 'DEBUG':
data['protocol'] = protocol.lower()
data['tm'] = packet[3:5]
# failure response
elif protocol == 'CMD UNKNOWN':
data['response'] = 'command_unknown'
data['ok'] = False
# ok response
elif protocol == 'OK':
data['ok'] = True
# it's a regular packet
else:
data['protocol'] = protocol.lower()
# convert key=value pairs where needed
for attr in filter(None, attrs.strip(DELIM).split(DELIM)):
key, value = attr.lower().split('=')
if key in VALUE_TRANSLATION:
value = VALUE_TRANSLATION.get(key)(value)
name = PACKET_FIELDS.get(key, key)
data[name] = value
unit = UNITS.get(key, None)
if unit:
data[name + '_unit'] = unit
# correct KaKu device address
if data.get('protocol', '') == 'kaku' and len(data['id']) != 6:
data['id'] = '0000' + data['id']
return data | Break packet down into primitives, and do basic interpretation.
>>> decode_packet('20;06;Kaku;ID=41;SWITCH=1;CMD=ON;') == {
... 'node': 'gateway',
... 'protocol': 'kaku',
... 'id': '000041',
... 'switch': '1',
... 'command': 'on',
... }
True | Below is the instruction that describes the task:
### Input:
Break packet down into primitives, and do basic interpretation.
>>> decode_packet('20;06;Kaku;ID=41;SWITCH=1;CMD=ON;') == {
... 'node': 'gateway',
... 'protocol': 'kaku',
... 'id': '000041',
... 'switch': '1',
... 'command': 'on',
... }
True
### Response:
def decode_packet(packet: str) -> dict:
"""Break packet down into primitives, and do basic interpretation.
>>> decode_packet('20;06;Kaku;ID=41;SWITCH=1;CMD=ON;') == {
... 'node': 'gateway',
... 'protocol': 'kaku',
... 'id': '000041',
... 'switch': '1',
... 'command': 'on',
... }
True
"""
node_id, _, protocol, attrs = packet.split(DELIM, 3)
data = cast(Dict[str, Any], {
'node': PacketHeader(node_id).name,
})
# make exception for version response
data['protocol'] = UNKNOWN
if '=' in protocol:
attrs = protocol + DELIM + attrs
# no attributes but instead the welcome banner
elif 'RFLink Gateway' in protocol:
data.update(parse_banner(protocol))
elif protocol == 'PONG':
data['ping'] = protocol.lower()
# debug response
elif protocol == 'DEBUG':
data['protocol'] = protocol.lower()
data['tm'] = packet[3:5]
# failure response
elif protocol == 'CMD UNKNOWN':
data['response'] = 'command_unknown'
data['ok'] = False
# ok response
elif protocol == 'OK':
data['ok'] = True
# its a regular packet
else:
data['protocol'] = protocol.lower()
# convert key=value pairs where needed
for attr in filter(None, attrs.strip(DELIM).split(DELIM)):
key, value = attr.lower().split('=')
if key in VALUE_TRANSLATION:
value = VALUE_TRANSLATION.get(key)(value)
name = PACKET_FIELDS.get(key, key)
data[name] = value
unit = UNITS.get(key, None)
if unit:
data[name + '_unit'] = unit
# correct KaKu device address
if data.get('protocol', '') == 'kaku' and len(data['id']) != 6:
data['id'] = '0000' + data['id']
return data |
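A stripped-down, runnable sketch of the key=value attribute loop at the heart of the function above; DELIM is assumed to be ';' and the translation/field tables are reduced to trivial stand-ins.

DELIM = ';'  # assumed delimiter, matching the packet format shown above
VALUE_TRANSLATION = {'cmd': str.lower}          # stand-in for the real table
PACKET_FIELDS = {'cmd': 'command', 'id': 'id'}  # stand-in for the real table

def parse_attrs(attrs):
    data = {}
    for attr in filter(None, attrs.strip(DELIM).split(DELIM)):
        key, value = attr.lower().split('=')
        if key in VALUE_TRANSLATION:
            value = VALUE_TRANSLATION[key](value)
        data[PACKET_FIELDS.get(key, key)] = value
    return data

print(parse_attrs('ID=41;SWITCH=1;CMD=ON;'))
# {'id': '41', 'switch': '1', 'command': 'on'}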
def get_user_token(login, password):
"""Retrieve user token from the API (via authentication)."""
client = get_user_api()
# Never use API key for the token endpoint
config = cloudsmith_api.Configuration()
set_api_key(config, None)
with catch_raise_api_exception():
data, _, headers = client.user_token_create_with_http_info(
data={"email": login, "password": password}
)
ratelimits.maybe_rate_limit(client, headers)
return data.token | Retrieve user token from the API (via authentication). | Below is the instruction that describes the task:
### Input:
Retrieve user token from the API (via authentication).
### Response:
def get_user_token(login, password):
"""Retrieve user token from the API (via authentication)."""
client = get_user_api()
# Never use API key for the token endpoint
config = cloudsmith_api.Configuration()
set_api_key(config, None)
with catch_raise_api_exception():
data, _, headers = client.user_token_create_with_http_info(
data={"email": login, "password": password}
)
ratelimits.maybe_rate_limit(client, headers)
return data.token |
def shellne(command):
"""
Runs 'commands' on the underlying shell; any stderr is echo'd to the
console.
Raises a RuntimeError on any shell exec errors.
"""
child = os.popen(command)
data = child.read()
err = child.close()
if err:
raise RuntimeError('%s failed w/ exit code %d' % (command, err))
return data | Runs 'commands' on the underlying shell; any stderr is echo'd to the
console.
Raises a RuntimeError on any shell exec errors. | Below is the instruction that describes the task:
### Input:
Runs 'commands' on the underlying shell; any stderr is echo'd to the
console.
Raises a RuntimeError on any shell exec errors.
### Response:
def shellne(command):
"""
Runs 'commands' on the underlying shell; any stderr is echo'd to the
console.
Raises a RuntimeError on any shell exec errors.
"""
child = os.popen(command)
data = child.read()
err = child.close()
if err:
raise RuntimeError('%s failed w/ exit code %d' % (command, err))
return data |
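As a point of comparison (not part of the original module), the same contract can be expressed with subprocess.run, which captures stdout and stderr rather than letting stderr flow to the console:

import subprocess

def shellne_subprocess(command):
    # check=False so we can raise the same RuntimeError-style message ourselves.
    result = subprocess.run(command, shell=True, capture_output=True, text=True)
    if result.returncode:
        raise RuntimeError('%s failed w/ exit code %d' % (command, result.returncode))
    return result.stdout

print(shellne_subprocess('echo hello').strip())  # hello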
def main():
"""
Commandline interface to initialize Sockeye embedding weights with pretrained word representations.
"""
setup_main_logger(console=True, file_logging=False)
params = argparse.ArgumentParser(description='Quick usage: python3 -m sockeye.init_embedding '
'-w embed-in-src.npy embed-in-tgt.npy '
'-i vocab-in-src.json vocab-in-tgt.json '
'-o vocab-out-src.json vocab-out-tgt.json '
'-n source_embed_weight target_embed_weight '
'-f params.init')
arguments.add_init_embedding_args(params)
args = params.parse_args()
init_embeddings(args) | Commandline interface to initialize Sockeye embedding weights with pretrained word representations. | Below is the instruction that describes the task:
### Input:
Commandline interface to initialize Sockeye embedding weights with pretrained word representations.
### Response:
def main():
"""
Commandline interface to initialize Sockeye embedding weights with pretrained word representations.
"""
setup_main_logger(console=True, file_logging=False)
params = argparse.ArgumentParser(description='Quick usage: python3 -m sockeye.init_embedding '
'-w embed-in-src.npy embed-in-tgt.npy '
'-i vocab-in-src.json vocab-in-tgt.json '
'-o vocab-out-src.json vocab-out-tgt.json '
'-n source_embed_weight target_embed_weight '
'-f params.init')
arguments.add_init_embedding_args(params)
args = params.parse_args()
init_embeddings(args) |
def seek(self, offset, from_what=0):
"""
seek(offset, from_what=0) -> int. Change stream position.
Seek to byte offset pos relative to position indicated by whence:
0 Start of stream (the default). pos should be >= tell();
1 Current position - negative pos not implemented;
2 End of stream - not implemented.
Returns the new absolute position.
"""
if from_what == 0: # From the beginning
if offset >= self.tell():
self.seek(offset - self.tell(), from_what=1)
else:
raise NotImplementedError("Can't seek backwards")
elif from_what == 1: # From the cursor position
if offset < 0:
raise NotImplementedError("Can't seek backwards")
else:
self.read(offset)
else:
raise NotImplementedError("Can't seek from there")
return self.tell() | seek(offset, from_what=0) -> int. Change stream position.
Seek to byte offset pos relative to position indicated by whence:
0 Start of stream (the default). pos should be >= tell();
1 Current position - negative pos not implemented;
2 End of stream - not implemented.
Returns the new absolute position. | Below is the instruction that describes the task:
### Input:
seek(offset, from_what=0) -> int. Change stream position.
Seek to byte offset pos relative to position indicated by whence:
0 Start of stream (the default). pos should be >= tell();
1 Current position - negative pos not implemented;
2 End of stream - not implemented.
Returns the new absolute position.
### Response:
def seek(self, offset, from_what=0):
"""
seek(offset, from_what=0) -> int. Change stream position.
Seek to byte offset pos relative to position indicated by whence:
0 Start of stream (the default). pos should be >= tell();
1 Current position - negative pos not implemented;
2 End of stream - not implemented.
Returns the new absolute position.
"""
if from_what == 0: # From the beginning
if offset >= self.tell():
self.seek(offset - self.tell(), from_what=1)
else:
raise NotImplementedError("Can't seek backwards")
elif from_what == 1: # From the cursor position
if offset < 0:
raise NotImplementedError("Can't seek backwards")
else:
self.read(offset)
else:
raise NotImplementedError("Can't seek from there")
return self.tell() |
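A compact, runnable model of the forward-only contract implemented above: backward seeks raise, and relative seeks simply consume bytes. The class below is a stand-in built on io.BytesIO, not the original stream type.

import io

class ForwardOnlyReader:
    """Stand-in stream supporting only tell(), read() and forward seeks."""
    def __init__(self, data):
        self._buf = io.BytesIO(data)
    def tell(self):
        return self._buf.tell()
    def read(self, n):
        return self._buf.read(n)
    def seek(self, offset, from_what=0):
        if from_what == 0:          # absolute: must not move backwards
            if offset < self.tell():
                raise NotImplementedError("Can't seek backwards")
            self.read(offset - self.tell())
        elif from_what == 1:        # relative: skip forward by reading
            if offset < 0:
                raise NotImplementedError("Can't seek backwards")
            self.read(offset)
        else:
            raise NotImplementedError("Can't seek from there")
        return self.tell()

r = ForwardOnlyReader(b"abcdefgh")
print(r.seek(3), r.seek(2, 1))  # 3 5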
def solve_for(self, var=None):
"""
Ensure that the child is within its parent
"""
margin = self.margin_method()
if self.parent_nw[0].value > self.child[0].value - margin:
_update(self.child[0], self.parent_nw[0].value + margin)
# Right edge (east)
if self.parent_se[0].value < self.child[0].value + margin:
_update(self.child[0], self.parent_se[0].value - margin)
# Upper edge (north)
if self.parent_nw[1].value > self.child[1].value - margin:
_update(self.child[1], self.parent_nw[1].value + margin)
# Lower edge (south)
if self.parent_se[1].value < self.child[1].value + margin:
_update(self.child[1], self.parent_se[1].value - margin) | Ensure that the child is within its parent | Below is the instruction that describes the task:
### Input:
Ensure that the child is within its parent
### Response:
def solve_for(self, var=None):
"""
Ensure that the child is within its parent
"""
margin = self.margin_method()
if self.parent_nw[0].value > self.child[0].value - margin:
_update(self.child[0], self.parent_nw[0].value + margin)
# Right edge (east)
if self.parent_se[0].value < self.child[0].value + margin:
_update(self.child[0], self.parent_se[0].value - margin)
# Upper edge (north)
if self.parent_nw[1].value > self.child[1].value - margin:
_update(self.child[1], self.parent_nw[1].value + margin)
# Lower edge (south)
if self.parent_se[1].value < self.child[1].value + margin:
_update(self.child[1], self.parent_se[1].value - margin) |
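The constraint above is essentially a clamp of a child point into its parent's bounding box with a margin. A dependency-free sketch of that idea, using plain tuples instead of the solver's variable objects:

def clamp_into_parent(child, parent_nw, parent_se, margin):
    # Keep (x, y) at least `margin` away from every edge of the parent box.
    x = min(max(child[0], parent_nw[0] + margin), parent_se[0] - margin)
    y = min(max(child[1], parent_nw[1] + margin), parent_se[1] - margin)
    return x, y

print(clamp_into_parent((2, 50), (0, 0), (100, 40), margin=5))  # (5, 35)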
def prepare_request_params(self, _query_params, _json_params):
""" Prepare query and update params. """
self._query_params = dictset(
_query_params or self.request.params.mixed())
self._json_params = dictset(_json_params)
ctype = self.request.content_type
if self.request.method in ['POST', 'PUT', 'PATCH']:
if ctype == 'application/json':
try:
self._json_params.update(self.request.json)
except simplejson.JSONDecodeError:
log.error(
"Expecting JSON. Received: '{}'. "
"Request: {} {}".format(
self.request.body, self.request.method,
self.request.url))
self._json_params = BaseView.convert_dotted(self._json_params)
self._query_params = BaseView.convert_dotted(self._query_params)
self._params = self._query_params.copy()
self._params.update(self._json_params) | Prepare query and update params. | Below is the instruction that describes the task:
### Input:
Prepare query and update params.
### Response:
def prepare_request_params(self, _query_params, _json_params):
""" Prepare query and update params. """
self._query_params = dictset(
_query_params or self.request.params.mixed())
self._json_params = dictset(_json_params)
ctype = self.request.content_type
if self.request.method in ['POST', 'PUT', 'PATCH']:
if ctype == 'application/json':
try:
self._json_params.update(self.request.json)
except simplejson.JSONDecodeError:
log.error(
"Expecting JSON. Received: '{}'. "
"Request: {} {}".format(
self.request.body, self.request.method,
self.request.url))
self._json_params = BaseView.convert_dotted(self._json_params)
self._query_params = BaseView.convert_dotted(self._query_params)
self._params = self._query_params.copy()
self._params.update(self._json_params) |
def from_arrays(event, time, name_event=None, name_time=None):
"""Create structured array.
Parameters
----------
event : array-like
Event indicator. A boolean array or array with values 0/1.
time : array-like
Observed time.
name_event : str|None
Name of event, optional, default: 'event'
name_time : str|None
Name of observed time, optional, default: 'time'
Returns
-------
y : np.array
Structured array with two fields.
"""
name_event = name_event or 'event'
name_time = name_time or 'time'
if name_time == name_event:
raise ValueError('name_time must be different from name_event')
time = numpy.asanyarray(time, dtype=numpy.float_)
y = numpy.empty(time.shape[0],
dtype=[(name_event, numpy.bool_), (name_time, numpy.float_)])
y[name_time] = time
event = numpy.asanyarray(event)
check_consistent_length(time, event)
if numpy.issubdtype(event.dtype, numpy.bool_):
y[name_event] = event
else:
events = numpy.unique(event)
events.sort()
if len(events) != 2:
raise ValueError('event indicator must be binary')
if numpy.all(events == numpy.array([0, 1], dtype=events.dtype)):
y[name_event] = event.astype(numpy.bool_)
else:
raise ValueError('non-boolean event indicator must contain 0 and 1 only')
return y | Create structured array.
Parameters
----------
event : array-like
Event indicator. A boolean array or array with values 0/1.
time : array-like
Observed time.
name_event : str|None
Name of event, optional, default: 'event'
name_time : str|None
Name of observed time, optional, default: 'time'
Returns
-------
y : np.array
Structured array with two fields. | Below is the instruction that describes the task:
### Input:
Create structured array.
Parameters
----------
event : array-like
Event indicator. A boolean array or array with values 0/1.
time : array-like
Observed time.
name_event : str|None
Name of event, optional, default: 'event'
name_time : str|None
Name of observed time, optional, default: 'time'
Returns
-------
y : np.array
Structured array with two fields.
### Response:
def from_arrays(event, time, name_event=None, name_time=None):
"""Create structured array.
Parameters
----------
event : array-like
Event indicator. A boolean array or array with values 0/1.
time : array-like
Observed time.
name_event : str|None
Name of event, optional, default: 'event'
name_time : str|None
Name of observed time, optional, default: 'time'
Returns
-------
y : np.array
Structured array with two fields.
"""
name_event = name_event or 'event'
name_time = name_time or 'time'
if name_time == name_event:
raise ValueError('name_time must be different from name_event')
time = numpy.asanyarray(time, dtype=numpy.float_)
y = numpy.empty(time.shape[0],
dtype=[(name_event, numpy.bool_), (name_time, numpy.float_)])
y[name_time] = time
event = numpy.asanyarray(event)
check_consistent_length(time, event)
if numpy.issubdtype(event.dtype, numpy.bool_):
y[name_event] = event
else:
events = numpy.unique(event)
events.sort()
if len(events) != 2:
raise ValueError('event indicator must be binary')
if numpy.all(events == numpy.array([0, 1], dtype=events.dtype)):
y[name_event] = event.astype(numpy.bool_)
else:
raise ValueError('non-boolean event indicator must contain 0 and 1 only')
return y |
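A runnable sketch of the structured array this helper produces, built directly with numpy; the field names 'event' and 'time' are the documented defaults.

import numpy

event = numpy.array([True, False, True])
time = numpy.array([12.0, 7.5, 3.1])

y = numpy.empty(time.shape[0], dtype=[('event', numpy.bool_), ('time', numpy.float64)])
y['event'] = event
y['time'] = time

print(y['time'][y['event']])  # observed times of the uncensored records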
def get_lldp_neighbor_detail_input_request_type_get_request_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
input = ET.SubElement(get_lldp_neighbor_detail, "input")
request_type = ET.SubElement(input, "request-type")
get_request = ET.SubElement(request_type, "get-request")
interface_type = ET.SubElement(get_request, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_lldp_neighbor_detail_input_request_type_get_request_interface_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
input = ET.SubElement(get_lldp_neighbor_detail, "input")
request_type = ET.SubElement(input, "request-type")
get_request = ET.SubElement(request_type, "get-request")
interface_type = ET.SubElement(get_request, "interface-type")
interface_type.text = kwargs.pop('interface_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def Unzip(iterable):
"""Unzips specified iterable of pairs to pair of two iterables.
This function is an inversion of the standard `zip` function and the following
hold:
* ∀ l, r. l, r == unzip(zip(l, r))
* ∀ p. p == zip(unzip(p))
Examples:
>>> Unzip([("foo", 1), ("bar", 2), ("baz", 3)])
(["foo", "bar", "baz"], [1, 2, 3])
Args:
iterable: An iterable of pairs to unzip.
Returns:
A pair of iterables after unzipping.
"""
lefts = []
rights = []
for left, right in iterable:
lefts.append(left)
rights.append(right)
return lefts, rights | Unzips specified iterable of pairs to pair of two iterables.
This function is an inversion of the standard `zip` function and the following
hold:
* ∀ l, r. l, r == unzip(zip(l, r))
* ∀ p. p == zip(unzip(p))
Examples:
>>> Unzip([("foo", 1), ("bar", 2), ("baz", 3)])
(["foo", "bar", "baz"], [1, 2, 3])
Args:
iterable: An iterable of pairs to unzip.
Returns:
A pair of iterables after unzipping. | Below is the instruction that describes the task:
### Input:
Unzips specified iterable of pairs to pair of two iterables.
This function is an inversion of the standard `zip` function and the following
hold:
* ∀ l, r. l, r == unzip(zip(l, r))
* ∀ p. p == zip(unzip(p))
Examples:
>>> Unzip([("foo", 1), ("bar", 2), ("baz", 3)])
(["foo", "bar", "baz"], [1, 2, 3])
Args:
iterable: An iterable of pairs to unzip.
Returns:
A pair of iterables after unzipping.
### Response:
def Unzip(iterable):
"""Unzips specified iterable of pairs to pair of two iterables.
This function is an inversion of the standard `zip` function and the following
hold:
* ∀ l, r. l, r == unzip(zip(l, r))
* ∀ p. p == zip(unzip(p))
Examples:
>>> Unzip([("foo", 1), ("bar", 2), ("baz", 3)])
(["foo", "bar", "baz"], [1, 2, 3])
Args:
iterable: An iterable of pairs to unzip.
Returns:
A pair of iterables after unzipping.
"""
lefts = []
rights = []
for left, right in iterable:
lefts.append(left)
rights.append(right)
return lefts, rights |
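A quick runnable check of the inverse relationship stated in the docstring, using a local copy of the loop (zip(*pairs) would give tuples rather than lists, hence the explicit appends):

def unzip_sketch(iterable):
    lefts, rights = [], []
    for left, right in iterable:
        lefts.append(left)
        rights.append(right)
    return lefts, rights

pairs = [("foo", 1), ("bar", 2), ("baz", 3)]
lefts, rights = unzip_sketch(pairs)
print(lefts, rights)                      # ['foo', 'bar', 'baz'] [1, 2, 3]
print(list(zip(lefts, rights)) == pairs)  # True: zip inverts the unzip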
def stop(self, io_loop):
"""
Asynchronously stop the application.
:param tornado.ioloop.IOLoop io_loop: loop to run until all
callbacks, timeouts, and queued calls are complete
Call this method to start the application shutdown process.
The IOLoop will be stopped once the application is completely
shut down.
"""
running_async = False
shutdown = _ShutdownHandler(io_loop)
for callback in self.on_shutdown_callbacks:
try:
maybe_future = callback(self.tornado_application)
if asyncio.iscoroutine(maybe_future):
maybe_future = asyncio.create_task(maybe_future)
if concurrent.is_future(maybe_future):
shutdown.add_future(maybe_future)
running_async = True
except Exception as error:
self.logger.warning('exception raised from shutdown '
'callback %r, ignored: %s',
callback, error, exc_info=1)
if not running_async:
shutdown.on_shutdown_ready() | Asynchronously stop the application.
:param tornado.ioloop.IOLoop io_loop: loop to run until all
callbacks, timeouts, and queued calls are complete
Call this method to start the application shutdown process.
The IOLoop will be stopped once the application is completely
shut down. | Below is the instruction that describes the task:
### Input:
Asynchronously stop the application.
:param tornado.ioloop.IOLoop io_loop: loop to run until all
callbacks, timeouts, and queued calls are complete
Call this method to start the application shutdown process.
The IOLoop will be stopped once the application is completely
shut down.
### Response:
def stop(self, io_loop):
"""
Asynchronously stop the application.
:param tornado.ioloop.IOLoop io_loop: loop to run until all
callbacks, timeouts, and queued calls are complete
Call this method to start the application shutdown process.
The IOLoop will be stopped once the application is completely
shut down.
"""
running_async = False
shutdown = _ShutdownHandler(io_loop)
for callback in self.on_shutdown_callbacks:
try:
maybe_future = callback(self.tornado_application)
if asyncio.iscoroutine(maybe_future):
maybe_future = asyncio.create_task(maybe_future)
if concurrent.is_future(maybe_future):
shutdown.add_future(maybe_future)
running_async = True
except Exception as error:
self.logger.warning('exception raised from shutdown '
'callback %r, ignored: %s',
callback, error, exc_info=1)
if not running_async:
shutdown.on_shutdown_ready() |
def _get_shards(self):
"""
Returns comma separated list of configured Solr cores
"""
if self._shards is None:
endpoints = []
for endpoint in self.endpoints:
# We need to remove any http:// prefixes from URLs
url = urlparse.urlparse(self.endpoints[endpoint])
endpoints.append("/".join([url.netloc, url.path]))
self._shards = ",".join(endpoints)
return self._shards | Returns comma separated list of configured Solr cores | Below is the instruction that describes the task:
### Input:
Returns comma separated list of configured Solr cores
### Response:
def _get_shards(self):
"""
Returns comma separated list of configured Solr cores
"""
if self._shards is None:
endpoints = []
for endpoint in self.endpoints:
# We need to remove any http:// prefixes from URLs
url = urlparse.urlparse(self.endpoints[endpoint])
endpoints.append("/".join([url.netloc, url.path]))
self._shards = ",".join(endpoints)
return self._shards |
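A standalone sketch of the URL-to-shard conversion performed in the loop above, using urllib.parse; the endpoint URLs below are made up.

from urllib.parse import urlparse

endpoints = {
    "core_a": "http://solr1.example.org:8983/solr/core_a",
    "core_b": "http://solr2.example.org:8983/solr/core_b",
}

parts = []
for url in endpoints.values():
    parsed = urlparse(url)
    # netloc is host:port, path starts with '/', matching the join used above
    parts.append("/".join([parsed.netloc, parsed.path]))
print(",".join(parts))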
def build_wheel(source_dir, wheel_dir, config_settings=None):
"""Build a wheel from a source directory using PEP 517 hooks.
:param str source_dir: Source directory containing pyproject.toml
:param str wheel_dir: Target directory to create wheel in
:param dict config_settings: Options to pass to build backend
This is a blocking function which will run pip in a subprocess to install
build requirements.
"""
if config_settings is None:
config_settings = {}
requires, backend = _load_pyproject(source_dir)
hooks = Pep517HookCaller(source_dir, backend)
with BuildEnvironment() as env:
env.pip_install(requires)
reqs = hooks.get_requires_for_build_wheel(config_settings)
env.pip_install(reqs)
return hooks.build_wheel(wheel_dir, config_settings) | Build a wheel from a source directory using PEP 517 hooks.
:param str source_dir: Source directory containing pyproject.toml
:param str wheel_dir: Target directory to create wheel in
:param dict config_settings: Options to pass to build backend
This is a blocking function which will run pip in a subprocess to install
build requirements. | Below is the instruction that describes the task:
### Input:
Build a wheel from a source directory using PEP 517 hooks.
:param str source_dir: Source directory containing pyproject.toml
:param str wheel_dir: Target directory to create wheel in
:param dict config_settings: Options to pass to build backend
This is a blocking function which will run pip in a subprocess to install
build requirements.
### Response:
def build_wheel(source_dir, wheel_dir, config_settings=None):
"""Build a wheel from a source directory using PEP 517 hooks.
:param str source_dir: Source directory containing pyproject.toml
:param str wheel_dir: Target directory to create wheel in
:param dict config_settings: Options to pass to build backend
This is a blocking function which will run pip in a subprocess to install
build requirements.
"""
if config_settings is None:
config_settings = {}
requires, backend = _load_pyproject(source_dir)
hooks = Pep517HookCaller(source_dir, backend)
with BuildEnvironment() as env:
env.pip_install(requires)
reqs = hooks.get_requires_for_build_wheel(config_settings)
env.pip_install(reqs)
return hooks.build_wheel(wheel_dir, config_settings) |
def put_event(self, evt):
"""
Put an :class:`tf.Event`.
`step` and `wall_time` fields of :class:`tf.Event` will be filled automatically.
Args:
evt (tf.Event):
"""
evt.step = self.global_step
evt.wall_time = time.time()
self._dispatch(lambda m: m.process_event(evt)) | Put an :class:`tf.Event`.
`step` and `wall_time` fields of :class:`tf.Event` will be filled automatically.
Args:
evt (tf.Event): | Below is the instruction that describes the task:
### Input:
Put an :class:`tf.Event`.
`step` and `wall_time` fields of :class:`tf.Event` will be filled automatically.
Args:
evt (tf.Event):
### Response:
def put_event(self, evt):
"""
Put an :class:`tf.Event`.
`step` and `wall_time` fields of :class:`tf.Event` will be filled automatically.
Args:
evt (tf.Event):
"""
evt.step = self.global_step
evt.wall_time = time.time()
self._dispatch(lambda m: m.process_event(evt)) |
def get_password(self, service, username):
"""
Read the password from the file.
"""
assoc = self._generate_assoc(service, username)
service = escape_for_ini(service)
username = escape_for_ini(username)
# load the passwords from the file
config = configparser.RawConfigParser()
if os.path.exists(self.file_path):
config.read(self.file_path)
# fetch the password
try:
password_base64 = config.get(service, username).encode()
# decode with base64
password_encrypted = decodebytes(password_base64)
# decrypt the password with associated data
try:
password = self.decrypt(password_encrypted, assoc).decode(
'utf-8')
except ValueError:
# decrypt the password without associated data
password = self.decrypt(password_encrypted).decode('utf-8')
except (configparser.NoOptionError, configparser.NoSectionError):
password = None
return password | Read the password from the file. | Below is the instruction that describes the task:
### Input:
Read the password from the file.
### Response:
def get_password(self, service, username):
"""
Read the password from the file.
"""
assoc = self._generate_assoc(service, username)
service = escape_for_ini(service)
username = escape_for_ini(username)
# load the passwords from the file
config = configparser.RawConfigParser()
if os.path.exists(self.file_path):
config.read(self.file_path)
# fetch the password
try:
password_base64 = config.get(service, username).encode()
# decode with base64
password_encrypted = decodebytes(password_base64)
# decrypt the password with associated data
try:
password = self.decrypt(password_encrypted, assoc).decode(
'utf-8')
except ValueError:
# decrypt the password without associated data
password = self.decrypt(password_encrypted).decode('utf-8')
except (configparser.NoOptionError, configparser.NoSectionError):
password = None
return password |
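A reduced, runnable sketch of the storage format handled above: an INI file whose values are base64-encoded blobs. The encryption layer is omitted here; `decrypt` in the real class is backend-specific, and the section/option names below are hypothetical.

import base64
import configparser

# Hypothetical keyring file content: values are base64-encoded (encrypted) blobs.
config = configparser.RawConfigParser()
config.read_string("[github.com]\nalice = " + base64.encodebytes(b"s3cret").decode())

password_base64 = config.get("github.com", "alice").encode()
print(base64.decodebytes(password_base64))  # b's3cret' (would still need decrypting)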
def proto_value_for_feature(example, feature_name):
"""Get the value of a feature from Example regardless of feature type."""
feature = get_example_features(example)[feature_name]
if feature is None:
raise ValueError('Feature {} is not on example proto.'.format(feature_name))
feature_type = feature.WhichOneof('kind')
if feature_type is None:
raise ValueError('Feature {} on example proto has no declared type.'.format(
feature_name))
return getattr(feature, feature_type).value | Get the value of a feature from Example regardless of feature type. | Below is the instruction that describes the task:
### Input:
Get the value of a feature from Example regardless of feature type.
### Response:
def proto_value_for_feature(example, feature_name):
"""Get the value of a feature from Example regardless of feature type."""
feature = get_example_features(example)[feature_name]
if feature is None:
raise ValueError('Feature {} is not on example proto.'.format(feature_name))
feature_type = feature.WhichOneof('kind')
if feature_type is None:
raise ValueError('Feature {} on example proto has no declared type.'.format(
feature_name))
return getattr(feature, feature_type).value |
def set_value(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance on which to set a value
path : str
path at which to set the value
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
instance[tail] = value | Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance on which to set a value
path : str
path at which to set the value
value :
value to set
ref : str or None
reference path if `path` is relative | Below is the instruction that describes the task:
### Input:
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance on which to set a value
path : str
path at which to set the value
value :
value to set
ref : str or None
reference path if `path` is relative
### Response:
def set_value(instance, path, value, ref=None):
"""
Set `value` on `instance` at the given `path` and create missing intermediate objects.
Parameters
----------
instance : dict or list
instance on which to set a value
path : str
path at which to set the value
value :
value to set
ref : str or None
reference path if `path` is relative
"""
*head, tail = split_path(path, ref)
for part in head:
instance = instance.setdefault(part, {})
instance[tail] = value |
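The core of the helper above is the setdefault walk that materialises intermediate dictionaries. A runnable sketch with a plain list of path parts (the original's split_path handling of relative references is not reproduced):

def set_by_parts(instance, parts, value):
    *head, tail = parts
    for part in head:
        instance = instance.setdefault(part, {})  # create missing levels on the way down
    instance[tail] = value

doc = {}
set_by_parts(doc, ["a", "b", "c"], 42)
print(doc)  # {'a': {'b': {'c': 42}}}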
def _bitForCoordinate(cls, coordinate, n):
"""
Maps the coordinate to a bit in the SDR.
@param coordinate (numpy.array) Coordinate
@param n (int) The number of available bits in the SDR
@return (int) The index to a bit in the SDR
"""
seed = cls._hashCoordinate(coordinate)
rng = Random(seed)
return rng.getUInt32(n) | Maps the coordinate to a bit in the SDR.
@param coordinate (numpy.array) Coordinate
@param n (int) The number of available bits in the SDR
@return (int) The index to a bit in the SDR | Below is the instruction that describes the task:
### Input:
Maps the coordinate to a bit in the SDR.
@param coordinate (numpy.array) Coordinate
@param n (int) The number of available bits in the SDR
@return (int) The index to a bit in the SDR
### Response:
def _bitForCoordinate(cls, coordinate, n):
"""
Maps the coordinate to a bit in the SDR.
@param coordinate (numpy.array) Coordinate
@param n (int) The number of available bits in the SDR
@return (int) The index to a bit in the SDR
"""
seed = cls._hashCoordinate(coordinate)
rng = Random(seed)
return rng.getUInt32(n) |
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=default_split_fn,
group_fn=default_split_fn,
average_group=False,
shaded_std=True,
shaded_err=True,
figsize=None,
legend_outside=False,
resample=0,
smooth_step=1.0
):
'''
Plot multiple Results objects
xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values.
By default, x is cumsum of episode lengths, and y is episode rewards
split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.
That is, the results r for which split_fn(r) is different will be put on different sub-panels.
By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are
stacked vertically in the figure.
group_fn: function Result -> hashable - function that converts results objects into keys to group curves by.
That is, the results r for which group_fn(r) is the same will be put into the same group.
Curves in the same group have the same color (if average_group is False), or averaged over
(if average_group is True). The default value is the same as default value for split_fn
average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling
(if resample = 0, will use 512 steps)
shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be
shown (only applicable if average_group = True)
shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves
(that is, standard deviation divided by square root of number of curves) will be
shown (only applicable if average_group = True)
figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of
sub-panels.
legend_outside: bool - if True, will place the legend outside of the sub-panels.
resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric
EMA smoothing (see the docstring for symmetric_ema).
Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default
value is 512.
smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).
See docstrings for decay_steps in symmetric_ema or one_sided_ema functions.
'''
if split_fn is None: split_fn = lambda _ : ''
if group_fn is None: group_fn = lambda _ : ''
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
assert len(sk2r) > 0
assert isinstance(resample, int), "0: don't resample. <integer>: that many samples"
nrows = len(sk2r)
ncols = 1
figsize = figsize or (6, 6 * nrows)
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
for (isplit, sk) in enumerate(sorted(sk2r.keys())):
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
ax = axarr[isplit][0]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for group in sorted(groups):
xys = gresults[group]
if not any(xys):
continue
color = COLORS[groups.index(group) % len(COLORS)]
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
low = max(x[0] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]),\
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
l, = axarr[isplit][0].plot(usex, ymean, color=color)
g2l[group] = l
if shaded_err:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
# https://matplotlib.org/users/legend_guide.html
plt.tight_layout()
if any(g2l.keys()):
ax.legend(
g2l.values(),
['%s (%i)'%(g, g2c[g]) for g in g2l] if average_group else g2l.keys(),
loc=2 if legend_outside else None,
bbox_to_anchor=(1,1) if legend_outside else None)
ax.set_title(sk)
return f, axarr | Plot multiple Results objects
xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values.
By default, x is cumsum of episode lengths, and y is episode rewards
split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.
That is, the results r for which split_fn(r) is different will be put on different sub-panels.
By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are
stacked vertically in the figure.
group_fn: function Result -> hashable - function that converts results objects into keys to group curves by.
That is, the results r for which group_fn(r) is the same will be put into the same group.
Curves in the same group have the same color (if average_group is False), or averaged over
(if average_group is True). The default value is the same as default value for split_fn
average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling
(if resample = 0, will use 512 steps)
shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be
shown (only applicable if average_group = True)
shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves
(that is, standard deviation divided by square root of number of curves) will be
shown (only applicable if average_group = True)
figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of
sub-panels.
legend_outside: bool - if True, will place the legend outside of the sub-panels.
resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric
EMA smoothing (see the docstring for symmetric_ema).
Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default
value is 512.
smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).
See docstrings for decay_steps in symmetric_ema or one_sided_ema functions. | Below is the instruction that describes the task:
### Input:
Plot multiple Results objects
xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values.
By default, x is cumsum of episode lengths, and y is episode rewards
split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.
That is, the results r for which split_fn(r) is different will be put on different sub-panels.
By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are
stacked vertically in the figure.
group_fn: function Result -> hashable - function that converts results objects into keys to group curves by.
That is, the results r for which group_fn(r) is the same will be put into the same group.
Curves in the same group have the same color (if average_group is False), or averaged over
(if average_group is True). The default value is the same as default value for split_fn
average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling
(if resample = 0, will use 512 steps)
shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be
shown (only applicable if average_group = True)
shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves
(that is, standard deviation divided by square root of number of curves) will be
shown (only applicable if average_group = True)
figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of
sub-panels.
legend_outside: bool - if True, will place the legend outside of the sub-panels.
resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric
EMA smoothing (see the docstring for symmetric_ema).
Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default
value is 512.
smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).
See docstrings for decay_steps in symmetric_ema or one_sided_ema functions.
### Response:
def plot_results(
allresults, *,
xy_fn=default_xy_fn,
split_fn=default_split_fn,
group_fn=default_split_fn,
average_group=False,
shaded_std=True,
shaded_err=True,
figsize=None,
legend_outside=False,
resample=0,
smooth_step=1.0
):
'''
Plot multiple Results objects
xy_fn: function Result -> x,y - function that converts results objects into tuple of x and y values.
By default, x is cumsum of episode lengths, and y is episode rewards
split_fn: function Result -> hashable - function that converts results objects into keys to split curves into sub-panels by.
That is, the results r for which split_fn(r) is different will be put on different sub-panels.
By default, the portion of r.dirname between last / and -<digits> is returned. The sub-panels are
stacked vertically in the figure.
group_fn: function Result -> hashable - function that converts results objects into keys to group curves by.
That is, the results r for which group_fn(r) is the same will be put into the same group.
Curves in the same group have the same color (if average_group is False), or averaged over
(if average_group is True). The default value is the same as default value for split_fn
average_group: bool - if True, will average the curves in the same group and plot the mean. Enables resampling
(if resample = 0, will use 512 steps)
shaded_std: bool - if True (default), the shaded region corresponding to standard deviation of the group of curves will be
shown (only applicable if average_group = True)
shaded_err: bool - if True (default), the shaded region corresponding to error in mean estimate of the group of curves
(that is, standard deviation divided by square root of number of curves) will be
shown (only applicable if average_group = True)
figsize: tuple or None - size of the resulting figure (including sub-panels). By default, width is 6 and height is 6 times number of
sub-panels.
legend_outside: bool - if True, will place the legend outside of the sub-panels.
resample: int - if not zero, size of the uniform grid in x direction to resample onto. Resampling is performed via symmetric
EMA smoothing (see the docstring for symmetric_ema).
Default is zero (no resampling). Note that if average_group is True, resampling is necessary; in that case, default
value is 512.
smooth_step: float - when resampling (i.e. when resample > 0 or average_group is True), use this EMA decay parameter (in units of the new grid step).
See docstrings for decay_steps in symmetric_ema or one_sided_ema functions.
'''
if split_fn is None: split_fn = lambda _ : ''
if group_fn is None: group_fn = lambda _ : ''
sk2r = defaultdict(list) # splitkey2results
for result in allresults:
splitkey = split_fn(result)
sk2r[splitkey].append(result)
assert len(sk2r) > 0
assert isinstance(resample, int), "0: don't resample. <integer>: that many samples"
nrows = len(sk2r)
ncols = 1
figsize = figsize or (6, 6 * nrows)
f, axarr = plt.subplots(nrows, ncols, sharex=False, squeeze=False, figsize=figsize)
groups = list(set(group_fn(result) for result in allresults))
default_samples = 512
if average_group:
resample = resample or default_samples
for (isplit, sk) in enumerate(sorted(sk2r.keys())):
g2l = {}
g2c = defaultdict(int)
sresults = sk2r[sk]
gresults = defaultdict(list)
ax = axarr[isplit][0]
for result in sresults:
group = group_fn(result)
g2c[group] += 1
x, y = xy_fn(result)
if x is None: x = np.arange(len(y))
x, y = map(np.asarray, (x, y))
if average_group:
gresults[group].append((x,y))
else:
if resample:
x, y, counts = symmetric_ema(x, y, x[0], x[-1], resample, decay_steps=smooth_step)
l, = ax.plot(x, y, color=COLORS[groups.index(group) % len(COLORS)])
g2l[group] = l
if average_group:
for group in sorted(groups):
xys = gresults[group]
if not any(xys):
continue
color = COLORS[groups.index(group) % len(COLORS)]
origxs = [xy[0] for xy in xys]
minxlen = min(map(len, origxs))
def allequal(qs):
return all((q==qs[0]).all() for q in qs[1:])
if resample:
low = max(x[0] for x in origxs)
high = min(x[-1] for x in origxs)
usex = np.linspace(low, high, resample)
ys = []
for (x, y) in xys:
ys.append(symmetric_ema(x, y, low, high, resample, decay_steps=smooth_step)[1])
else:
assert allequal([x[:minxlen] for x in origxs]),\
'If you want to average unevenly sampled data, set resample=<number of samples you want>'
usex = origxs[0]
ys = [xy[1][:minxlen] for xy in xys]
ymean = np.mean(ys, axis=0)
ystd = np.std(ys, axis=0)
ystderr = ystd / np.sqrt(len(ys))
l, = axarr[isplit][0].plot(usex, ymean, color=color)
g2l[group] = l
if shaded_err:
ax.fill_between(usex, ymean - ystderr, ymean + ystderr, color=color, alpha=.4)
if shaded_std:
ax.fill_between(usex, ymean - ystd, ymean + ystd, color=color, alpha=.2)
# https://matplotlib.org/users/legend_guide.html
plt.tight_layout()
if any(g2l.keys()):
ax.legend(
g2l.values(),
['%s (%i)'%(g, g2c[g]) for g in g2l] if average_group else g2l.keys(),
loc=2 if legend_outside else None,
bbox_to_anchor=(1,1) if legend_outside else None)
ax.set_title(sk)
return f, axarr |
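A minimal, self-contained sketch of the group-averaging step described above, using plain linear interpolation onto a shared grid in place of symmetric_ema (an assumption made only to keep the sketch dependency-free; the Result objects, COLORS palette and shading calls are omitted):
import numpy as np
def average_curves(xys, nsamples=512):
    # xys: list of (x, y) arrays; returns grid, mean, std and standard error of the mean
    low = max(x[0] for x, _ in xys)      # overlapping range of all curves
    high = min(x[-1] for x, _ in xys)
    grid = np.linspace(low, high, nsamples)
    ys = np.stack([np.interp(grid, x, y) for x, y in xys])
    ymean = ys.mean(axis=0)
    ystd = ys.std(axis=0)
    return grid, ymean, ystd, ystd / np.sqrt(len(ys))
rng = np.random.default_rng(0)
curves = [(np.sort(rng.uniform(0.0, 100.0, 80)), rng.normal(size=80).cumsum()) for _ in range(5)]
grid, ymean, ystd, yerr = average_curves(curves)
print(grid.shape, ymean.shape, float(ystd.max()), float(yerr.max()))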
def has_pattern(self, decl_string):
"""
Implementation detail
"""
if self.__begin == "<":
# Cleanup parentheses blocks before checking for the pattern
# See also the args() method (in this file) for more explanations.
decl_string = re.sub("\\s\\(.*?\\)", "", decl_string).strip()
last_part = decl_string.split('::')[-1]
return (
decl_string.find(self.__begin) != -1 and
last_part.find(self.__end) != -1
) | Implementation detail | Below is the the instruction that describes the task:
### Input:
Implementation detail
### Response:
def has_pattern(self, decl_string):
"""
Implementation detail
"""
if self.__begin == "<":
# Cleanup parentheses blocks before checking for the pattern
# See also the args() method (in this file) for more explanations.
decl_string = re.sub("\\s\\(.*?\\)", "", decl_string).strip()
last_part = decl_string.split('::')[-1]
return (
decl_string.find(self.__begin) != -1 and
last_part.find(self.__end) != -1
) |
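A small standalone sketch of the same check on plain strings, with hypothetical declaration strings, shown only to illustrate why the parenthesised argument blocks are stripped before looking for the closing bracket:
import re
def looks_templated(decl_string, begin="<", end=">"):
    cleaned = re.sub(r"\s\(.*?\)", "", decl_string).strip()   # drop "(...)" argument blocks
    last_part = cleaned.split("::")[-1]
    return begin in cleaned and end in last_part
print(looks_templated("std::vector<int>"))             # True
print(looks_templated("myfunc (std::vector<int> v)"))  # False: the brackets were only inside the removed argument list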
def get_selected_filenames(self):
"""Return selected filenames"""
if self.selectionMode() == self.ExtendedSelection:
if self.selectionModel() is None:
return []
return [self.get_filename(idx) for idx in
self.selectionModel().selectedRows()]
else:
return [self.get_filename(self.currentIndex())] | Return selected filenames | Below is the the instruction that describes the task:
### Input:
Return selected filenames
### Response:
def get_selected_filenames(self):
"""Return selected filenames"""
if self.selectionMode() == self.ExtendedSelection:
if self.selectionModel() is None:
return []
return [self.get_filename(idx) for idx in
self.selectionModel().selectedRows()]
else:
return [self.get_filename(self.currentIndex())] |
def process_memberdocs(self, docs, codeEl, add=True):
"""Associates member type DocElements with their corresponding members
in the specified code element. The element must have a dictionary of
members already."""
#Now we need to associate the members with their docstrings
#Some of the members may be buried inside a group tag and
#need to be handled separately.
remainingdocs = []
expandeddocs = []
#Process any groups that are in the doc list.
for doc in docs:
if isinstance(doc, DocGroup):
kids = self._process_docgroup(doc, codeEl, add)
expandeddocs.extend(kids)
else:
expandeddocs.append(doc)
for doc in expandeddocs:
#Process the docstring, if it doesn't belong to a member
#we will add it to the list of unassigned docstrings,
#these most likely point to type declarations.
if not self._process_docstrings(doc, codeEl.members, add):
remainingdocs.append(doc)
return remainingdocs | Associates member type DocElements with their corresponding members
in the specified code element. The element must have a dictionary of
members already. | Below is the the instruction that describes the task:
### Input:
Associates member type DocElements with their corresponding members
in the specified code element. The element must have a dictionary of
members already.
### Response:
def process_memberdocs(self, docs, codeEl, add=True):
"""Associates member type DocElements with their corresponding members
in the specified code element. The element must have a dictionary of
members already."""
#Now we need to associate the members with their docstrings
#Some of the members may be buried inside a group tag and
#need to be handled separately.
remainingdocs = []
expandeddocs = []
#Process any groups that are in the doc list.
for doc in docs:
if isinstance(doc, DocGroup):
kids = self._process_docgroup(doc, codeEl, add)
expandeddocs.extend(kids)
else:
expandeddocs.append(doc)
for doc in expandeddocs:
#Process the docstring, if it doesn't belong to a member
#we will add it to the list of unassigned docstrings,
#these most likely point to type declarations.
if not self._process_docstrings(doc, codeEl.members, add):
remainingdocs.append(doc)
return remainingdocs |
def _add_operations_bulk(self, chunked_data):
"""Tool for the function add_operations_bulk. It send the requests to
add the operations to the agents.
:param list chunked_data: list of list of the agents and their operations
to add. Each chunk can be requested at the same time.
:return: list of agents containing a message about the added
operations.
:rtype: list of dict.
:raises CraftAiBadRequestError: if the input is not of the right form.
"""
url = "{}/bulk/context".format(self._base_url)
ct_header = {"Content-Type": "application/json; charset=utf-8"}
responses = []
for chunk in chunked_data:
if len(chunk) > 1:
try:
json_pl = json.dumps(chunk)
except TypeError as err:
raise CraftAiBadRequestError("Error while dumping the payload into json"
"format when converting it for the bulk request. {}"
.format(err.__str__()))
resp = self._requests_session.post(url, headers=ct_header, data=json_pl)
resp = self._decode_response(resp)
responses += resp
elif chunk:
message = self.add_operations(chunk[0]["id"], chunk[0]["operations"])
responses.append({"id": chunk[0]["id"], "status": 201, "message": message})
if responses == []:
raise CraftAiBadRequestError("Invalid or empty set of operations given")
return responses | Tool for the function add_operations_bulk. It sends the requests to
add the operations to the agents.
:param list chunked_data: list of list of the agents and their operations
to add. Each chunk can be requested at the same time.
:return: list of agents containing a message about the added
operations.
:rtype: list of dict.
:raises CraftAiBadRequestError: if the input is not of the right form. | Below is the the instruction that describes the task:
### Input:
Tool for the function add_operations_bulk. It sends the requests to
add the operations to the agents.
:param list chunked_data: list of list of the agents and their operations
to add. Each chunk can be requested at the same time.
:return: list of agents containing a message about the added
operations.
:rtype: list of dict.
:raises CraftAiBadRequestError: if the input is not of the right form.
### Response:
def _add_operations_bulk(self, chunked_data):
"""Tool for the function add_operations_bulk. It send the requests to
add the operations to the agents.
:param list chunked_data: list of list of the agents and their operations
to add. Each chunk can be requested at the same time.
:return: list of agents containing a message about the added
operations.
:rtype: list of dict.
:raises CraftAiBadRequestError: if the input is not of the right form.
"""
url = "{}/bulk/context".format(self._base_url)
ct_header = {"Content-Type": "application/json; charset=utf-8"}
responses = []
for chunk in chunked_data:
if len(chunk) > 1:
try:
json_pl = json.dumps(chunk)
except TypeError as err:
raise CraftAiBadRequestError("Error while dumping the payload into json"
"format when converting it for the bulk request. {}"
.format(err.__str__()))
resp = self._requests_session.post(url, headers=ct_header, data=json_pl)
resp = self._decode_response(resp)
responses += resp
elif chunk:
message = self.add_operations(chunk[0]["id"], chunk[0]["operations"])
responses.append({"id": chunk[0]["id"], "status": 201, "message": message})
if responses == []:
raise CraftAiBadRequestError("Invalid or empty set of operations given")
return responses |
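A dependency-free sketch of the chunk layout this helper expects: a list of chunks, each chunk being a list of per-agent payloads that can be serialised in one request (the field names below are illustrative, not a documented schema):
import json
def chunk_payloads(payloads, max_per_chunk=2):
    return [payloads[i:i + max_per_chunk] for i in range(0, len(payloads), max_per_chunk)]
payloads = [{"id": "agent-%d" % i, "operations": [{"timestamp": 1500000000 + i, "context": {"x": i}}]} for i in range(5)]
for chunk in chunk_payloads(payloads):
    body = json.dumps(chunk)    # what a single bulk POST would carry
    print(len(chunk), len(body))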
def _fill_topology_cfg(self, topo_dict):
"""Fills the extra configurations in the topology. """
cfg_dict = {}
if topo_dict.bond_member_ports is not None:
cfg_dict.update({'bond_member_ports':
topo_dict.bond_member_ports})
if topo_dict.bond_interface is not None:
cfg_dict.update({'bond_interface':
topo_dict.bond_interface})
return cfg_dict | Fills the extra configurations in the topology. | Below is the the instruction that describes the task:
### Input:
Fills the extra configurations in the topology.
### Response:
def _fill_topology_cfg(self, topo_dict):
"""Fills the extra configurations in the topology. """
cfg_dict = {}
if topo_dict.bond_member_ports is not None:
cfg_dict.update({'bond_member_ports':
topo_dict.bond_member_ports})
if topo_dict.bond_interface is not None:
cfg_dict.update({'bond_interface':
topo_dict.bond_interface})
return cfg_dict |
def export(self, top=True):
"""Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non-list objects should be exported with top=True;
all list objects that are embedded as fields in list objects
should be exported with `top`=False
Returns:
str: The objects string representation
"""
out = []
if top:
out.append(self._internal_name)
out.append(self._to_str(self.city))
out.append(self._to_str(self.state_province_region))
out.append(self._to_str(self.country))
out.append(self._to_str(self.source))
out.append(self._to_str(self.wmo))
out.append(self._to_str(self.latitude))
out.append(self._to_str(self.longitude))
out.append(self._to_str(self.timezone))
out.append(self._to_str(self.elevation))
return ",".join(out) | Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non-list objects should be exported with top=True;
all list objects that are embedded as fields in list objects
should be exported with `top`=False
Returns:
str: The objects string representation | Below is the the instruction that describes the task:
### Input:
Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non-list objects should be exported with top=True;
all list objects that are embedded as fields in list objects
should be exported with `top`=False
Returns:
str: The objects string representation
### Response:
def export(self, top=True):
"""Exports object to its string representation.
Args:
top (bool): if True appends `internal_name` before values.
All non-list objects should be exported with top=True;
all list objects that are embedded as fields in list objects
should be exported with `top`=False
Returns:
str: The objects string representation
"""
out = []
if top:
out.append(self._internal_name)
out.append(self._to_str(self.city))
out.append(self._to_str(self.state_province_region))
out.append(self._to_str(self.country))
out.append(self._to_str(self.source))
out.append(self._to_str(self.wmo))
out.append(self._to_str(self.latitude))
out.append(self._to_str(self.longitude))
out.append(self._to_str(self.timezone))
out.append(self._to_str(self.elevation))
return ",".join(out) |
def _load_dx(self, filename):
"""Initializes Grid from a OpenDX file."""
dx = OpenDX.field(0)
dx.read(filename)
grid, edges = dx.histogramdd()
self.__init__(grid=grid, edges=edges, metadata=self.metadata) | Initializes Grid from a OpenDX file. | Below is the the instruction that describes the task:
### Input:
Initializes Grid from a OpenDX file.
### Response:
def _load_dx(self, filename):
"""Initializes Grid from a OpenDX file."""
dx = OpenDX.field(0)
dx.read(filename)
grid, edges = dx.histogramdd()
self.__init__(grid=grid, edges=edges, metadata=self.metadata) |
def guard_cancel(analysis_request):
"""Returns whether 'cancel' transition can be performed or not. Returns
True only if all analyses are in "unassigned" status
"""
# Ask to partitions
for partition in analysis_request.getDescendants(all_descendants=False):
if not isTransitionAllowed(partition, "cancel"):
return False
# Look through analyses. We've checked the partitions already, so there is
# no need to look through analyses from partitions again, but through the
# analyses directly bound to the current Analysis Request.
cancellable_states = ["unassigned", "registered"]
for analysis in analysis_request.objectValues("Analysis"):
if api.get_workflow_status_of(analysis) not in cancellable_states:
return False
return True | Returns whether 'cancel' transition can be performed or not. Returns
True only if all analyses are in "unassigned" status | Below is the the instruction that describes the task:
### Input:
Returns whether 'cancel' transition can be performed or not. Returns
True only if all analyses are in "unassigned" status
### Response:
def guard_cancel(analysis_request):
"""Returns whether 'cancel' transition can be performed or not. Returns
True only if all analyses are in "unassigned" status
"""
# Ask to partitions
for partition in analysis_request.getDescendants(all_descendants=False):
if not isTransitionAllowed(partition, "cancel"):
return False
# Look through analyses. We've checked the partitions already, so there is
# no need to look through analyses from partitions again, but through the
# analyses directly bound to the current Analysis Request.
cancellable_states = ["unassigned", "registered"]
for analysis in analysis_request.objectValues("Analysis"):
if api.get_workflow_status_of(analysis) not in cancellable_states:
return False
return True |
def apply(
self,
docs=None,
split=0,
train=False,
lfs=None,
clear=True,
parallelism=None,
progress_bar=True,
):
"""Apply the labels of the specified candidates based on the provided LFs.
:param docs: If provided, apply the LFs to all the candidates in these
documents.
:param split: If docs is None, apply the LFs to the candidates in this
particular split.
:type split: int
:param train: Whether or not to update the global key set of labels and
the labels of candidates.
:type train: bool
:param lfs: A list of lists of labeling functions to apply. Each list
should correspond with the candidate_classes used to initialize the
Labeler.
:type lfs: list of lists
:param clear: Whether or not to clear the labels table before applying
these LFs.
:type clear: bool
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the Labeler if
it is provided.
:type parallelism: int
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
:type progress_bar: bool
:raises ValueError: If labeling functions are not provided for each
candidate class.
"""
if lfs is None:
raise ValueError("Please provide a list of labeling functions.")
if len(lfs) != len(self.candidate_classes):
raise ValueError("Please provide LFs for each candidate class.")
self.lfs = lfs
if docs:
# Call apply on the specified docs for all splits
split = ALL_SPLITS
super(Labeler, self).apply(
docs,
split=split,
train=train,
lfs=self.lfs,
clear=clear,
parallelism=parallelism,
progress_bar=progress_bar,
)
# Needed to sync the bulk operations
self.session.commit()
else:
# Only grab the docs containing candidates from the given split.
split_docs = get_docs_from_split(
self.session, self.candidate_classes, split
)
super(Labeler, self).apply(
split_docs,
split=split,
train=train,
lfs=self.lfs,
clear=clear,
parallelism=parallelism,
progress_bar=progress_bar,
)
# Needed to sync the bulk operations
self.session.commit() | Apply the labels of the specified candidates based on the provided LFs.
:param docs: If provided, apply the LFs to all the candidates in these
documents.
:param split: If docs is None, apply the LFs to the candidates in this
particular split.
:type split: int
:param train: Whether or not to update the global key set of labels and
the labels of candidates.
:type train: bool
:param lfs: A list of lists of labeling functions to apply. Each list
should correspond with the candidate_classes used to initialize the
Labeler.
:type lfs: list of lists
:param clear: Whether or not to clear the labels table before applying
these LFs.
:type clear: bool
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the Labeler if
it is provided.
:type parallelism: int
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
:type progress_bar: bool
:raises ValueError: If labeling functions are not provided for each
candidate class. | Below is the the instruction that describes the task:
### Input:
Apply the labels of the specified candidates based on the provided LFs.
:param docs: If provided, apply the LFs to all the candidates in these
documents.
:param split: If docs is None, apply the LFs to the candidates in this
particular split.
:type split: int
:param train: Whether or not to update the global key set of labels and
the labels of candidates.
:type train: bool
:param lfs: A list of lists of labeling functions to apply. Each list
should correspond with the candidate_classes used to initialize the
Labeler.
:type lfs: list of lists
:param clear: Whether or not to clear the labels table before applying
these LFs.
:type clear: bool
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the Labeler if
it is provided.
:type parallelism: int
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
:type progress_bar: bool
:raises ValueError: If labeling functions are not provided for each
candidate class.
### Response:
def apply(
self,
docs=None,
split=0,
train=False,
lfs=None,
clear=True,
parallelism=None,
progress_bar=True,
):
"""Apply the labels of the specified candidates based on the provided LFs.
:param docs: If provided, apply the LFs to all the candidates in these
documents.
:param split: If docs is None, apply the LFs to the candidates in this
particular split.
:type split: int
:param train: Whether or not to update the global key set of labels and
the labels of candidates.
:type train: bool
:param lfs: A list of lists of labeling functions to apply. Each list
should correspond with the candidate_classes used to initialize the
Labeler.
:type lfs: list of lists
:param clear: Whether or not to clear the labels table before applying
these LFs.
:type clear: bool
:param parallelism: How many threads to use for extraction. This will
override the parallelism value used to initialize the Labeler if
it is provided.
:type parallelism: int
:param progress_bar: Whether or not to display a progress bar. The
progress bar is measured per document.
:type progress_bar: bool
:raises ValueError: If labeling functions are not provided for each
candidate class.
"""
if lfs is None:
raise ValueError("Please provide a list of labeling functions.")
if len(lfs) != len(self.candidate_classes):
raise ValueError("Please provide LFs for each candidate class.")
self.lfs = lfs
if docs:
# Call apply on the specified docs for all splits
split = ALL_SPLITS
super(Labeler, self).apply(
docs,
split=split,
train=train,
lfs=self.lfs,
clear=clear,
parallelism=parallelism,
progress_bar=progress_bar,
)
# Needed to sync the bulk operations
self.session.commit()
else:
# Only grab the docs containing candidates from the given split.
split_docs = get_docs_from_split(
self.session, self.candidate_classes, split
)
super(Labeler, self).apply(
split_docs,
split=split,
train=train,
lfs=self.lfs,
clear=clear,
parallelism=parallelism,
progress_bar=progress_bar,
)
# Needed to sync the bulk operations
self.session.commit() |
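A stripped-down sketch of what applying labeling functions means, without Fonduer's session, splits or parallelism: each LF maps a candidate to a label, and the results form one row per candidate (the candidates and LFs below are made up):
import numpy as np
def lf_contains_euro(c):
    return 1 if "€" in c["text"] else 0
def lf_short_span(c):
    return 1 if len(c["text"]) < 20 else -1
candidates = [{"text": "price: €10"}, {"text": "a very long unrelated sentence about nothing"}]
lfs = [lf_contains_euro, lf_short_span]
L = np.array([[lf(c) for lf in lfs] for c in candidates])
print(L)   # rows = candidates, columns = labeling functions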
def run(self, steps=None):
"""Run threaded code in machine.
Args:
steps: If specified, run that many instructions before
stopping.
"""
try:
while self.instruction_pointer < len(self.code):
self.step()
if steps is not None:
steps -= 1
if steps == 0:
break
except StopIteration:
pass
except EOFError:
pass
return self | Run threaded code in machine.
Args:
steps: If specified, run that many instructions before
stopping. | Below is the the instruction that describes the task:
### Input:
Run threaded code in machine.
Args:
steps: If specified, run that many instructions before
stopping.
### Response:
def run(self, steps=None):
"""Run threaded code in machine.
Args:
steps: If specified, run that many instructions before
stopping.
"""
try:
while self.instruction_pointer < len(self.code):
self.step()
if steps is not None:
steps -= 1
if steps == 0:
break
except StopIteration:
pass
except EOFError:
pass
return self |
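A toy machine showing the same control flow in isolation: execute until the code runs out or the optional step budget is exhausted (the instruction set is invented for the example):
class ToyMachine:
    def __init__(self, code):
        self.code = code
        self.instruction_pointer = 0
        self.accumulator = 0
    def step(self):
        op, arg = self.code[self.instruction_pointer]
        if op == "add":
            self.accumulator += arg
        self.instruction_pointer += 1
    def run(self, steps=None):
        while self.instruction_pointer < len(self.code):
            self.step()
            if steps is not None:
                steps -= 1
                if steps == 0:
                    break
        return self
m = ToyMachine([("add", 1)] * 5).run(steps=3)
print(m.accumulator, m.instruction_pointer)   # 3 3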
def cos_r(self, N=None): # percent=0.9
"""Return the squared cosines for each row."""
if not hasattr(self, 'F') or self.F.shape[1] < self.rank:
self.fs_r(N=self.rank) # generate F
self.dr = norm(self.F, axis=1)**2
# cheaper than diag(self.F.dot(self.F.T))?
return apply_along_axis(lambda _: _/self.dr, 0, self.F[:, :N]**2) | Return the squared cosines for each row. | Below is the the instruction that describes the task:
### Input:
Return the squared cosines for each row.
### Response:
def cos_r(self, N=None): # percent=0.9
"""Return the squared cosines for each row."""
if not hasattr(self, 'F') or self.F.shape[1] < self.rank:
self.fs_r(N=self.rank) # generate F
self.dr = norm(self.F, axis=1)**2
# cheaper than diag(self.F.dot(self.F.T))?
return apply_along_axis(lambda _: _/self.dr, 0, self.F[:, :N]**2) |
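A numeric sketch of the squared-cosine computation on a made-up factor-score matrix F: each squared entry is divided by its row's squared norm, so the rows sum to one when every factor is kept (np.linalg.norm stands in for the norm used above):
import numpy as np
F = np.array([[2.0, 1.0, 0.5],
              [0.3, 0.4, 0.1]])
row_norm_sq = np.linalg.norm(F, axis=1) ** 2
cos2 = F ** 2 / row_norm_sq[:, None]
print(cos2)
print(cos2.sum(axis=1))   # [1. 1.]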
def _retrieve_page(self, page_index):
"""Returns the node of matches to be processed"""
params = self._get_params()
params["page"] = str(page_index)
doc = self._request(self._ws_prefix + ".search", True, params)
return doc.getElementsByTagName(self._ws_prefix + "matches")[0] | Returns the node of matches to be processed | Below is the the instruction that describes the task:
### Input:
Returns the node of matches to be processed
### Response:
def _retrieve_page(self, page_index):
"""Returns the node of matches to be processed"""
params = self._get_params()
params["page"] = str(page_index)
doc = self._request(self._ws_prefix + ".search", True, params)
return doc.getElementsByTagName(self._ws_prefix + "matches")[0] |
def create_sample_input_files(template_filename,
database_filename,
config_filename):
"""Create sample template email and database."""
print("Creating sample template email {}".format(template_filename))
if os.path.exists(template_filename):
print("Error: file exists: " + template_filename)
sys.exit(1)
with io.open(template_filename, "w") as template_file:
template_file.write(
u"TO: {{email}}\n"
u"SUBJECT: Testing mailmerge\n"
u"FROM: My Self <[email protected]>\n"
u"\n"
u"Hi, {{name}},\n"
u"\n"
u"Your number is {{number}}.\n"
)
print("Creating sample database {}".format(database_filename))
if os.path.exists(database_filename):
print("Error: file exists: " + database_filename)
sys.exit(1)
with io.open(database_filename, "w") as database_file:
database_file.write(
u'email,name,number\n'
u'[email protected],"Myself",17\n'
u'[email protected],"Bob",42\n'
)
print("Creating sample config file {}".format(config_filename))
if os.path.exists(config_filename):
print("Error: file exists: " + config_filename)
sys.exit(1)
with io.open(config_filename, "w") as config_file:
config_file.write(
u"# Example: GMail\n"
u"[smtp_server]\n"
u"host = smtp.gmail.com\n"
u"port = 465\n"
u"security = SSL/TLS\n"
u"username = YOUR_USERNAME_HERE\n"
u"#\n"
u"# Example: Wide open\n"
u"# [smtp_server]\n"
u"# host = open-smtp.example.com\n"
u"# port = 25\n"
u"# security = Never\n"
u"# username = None\n"
u"#\n"
u"# Example: University of Michigan\n"
u"# [smtp_server]\n"
u"# host = smtp.mail.umich.edu\n"
u"# port = 465\n"
u"# security = SSL/TLS\n"
u"# username = YOUR_USERNAME_HERE\n"
u"#\n"
u"# Example: University of Michigan EECS Dept., with STARTTLS security\n" # noqa: E501
u"# [smtp_server]\n"
u"# host = newman.eecs.umich.edu\n"
u"# port = 25\n"
u"# security = STARTTLS\n"
u"# username = YOUR_USERNAME_HERE\n"
u"#\n"
u"# Example: University of Michigan EECS Dept., with no encryption\n" # noqa: E501
u"# [smtp_server]\n"
u"# host = newman.eecs.umich.edu\n"
u"# port = 25\n"
u"# security = Never\n"
u"# username = YOUR_USERNAME_HERE\n"
)
print("Edit these files, and then run mailmerge again") | Create sample template email and database. | Below is the the instruction that describes the task:
### Input:
Create sample template email and database.
### Response:
def create_sample_input_files(template_filename,
database_filename,
config_filename):
"""Create sample template email and database."""
print("Creating sample template email {}".format(template_filename))
if os.path.exists(template_filename):
print("Error: file exists: " + template_filename)
sys.exit(1)
with io.open(template_filename, "w") as template_file:
template_file.write(
u"TO: {{email}}\n"
u"SUBJECT: Testing mailmerge\n"
u"FROM: My Self <[email protected]>\n"
u"\n"
u"Hi, {{name}},\n"
u"\n"
u"Your number is {{number}}.\n"
)
print("Creating sample database {}".format(database_filename))
if os.path.exists(database_filename):
print("Error: file exists: " + database_filename)
sys.exit(1)
with io.open(database_filename, "w") as database_file:
database_file.write(
u'email,name,number\n'
u'[email protected],"Myself",17\n'
u'[email protected],"Bob",42\n'
)
print("Creating sample config file {}".format(config_filename))
if os.path.exists(config_filename):
print("Error: file exists: " + config_filename)
sys.exit(1)
with io.open(config_filename, "w") as config_file:
config_file.write(
u"# Example: GMail\n"
u"[smtp_server]\n"
u"host = smtp.gmail.com\n"
u"port = 465\n"
u"security = SSL/TLS\n"
u"username = YOUR_USERNAME_HERE\n"
u"#\n"
u"# Example: Wide open\n"
u"# [smtp_server]\n"
u"# host = open-smtp.example.com\n"
u"# port = 25\n"
u"# security = Never\n"
u"# username = None\n"
u"#\n"
u"# Example: University of Michigan\n"
u"# [smtp_server]\n"
u"# host = smtp.mail.umich.edu\n"
u"# port = 465\n"
u"# security = SSL/TLS\n"
u"# username = YOUR_USERNAME_HERE\n"
u"#\n"
u"# Example: University of Michigan EECS Dept., with STARTTLS security\n" # noqa: E501
u"# [smtp_server]\n"
u"# host = newman.eecs.umich.edu\n"
u"# port = 25\n"
u"# security = STARTTLS\n"
u"# username = YOUR_USERNAME_HERE\n"
u"#\n"
u"# Example: University of Michigan EECS Dept., with no encryption\n" # noqa: E501
u"# [smtp_server]\n"
u"# host = newman.eecs.umich.edu\n"
u"# port = 25\n"
u"# security = Never\n"
u"# username = YOUR_USERNAME_HERE\n"
)
print("Edit these files, and then run mailmerge again") |
def gmres_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern, maxiter,
tol, weighting='local', Cpt_params=None):
"""Use GMRES to smooth T by solving A T = 0, subject to nullspace and sparsity constraints.
Parameters
----------
A : csr_matrix, bsr_matrix
SPD sparse NxN matrix
Should be at least nonsymmetric or indefinite
T : bsr_matrix
Tentative prolongator, a NxM sparse matrix (M < N).
This is initial guess for the equation A T = 0.
Assumed that T B_c = B_f
B : array
Near-nullspace modes for coarse grid, i.e., B_c.
Has shape (M,k) where k is the number of coarse candidate vectors.
BtBinv : array
3 dimensional array such that,
BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted
to the neighborhood (in the matrix graph) of dof of i.
Sparsity_Pattern : csr_matrix, bsr_matrix
Sparse NxM matrix
This is the sparsity pattern constraint to enforce on the
eventual prolongator
maxiter : int
maximum number of iterations
tol : float
residual tolerance for A T = 0
weighting : string
'block', 'diagonal' or 'local' construction of the diagonal
preconditioning
Cpt_params : tuple
Tuple of the form (bool, dict). If the Cpt_params[0] = False, then
the standard SA prolongation smoothing is carried out. If True, then
dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
the injection matrix for the Cpts, (2) I_F: an identity matrix
for only the F-points (i.e. I, but with zero rows and columns for
C-points) and I_C: the C-point analogue to I_F.
Returns
-------
T : bsr_matrix
Smoothed prolongator using GMRES to solve A T = 0,
subject to the constraints, T B_c = B_f, and T has no nonzero
outside of the sparsity pattern in Sparsity_Pattern.
See Also
--------
The principal calling routine,
pyamg.aggregation.smooth.energy_prolongation_smoother
"""
# For non-SPD system, apply GMRES with Diagonal Preconditioning
# Preallocate space for new search directions
uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
AV = sparse.bsr_matrix((uones, Sparsity_Pattern.indices,
Sparsity_Pattern.indptr),
shape=(Sparsity_Pattern.shape))
# Preallocate for Givens Rotations, Hessenberg matrix and Krylov Space
xtype = sparse.sputils.upcast(A.dtype, T.dtype, B.dtype)
Q = [] # Givens Rotations
V = [] # Krylov Space
# vs = [] # vs store the pointers to each column of V for speed
# Upper Hessenberg matrix, converted to upper tri with Givens Rots
H = np.zeros((maxiter+1, maxiter+1), dtype=xtype)
# GMRES will be run with diagonal preconditioning
if weighting == 'diagonal':
Dinv = get_diagonal(A, norm_eq=False, inv=True)
elif weighting == 'block':
Dinv = get_block_diag(A, blocksize=A.blocksize[0], inv_flag=True)
Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]),
np.arange(Dinv.shape[0]+1)),
shape=A.shape)
elif weighting == 'local':
# Based on Gershgorin estimate
D = np.abs(A)*np.ones((A.shape[0], 1), dtype=A.dtype)
Dinv = np.zeros_like(D)
Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
else:
raise ValueError('weighting value is invalid')
# Calculate initial residual
# Equivalent to R = -A*T; R = R.multiply(Sparsity_Pattern)
# with the added constraint that R has an explicit 0 wherever
# R is 0 and Sparsity_Pattern is not
uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
R = sparse.bsr_matrix((uones, Sparsity_Pattern.indices,
Sparsity_Pattern.indptr),
shape=(Sparsity_Pattern.shape))
pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr, A.indices,
np.ravel(A.data),
T.indptr, T.indices,
np.ravel(T.data),
R.indptr, R.indices,
np.ravel(R.data),
int(T.shape[0]/T.blocksize[0]),
int(T.shape[1]/T.blocksize[1]),
A.blocksize[0], A.blocksize[1],
T.blocksize[1])
R.data *= -1.0
# Apply diagonal preconditioner
if weighting == 'local' or weighting == 'diagonal':
R = scale_rows(R, Dinv)
else:
R = Dinv*R
# Enforce R*B = 0
Satisfy_Constraints(R, B, BtBinv)
if R.nnz == 0:
print("Error in sa_energy_min(..). Initial R no nonzeros on a level. \
Returning tentative prolongator\n")
return T
# This is the RHS vector for the problem in the Krylov Space
normr = np.sqrt((R.data.conjugate()*R.data).sum())
g = np.zeros((maxiter+1,), dtype=xtype)
g[0] = normr
# First Krylov vector
# V[0] = r/normr
if normr > 0.0:
V.append((1.0/normr)*R)
# print "Energy Minimization of Prolongator \
# --- Iteration 0 --- r = " + str(normr)
i = -1
# vect = np.ravel((A*T).data)
# print "Iteration " + str(i+1) + " \
# Energy = %1.3e"%np.sqrt( (vect.conjugate()*vect).sum() )
# print "Iteration " + str(i+1) + " Normr %1.3e"%normr
while i < maxiter-1 and normr > tol:
i = i+1
# Calculate new search direction
# Equivalent to: AV = A*V; AV = AV.multiply(Sparsity_Pattern)
# with the added constraint that explicit zeros are in AP wherever
# AP = 0 and Sparsity_Pattern does not
AV.data[:] = 0.0
pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr, A.indices,
np.ravel(A.data),
V[i].indptr, V[i].indices,
np.ravel(V[i].data),
AV.indptr, AV.indices,
np.ravel(AV.data),
int(T.shape[0]/T.blocksize[0]),
int(T.shape[1]/T.blocksize[1]),
A.blocksize[0], A.blocksize[1],
T.blocksize[1])
if weighting == 'local' or weighting == 'diagonal':
AV = scale_rows(AV, Dinv)
else:
AV = Dinv*AV
# Enforce AV*B = 0
Satisfy_Constraints(AV, B, BtBinv)
V.append(AV.copy())
# Modified Gram-Schmidt
for j in range(i+1):
# Frobenius inner-product
H[j, i] = (V[j].conjugate().multiply(V[i+1])).sum()
V[i+1] = V[i+1] - H[j, i]*V[j]
# Frobenius Norm
H[i+1, i] = np.sqrt((V[i+1].data.conjugate()*V[i+1].data).sum())
# Check for breakdown
if H[i+1, i] != 0.0:
V[i+1] = (1.0 / H[i+1, i]) * V[i+1]
# Apply previous Givens rotations to H
if i > 0:
apply_givens(Q, H[:, i], i)
# Calculate and apply next complex-valued Givens Rotation
if H[i+1, i] != 0:
h1 = H[i, i]
h2 = H[i+1, i]
h1_mag = np.abs(h1)
h2_mag = np.abs(h2)
if h1_mag < h2_mag:
mu = h1/h2
tau = np.conjugate(mu)/np.abs(mu)
else:
mu = h2/h1
tau = mu/np.abs(mu)
denom = np.sqrt(h1_mag**2 + h2_mag**2)
c = h1_mag/denom
s = h2_mag*tau/denom
Qblock = np.array([[c, np.conjugate(s)], [-s, c]], dtype=xtype)
Q.append(Qblock)
# Apply Givens Rotation to g,
# the RHS for the linear system in the Krylov Subspace.
g[i:i+2] = sp.dot(Qblock, g[i:i+2])
# Apply effect of Givens Rotation to H
H[i, i] = sp.dot(Qblock[0, :], H[i:i+2, i])
H[i+1, i] = 0.0
normr = np.abs(g[i+1])
# print "Iteration " + str(i+1) + " Normr %1.3e"%normr
# End while loop
# Find best update to x in Krylov Space, V. Solve (i x i) system.
if i != -1:
y = la.solve(H[0:i+1, 0:i+1], g[0:i+1])
for j in range(i+1):
T = T + y[j]*V[j]
# vect = np.ravel((A*T).data)
# print "Final Iteration " + str(i) + " \
# Energy = %1.3e"%np.sqrt( (vect.conjugate()*vect).sum() )
# Ensure identity at C-pts
if Cpt_params[0]:
T = Cpt_params[1]['I_F']*T + Cpt_params[1]['P_I']
return T | Use GMRES to smooth T by solving A T = 0, subject to nullspace and sparsity constraints.
Parameters
----------
A : csr_matrix, bsr_matrix
SPD sparse NxN matrix
Should be at least nonsymmetric or indefinite
T : bsr_matrix
Tentative prolongator, a NxM sparse matrix (M < N).
This is initial guess for the equation A T = 0.
Assumed that T B_c = B_f
B : array
Near-nullspace modes for coarse grid, i.e., B_c.
Has shape (M,k) where k is the number of coarse candidate vectors.
BtBinv : array
3 dimensional array such that,
BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted
to the neighborhood (in the matrix graph) of dof of i.
Sparsity_Pattern : csr_matrix, bsr_matrix
Sparse NxM matrix
This is the sparsity pattern constraint to enforce on the
eventual prolongator
maxiter : int
maximum number of iterations
tol : float
residual tolerance for A T = 0
weighting : string
'block', 'diagonal' or 'local' construction of the diagonal
preconditioning
Cpt_params : tuple
Tuple of the form (bool, dict). If the Cpt_params[0] = False, then
the standard SA prolongation smoothing is carried out. If True, then
dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
the injection matrix for the Cpts, (2) I_F: an identity matrix
for only the F-points (i.e. I, but with zero rows and columns for
C-points) and I_C: the C-point analogue to I_F.
Returns
-------
T : bsr_matrix
Smoothed prolongator using GMRES to solve A T = 0,
subject to the constraints, T B_c = B_f, and T has no nonzero
outside of the sparsity pattern in Sparsity_Pattern.
See Also
--------
The principal calling routine,
pyamg.aggregation.smooth.energy_prolongation_smoother | Below is the the instruction that describes the task:
### Input:
Use GMRES to smooth T by solving A T = 0, subject to nullspace and sparsity constraints.
Parameters
----------
A : csr_matrix, bsr_matrix
SPD sparse NxN matrix
Should be at least nonsymmetric or indefinite
T : bsr_matrix
Tentative prolongator, a NxM sparse matrix (M < N).
This is initial guess for the equation A T = 0.
Assumed that T B_c = B_f
B : array
Near-nullspace modes for coarse grid, i.e., B_c.
Has shape (M,k) where k is the number of coarse candidate vectors.
BtBinv : array
3 dimensional array such that,
BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted
to the neighborhood (in the matrix graph) of dof of i.
Sparsity_Pattern : csr_matrix, bsr_matrix
Sparse NxM matrix
This is the sparsity pattern constraint to enforce on the
eventual prolongator
maxiter : int
maximum number of iterations
tol : float
residual tolerance for A T = 0
weighting : string
'block', 'diagonal' or 'local' construction of the diagonal
preconditioning
Cpt_params : tuple
Tuple of the form (bool, dict). If the Cpt_params[0] = False, then
the standard SA prolongation smoothing is carried out. If True, then
dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
the injection matrix for the Cpts, (2) I_F: an identity matrix
for only the F-points (i.e. I, but with zero rows and columns for
C-points) and I_C: the C-point analogue to I_F.
Returns
-------
T : bsr_matrix
Smoothed prolongator using GMRES to solve A T = 0,
subject to the constraints, T B_c = B_f, and T has no nonzero
outside of the sparsity pattern in Sparsity_Pattern.
See Also
--------
The principal calling routine,
pyamg.aggregation.smooth.energy_prolongation_smoother
### Response:
def gmres_prolongation_smoothing(A, T, B, BtBinv, Sparsity_Pattern, maxiter,
tol, weighting='local', Cpt_params=None):
"""Use GMRES to smooth T by solving A T = 0, subject to nullspace and sparsity constraints.
Parameters
----------
A : csr_matrix, bsr_matrix
SPD sparse NxN matrix
Should be at least nonsymmetric or indefinite
T : bsr_matrix
Tentative prolongator, a NxM sparse matrix (M < N).
This is initial guess for the equation A T = 0.
Assumed that T B_c = B_f
B : array
Near-nullspace modes for coarse grid, i.e., B_c.
Has shape (M,k) where k is the number of coarse candidate vectors.
BtBinv : array
3 dimensional array such that,
BtBinv[i] = pinv(B_i.H Bi), and B_i is B restricted
to the neighborhood (in the matrix graph) of dof of i.
Sparsity_Pattern : csr_matrix, bsr_matrix
Sparse NxM matrix
This is the sparsity pattern constraint to enforce on the
eventual prolongator
maxiter : int
maximum number of iterations
tol : float
residual tolerance for A T = 0
weighting : string
'block', 'diagonal' or 'local' construction of the diagonal
preconditioning
Cpt_params : tuple
Tuple of the form (bool, dict). If the Cpt_params[0] = False, then
the standard SA prolongation smoothing is carried out. If True, then
dict must be a dictionary of parameters containing, (1) P_I: P_I.T is
the injection matrix for the Cpts, (2) I_F: an identity matrix
for only the F-points (i.e. I, but with zero rows and columns for
C-points) and I_C: the C-point analogue to I_F.
Returns
-------
T : bsr_matrix
Smoothed prolongator using GMRES to solve A T = 0,
subject to the constraints, T B_c = B_f, and T has no nonzero
outside of the sparsity pattern in Sparsity_Pattern.
See Also
--------
The principal calling routine,
pyamg.aggregation.smooth.energy_prolongation_smoother
"""
# For non-SPD system, apply GMRES with Diagonal Preconditioning
# Preallocate space for new search directions
uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
AV = sparse.bsr_matrix((uones, Sparsity_Pattern.indices,
Sparsity_Pattern.indptr),
shape=(Sparsity_Pattern.shape))
# Preallocate for Givens Rotations, Hessenberg matrix and Krylov Space
xtype = sparse.sputils.upcast(A.dtype, T.dtype, B.dtype)
Q = [] # Givens Rotations
V = [] # Krylov Space
# vs = [] # vs store the pointers to each column of V for speed
# Upper Hessenberg matrix, converted to upper tri with Givens Rots
H = np.zeros((maxiter+1, maxiter+1), dtype=xtype)
# GMRES will be run with diagonal preconditioning
if weighting == 'diagonal':
Dinv = get_diagonal(A, norm_eq=False, inv=True)
elif weighting == 'block':
Dinv = get_block_diag(A, blocksize=A.blocksize[0], inv_flag=True)
Dinv = sparse.bsr_matrix((Dinv, np.arange(Dinv.shape[0]),
np.arange(Dinv.shape[0]+1)),
shape=A.shape)
elif weighting == 'local':
# Based on Gershgorin estimate
D = np.abs(A)*np.ones((A.shape[0], 1), dtype=A.dtype)
Dinv = np.zeros_like(D)
Dinv[D != 0] = 1.0 / np.abs(D[D != 0])
else:
raise ValueError('weighting value is invalid')
# Calculate initial residual
# Equivalent to R = -A*T; R = R.multiply(Sparsity_Pattern)
# with the added constraint that R has an explicit 0 wherever
# R is 0 and Sparsity_Pattern is not
uones = np.zeros(Sparsity_Pattern.data.shape, dtype=T.dtype)
R = sparse.bsr_matrix((uones, Sparsity_Pattern.indices,
Sparsity_Pattern.indptr),
shape=(Sparsity_Pattern.shape))
pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr, A.indices,
np.ravel(A.data),
T.indptr, T.indices,
np.ravel(T.data),
R.indptr, R.indices,
np.ravel(R.data),
int(T.shape[0]/T.blocksize[0]),
int(T.shape[1]/T.blocksize[1]),
A.blocksize[0], A.blocksize[1],
T.blocksize[1])
R.data *= -1.0
# Apply diagonal preconditioner
if weighting == 'local' or weighting == 'diagonal':
R = scale_rows(R, Dinv)
else:
R = Dinv*R
# Enforce R*B = 0
Satisfy_Constraints(R, B, BtBinv)
if R.nnz == 0:
print("Error in sa_energy_min(..). Initial R no nonzeros on a level. \
Returning tentative prolongator\n")
return T
# This is the RHS vector for the problem in the Krylov Space
normr = np.sqrt((R.data.conjugate()*R.data).sum())
g = np.zeros((maxiter+1,), dtype=xtype)
g[0] = normr
# First Krylov vector
# V[0] = r/normr
if normr > 0.0:
V.append((1.0/normr)*R)
# print "Energy Minimization of Prolongator \
# --- Iteration 0 --- r = " + str(normr)
i = -1
# vect = np.ravel((A*T).data)
# print "Iteration " + str(i+1) + " \
# Energy = %1.3e"%np.sqrt( (vect.conjugate()*vect).sum() )
# print "Iteration " + str(i+1) + " Normr %1.3e"%normr
while i < maxiter-1 and normr > tol:
i = i+1
# Calculate new search direction
# Equivalent to: AV = A*V; AV = AV.multiply(Sparsity_Pattern)
# with the added constraint that explicit zeros are in AP wherever
# AP = 0 and Sparsity_Pattern does not
AV.data[:] = 0.0
pyamg.amg_core.incomplete_mat_mult_bsr(A.indptr, A.indices,
np.ravel(A.data),
V[i].indptr, V[i].indices,
np.ravel(V[i].data),
AV.indptr, AV.indices,
np.ravel(AV.data),
int(T.shape[0]/T.blocksize[0]),
int(T.shape[1]/T.blocksize[1]),
A.blocksize[0], A.blocksize[1],
T.blocksize[1])
if weighting == 'local' or weighting == 'diagonal':
AV = scale_rows(AV, Dinv)
else:
AV = Dinv*AV
# Enforce AV*B = 0
Satisfy_Constraints(AV, B, BtBinv)
V.append(AV.copy())
# Modified Gram-Schmidt
for j in range(i+1):
# Frobenius inner-product
H[j, i] = (V[j].conjugate().multiply(V[i+1])).sum()
V[i+1] = V[i+1] - H[j, i]*V[j]
# Frobenius Norm
H[i+1, i] = np.sqrt((V[i+1].data.conjugate()*V[i+1].data).sum())
# Check for breakdown
if H[i+1, i] != 0.0:
V[i+1] = (1.0 / H[i+1, i]) * V[i+1]
# Apply previous Givens rotations to H
if i > 0:
apply_givens(Q, H[:, i], i)
# Calculate and apply next complex-valued Givens Rotation
if H[i+1, i] != 0:
h1 = H[i, i]
h2 = H[i+1, i]
h1_mag = np.abs(h1)
h2_mag = np.abs(h2)
if h1_mag < h2_mag:
mu = h1/h2
tau = np.conjugate(mu)/np.abs(mu)
else:
mu = h2/h1
tau = mu/np.abs(mu)
denom = np.sqrt(h1_mag**2 + h2_mag**2)
c = h1_mag/denom
s = h2_mag*tau/denom
Qblock = np.array([[c, np.conjugate(s)], [-s, c]], dtype=xtype)
Q.append(Qblock)
# Apply Givens Rotation to g,
# the RHS for the linear system in the Krylov Subspace.
g[i:i+2] = sp.dot(Qblock, g[i:i+2])
# Apply effect of Givens Rotation to H
H[i, i] = sp.dot(Qblock[0, :], H[i:i+2, i])
H[i+1, i] = 0.0
normr = np.abs(g[i+1])
# print "Iteration " + str(i+1) + " Normr %1.3e"%normr
# End while loop
# Find best update to x in Krylov Space, V. Solve (i x i) system.
if i != -1:
y = la.solve(H[0:i+1, 0:i+1], g[0:i+1])
for j in range(i+1):
T = T + y[j]*V[j]
# vect = np.ravel((A*T).data)
# print "Final Iteration " + str(i) + " \
# Energy = %1.3e"%np.sqrt( (vect.conjugate()*vect).sum() )
# Ensure identity at C-pts
if Cpt_params[0]:
T = Cpt_params[1]['I_F']*T + Cpt_params[1]['P_I']
return T |
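A small numpy check of the complex-valued Givens rotation constructed inside the loop above: given a column pair (h1, h2), the 2x2 block rotates the subdiagonal entry to zero (the sample values are arbitrary):
import numpy as np
def givens_block(h1, h2):
    h1_mag, h2_mag = abs(h1), abs(h2)
    if h1_mag < h2_mag:
        mu = h1 / h2
        tau = np.conjugate(mu) / abs(mu)
    else:
        mu = h2 / h1
        tau = mu / abs(mu)
    denom = np.sqrt(h1_mag**2 + h2_mag**2)
    c = h1_mag / denom
    s = h2_mag * tau / denom
    return np.array([[c, np.conjugate(s)], [-s, c]])
h = np.array([1.0 + 2.0j, 3.0 - 1.0j])
Q = givens_block(h[0], h[1])
print(np.round(Q @ h, 12))   # second component is numerically zero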
def favorites_getPublicList(user_id, per_page='', page=''):
"""Returns list of Photo objects."""
method = 'flickr.favorites.getPublicList'
data = _doget(method, auth=False, user_id=user_id, per_page=per_page,\
page=page)
photos = []
if isinstance(data.rsp.photos.photo, list):
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
else:
photos = [_parse_photo(data.rsp.photos.photo)]
return photos | Returns list of Photo objects. | Below is the the instruction that describes the task:
### Input:
Returns list of Photo objects.
### Response:
def favorites_getPublicList(user_id, per_page='', page=''):
"""Returns list of Photo objects."""
method = 'flickr.favorites.getPublicList'
data = _doget(method, auth=False, user_id=user_id, per_page=per_page,\
page=page)
photos = []
if isinstance(data.rsp.photos.photo, list):
for photo in data.rsp.photos.photo:
photos.append(_parse_photo(photo))
else:
photos = [_parse_photo(data.rsp.photos.photo)]
return photos |
def _pretend_to_run(self, migration, method):
"""
Pretend to run the migration.
:param migration: The migration
:type migration: orator.migrations.migration.Migration
:param method: The method to execute
:type method: str
"""
self._note("")
names = []
for query in self._get_queries(migration, method):
name = migration.__class__.__name__
bindings = None
if isinstance(query, tuple):
query, bindings = query
query = highlight(query, SqlLexer(), CommandFormatter()).strip()
if bindings:
query = (query, bindings)
if name not in names:
self._note("[<info>{}</info>]".format(name))
names.append(name)
self._note(query) | Pretend to run the migration.
:param migration: The migration
:type migration: orator.migrations.migration.Migration
:param method: The method to execute
:type method: str | Below is the the instruction that describes the task:
### Input:
Pretend to run the migration.
:param migration: The migration
:type migration: orator.migrations.migration.Migration
:param method: The method to execute
:type method: str
### Response:
def _pretend_to_run(self, migration, method):
"""
Pretend to run the migration.
:param migration: The migration
:type migration: orator.migrations.migration.Migration
:param method: The method to execute
:type method: str
"""
self._note("")
names = []
for query in self._get_queries(migration, method):
name = migration.__class__.__name__
bindings = None
if isinstance(query, tuple):
query, bindings = query
query = highlight(query, SqlLexer(), CommandFormatter()).strip()
if bindings:
query = (query, bindings)
if name not in names:
self._note("[<info>{}</info>]".format(name))
names.append(name)
self._note(query) |
def save(self):
""" Save this node (and all its attributes) to config """
cfg_file = "/etc/nago/nago.ini"
config = ConfigParser.ConfigParser()
config.read(cfg_file)
result = {}
token = self.data.pop("token", self.token)
if token != self._original_token:
config.remove_section(self._original_token)
config.add_section(token)
if token not in config.sections():
config.add_section(token)
for key, value in self.data.items():
config.set(token, key, value)
for key, value in config.items(token):
if key not in self.data:
config.set(token, key, None)
with open(cfg_file, 'w') as f:
return config.write(f) | Save this node (and all its attributes) to config | Below is the the instruction that describes the task:
### Input:
Save this node (and all its attributes) to config
### Response:
def save(self):
""" Save this node (and all its attributes) to config """
cfg_file = "/etc/nago/nago.ini"
config = ConfigParser.ConfigParser()
config.read(cfg_file)
result = {}
token = self.data.pop("token", self.token)
if token != self._original_token:
config.remove_section(self._original_token)
config.add_section(token)
if token not in config.sections():
config.add_section(token)
for key, value in self.data.items():
config.set(token, key, value)
for key, value in config.items(token):
if key not in self.data:
config.set(token, key, None)
with open(cfg_file, 'w') as f:
return config.write(f) |
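A sketch of the same persistence pattern against an in-memory string instead of /etc/nago/nago.ini, using the Python 3 configparser module rather than the Python 2 ConfigParser import in the source (section and key names are made up):
import configparser, io
config = configparser.ConfigParser()
config.read_string("[old-token]\nhost = example.org\n")
old_token, new_token = "old-token", "new-token"
attributes = {"host": "example.org", "address": "10.0.0.1"}
if new_token != old_token:
    config.remove_section(old_token)
    config.add_section(new_token)
for key, value in attributes.items():
    config.set(new_token, key, value)
buf = io.StringIO()
config.write(buf)
print(buf.getvalue())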
def notify_callback(self, notify):
"""Process State from NOTIFY message."""
_LOGGER.debug(notify)
if notify.ip_address != self.stb_ip:
return
if notify.tune:
self._state = State.PLAYING_LIVE_TV
self.tune_src = notify.tune['@src']
try:
if notify.stopped:
self._state = State.STOPPED
elif notify.timeshift:
self._state = State.PLAYING_TIMESHIFT_TV
elif notify.recorded:
self._state = State.PLAYING_RECORDED_TV
except PyMediaroomError as e:
_LOGGER.debug("%s please report at https://github.com/dgomes/pymediaroom/issues", e)
else:
self._state = State.STANDBY
_LOGGER.debug("%s is %s", self.stb_ip, self._state)
return self._state | Process State from NOTIFY message. | Below is the the instruction that describes the task:
### Input:
Process State from NOTIFY message.
### Response:
def notify_callback(self, notify):
"""Process State from NOTIFY message."""
_LOGGER.debug(notify)
if notify.ip_address != self.stb_ip:
return
if notify.tune:
self._state = State.PLAYING_LIVE_TV
self.tune_src = notify.tune['@src']
try:
if notify.stopped:
self._state = State.STOPPED
elif notify.timeshift:
self._state = State.PLAYING_TIMESHIFT_TV
elif notify.recorded:
self._state = State.PLAYING_RECORDED_TV
except PyMediaroomError as e:
_LOGGER.debug("%s please report at https://github.com/dgomes/pymediaroom/issues", e)
else:
self._state = State.STANDBY
_LOGGER.debug("%s is %s", self.stb_ip, self._state)
return self._state |
def set_branching_model(self, project, repository, data):
"""
Set branching model
:param project:
:param repository:
:param data:
:return:
"""
url = 'rest/branch-utils/1.0/projects/{project}/repos/{repository}/branchmodel/configuration'.format(
project=project,
repository=repository)
return self.put(url, data=data) | Set branching model
:param project:
:param repository:
:param data:
:return: | Below is the the instruction that describes the task:
### Input:
Set branching model
:param project:
:param repository:
:param data:
:return:
### Response:
def set_branching_model(self, project, repository, data):
"""
Set branching model
:param project:
:param repository:
:param data:
:return:
"""
url = 'rest/branch-utils/1.0/projects/{project}/repos/{repository}/branchmodel/configuration'.format(
project=project,
repository=repository)
return self.put(url, data=data) |
def owned(self):
"""
Returns True if this key is locked by this lock, otherwise False.
"""
stored_token = self.redis.get(self.name)
# need to always compare bytes to bytes
# TODO: this can be simplified when the context manager is finished
if stored_token and not isinstance(stored_token, bytes):
encoder = self.redis.connection_pool.get_encoder()
stored_token = encoder.encode(stored_token)
return self.local.token is not None and \
stored_token == self.local.token | Returns True if this key is locked by this lock, otherwise False. | Below is the the instruction that describes the task:
### Input:
Returns True if this key is locked by this lock, otherwise False.
### Response:
def owned(self):
"""
Returns True if this key is locked by this lock, otherwise False.
"""
stored_token = self.redis.get(self.name)
# need to always compare bytes to bytes
# TODO: this can be simplified when the context manager is finished
if stored_token and not isinstance(stored_token, bytes):
encoder = self.redis.connection_pool.get_encoder()
stored_token = encoder.encode(stored_token)
return self.local.token is not None and \
stored_token == self.local.token |
def create_organisation(self, organisation_json):
'''
Create an Organisation object from a JSON object
Returns:
Organisation: The organisation from the given `organisation_json`.
'''
return trolly.organisation.Organisation(
trello_client=self,
organisation_id=organisation_json['id'],
name=organisation_json['name'],
data=organisation_json,
) | Create an Organisation object from a JSON object
Returns:
Organisation: The organisation from the given `organisation_json`. | Below is the the instruction that describes the task:
### Input:
Create an Organisation object from a JSON object
Returns:
Organisation: The organisation from the given `organisation_json`.
### Response:
def create_organisation(self, organisation_json):
'''
Create an Organisation object from a JSON object
Returns:
Organisation: The organisation from the given `organisation_json`.
'''
return trolly.organisation.Organisation(
trello_client=self,
organisation_id=organisation_json['id'],
name=organisation_json['name'],
data=organisation_json,
) |
def updatepLvlGrid(self):
'''
Update the grid of persistent income levels. Currently only works for
infinite horizon models (cycles=0) and lifecycle models (cycles=1). Not
clear what to do about cycles>1 because the distribution of persistent
income will be different within a period depending on how many cycles
have elapsed. This method uses a simulation approach to generate the
pLvlGrid at each period of the cycle, drawing on the initial distribution
of persistent income, the pLvlNextFuncs, and the attribute pLvlPctiles.
Parameters
----------
None
Returns
-------
None
'''
orig_time = self.time_flow
self.timeFwd()
LivPrbAll = np.array(self.LivPrb)
# Simulate the distribution of persistent income levels by t_cycle in a lifecycle model
if self.cycles == 1:
pLvlNow = drawLognormal(self.AgentCount,mu=self.pLvlInitMean,sigma=self.pLvlInitStd,seed=31382)
pLvlGrid = [] # empty list of time-varying persistent income grids
# Calculate distribution of persistent income in each period of lifecycle
for t in range(len(self.PermShkStd)):
if t > 0:
PermShkNow = drawDiscrete(N=self.AgentCount,P=self.PermShkDstn[t-1][0],X=self.PermShkDstn[t-1][1],exact_match=False,seed=t)
pLvlNow = self.pLvlNextFunc[t-1](pLvlNow)*PermShkNow
pLvlGrid.append(getPercentiles(pLvlNow,percentiles=self.pLvlPctiles))
# Calculate "stationary" distribution in infinite horizon (might vary across periods of cycle)
elif self.cycles == 0:
T_long = 1000 # Number of periods to simulate to get to "stationary" distribution
pLvlNow = drawLognormal(self.AgentCount,mu=self.pLvlInitMean,sigma=self.pLvlInitStd,seed=31382)
t_cycle = np.zeros(self.AgentCount,dtype=int)
for t in range(T_long):
LivPrb = LivPrbAll[t_cycle] # Determine who dies and replace them with newborns
draws = drawUniform(self.AgentCount,seed=t)
who_dies = draws > LivPrb
pLvlNow[who_dies] = drawLognormal(np.sum(who_dies),mu=self.pLvlInitMean,sigma=self.pLvlInitStd,seed=t+92615)
t_cycle[who_dies] = 0
for j in range(self.T_cycle): # Update persistent income
these = t_cycle == j
PermShkTemp = drawDiscrete(N=np.sum(these),P=self.PermShkDstn[j][0],X=self.PermShkDstn[j][1],exact_match=False,seed=t+13*j)
pLvlNow[these] = self.pLvlNextFunc[j](pLvlNow[these])*PermShkTemp
t_cycle = t_cycle + 1
t_cycle[t_cycle == self.T_cycle] = 0
# We now have a "long run stationary distribution", extract percentiles
pLvlGrid = [] # empty list of time-varying persistent income grids
for t in range(self.T_cycle):
these = t_cycle == t
pLvlGrid.append(getPercentiles(pLvlNow[these],percentiles=self.pLvlPctiles))
# Throw an error if cycles>1
else:
assert False, "Can only handle cycles=0 or cycles=1!"
# Store the result and add attribute to time_vary
self.pLvlGrid = pLvlGrid
self.addToTimeVary('pLvlGrid')
if not orig_time:
self.timeRev() | Update the grid of persistent income levels. Currently only works for
infinite horizon models (cycles=0) and lifecycle models (cycles=1). Not
clear what to do about cycles>1 because the distribution of persistent
income will be different within a period depending on how many cycles
have elapsed. This method uses a simulation approach to generate the
pLvlGrid at each period of the cycle, drawing on the initial distribution
of persistent income, the pLvlNextFuncs, and the attribute pLvlPctiles.
Parameters
----------
None
Returns
-------
None | Below is the the instruction that describes the task:
### Input:
Update the grid of persistent income levels. Currently only works for
infinite horizon models (cycles=0) and lifecycle models (cycles=1). Not
clear what to do about cycles>1 because the distribution of persistent
income will be different within a period depending on how many cycles
have elapsed. This method uses a simulation approach to generate the
pLvlGrid at each period of the cycle, drawing on the initial distribution
of persistent income, the pLvlNextFuncs, and the attribute pLvlPctiles.
Parameters
----------
None
Returns
-------
None
### Response:
def updatepLvlGrid(self):
'''
Update the grid of persistent income levels. Currently only works for
infinite horizon models (cycles=0) and lifecycle models (cycles=1). Not
clear what to do about cycles>1 because the distribution of persistent
income will be different within a period depending on how many cycles
have elapsed. This method uses a simulation approach to generate the
pLvlGrid at each period of the cycle, drawing on the initial distribution
of persistent income, the pLvlNextFuncs, and the attribute pLvlPctiles.
Parameters
----------
None
Returns
-------
None
'''
orig_time = self.time_flow
self.timeFwd()
LivPrbAll = np.array(self.LivPrb)
# Simulate the distribution of persistent income levels by t_cycle in a lifecycle model
if self.cycles == 1:
pLvlNow = drawLognormal(self.AgentCount,mu=self.pLvlInitMean,sigma=self.pLvlInitStd,seed=31382)
pLvlGrid = [] # empty list of time-varying persistent income grids
# Calculate distribution of persistent income in each period of lifecycle
for t in range(len(self.PermShkStd)):
if t > 0:
PermShkNow = drawDiscrete(N=self.AgentCount,P=self.PermShkDstn[t-1][0],X=self.PermShkDstn[t-1][1],exact_match=False,seed=t)
pLvlNow = self.pLvlNextFunc[t-1](pLvlNow)*PermShkNow
pLvlGrid.append(getPercentiles(pLvlNow,percentiles=self.pLvlPctiles))
# Calculate "stationary" distribution in infinite horizon (might vary across periods of cycle)
elif self.cycles == 0:
T_long = 1000 # Number of periods to simulate to get to "stationary" distribution
pLvlNow = drawLognormal(self.AgentCount,mu=self.pLvlInitMean,sigma=self.pLvlInitStd,seed=31382)
t_cycle = np.zeros(self.AgentCount,dtype=int)
for t in range(T_long):
LivPrb = LivPrbAll[t_cycle] # Determine who dies and replace them with newborns
draws = drawUniform(self.AgentCount,seed=t)
who_dies = draws > LivPrb
pLvlNow[who_dies] = drawLognormal(np.sum(who_dies),mu=self.pLvlInitMean,sigma=self.pLvlInitStd,seed=t+92615)
t_cycle[who_dies] = 0
for j in range(self.T_cycle): # Update persistent income
these = t_cycle == j
PermShkTemp = drawDiscrete(N=np.sum(these),P=self.PermShkDstn[j][0],X=self.PermShkDstn[j][1],exact_match=False,seed=t+13*j)
pLvlNow[these] = self.pLvlNextFunc[j](pLvlNow[these])*PermShkTemp
t_cycle = t_cycle + 1
t_cycle[t_cycle == self.T_cycle] = 0
# We now have a "long run stationary distribution", extract percentiles
pLvlGrid = [] # empty list of time-varying persistent income grids
for t in range(self.T_cycle):
these = t_cycle == t
pLvlGrid.append(getPercentiles(pLvlNow[these],percentiles=self.pLvlPctiles))
# Throw an error if cycles>1
else:
assert False, "Can only handle cycles=0 or cycles=1!"
# Store the result and add attribute to time_vary
self.pLvlGrid = pLvlGrid
self.addToTimeVary('pLvlGrid')
if not orig_time:
self.timeRev() |
def delete_row(self):
"""Delete bookmarks or event from annotations, based on row."""
sel_model = self.idx_annot_list.selectionModel()
for row in sel_model.selectedRows():
i = row.row()
start = self.idx_annot_list.property('start')[i]
end = self.idx_annot_list.property('end')[i]
name = self.idx_annot_list.item(i, 2).text()
marker_event = self.idx_annot_list.item(i, 3).text()
if marker_event == 'bookmark':
self.annot.remove_bookmark(name=name, time=(start, end))
else:
self.annot.remove_event(name=name, time=(start, end))
highlight = self.parent.traces.highlight
if highlight:
self.parent.traces.scene.removeItem(highlight)
highlight = None
self.parent.traces.event_sel = None
self.update_annotations() | Delete bookmarks or event from annotations, based on row. | Below is the the instruction that describes the task:
### Input:
Delete bookmarks or event from annotations, based on row.
### Response:
def delete_row(self):
"""Delete bookmarks or event from annotations, based on row."""
sel_model = self.idx_annot_list.selectionModel()
for row in sel_model.selectedRows():
i = row.row()
start = self.idx_annot_list.property('start')[i]
end = self.idx_annot_list.property('end')[i]
name = self.idx_annot_list.item(i, 2).text()
marker_event = self.idx_annot_list.item(i, 3).text()
if marker_event == 'bookmark':
self.annot.remove_bookmark(name=name, time=(start, end))
else:
self.annot.remove_event(name=name, time=(start, end))
highlight = self.parent.traces.highlight
if highlight:
self.parent.traces.scene.removeItem(highlight)
highlight = None
self.parent.traces.event_sel = None
self.update_annotations() |
def StatusActionFactory(version, build=None, base_class=BaseStatusAction): # noqa
"""
A factory for creating a new status action class specific to a service.
:param version: The service version
:type version: union[str, unicode]
:param build: The optional service build identifier
:type build: union[str, unicode]
:param base_class: The optional base class, to override `BaseStatusAction` as the base class
:type base_class: BaseStatusAction
:return: A class named `StatusAction`, extending `base_class`, with version and build matching the input parameters
:rtype: class
"""
return type(
str('StatusAction'),
(base_class, ),
{str('_version'): version, str('_build'): build},
) | A factory for creating a new status action class specific to a service.
:param version: The service version
:type version: union[str, unicode]
:param build: The optional service build identifier
:type build: union[str, unicode]
:param base_class: The optional base class, to override `BaseStatusAction` as the base class
:type base_class: BaseStatusAction
:return: A class named `StatusAction`, extending `base_class`, with version and build matching the input parameters
:rtype: class | Below is the the instruction that describes the task:
### Input:
A factory for creating a new status action class specific to a service.
:param version: The service version
:type version: union[str, unicode]
:param build: The optional service build identifier
:type build: union[str, unicode]
:param base_class: The optional base class, to override `BaseStatusAction` as the base class
:type base_class: BaseStatusAction
:return: A class named `StatusAction`, extending `base_class`, with version and build matching the input parameters
:rtype: class
### Response:
def StatusActionFactory(version, build=None, base_class=BaseStatusAction): # noqa
"""
A factory for creating a new status action class specific to a service.
:param version: The service version
:type version: union[str, unicode]
:param build: The optional service build identifier
:type build: union[str, unicode]
:param base_class: The optional base class, to override `BaseStatusAction` as the base class
:type base_class: BaseStatusAction
:return: A class named `StatusAction`, extending `base_class`, with version and build matching the input parameters
:rtype: class
"""
return type(
str('StatusAction'),
(base_class, ),
{str('_version'): version, str('_build'): build},
) |
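A minimal usage sketch for the factory above, assuming `BaseStatusAction` from the surrounding module is importable; the version and build strings are illustrative:
# Hypothetical call: build a status action class bound to this service's version.
StatusAction = StatusActionFactory('1.2.3', build='20190101.1')
print(StatusAction.__name__)   # 'StatusAction'
print(StatusAction._version)   # '1.2.3'
print(StatusAction._build)     # '20190101.1'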
def _func(self) -> typing.Optional[typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]]]:
"""Get wrapped function.
:rtype: typing.Optional[typing.Callable[..., typing.Union[typing.Awaitable, typing.Any]]]
"""
return self.__func | Get wrapped function.
:rtype: typing.Optional[typing.Callable[..., typing.Union[typing.Awaitable, typing.Any]]] | Below is the the instruction that describes the task:
### Input:
Get wrapped function.
:rtype: typing.Optional[typing.Callable[..., typing.Union[typing.Awaitable, typing.Any]]]
### Response:
def _func(self) -> typing.Optional[typing.Callable[..., typing.Union["typing.Awaitable[typing.Any]", typing.Any]]]:
"""Get wrapped function.
:rtype: typing.Optional[typing.Callable[..., typing.Union[typing.Awaitable, typing.Any]]]
"""
return self.__func |
def _mainType(self, resp):
""" gets the main type from the response object"""
if self.PY2:
return resp.headers.maintype
elif self.PY3:
return resp.headers.get_content_maintype()
else:
return None | gets the main type from the response object | Below is the the instruction that describes the task:
### Input:
gets the main type from the response object
### Response:
def _mainType(self, resp):
""" gets the main type from the response object"""
if self.PY2:
return resp.headers.maintype
elif self.PY3:
return resp.headers.get_content_maintype()
else:
return None |
def guess_param_types(**kwargs):
"""
Given a set of keyword literals, promote to the appropriate
parameter type based on some simple heuristics.
"""
params = {}
for k, v in kwargs.items():
kws = dict(default=v, constant=True)
if isinstance(v, Parameter):
params[k] = v
elif isinstance(v, dt_types):
params[k] = Date(**kws)
elif isinstance(v, bool):
params[k] = Boolean(**kws)
elif isinstance(v, int):
params[k] = Integer(**kws)
elif isinstance(v, float):
params[k] = Number(**kws)
elif isinstance(v, str):
params[k] = String(**kws)
elif isinstance(v, dict):
params[k] = Dict(**kws)
elif isinstance(v, tuple):
if all(_is_number(el) for el in v):
params[k] = NumericTuple(**kws)
elif all(isinstance(el, dt_types) for el in v) and len(v)==2:
params[k] = DateRange(**kws)
else:
params[k] = Tuple(**kws)
elif isinstance(v, list):
params[k] = List(**kws)
elif isinstance(v, np.ndarray):
params[k] = Array(**kws)
else:
from pandas import DataFrame as pdDFrame
from pandas import Series as pdSeries
if isinstance(v, pdDFrame):
params[k] = DataFrame(**kws)
elif isinstance(v, pdSeries):
params[k] = Series(**kws)
else:
params[k] = Parameter(**kws)
return params | Given a set of keyword literals, promote to the appropriate
parameter type based on some simple heuristics. | Below is the the instruction that describes the task:
### Input:
Given a set of keyword literals, promote to the appropriate
parameter type based on some simple heuristics.
### Response:
def guess_param_types(**kwargs):
"""
Given a set of keyword literals, promote to the appropriate
parameter type based on some simple heuristics.
"""
params = {}
for k, v in kwargs.items():
kws = dict(default=v, constant=True)
if isinstance(v, Parameter):
params[k] = v
elif isinstance(v, dt_types):
params[k] = Date(**kws)
elif isinstance(v, bool):
params[k] = Boolean(**kws)
elif isinstance(v, int):
params[k] = Integer(**kws)
elif isinstance(v, float):
params[k] = Number(**kws)
elif isinstance(v, str):
params[k] = String(**kws)
elif isinstance(v, dict):
params[k] = Dict(**kws)
elif isinstance(v, tuple):
if all(_is_number(el) for el in v):
params[k] = NumericTuple(**kws)
elif all(isinstance(el, dt_types) for el in v) and len(v)==2:
params[k] = DateRange(**kws)
else:
params[k] = Tuple(**kws)
elif isinstance(v, list):
params[k] = List(**kws)
elif isinstance(v, np.ndarray):
params[k] = Array(**kws)
else:
from pandas import DataFrame as pdDFrame
from pandas import Series as pdSeries
if isinstance(v, pdDFrame):
params[k] = DataFrame(**kws)
elif isinstance(v, pdSeries):
params[k] = Series(**kws)
else:
params[k] = Parameter(**kws)
return params |
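A short sketch of the promotion above, assuming the param package's classes referenced in the function (Integer, Number, String, Boolean, ...) are in scope; the keyword names are illustrative:
# Each literal is wrapped in a constant Parameter of the matching type.
params = guess_param_types(count=3, ratio=0.5, label='x', flag=True)
# Per the branches above: count -> Integer, ratio -> Number, label -> String,
# flag -> Boolean (the bool check runs before the int check, so True is not an Integer).
for name, p in params.items():
    print(name, type(p).__name__, p.default)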
def __get_merge_versions(self, merge_id):
"""Get merge versions"""
versions = []
group_versions = self.client.merge_versions(merge_id)
for raw_versions in group_versions:
for version in json.loads(raw_versions):
version_id = version['id']
version_full_raw = self.client.merge_version(merge_id, version_id)
version_full = json.loads(version_full_raw)
version_full.pop('diffs', None)
versions.append(version_full)
return versions | Get merge versions | Below is the the instruction that describes the task:
### Input:
Get merge versions
### Response:
def __get_merge_versions(self, merge_id):
"""Get merge versions"""
versions = []
group_versions = self.client.merge_versions(merge_id)
for raw_versions in group_versions:
for version in json.loads(raw_versions):
version_id = version['id']
version_full_raw = self.client.merge_version(merge_id, version_id)
version_full = json.loads(version_full_raw)
version_full.pop('diffs', None)
versions.append(version_full)
return versions |
def _adjust_n_months(other_day, n, reference_day):
"""Adjust the number of times a monthly offset is applied based
on the day of a given date, and the reference day provided.
"""
if n > 0 and other_day < reference_day:
n = n - 1
elif n <= 0 and other_day > reference_day:
n = n + 1
return n | Adjust the number of times a monthly offset is applied based
on the day of a given date, and the reference day provided. | Below is the the instruction that describes the task:
### Input:
Adjust the number of times a monthly offset is applied based
on the day of a given date, and the reference day provided.
### Response:
def _adjust_n_months(other_day, n, reference_day):
"""Adjust the number of times a monthly offset is applied based
on the day of a given date, and the reference day provided.
"""
if n > 0 and other_day < reference_day:
n = n - 1
elif n <= 0 and other_day > reference_day:
n = n + 1
return n |
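A worked example of the adjustment above, with illustrative values:
# Moving forward (n=2) from day 15 toward reference day 31: the first shift only
# finishes the current month, so n is reduced by one.
print(_adjust_n_months(15, 2, 31))   # -> 1
# Starting on (or past) the reference day leaves a positive n unchanged.
print(_adjust_n_months(31, 2, 31))   # -> 2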
def _proxy_to_logger(self, method_name, event, *event_args,
**event_kw):
"""
Propagate a method call to the wrapped logger.
This is the same as the superclass implementation, except that
it also preserves positional arguments in the `event_dict` so
that the stdlib's support for format strings can be used.
"""
if isinstance(event, bytes):
event = event.decode('utf-8')
if event_args:
event_kw['positional_args'] = event_args
return super(BoundLevelLogger, self)._proxy_to_logger(method_name,
event=event,
**event_kw) | Propagate a method call to the wrapped logger.
This is the same as the superclass implementation, except that
it also preserves positional arguments in the `event_dict` so
that the stdlib's support for format strings can be used.
### Input:
Propagate a method call to the wrapped logger.
This is the same as the superclass implementation, except that
it also preserves positional arguments in the `event_dict` so
that the stdlib's support for format strings can be used.
### Response:
def _proxy_to_logger(self, method_name, event, *event_args,
**event_kw):
"""
Propagate a method call to the wrapped logger.
This is the same as the superclass implementation, except that
it also preserves positional arguments in the `event_dict` so
that the stdlib's support for format strings can be used.
"""
if isinstance(event, bytes):
event = event.decode('utf-8')
if event_args:
event_kw['positional_args'] = event_args
return super(BoundLevelLogger, self)._proxy_to_logger(method_name,
event=event,
**event_kw) |
def load_index(self, filename, reindex=False):
''' Load the Layout's index from a plaintext file.
Args:
filename (str): Path to the plaintext index file.
reindex (bool): If True, discards entity values provided in the
loaded index and instead re-indexes every file in the loaded
index against the entities defined in the config. Default is
False, in which case it is assumed that all entity definitions
in the loaded index are correct and do not need any further
validation.
Note: At the moment, directory-specific config files aren't serialized.
This means reconstructed indexes will only work properly in cases
where there aren't multiple layout specs within a project.
'''
self._reset_index()
with open(filename, 'r') as fobj:
data = json.load(fobj)
for path, file in data.items():
ents, domains = file['entities'], file['domains']
root, f = dirname(path), basename(path)
if reindex:
self._index_file(root, f, domains)
else:
f = self._make_file_object(root, f)
tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
f.tags = tags
self.files[f.path] = f
for ent, val in f.entities.items():
self.entities[ent].add_file(f.path, val) | Load the Layout's index from a plaintext file.
Args:
filename (str): Path to the plaintext index file.
reindex (bool): If True, discards entity values provided in the
loaded index and instead re-indexes every file in the loaded
index against the entities defined in the config. Default is
False, in which case it is assumed that all entity definitions
in the loaded index are correct and do not need any further
validation.
Note: At the moment, directory-specific config files aren't serialized.
This means reconstructed indexes will only work properly in cases
where there aren't multiple layout specs within a project. | Below is the the instruction that describes the task:
### Input:
Load the Layout's index from a plaintext file.
Args:
filename (str): Path to the plaintext index file.
reindex (bool): If True, discards entity values provided in the
loaded index and instead re-indexes every file in the loaded
index against the entities defined in the config. Default is
False, in which case it is assumed that all entity definitions
in the loaded index are correct and do not need any further
validation.
Note: At the moment, directory-specific config files aren't serialized.
This means reconstructed indexes will only work properly in cases
where there aren't multiple layout specs within a project.
### Response:
def load_index(self, filename, reindex=False):
''' Load the Layout's index from a plaintext file.
Args:
filename (str): Path to the plaintext index file.
reindex (bool): If True, discards entity values provided in the
loaded index and instead re-indexes every file in the loaded
index against the entities defined in the config. Default is
False, in which case it is assumed that all entity definitions
in the loaded index are correct and do not need any further
validation.
Note: At the moment, directory-specific config files aren't serialized.
This means reconstructed indexes will only work properly in cases
where there aren't multiple layout specs within a project.
'''
self._reset_index()
with open(filename, 'r') as fobj:
data = json.load(fobj)
for path, file in data.items():
ents, domains = file['entities'], file['domains']
root, f = dirname(path), basename(path)
if reindex:
self._index_file(root, f, domains)
else:
f = self._make_file_object(root, f)
tags = {k: Tag(self.entities[k], v) for k, v in ents.items()}
f.tags = tags
self.files[f.path] = f
for ent, val in f.entities.items():
self.entities[ent].add_file(f.path, val) |
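A hedged usage sketch, assuming `layout` is an instance of the Layout class this method belongs to and that an index was saved earlier to the given (illustrative) path:
layout.load_index('layout_index.json')                 # trust entity values stored in the file
layout.load_index('layout_index.json', reindex=True)   # re-derive entities from the config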
def secret_loader(self, callback):
"""
Decorate a method that receives a key id and returns a secret key
"""
if not callback or not callable(callback):
raise Exception("Please pass in a callable that loads secret keys")
self.secret_loader_callback = callback
return callback | Decorate a method that receives a key id and returns a secret key | Below is the the instruction that describes the task:
### Input:
Decorate a method that receives a key id and returns a secret key
### Response:
def secret_loader(self, callback):
"""
Decorate a method that receives a key id and returns a secret key
"""
if not callback or not callable(callback):
raise Exception("Please pass in a callable that loads secret keys")
self.secret_loader_callback = callback
return callback |
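A small registration sketch for the decorator above; `auth` stands in for an instance of the class defining secret_loader, and the key/secret values are placeholders:
@auth.secret_loader
def load_secret(key_id):
    # Look the secret up however the application stores it (an in-memory dict here).
    return {'key-1': 's3cr3t'}.get(key_id)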
def main():
'''Main routine.'''
# validate command line arguments
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--name', '-n', required=True,
action='store', help='Name of vmss')
arg_parser.add_argument('--capacity', '-c', required=True, action='store',
help='Number of VMs')
arg_parser.add_argument('--location', '-l', action='store', help='Location, e.g. eastus')
arg_parser.add_argument('--verbose', '-v', action='store_true', default=False,
help='Print operational details')
args = arg_parser.parse_args()
name = args.name
location = args.location
capacity = args.capacity
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
print("Error: Expecting azurermconfig.json in current folder")
sys.exit()
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
# authenticate
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
# create resource group
print('Creating resource group: ' + name)
rmreturn = azurerm.create_resource_group(
access_token, subscription_id, name, location)
print(rmreturn)
# create NSG
nsg_name = name + 'nsg'
print('Creating NSG: ' + nsg_name)
rmreturn = azurerm.create_nsg(
access_token, subscription_id, name, nsg_name, location)
nsg_id = rmreturn.json()['id']
print('nsg_id = ' + nsg_id)
# create NSG rule
nsg_rule = 'ssh'
print('Creating NSG rule: ' + nsg_rule)
rmreturn = azurerm.create_nsg_rule(access_token, subscription_id, name, nsg_name, nsg_rule,
description='ssh rule', destination_range='22')
#print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
# create VNET
vnetname = name + 'vnet'
print('Creating VNet: ' + vnetname)
rmreturn = azurerm.create_vnet(access_token, subscription_id, name, vnetname, location,
nsg_id=nsg_id)
print(rmreturn)
# print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
subnet_id = rmreturn.json()['properties']['subnets'][0]['id']
print('subnet_id = ' + subnet_id)
# create public IP address
public_ip_name = name + 'ip'
dns_label = name + 'ip'
print('Creating public IP address: ' + public_ip_name)
rmreturn = azurerm.create_public_ip(access_token, subscription_id, name, public_ip_name,
dns_label, location)
print(rmreturn)
ip_id = rmreturn.json()['id']
print('ip_id = ' + ip_id)
# create load balancer with nat pool
lb_name = vnetname + 'lb'
print('Creating load balancer with nat pool: ' + lb_name)
rmreturn = azurerm.create_lb_with_nat_pool(access_token, subscription_id, name, lb_name,
ip_id, '50000', '50100', '22', location)
be_pool_id = rmreturn.json()['properties']['backendAddressPools'][0]['id']
lb_pool_id = rmreturn.json()['properties']['inboundNatPools'][0]['id']
# create VMSS
vmss_name = name
vm_size = 'Standard_D1_v2'
publisher = 'Canonical'
offer = 'UbuntuServer'
sku = '16.04-LTS'
version = 'latest'
username = 'azure'
password = Haikunator().haikunate(delimiter=',') # creates random password
print('Password = ' + password)
print('Creating VMSS: ' + vmss_name)
rmreturn = azurerm.create_vmss(access_token, subscription_id, name, vmss_name, vm_size,
capacity, publisher, offer, sku, version, subnet_id, be_pool_id,
lb_pool_id, location, username=username, password=password)
print(rmreturn)
print(json.dumps(rmreturn.json(), sort_keys=False,
indent=2, separators=(',', ': '))) | Main routine. | Below is the the instruction that describes the task:
### Input:
Main routine.
### Response:
def main():
'''Main routine.'''
# validate command line arguments
arg_parser = argparse.ArgumentParser()
arg_parser.add_argument('--name', '-n', required=True,
action='store', help='Name of vmss')
arg_parser.add_argument('--capacity', '-c', required=True, action='store',
help='Number of VMs')
arg_parser.add_argument('--location', '-l', action='store', help='Location, e.g. eastus')
arg_parser.add_argument('--verbose', '-v', action='store_true', default=False,
help='Print operational details')
args = arg_parser.parse_args()
name = args.name
location = args.location
capacity = args.capacity
# Load Azure app defaults
try:
with open('azurermconfig.json') as config_file:
config_data = json.load(config_file)
except FileNotFoundError:
print("Error: Expecting azurermconfig.json in current folder")
sys.exit()
tenant_id = config_data['tenantId']
app_id = config_data['appId']
app_secret = config_data['appSecret']
subscription_id = config_data['subscriptionId']
# authenticate
access_token = azurerm.get_access_token(tenant_id, app_id, app_secret)
# create resource group
print('Creating resource group: ' + name)
rmreturn = azurerm.create_resource_group(
access_token, subscription_id, name, location)
print(rmreturn)
# create NSG
nsg_name = name + 'nsg'
print('Creating NSG: ' + nsg_name)
rmreturn = azurerm.create_nsg(
access_token, subscription_id, name, nsg_name, location)
nsg_id = rmreturn.json()['id']
print('nsg_id = ' + nsg_id)
# create NSG rule
nsg_rule = 'ssh'
print('Creating NSG rule: ' + nsg_rule)
rmreturn = azurerm.create_nsg_rule(access_token, subscription_id, name, nsg_name, nsg_rule,
description='ssh rule', destination_range='22')
#print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
# create VNET
vnetname = name + 'vnet'
print('Creating VNet: ' + vnetname)
rmreturn = azurerm.create_vnet(access_token, subscription_id, name, vnetname, location,
nsg_id=nsg_id)
print(rmreturn)
# print(json.dumps(rmreturn.json(), sort_keys=False, indent=2, separators=(',', ': ')))
subnet_id = rmreturn.json()['properties']['subnets'][0]['id']
print('subnet_id = ' + subnet_id)
# create public IP address
public_ip_name = name + 'ip'
dns_label = name + 'ip'
print('Creating public IP address: ' + public_ip_name)
rmreturn = azurerm.create_public_ip(access_token, subscription_id, name, public_ip_name,
dns_label, location)
print(rmreturn)
ip_id = rmreturn.json()['id']
print('ip_id = ' + ip_id)
# create load balancer with nat pool
lb_name = vnetname + 'lb'
print('Creating load balancer with nat pool: ' + lb_name)
rmreturn = azurerm.create_lb_with_nat_pool(access_token, subscription_id, name, lb_name,
ip_id, '50000', '50100', '22', location)
be_pool_id = rmreturn.json()['properties']['backendAddressPools'][0]['id']
lb_pool_id = rmreturn.json()['properties']['inboundNatPools'][0]['id']
# create VMSS
vmss_name = name
vm_size = 'Standard_D1_v2'
publisher = 'Canonical'
offer = 'UbuntuServer'
sku = '16.04-LTS'
version = 'latest'
username = 'azure'
password = Haikunator().haikunate(delimiter=',') # creates random password
print('Password = ' + password)
print('Creating VMSS: ' + vmss_name)
rmreturn = azurerm.create_vmss(access_token, subscription_id, name, vmss_name, vm_size,
capacity, publisher, offer, sku, version, subnet_id, be_pool_id,
lb_pool_id, location, username=username, password=password)
print(rmreturn)
print(json.dumps(rmreturn.json(), sort_keys=False,
indent=2, separators=(',', ': '))) |
async def fetch_invite(self, *, with_counts=True):
"""|coro|
Retrieves an :class:`Invite` from an invite URL or ID.
This is the same as :meth:`Client.get_invite`; the invite
code is abstracted away.
Parameters
-----------
with_counts: :class:`bool`
Whether to include count information in the invite. This fills the
:attr:`Invite.approximate_member_count` and :attr:`Invite.approximate_presence_count`
fields.
Returns
--------
:class:`Invite`
The invite from the URL/ID.
"""
if self._invite:
invite_id = resolve_invite(self._invite)
data = await self._state.http.get_invite(invite_id, with_counts=with_counts)
return Invite.from_incomplete(state=self._state, data=data) | |coro|
Retrieves an :class:`Invite` from an invite URL or ID.
This is the same as :meth:`Client.get_invite`; the invite
code is abstracted away.
Parameters
-----------
with_counts: :class:`bool`
Whether to include count information in the invite. This fills the
:attr:`Invite.approximate_member_count` and :attr:`Invite.approximate_presence_count`
fields.
Returns
--------
:class:`Invite`
The invite from the URL/ID. | Below is the the instruction that describes the task:
### Input:
|coro|
Retrieves an :class:`Invite` from an invite URL or ID.
This is the same as :meth:`Client.get_invite`; the invite
code is abstracted away.
Parameters
-----------
with_counts: :class:`bool`
Whether to include count information in the invite. This fills the
:attr:`Invite.approximate_member_count` and :attr:`Invite.approximate_presence_count`
fields.
Returns
--------
:class:`Invite`
The invite from the URL/ID.
### Response:
async def fetch_invite(self, *, with_counts=True):
"""|coro|
Retrieves an :class:`Invite` from an invite URL or ID.
This is the same as :meth:`Client.get_invite`; the invite
code is abstracted away.
Parameters
-----------
with_counts: :class:`bool`
Whether to include count information in the invite. This fills the
:attr:`Invite.approximate_member_count` and :attr:`Invite.approximate_presence_count`
fields.
Returns
--------
:class:`Invite`
The invite from the URL/ID.
"""
if self._invite:
invite_id = resolve_invite(self._invite)
data = await self._state.http.get_invite(invite_id, with_counts=with_counts)
return Invite.from_incomplete(state=self._state, data=data) |
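A usage sketch inside an async context, assuming `partial_guild` is an object exposing the coroutine above (for example, a partial guild resolved from an invite):
invite = await partial_guild.fetch_invite(with_counts=True)
print(invite.approximate_member_count, invite.approximate_presence_count)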
def __WaitForVolume(volume, desired_state):
""" Blocks until EBS volume is in desired state. """
print 'Waiting for volume %s to be %s...' % (volume.id, desired_state)
while True:
volume.update()
sys.stdout.write('.')
sys.stdout.flush()
#print 'status is: %s' % volume.status
if volume.status == desired_state:
break
time.sleep(5)
return | Blocks until EBS volume is in desired state. | Below is the the instruction that describes the task:
### Input:
Blocks until EBS volume is in desired state.
### Response:
def __WaitForVolume(volume, desired_state):
""" Blocks until EBS volume is in desired state. """
print 'Waiting for volume %s to be %s...' % (volume.id, desired_state)
while True:
volume.update()
sys.stdout.write('.')
sys.stdout.flush()
#print 'status is: %s' % volume.status
if volume.status == desired_state:
break
time.sleep(5)
return |
def get_thumbnail_format(self):
"""
Determines the target thumbnail type either by looking for a format
override specified at the model level, or by using the format the
user uploaded.
"""
if self.field.thumbnail_format:
# Over-ride was given, use that instead.
return self.field.thumbnail_format.lower()
else:
# Use the existing extension from the file.
filename_split = self.name.rsplit('.', 1)
return filename_split[-1] | Determines the target thumbnail type either by looking for a format
override specified at the model level, or by using the format the
user uploaded. | Below is the the instruction that describes the task:
### Input:
Determines the target thumbnail type either by looking for a format
override specified at the model level, or by using the format the
user uploaded.
### Response:
def get_thumbnail_format(self):
"""
Determines the target thumbnail type either by looking for a format
override specified at the model level, or by using the format the
user uploaded.
"""
if self.field.thumbnail_format:
# Over-ride was given, use that instead.
return self.field.thumbnail_format.lower()
else:
# Use the existing extension from the file.
filename_split = self.name.rsplit('.', 1)
return filename_split[-1] |
def _example_stock_quote(quote_ctx):
"""
Get quotes in batch; output stock name, time, current price, open price, high price, low price,
yesterday's close price, volume, turnover, turnover rate, amplitude, and stock status.
"""
stock_code_list = ["US.AAPL", "HK.00700"]
# subscribe "QUOTE"
ret_status, ret_data = quote_ctx.subscribe(stock_code_list, ft.SubType.QUOTE)
if ret_status != ft.RET_OK:
print("%s %s: %s" % (stock_code_list, "QUOTE", ret_data))
exit()
ret_status, ret_data = quote_ctx.query_subscription()
if ret_status != ft.RET_OK:
print(ret_status)
exit()
print(ret_data)
ret_status, ret_data = quote_ctx.get_stock_quote(stock_code_list)
if ret_status != ft.RET_OK:
print(ret_data)
exit()
quote_table = ret_data
print("QUOTE_TABLE")
print(quote_table) | Get quotes in batch; output stock name, time, current price, open price, high price, low price, yesterday's close price, volume, turnover, turnover rate, amplitude, and stock status. | Below is the the instruction that describes the task:
### Input:
Get quotes in batch; output stock name, time, current price, open price, high price, low price,
yesterday's close price, volume, turnover, turnover rate, amplitude, and stock status.
### Response:
def _example_stock_quote(quote_ctx):
"""
Get quotes in batch; output stock name, time, current price, open price, high price, low price,
yesterday's close price, volume, turnover, turnover rate, amplitude, and stock status.
"""
stock_code_list = ["US.AAPL", "HK.00700"]
# subscribe "QUOTE"
ret_status, ret_data = quote_ctx.subscribe(stock_code_list, ft.SubType.QUOTE)
if ret_status != ft.RET_OK:
print("%s %s: %s" % (stock_code_list, "QUOTE", ret_data))
exit()
ret_status, ret_data = quote_ctx.query_subscription()
if ret_status != ft.RET_OK:
print(ret_status)
exit()
print(ret_data)
ret_status, ret_data = quote_ctx.get_stock_quote(stock_code_list)
if ret_status != ft.RET_OK:
print(ret_data)
exit()
quote_table = ret_data
print("QUOTE_TABLE")
print(quote_table) |
def is_carrier_specific(numobj):
"""Given a valid short number, determines whether it is carrier-specific
(however, nothing is implied about its validity). Carrier-specific numbers
may connect to a different end-point, or not connect at all, depending
on the user's carrier. If it is important that the number is valid, then
its validity must first be checked using is_valid_short_number or
is_valid_short_number_for_region.
Arguments:
numobj -- the valid short number to check
Returns whether the short number is carrier-specific, assuming the input
was a valid short number.
"""
region_codes = region_codes_for_country_code(numobj.country_code)
region_code = _region_code_for_short_number_from_region_list(numobj, region_codes)
national_number = national_significant_number(numobj)
metadata = PhoneMetadata.short_metadata_for_region(region_code)
return (metadata is not None and
_matches_possible_number_and_national_number(national_number, metadata.carrier_specific)) | Given a valid short number, determines whether it is carrier-specific
(however, nothing is implied about its validity). Carrier-specific numbers
may connect to a different end-point, or not connect at all, depending
on the user's carrier. If it is important that the number is valid, then
its validity must first be checked using is_valid_short_number or
is_valid_short_number_for_region.
Arguments:
numobj -- the valid short number to check
Returns whether the short number is carrier-specific, assuming the input
was a valid short number. | Below is the the instruction that describes the task:
### Input:
Given a valid short number, determines whether it is carrier-specific
(however, nothing is implied about its validity). Carrier-specific numbers
may connect to a different end-point, or not connect at all, depending
on the user's carrier. If it is important that the number is valid, then
its validity must first be checked using is_valid_short_number or
is_valid_short_number_for_region.
Arguments:
numobj -- the valid short number to check
Returns whether the short number is carrier-specific, assuming the input
was a valid short number.
### Response:
def is_carrier_specific(numobj):
"""Given a valid short number, determines whether it is carrier-specific
(however, nothing is implied about its validity). Carrier-specific numbers
may connect to a different end-point, or not connect at all, depending
on the user's carrier. If it is important that the number is valid, then
its validity must first be checked using is_valid_short_number or
is_valid_short_number_for_region.
Arguments:
numobj -- the valid short number to check
Returns whether the short number is carrier-specific, assuming the input
was a valid short number.
"""
region_codes = region_codes_for_country_code(numobj.country_code)
region_code = _region_code_for_short_number_from_region_list(numobj, region_codes)
national_number = national_significant_number(numobj)
metadata = PhoneMetadata.short_metadata_for_region(region_code)
return (metadata is not None and
_matches_possible_number_and_national_number(national_number, metadata.carrier_specific)) |
def _add_seg_to_output(out, data, enumerate_chroms=False):
"""Export outputs to 'seg' format compatible with IGV and GenePattern.
"""
out_file = "%s.seg" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
"seg"]
if enumerate_chroms:
cmd += ["--enumerate-chroms"]
cmd += ["-o", tx_out_file, out["cns"]]
do.run(cmd, "CNVkit export seg")
out["seg"] = out_file
return out | Export outputs to 'seg' format compatible with IGV and GenePattern. | Below is the the instruction that describes the task:
### Input:
Export outputs to 'seg' format compatible with IGV and GenePattern.
### Response:
def _add_seg_to_output(out, data, enumerate_chroms=False):
"""Export outputs to 'seg' format compatible with IGV and GenePattern.
"""
out_file = "%s.seg" % os.path.splitext(out["cns"])[0]
if not utils.file_exists(out_file):
with file_transaction(data, out_file) as tx_out_file:
cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"), "export",
"seg"]
if enumerate_chroms:
cmd += ["--enumerate-chroms"]
cmd += ["-o", tx_out_file, out["cns"]]
do.run(cmd, "CNVkit export seg")
out["seg"] = out_file
return out |
def extractContent(self, text):
"""Extract the content of comment text.
"""
m = self.nextValidComment(text)
return '' if m is None else m.group(1) | Extract the content of comment text. | Below is the the instruction that describes the task:
### Input:
Extract the content of comment text.
### Response:
def extractContent(self, text):
"""Extract the content of comment text.
"""
m = self.nextValidComment(text)
return '' if m is None else m.group(1) |
def save_current_nb_as_html(info=False):
"""
Save the current notebook as html file in the same directory
"""
assert in_ipynb()
full_path = get_notebook_name()
path, filename = os.path.split(full_path)
wd_save = os.getcwd()
os.chdir(path)
cmd = 'jupyter nbconvert --to html "{}"'.format(filename)
os.system(cmd)
os.chdir(wd_save)
if info:
print("target dir: ", path)
print("cmd: ", cmd)
print("working dir: ", wd_save) | Save the current notebook as html file in the same directory | Below is the the instruction that describes the task:
### Input:
Save the current notebook as html file in the same directory
### Response:
def save_current_nb_as_html(info=False):
"""
Save the current notebook as html file in the same directory
"""
assert in_ipynb()
full_path = get_notebook_name()
path, filename = os.path.split(full_path)
wd_save = os.getcwd()
os.chdir(path)
cmd = 'jupyter nbconvert --to html "{}"'.format(filename)
os.system(cmd)
os.chdir(wd_save)
if info:
print("target dir: ", path)
print("cmd: ", cmd)
print("working dir: ", wd_save) |
def start(self):
"""Start scheduling"""
self.stop()
self.initialize()
self.handle = self.loop.call_at(self.get_next(), self.call_next) | Start scheduling | Below is the the instruction that describes the task:
### Input:
Start scheduling
### Response:
def start(self):
"""Start scheduling"""
self.stop()
self.initialize()
self.handle = self.loop.call_at(self.get_next(), self.call_next) |
def _set_tacacs_server(self, v, load=False):
"""
Setter method for tacacs_server, mapped from YANG variable /tacacs_server (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tacacs_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tacacs_server() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=tacacs_server.tacacs_server, is_container='container', presence=False, yang_name="tacacs-server", rest_name="tacacs-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'TACACS+ server configuration', u'cli-incomplete-no': None, u'sort-priority': u'11'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tacacs_server must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=tacacs_server.tacacs_server, is_container='container', presence=False, yang_name="tacacs-server", rest_name="tacacs-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'TACACS+ server configuration', u'cli-incomplete-no': None, u'sort-priority': u'11'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)""",
})
self.__tacacs_server = t
if hasattr(self, '_set'):
self._set() | Setter method for tacacs_server, mapped from YANG variable /tacacs_server (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tacacs_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tacacs_server() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for tacacs_server, mapped from YANG variable /tacacs_server (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tacacs_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tacacs_server() directly.
### Response:
def _set_tacacs_server(self, v, load=False):
"""
Setter method for tacacs_server, mapped from YANG variable /tacacs_server (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_tacacs_server is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_tacacs_server() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=tacacs_server.tacacs_server, is_container='container', presence=False, yang_name="tacacs-server", rest_name="tacacs-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'TACACS+ server configuration', u'cli-incomplete-no': None, u'sort-priority': u'11'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """tacacs_server must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=tacacs_server.tacacs_server, is_container='container', presence=False, yang_name="tacacs-server", rest_name="tacacs-server", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'TACACS+ server configuration', u'cli-incomplete-no': None, u'sort-priority': u'11'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)""",
})
self.__tacacs_server = t
if hasattr(self, '_set'):
self._set() |
def RecurseKeys(self):
"""Recurses the subkeys starting with the key.
Yields:
WinRegistryKey: Windows Registry key.
"""
yield self
for subkey in self.GetSubkeys():
for key in subkey.RecurseKeys():
yield key | Recurses the subkeys starting with the key.
Yields:
WinRegistryKey: Windows Registry key. | Below is the the instruction that describes the task:
### Input:
Recurses the subkeys starting with the key.
Yields:
WinRegistryKey: Windows Registry key.
### Response:
def RecurseKeys(self):
"""Recurses the subkeys starting with the key.
Yields:
WinRegistryKey: Windows Registry key.
"""
yield self
for subkey in self.GetSubkeys():
for key in subkey.RecurseKeys():
yield key |
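A short traversal sketch, assuming `root_key` is a WinRegistryKey as described in the docstring:
# Depth-first walk that yields root_key itself first, then every nested subkey.
for key in root_key.RecurseKeys():
    print(key)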
def _random_block():
"""
Generate a random string of `BLOCK_SIZE` length.
"""
# TODO: Use a better RNG than random.randint
random_number = random.randint(0, DISCRETE_VALUES)
random_string = _to_base36(random_number)
return _pad(random_string, BLOCK_SIZE) | Generate a random string of `BLOCK_SIZE` length. | Below is the the instruction that describes the task:
### Input:
Generate a random string of `BLOCK_SIZE` length.
### Response:
def _random_block():
"""
Generate a random string of `BLOCK_SIZE` length.
"""
# TODO: Use a better RNG than random.randint
random_number = random.randint(0, DISCRETE_VALUES)
random_string = _to_base36(random_number)
return _pad(random_string, BLOCK_SIZE) |
def body(self):
"""Gets the JSON body of the request"""
if self._decoded_body == None:
# Try to decode the JSON body. But raise an error if the
# content-type is unexpected, or the JSON is invalid.
raw_content_type = self.request.headers.get("content-type") or ""
content_type = raw_content_type.split(";")[0].strip().lower()
if content_type == "application/json":
try:
self._decoded_body = escape.json_decode(self.request.body)
except:
raise oz.json_api.ApiError("Bad JSON body")
else:
raise oz.json_api.ApiError("JSON body expected")
return self._decoded_body | Gets the JSON body of the request | Below is the the instruction that describes the task:
### Input:
Gets the JSON body of the request
### Response:
def body(self):
"""Gets the JSON body of the request"""
if self._decoded_body == None:
# Try to decode the JSON body. But raise an error if the
# content-type is unexpected, or the JSON is invalid.
raw_content_type = self.request.headers.get("content-type") or ""
content_type = raw_content_type.split(";")[0].strip().lower()
if content_type == "application/json":
try:
self._decoded_body = escape.json_decode(self.request.body)
except:
raise oz.json_api.ApiError("Bad JSON body")
else:
raise oz.json_api.ApiError("JSON body expected")
return self._decoded_body |
def argv2line(argv):
r"""Put together the given argument vector into a command line.
"argv" is the argument vector to process.
>>> from cmdln import argv2line
>>> argv2line(['foo'])
'foo'
>>> argv2line(['foo', 'bar'])
'foo bar'
>>> argv2line(['foo', 'bar baz'])
'foo "bar baz"'
>>> argv2line(['foo"bar'])
'foo"bar'
>>> print(argv2line(['foo" bar']))
'foo" bar'
>>> print(argv2line(["foo' bar"]))
"foo' bar"
>>> argv2line(["foo'bar"])
"foo'bar"
"""
escapedArgs = []
for arg in argv:
if ' ' in arg and '"' not in arg:
arg = '"' + arg + '"'
elif ' ' in arg and "'" not in arg:
arg = "'" + arg + "'"
elif ' ' in arg:
arg = arg.replace('"', r'\"')
arg = '"' + arg + '"'
escapedArgs.append(arg)
return ' '.join(escapedArgs) | r"""Put together the given argument vector into a command line.
"argv" is the argument vector to process.
>>> from cmdln import argv2line
>>> argv2line(['foo'])
'foo'
>>> argv2line(['foo', 'bar'])
'foo bar'
>>> argv2line(['foo', 'bar baz'])
'foo "bar baz"'
>>> argv2line(['foo"bar'])
'foo"bar'
>>> print(argv2line(['foo" bar']))
'foo" bar'
>>> print(argv2line(["foo' bar"]))
"foo' bar"
>>> argv2line(["foo'bar"])
"foo'bar" | Below is the the instruction that describes the task:
### Input:
r"""Put together the given argument vector into a command line.
"argv" is the argument vector to process.
>>> from cmdln import argv2line
>>> argv2line(['foo'])
'foo'
>>> argv2line(['foo', 'bar'])
'foo bar'
>>> argv2line(['foo', 'bar baz'])
'foo "bar baz"'
>>> argv2line(['foo"bar'])
'foo"bar'
>>> print(argv2line(['foo" bar']))
'foo" bar'
>>> print(argv2line(["foo' bar"]))
"foo' bar"
>>> argv2line(["foo'bar"])
"foo'bar"
### Response:
def argv2line(argv):
r"""Put together the given argument vector into a command line.
"argv" is the argument vector to process.
>>> from cmdln import argv2line
>>> argv2line(['foo'])
'foo'
>>> argv2line(['foo', 'bar'])
'foo bar'
>>> argv2line(['foo', 'bar baz'])
'foo "bar baz"'
>>> argv2line(['foo"bar'])
'foo"bar'
>>> print(argv2line(['foo" bar']))
'foo" bar'
>>> print(argv2line(["foo' bar"]))
"foo' bar"
>>> argv2line(["foo'bar"])
"foo'bar"
"""
escapedArgs = []
for arg in argv:
if ' ' in arg and '"' not in arg:
arg = '"' + arg + '"'
elif ' ' in arg and "'" not in arg:
arg = "'" + arg + "'"
elif ' ' in arg:
arg = arg.replace('"', r'\"')
arg = '"' + arg + '"'
escapedArgs.append(arg)
return ' '.join(escapedArgs) |
def read(self, n):
"""Read `n` chars from buffer"""
r = self.buf[self.offset:self.offset + n]
if isinstance(r, array):
r = r.tostring()
self.offset += n
return r | Read `n` chars from buffer | Below is the the instruction that describes the task:
### Input:
Read `n` chars from buffer
### Response:
def read(self, n):
"""Read `n` chars from buffer"""
r = self.buf[self.offset:self.offset + n]
if isinstance(r, array):
r = r.tostring()
self.offset += n
return r |
async def _get_entity_from_string(self, string):
"""
Gets a full entity from the given string, which may be a phone or
a username, and processes all the found entities on the session.
The string may also be a user link, or a channel/chat invite link.
This method has the side effect of adding the found users to the
session database, so it can be queried later without API calls,
if this option is enabled on the session.
Returns the found entity, or raises TypeError if not found.
"""
phone = utils.parse_phone(string)
if phone:
try:
for user in (await self(
functions.contacts.GetContactsRequest(0))).users:
if user.phone == phone:
return user
except errors.BotMethodInvalidError:
raise ValueError('Cannot get entity by phone number as a '
'bot (try using integer IDs, not strings)')
elif string.lower() in ('me', 'self'):
return await self.get_me()
else:
username, is_join_chat = utils.parse_username(string)
if is_join_chat:
invite = await self(
functions.messages.CheckChatInviteRequest(username))
if isinstance(invite, types.ChatInvite):
raise ValueError(
'Cannot get entity from a channel (or group) '
'that you are not part of. Join the group and retry'
)
elif isinstance(invite, types.ChatInviteAlready):
return invite.chat
elif username:
try:
result = await self(
functions.contacts.ResolveUsernameRequest(username))
except errors.UsernameNotOccupiedError as e:
raise ValueError('No user has "{}" as username'
.format(username)) from e
try:
pid = utils.get_peer_id(result.peer, add_mark=False)
if isinstance(result.peer, types.PeerUser):
return next(x for x in result.users if x.id == pid)
else:
return next(x for x in result.chats if x.id == pid)
except StopIteration:
pass
try:
# Nobody with this username, maybe it's an exact name/title
return await self.get_entity(
self.session.get_input_entity(string))
except ValueError:
pass
raise ValueError(
'Cannot find any entity corresponding to "{}"'.format(string)
) | Gets a full entity from the given string, which may be a phone or
a username, and processes all the found entities on the session.
The string may also be a user link, or a channel/chat invite link.
This method has the side effect of adding the found users to the
session database, so it can be queried later without API calls,
if this option is enabled on the session.
Returns the found entity, or raises TypeError if not found. | Below is the the instruction that describes the task:
### Input:
Gets a full entity from the given string, which may be a phone or
a username, and processes all the found entities on the session.
The string may also be a user link, or a channel/chat invite link.
This method has the side effect of adding the found users to the
session database, so it can be queried later without API calls,
if this option is enabled on the session.
Returns the found entity, or raises TypeError if not found.
### Response:
async def _get_entity_from_string(self, string):
"""
Gets a full entity from the given string, which may be a phone or
a username, and processes all the found entities on the session.
The string may also be a user link, or a channel/chat invite link.
This method has the side effect of adding the found users to the
session database, so it can be queried later without API calls,
if this option is enabled on the session.
Returns the found entity, or raises TypeError if not found.
"""
phone = utils.parse_phone(string)
if phone:
try:
for user in (await self(
functions.contacts.GetContactsRequest(0))).users:
if user.phone == phone:
return user
except errors.BotMethodInvalidError:
raise ValueError('Cannot get entity by phone number as a '
'bot (try using integer IDs, not strings)')
elif string.lower() in ('me', 'self'):
return await self.get_me()
else:
username, is_join_chat = utils.parse_username(string)
if is_join_chat:
invite = await self(
functions.messages.CheckChatInviteRequest(username))
if isinstance(invite, types.ChatInvite):
raise ValueError(
'Cannot get entity from a channel (or group) '
'that you are not part of. Join the group and retry'
)
elif isinstance(invite, types.ChatInviteAlready):
return invite.chat
elif username:
try:
result = await self(
functions.contacts.ResolveUsernameRequest(username))
except errors.UsernameNotOccupiedError as e:
raise ValueError('No user has "{}" as username'
.format(username)) from e
try:
pid = utils.get_peer_id(result.peer, add_mark=False)
if isinstance(result.peer, types.PeerUser):
return next(x for x in result.users if x.id == pid)
else:
return next(x for x in result.chats if x.id == pid)
except StopIteration:
pass
try:
# Nobody with this username, maybe it's an exact name/title
return await self.get_entity(
self.session.get_input_entity(string))
except ValueError:
pass
raise ValueError(
'Cannot find any entity corresponding to "{}"'.format(string)
) |
def set_scale(self):
"""generate the lookup which translates y-coordinate to fft-bin"""
f_min = float(self.lower_freq)
f_max = float(self.higher_freq)
y_min = f_min
y_max = f_max
for y in range(self.image_height):
freq = y_min + y / (self.image_height - 1.0) * (y_max - y_min)
fft_bin = freq / f_max * (self.fft_size / 2 + 1)
if fft_bin < self.fft_size / 2:
alpha = fft_bin - int(fft_bin)
self.y_to_bin.append((int(fft_bin), alpha * 255)) | generate the lookup which translates y-coordinate to fft-bin | Below is the the instruction that describes the task:
### Input:
generate the lookup which translates y-coordinate to fft-bin
### Response:
def set_scale(self):
"""generate the lookup which translates y-coordinate to fft-bin"""
f_min = float(self.lower_freq)
f_max = float(self.higher_freq)
y_min = f_min
y_max = f_max
for y in range(self.image_height):
freq = y_min + y / (self.image_height - 1.0) * (y_max - y_min)
fft_bin = freq / f_max * (self.fft_size / 2 + 1)
if fft_bin < self.fft_size / 2:
alpha = fft_bin - int(fft_bin)
self.y_to_bin.append((int(fft_bin), alpha * 255)) |
def _get_data_from_dataframe(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles Pandas DataFrames. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
rows = []
if count < 0:
count = len(source.index)
df_slice = source.reset_index(drop=True)[first_row:first_row + count]
for index, data_frame_row in df_slice.iterrows():
row = data_frame_row.to_dict()
for key in list(row.keys()):
val = row[key]
if isinstance(val, pandas.Timestamp):
row[key] = val.to_pydatetime()
rows.append({'c': [{'v': row[c]} if c in row else {} for c in fields]})
cols = _get_cols(fields, schema)
return {'cols': cols, 'rows': rows}, len(source) | Helper function for _get_data that handles Pandas DataFrames. | Below is the the instruction that describes the task:
### Input:
Helper function for _get_data that handles Pandas DataFrames.
### Response:
def _get_data_from_dataframe(source, fields='*', first_row=0, count=-1, schema=None):
""" Helper function for _get_data that handles Pandas DataFrames. """
if schema is None:
schema = google.datalab.bigquery.Schema.from_data(source)
fields = get_field_list(fields, schema)
rows = []
if count < 0:
count = len(source.index)
df_slice = source.reset_index(drop=True)[first_row:first_row + count]
for index, data_frame_row in df_slice.iterrows():
row = data_frame_row.to_dict()
for key in list(row.keys()):
val = row[key]
if isinstance(val, pandas.Timestamp):
row[key] = val.to_pydatetime()
rows.append({'c': [{'v': row[c]} if c in row else {} for c in fields]})
cols = _get_cols(fields, schema)
return {'cols': cols, 'rows': rows}, len(source) |
def _construct_message(self):
"""Build the message params."""
self.message["chat_id"] = self.chat_id
self.message["text"] = ""
if self.from_:
self.message["text"] += "From: " + self.from_ + "\n"
if self.subject:
self.message["text"] += "Subject: " + self.subject + "\n"
self.message["text"] += self.body
self.message.update(self.params) | Build the message params. | Below is the the instruction that describes the task:
### Input:
Build the message params.
### Response:
def _construct_message(self):
"""Build the message params."""
self.message["chat_id"] = self.chat_id
self.message["text"] = ""
if self.from_:
self.message["text"] += "From: " + self.from_ + "\n"
if self.subject:
self.message["text"] += "Subject: " + self.subject + "\n"
self.message["text"] += self.body
self.message.update(self.params) |
def kdf_hmac(self):
"""
Returns the HMAC algorithm to use with the KDF.
:return:
A unicode string of one of the following: "md2", "md5", "sha1",
"sha224", "sha256", "sha384", "sha512"
"""
encryption_algo = self['algorithm'].native
if encryption_algo == 'pbes2':
return self['parameters']['key_derivation_func']['parameters']['prf']['algorithm'].native
if encryption_algo.find('.') == -1:
if encryption_algo.find('_') != -1:
_, hmac_algo, _ = encryption_algo.split('_', 2)
return hmac_algo
raise ValueError(unwrap(
'''
Encryption algorithm "%s" does not have a registered key
derivation function
''',
encryption_algo
))
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s", can not determine key
derivation hmac algorithm
''',
encryption_algo
)) | Returns the HMAC algorithm to use with the KDF.
:return:
A unicode string of one of the following: "md2", "md5", "sha1",
"sha224", "sha256", "sha384", "sha512" | Below is the the instruction that describes the task:
### Input:
Returns the HMAC algorithm to use with the KDF.
:return:
A unicode string of one of the following: "md2", "md5", "sha1",
"sha224", "sha256", "sha384", "sha512"
### Response:
def kdf_hmac(self):
"""
Returns the HMAC algorithm to use with the KDF.
:return:
A unicode string of one of the following: "md2", "md5", "sha1",
"sha224", "sha256", "sha384", "sha512"
"""
encryption_algo = self['algorithm'].native
if encryption_algo == 'pbes2':
return self['parameters']['key_derivation_func']['parameters']['prf']['algorithm'].native
if encryption_algo.find('.') == -1:
if encryption_algo.find('_') != -1:
_, hmac_algo, _ = encryption_algo.split('_', 2)
return hmac_algo
raise ValueError(unwrap(
'''
Encryption algorithm "%s" does not have a registered key
derivation function
''',
encryption_algo
))
raise ValueError(unwrap(
'''
Unrecognized encryption algorithm "%s", can not determine key
derivation hmac algorithm
''',
encryption_algo
)) |
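For the non-OID branch above, the HMAC name is read straight out of the algorithm identifier string; a quick standalone illustration (the identifier value is chosen for illustration, not taken from a real structure):

encryption_algo = 'pbes1_md5_des'
_, hmac_algo, _ = encryption_algo.split('_', 2)
print(hmac_algo)  # -> 'md5'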
def get_best_single_experiments(nets,expvars):
'''
returns the experiments as a``TermSet`` object [instance].
'''
netsf = nets.to_file()
expvarsf = expvars.to_file()
i = 1 #single experiment
num_exp = String2TermSet('pexperiment('+str(i)+')')
num_expf = num_exp.to_file()
prg = [ netsf, expvarsf, num_expf, find_best_exp_sets_prg ,
elem_path_prg ]
coptions = '--project --opt-mode=optN --opt-strategy=0 --opt-heuristic'
solver = GringoClasp(clasp_options=coptions)
solutions = solver.run(prg,collapseTerms=True,collapseAtoms=False)
os.unlink(num_expf)
os.unlink(netsf)
os.unlink(expvarsf)
return solutions | returns the experiments as a``TermSet`` object [instance]. | Below is the instruction that describes the task:
### Input:
returns the experiments as a``TermSet`` object [instance].
### Response:
def get_best_single_experiments(nets,expvars):
'''
returns the experiments as a``TermSet`` object [instance].
'''
netsf = nets.to_file()
expvarsf = expvars.to_file()
i = 1 #single experiment
num_exp = String2TermSet('pexperiment('+str(i)+')')
num_expf = num_exp.to_file()
prg = [ netsf, expvarsf, num_expf, find_best_exp_sets_prg ,
elem_path_prg ]
coptions = '--project --opt-mode=optN --opt-strategy=0 --opt-heuristic'
solver = GringoClasp(clasp_options=coptions)
solutions = solver.run(prg,collapseTerms=True,collapseAtoms=False)
os.unlink(num_expf)
os.unlink(netsf)
os.unlink(expvarsf)
return solutions |
def unwrap(klass, value):
"""Unpack a Value into an augmented python type (selected from the 'value' field)
"""
assert isinstance(value, Value), value
V = value.value
try:
T = klass.typeMap[type(V)]
except KeyError:
raise ValueError("Can't unwrap value of type %s" % type(V))
try:
return T(value.value)._store(value)
except Exception as e:
raise ValueError("Can't construct %s around %s (%s): %s" % (T, value, type(value), e)) | Unpack a Value into an augmented python type (selected from the 'value' field) | Below is the the instruction that describes the task:
### Input:
Unpack a Value into an augmented python type (selected from the 'value' field)
### Response:
def unwrap(klass, value):
"""Unpack a Value into an augmented python type (selected from the 'value' field)
"""
assert isinstance(value, Value), value
V = value.value
try:
T = klass.typeMap[type(V)]
except KeyError:
raise ValueError("Can't unwrap value of type %s" % type(V))
try:
return T(value.value)._store(value)
except Exception as e:
raise ValueError("Can't construct %s around %s (%s): %s" % (T, value, type(value), e)) |
def finalize(self):
"""
Add title and modify axes to make the image ready for display.
"""
self.set_title(
'{} Manifold (fit in {:0.2f} seconds)'.format(
self._name, self.fit_time_.interval
)
)
self.ax.set_xticklabels([])
self.ax.set_yticklabels([])
if self._target_color_type == DISCRETE:
# Add the legend
manual_legend(self, self.classes_, self._colors, frameon=True)
elif self._target_color_type == CONTINUOUS:
# Add the color bar
plt.colorbar(self._scatter, ax=self.ax) | Add title and modify axes to make the image ready for display. | Below is the instruction that describes the task:
### Input:
Add title and modify axes to make the image ready for display.
### Response:
def finalize(self):
"""
Add title and modify axes to make the image ready for display.
"""
self.set_title(
'{} Manifold (fit in {:0.2f} seconds)'.format(
self._name, self.fit_time_.interval
)
)
self.ax.set_xticklabels([])
self.ax.set_yticklabels([])
if self._target_color_type == DISCRETE:
# Add the legend
manual_legend(self, self.classes_, self._colors, frameon=True)
elif self._target_color_type == CONTINUOUS:
# Add the color bar
plt.colorbar(self._scatter, ax=self.ax) |
def get_fw_version(self):
'''
Queries Smoothieware for it's build version, and returns
the parsed response.
returns: str
Current version of attached Smoothi-driver. Versions are derived
from git branch-hash (eg: edge-66ec883NOMSD)
Example Smoothieware response:
Build version: edge-66ec883NOMSD, Build date: Jan 28 2018 15:26:57, MCU: LPC1769, System Clock: 120MHz # NOQA
CNC Build NOMSD Build
6 axis
'''
version = 'Virtual Smoothie'
if not self.simulating:
version = self._send_command('version')
version = version.split(',')[0].split(':')[-1].strip()
version = version.replace('NOMSD', '')
return version | Queries Smoothieware for it's build version, and returns
the parsed response.
returns: str
Current version of attached Smoothi-driver. Versions are derived
from git branch-hash (eg: edge-66ec883NOMSD)
Example Smoothieware response:
Build version: edge-66ec883NOMSD, Build date: Jan 28 2018 15:26:57, MCU: LPC1769, System Clock: 120MHz # NOQA
CNC Build NOMSD Build
6 axis | Below is the instruction that describes the task:
### Input:
Queries Smoothieware for it's build version, and returns
the parsed response.
returns: str
Current version of attached Smoothi-driver. Versions are derived
from git branch-hash (eg: edge-66ec883NOMSD)
Example Smoothieware response:
Build version: edge-66ec883NOMSD, Build date: Jan 28 2018 15:26:57, MCU: LPC1769, System Clock: 120MHz # NOQA
CNC Build NOMSD Build
6 axis
### Response:
def get_fw_version(self):
'''
Queries Smoothieware for it's build version, and returns
the parsed response.
returns: str
Current version of attached Smoothi-driver. Versions are derived
from git branch-hash (eg: edge-66ec883NOMSD)
Example Smoothieware response:
Build version: edge-66ec883NOMSD, Build date: Jan 28 2018 15:26:57, MCU: LPC1769, System Clock: 120MHz # NOQA
CNC Build NOMSD Build
6 axis
'''
version = 'Virtual Smoothie'
if not self.simulating:
version = self._send_command('version')
version = version.split(',')[0].split(':')[-1].strip()
version = version.replace('NOMSD', '')
return version |
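A worked example of the parsing above, applied to the response string quoted in the docstring (no serial connection involved):

raw = ("Build version: edge-66ec883NOMSD, Build date: Jan 28 2018 15:26:57, "
       "MCU: LPC1769, System Clock: 120MHz")
version = raw.split(',')[0].split(':')[-1].strip()  # 'edge-66ec883NOMSD'
version = version.replace('NOMSD', '')              # 'edge-66ec883'
print(version)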
def get_queryset(self):
""" Returns the list of items for this view. """
# Determines the forums that can be accessed by the current user
forums = self.request.forum_permission_handler.get_readable_forums(
Forum.objects.all(), self.request.user,
)
# Returns the posts submitted by the considered user.
return (
Post.objects
.select_related('topic', 'topic__forum', 'poster')
.prefetch_related('poster__forum_profile')
.filter(poster=self.poster, topic__forum__in=forums)
.order_by('-created')
) | Returns the list of items for this view. | Below is the instruction that describes the task:
### Input:
Returns the list of items for this view.
### Response:
def get_queryset(self):
""" Returns the list of items for this view. """
# Determines the forums that can be accessed by the current user
forums = self.request.forum_permission_handler.get_readable_forums(
Forum.objects.all(), self.request.user,
)
# Returns the posts submitted by the considered user.
return (
Post.objects
.select_related('topic', 'topic__forum', 'poster')
.prefetch_related('poster__forum_profile')
.filter(poster=self.poster, topic__forum__in=forums)
.order_by('-created')
) |
def get_handler_fp(logger):
"""
Get handler_fp.
This method is integrated to LoggerFactory Object in the future.
:param logging.Logger logger: Python logging.Logger. logger instance.
:rtype: logging.Logger.handlers.BaseRotatingHandler
:return: Handler or Handler's stream. We call it `handler_fp`.
"""
if not hasattr(logger, 'handlers'):
raise blackbird.utils.error.BlackbirdError(
'Given logger is not logging.Logger instance!'
)
if len(logger.handlers) != 1:
raise blackbird.utils.error.BlackbirdError(
'Given logger has invalid handlers.'
)
if hasattr(logger.handlers[0], 'stream'):
return logger.handlers[0].stream
# case of setting SysLogHandler to logger.handlers[0]
return logger.handlers[0] | Get handler_fp.
This method is integrated to LoggerFactory Object in the future.
:param logging.Logger logger: Python logging.Logger. logger instance.
:rtype: logging.Logger.handlers.BaseRotatingHandler
:return: Handler or Handler's stream. We call it `handler_fp`. | Below is the instruction that describes the task:
### Input:
Get handler_fp.
This method is integrated to LoggerFactory Object in the future.
:param logging.Logger logger: Python logging.Logger. logger instance.
:rtype: logging.Logger.handlers.BaseRotatingHandler
:return: Handler or Handler's stream. We call it `handler_fp`.
### Response:
def get_handler_fp(logger):
"""
Get handler_fp.
This method is integrated to LoggerFactory Object in the future.
:param logging.Logger logger: Python logging.Logger. logger instance.
:rtype: logging.Logger.handlers.BaseRotatingHandler
:return: Handler or Handler's stream. We call it `handler_fp`.
"""
if not hasattr(logger, 'handlers'):
raise blackbird.utils.error.BlackbirdError(
'Given logger is not logging.Logger instance!'
)
if len(logger.handlers) != 1:
raise blackbird.utils.error.BlackbirdError(
'Given logger has invalid handlers.'
)
if hasattr(logger.handlers[0], 'stream'):
return logger.handlers[0].stream
# case of setting SysLogHandler to logger.handlers[0]
return logger.handlers[0] |
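A hedged usage sketch, assuming the helper above is importable: a logger carrying exactly one StreamHandler yields that handler's underlying stream.

import logging
import sys

logger = logging.getLogger('blackbird-demo')          # name invented for the example
logger.addHandler(logging.StreamHandler(sys.stderr))  # exactly one handler
fp = get_handler_fp(logger)                           # -> sys.stderr (the handler's .stream)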
def _handle_put(self, request):
# type: (Put) -> CallbackResponses
"""Called with the lock taken"""
attribute_name = request.path[1]
attribute = self._block[attribute_name]
assert isinstance(attribute, AttributeModel), \
"Cannot Put to %s which is a %s" % (attribute.path, type(attribute))
self.check_field_writeable(attribute)
put_function = self.get_put_function(attribute_name)
value = attribute.meta.validate(request.value)
with self.lock_released:
result = put_function(value)
if request.get and result is None:
# We asked for a Get, and didn't get given a return, so do return
# the current value. Don't serialize here as value is immutable
# (as long as we don't try too hard to break the rules)
result = self._block[attribute_name].value
elif not request.get:
# We didn't ask for a Get, so throw result away
result = None
ret = [request.return_response(result)]
return ret | Called with the lock taken | Below is the instruction that describes the task:
### Input:
Called with the lock taken
### Response:
def _handle_put(self, request):
# type: (Put) -> CallbackResponses
"""Called with the lock taken"""
attribute_name = request.path[1]
attribute = self._block[attribute_name]
assert isinstance(attribute, AttributeModel), \
"Cannot Put to %s which is a %s" % (attribute.path, type(attribute))
self.check_field_writeable(attribute)
put_function = self.get_put_function(attribute_name)
value = attribute.meta.validate(request.value)
with self.lock_released:
result = put_function(value)
if request.get and result is None:
# We asked for a Get, and didn't get given a return, so do return
# the current value. Don't serialize here as value is immutable
# (as long as we don't try too hard to break the rules)
result = self._block[attribute_name].value
elif not request.get:
# We didn't ask for a Get, so throw result away
result = None
ret = [request.return_response(result)]
return ret |
def nextstate(self, newstate, treenode=None, user_data=None):
"""
Manage transition of state.
"""
if newstate is None:
return self
if isinstance(newstate, State) and id(newstate) != id(self):
return newstate
elif isinstance(newstate, StateEvent):
self.state_register.named_events[newstate.name] = True
return newstate.st
elif isinstance(newstate, StatePrecond):
return newstate.st
elif isinstance(newstate, StateHook):
# final API using PSL
newstate.call(treenode, user_data)
return newstate.st
return self | Manage transition of state. | Below is the instruction that describes the task:
### Input:
Manage transition of state.
### Response:
def nextstate(self, newstate, treenode=None, user_data=None):
"""
Manage transition of state.
"""
if newstate is None:
return self
if isinstance(newstate, State) and id(newstate) != id(self):
return newstate
elif isinstance(newstate, StateEvent):
self.state_register.named_events[newstate.name] = True
return newstate.st
elif isinstance(newstate, StatePrecond):
return newstate.st
elif isinstance(newstate, StateHook):
# final API using PSL
newstate.call(treenode, user_data)
return newstate.st
return self |
def softmax(attrs, inputs, proto_obj):
"""Softmax function."""
if 'axis' not in attrs:
attrs = translation_utils._add_extra_attributes(attrs, {'axis': 1})
return 'softmax', attrs, inputs | Softmax function. | Below is the instruction that describes the task:
### Input:
Softmax function.
### Response:
def softmax(attrs, inputs, proto_obj):
"""Softmax function."""
if 'axis' not in attrs:
attrs = translation_utils._add_extra_attributes(attrs, {'axis': 1})
return 'softmax', attrs, inputs |
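An illustrative call, assuming the surrounding translation_utils module is importable: an ONNX Softmax node that carries no explicit axis is mapped to MXNet's 'softmax' operator with axis defaulted to 1 (the input name is invented).

op, out_attrs, inputs = softmax({}, ['data'], proto_obj=None)
# op == 'softmax', out_attrs == {'axis': 1}, inputs == ['data']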
def schedule_start(self) -> bool:
"""Schedule the task: check first if the task can start:
1. we check that the task is still in the CREATED state.
2. we check that the upstream dependency is met.
3. we check that pipeline can start a new task;
i.e. we check the concurrency of the pipeline.
4. we check that operation can start a new instance;
i.e. we check the concurrency of the operation.
-> If all checks pass we schedule the task start it.
-> 1. If the operation is not in created status, nothing to do.
-> 2. If the upstream dependency check is not met, two use cases need to be validated:
* The upstream dependency is not met but could be met in the future,
because some ops are still CREATED/SCHEDULED/RUNNING/...
in this case nothing need to be done, every time an upstream operation finishes,
it will notify all the downstream ops including this one.
* The upstream dependency is not met and could not be met at all.
In this case we need to mark the task with `UPSTREAM_FAILED`.
-> 3. If the pipeline has reached it's concurrency limit,
we just delay schedule based on the interval/time delay defined by the user.
The pipeline scheduler will keep checking until the task can be scheduled or stopped.
-> 4. If the operation has reached it's concurrency limit,
Same as above we keep trying based on an interval defined by the user.
Returns:
boolean: Whether to try to schedule this operation run in the future or not.
"""
if self.last_status != self.STATUSES.CREATED:
return False
upstream_trigger_check = self.check_upstream_trigger()
if not upstream_trigger_check and self.is_upstream_done:
# This task cannot be scheduled anymore
self.on_upstream_failed()
return False
if not self.pipeline_run.check_concurrency():
return True
if not self.check_concurrency():
return True
self.on_scheduled()
self.start()
return False | Schedule the task: check first if the task can start:
1. we check that the task is still in the CREATED state.
2. we check that the upstream dependency is met.
3. we check that pipeline can start a new task;
i.e. we check the concurrency of the pipeline.
4. we check that operation can start a new instance;
i.e. we check the concurrency of the operation.
-> If all checks pass we schedule the task start it.
-> 1. If the operation is not in created status, nothing to do.
-> 2. If the upstream dependency check is not met, two use cases need to be validated:
* The upstream dependency is not met but could be met in the future,
because some ops are still CREATED/SCHEDULED/RUNNING/...
in this case nothing need to be done, every time an upstream operation finishes,
it will notify all the downstream ops including this one.
* The upstream dependency is not met and could not be met at all.
In this case we need to mark the task with `UPSTREAM_FAILED`.
-> 3. If the pipeline has reached it's concurrency limit,
we just delay schedule based on the interval/time delay defined by the user.
The pipeline scheduler will keep checking until the task can be scheduled or stopped.
-> 4. If the operation has reached it's concurrency limit,
Same as above we keep trying based on an interval defined by the user.
Returns:
boolean: Whether to try to schedule this operation run in the future or not. | Below is the instruction that describes the task:
### Input:
Schedule the task: check first if the task can start:
1. we check that the task is still in the CREATED state.
2. we check that the upstream dependency is met.
3. we check that pipeline can start a new task;
i.e. we check the concurrency of the pipeline.
4. we check that operation can start a new instance;
i.e. we check the concurrency of the operation.
-> If all checks pass we schedule the task start it.
-> 1. If the operation is not in created status, nothing to do.
-> 2. If the upstream dependency check is not met, two use cases need to be validated:
* The upstream dependency is not met but could be met in the future,
because some ops are still CREATED/SCHEDULED/RUNNING/...
in this case nothing need to be done, every time an upstream operation finishes,
it will notify all the downstream ops including this one.
* The upstream dependency is not met and could not be met at all.
In this case we need to mark the task with `UPSTREAM_FAILED`.
-> 3. If the pipeline has reached it's concurrency limit,
we just delay schedule based on the interval/time delay defined by the user.
The pipeline scheduler will keep checking until the task can be scheduled or stopped.
-> 4. If the operation has reached it's concurrency limit,
Same as above we keep trying based on an interval defined by the user.
Returns:
boolean: Whether to try to schedule this operation run in the future or not.
### Response:
def schedule_start(self) -> bool:
"""Schedule the task: check first if the task can start:
1. we check that the task is still in the CREATED state.
2. we check that the upstream dependency is met.
3. we check that pipeline can start a new task;
i.e. we check the concurrency of the pipeline.
4. we check that operation can start a new instance;
i.e. we check the concurrency of the operation.
-> If all checks pass we schedule the task start it.
-> 1. If the operation is not in created status, nothing to do.
-> 2. If the upstream dependency check is not met, two use cases need to be validated:
* The upstream dependency is not met but could be met in the future,
because some ops are still CREATED/SCHEDULED/RUNNING/...
in this case nothing need to be done, every time an upstream operation finishes,
it will notify all the downstream ops including this one.
* The upstream dependency is not met and could not be met at all.
In this case we need to mark the task with `UPSTREAM_FAILED`.
-> 3. If the pipeline has reached it's concurrency limit,
we just delay schedule based on the interval/time delay defined by the user.
The pipeline scheduler will keep checking until the task can be scheduled or stopped.
-> 4. If the operation has reached it's concurrency limit,
Same as above we keep trying based on an interval defined by the user.
Returns:
boolean: Whether to try to schedule this operation run in the future or not.
"""
if self.last_status != self.STATUSES.CREATED:
return False
upstream_trigger_check = self.check_upstream_trigger()
if not upstream_trigger_check and self.is_upstream_done:
# This task cannot be scheduled anymore
self.on_upstream_failed()
return False
if not self.pipeline_run.check_concurrency():
return True
if not self.check_concurrency():
return True
self.on_scheduled()
self.start()
return False |
def get_estimator(output_dir, train_config, args):
"""Returns a tf learn estimator.
We only support {DNN, Linear}Regressor and {DNN, Linear}Classifier. This is
controlled by the values of model_type in the args.
Args:
output_dir: Modes are saved into outputdir/train
train_config: our training config
args: command line parameters
Returns:
TF lean estimator
Raises:
ValueError: if config is wrong.
"""
# Check the requested mode fits the preprocessed data.
target_name = train_config['target_column']
if is_classification_model(args.model_type) and target_name not in \
train_config['categorical_columns']:
raise ValueError('When using a classification model, the target must be a '
'categorical variable.')
if is_regression_model(args.model_type) and target_name not in \
train_config['numerical_columns']:
raise ValueError('When using a regression model, the target must be a '
'numerical variable.')
# Check layers used for dnn models.
if is_dnn_model(args.model_type) and not args.layer_sizes:
raise ValueError('--layer-size* must be used with DNN models')
if is_linear_model(args.model_type) and args.layer_sizes:
raise ValueError('--layer-size* cannot be used with linear models')
# Build tf.learn features
feature_columns = _tflearn_features(train_config, args)
# Set how often to run checkpointing in terms of time.
config = tf.contrib.learn.RunConfig(
save_checkpoints_secs=args.save_checkpoints_secs)
train_dir = os.path.join(output_dir, 'train')
if args.model_type == 'dnn_regression':
estimator = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=args.layer_sizes,
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'linear_regression':
estimator = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'dnn_classification':
estimator = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=args.layer_sizes,
n_classes=train_config['vocab_stats'][target_name]['n_classes'],
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'linear_classification':
estimator = tf.contrib.learn.LinearClassifier(
feature_columns=feature_columns,
n_classes=train_config['vocab_stats'][target_name]['n_classes'],
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
else:
raise ValueError('bad --model-type value')
return estimator | Returns a tf learn estimator.
We only support {DNN, Linear}Regressor and {DNN, Linear}Classifier. This is
controlled by the values of model_type in the args.
Args:
output_dir: Modes are saved into outputdir/train
train_config: our training config
args: command line parameters
Returns:
TF lean estimator
Raises:
ValueError: if config is wrong. | Below is the instruction that describes the task:
### Input:
Returns a tf learn estimator.
We only support {DNN, Linear}Regressor and {DNN, Linear}Classifier. This is
controlled by the values of model_type in the args.
Args:
output_dir: Modes are saved into outputdir/train
train_config: our training config
args: command line parameters
Returns:
TF lean estimator
Raises:
ValueError: if config is wrong.
### Response:
def get_estimator(output_dir, train_config, args):
"""Returns a tf learn estimator.
We only support {DNN, Linear}Regressor and {DNN, Linear}Classifier. This is
controlled by the values of model_type in the args.
Args:
output_dir: Modes are saved into outputdir/train
train_config: our training config
args: command line parameters
Returns:
TF lean estimator
Raises:
ValueError: if config is wrong.
"""
# Check the requested mode fits the preprocessed data.
target_name = train_config['target_column']
if is_classification_model(args.model_type) and target_name not in \
train_config['categorical_columns']:
raise ValueError('When using a classification model, the target must be a '
'categorical variable.')
if is_regression_model(args.model_type) and target_name not in \
train_config['numerical_columns']:
raise ValueError('When using a regression model, the target must be a '
'numerical variable.')
# Check layers used for dnn models.
if is_dnn_model(args.model_type) and not args.layer_sizes:
raise ValueError('--layer-size* must be used with DNN models')
if is_linear_model(args.model_type) and args.layer_sizes:
raise ValueError('--layer-size* cannot be used with linear models')
# Build tf.learn features
feature_columns = _tflearn_features(train_config, args)
# Set how often to run checkpointing in terms of time.
config = tf.contrib.learn.RunConfig(
save_checkpoints_secs=args.save_checkpoints_secs)
train_dir = os.path.join(output_dir, 'train')
if args.model_type == 'dnn_regression':
estimator = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns,
hidden_units=args.layer_sizes,
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'linear_regression':
estimator = tf.contrib.learn.LinearRegressor(
feature_columns=feature_columns,
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'dnn_classification':
estimator = tf.contrib.learn.DNNClassifier(
feature_columns=feature_columns,
hidden_units=args.layer_sizes,
n_classes=train_config['vocab_stats'][target_name]['n_classes'],
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
elif args.model_type == 'linear_classification':
estimator = tf.contrib.learn.LinearClassifier(
feature_columns=feature_columns,
n_classes=train_config['vocab_stats'][target_name]['n_classes'],
config=config,
model_dir=train_dir,
optimizer=tf.train.AdamOptimizer(
args.learning_rate, epsilon=args.epsilon))
else:
raise ValueError('bad --model-type value')
return estimator |
def tokenize(self, s):
"""Splits a string into tokens."""
s = tf.compat.as_text(s)
if self.reserved_tokens:
# First split out the reserved tokens
substrs = self._reserved_tokens_re.split(s)
else:
substrs = [s]
toks = []
for substr in substrs:
if substr in self.reserved_tokens:
toks.append(substr)
else:
toks.extend(self._alphanum_re.split(substr))
# Filter out empty strings
toks = [t for t in toks if t]
return toks | Splits a string into tokens. | Below is the instruction that describes the task:
### Input:
Splits a string into tokens.
### Response:
def tokenize(self, s):
"""Splits a string into tokens."""
s = tf.compat.as_text(s)
if self.reserved_tokens:
# First split out the reserved tokens
substrs = self._reserved_tokens_re.split(s)
else:
substrs = [s]
toks = []
for substr in substrs:
if substr in self.reserved_tokens:
toks.append(substr)
else:
toks.extend(self._alphanum_re.split(substr))
# Filter out empty strings
toks = [t for t in toks if t]
return toks |
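A hedged usage sketch (the constructor signature and the exact delimiter handling of _alphanum_re are assumptions, since neither is shown above): reserved tokens are split out first and survive intact, then each remaining substring is split on the alphanumeric pattern and empty strings are dropped.

tokenizer = Tokenizer(reserved_tokens=['<EOS>'])
toks = tokenizer.tokenize('hello world<EOS>')
# toks contains 'hello', 'world' and the intact '<EOS>' token; whether the
# separating whitespace/punctuation runs are also kept depends on how
# _alphanum_re was built.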
def _get_art_abs(story_file):
"""Get abstract (highlights) and article from a story file path."""
# Based on https://github.com/abisee/cnn-dailymail/blob/master/
# make_datafiles.py
lines = _read_text_file(story_file)
# Lowercase everything
lines = [line.lower() for line in lines]
# Put periods on the ends of lines that are missing them
# (this is a problem in the dataset because many image captions don't end in
# periods; consequently they end up in the body of the article as run-on
# sentences)
def fix_missing_period(line):
"""Adds a period to a line that is missing a period."""
if '@highlight' in line: return line
if not line: return line
if line[-1] in END_TOKENS: return line
return line + ' .'
lines = [fix_missing_period(line) for line in lines]
# Separate out article and abstract sentences
article_lines = []
highlights = []
next_is_highlight = False
for line in lines:
if not line:
continue # empty line
elif line.startswith('@highlight'):
next_is_highlight = True
elif next_is_highlight:
highlights.append(line)
else:
article_lines.append(line)
# Make article into a single string
article = ' '.join(article_lines)
# Make abstract into a single string, putting <s> and </s> tags around
# the sentences.
abstract = ' '.join(['%s %s %s' % (SENTENCE_START, sent,
SENTENCE_END) for sent in highlights])
return article, abstract | Get abstract (highlights) and article from a story file path. | Below is the instruction that describes the task:
### Input:
Get abstract (highlights) and article from a story file path.
### Response:
def _get_art_abs(story_file):
"""Get abstract (highlights) and article from a story file path."""
# Based on https://github.com/abisee/cnn-dailymail/blob/master/
# make_datafiles.py
lines = _read_text_file(story_file)
# Lowercase everything
lines = [line.lower() for line in lines]
# Put periods on the ends of lines that are missing them
# (this is a problem in the dataset because many image captions don't end in
# periods; consequently they end up in the body of the article as run-on
# sentences)
def fix_missing_period(line):
"""Adds a period to a line that is missing a period."""
if '@highlight' in line: return line
if not line: return line
if line[-1] in END_TOKENS: return line
return line + ' .'
lines = [fix_missing_period(line) for line in lines]
# Separate out article and abstract sentences
article_lines = []
highlights = []
next_is_highlight = False
for line in lines:
if not line:
continue # empty line
elif line.startswith('@highlight'):
next_is_highlight = True
elif next_is_highlight:
highlights.append(line)
else:
article_lines.append(line)
# Make article into a single string
article = ' '.join(article_lines)
# Make abstract into a single string, putting <s> and </s> tags around
# the sentences.
abstract = ' '.join(['%s %s %s' % (SENTENCE_START, sent,
SENTENCE_END) for sent in highlights])
return article, abstract |
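A quick standalone check of the period rule used by fix_missing_period above (END_TOKENS is assumed here to be the usual set of sentence-final characters; the real module defines it elsewhere):

END_TOKENS = ['.', '!', '?', '...', "'", '`', '"', ')']  # assumed value
line = 'storm makes landfall'
if line and '@highlight' not in line and line[-1] not in END_TOKENS:
    line = line + ' .'
print(line)  # -> 'storm makes landfall .'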
def reject_record(self, record):
"""Reject a record for inclusion in the community.
:param record: Record object.
"""
with db.session.begin_nested():
req = InclusionRequest.get(self.id, record.id)
if req is None:
raise InclusionRequestMissingError(community=self,
record=record)
req.delete() | Reject a record for inclusion in the community.
:param record: Record object. | Below is the instruction that describes the task:
### Input:
Reject a record for inclusion in the community.
:param record: Record object.
### Response:
def reject_record(self, record):
"""Reject a record for inclusion in the community.
:param record: Record object.
"""
with db.session.begin_nested():
req = InclusionRequest.get(self.id, record.id)
if req is None:
raise InclusionRequestMissingError(community=self,
record=record)
req.delete() |
def get_stp_mst_detail_output_cist_cist_bridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
cist = ET.SubElement(output, "cist")
cist_bridge_id = ET.SubElement(cist, "cist-bridge-id")
cist_bridge_id.text = kwargs.pop('cist_bridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_stp_mst_detail_output_cist_cist_bridge_id(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
cist = ET.SubElement(output, "cist")
cist_bridge_id = ET.SubElement(cist, "cist-bridge-id")
cist_bridge_id.text = kwargs.pop('cist_bridge_id')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def update(self, *args, **kwargs):
"""See `__setitem__`."""
super(TAG_Compound, self).update(*args, **kwargs)
for key, item in self.items():
if item.name is None:
item.name = key | See `__setitem__`. | Below is the instruction that describes the task:
### Input:
See `__setitem__`.
### Response:
def update(self, *args, **kwargs):
"""See `__setitem__`."""
super(TAG_Compound, self).update(*args, **kwargs)
for key, item in self.items():
if item.name is None:
item.name = key |
def execute_lite(self, instructions, context=None):
"""Execute a list of instructions. It does not support loops.
"""
if context:
self.__cpu.registers = dict(context)
for instr in instructions:
self.__execute_one(instr)
return dict(self.__cpu.registers), self.__mem | Execute a list of instructions. It does not support loops. | Below is the instruction that describes the task:
### Input:
Execute a list of instructions. It does not support loops.
### Response:
def execute_lite(self, instructions, context=None):
"""Execute a list of instructions. It does not support loops.
"""
if context:
self.__cpu.registers = dict(context)
for instr in instructions:
self.__execute_one(instr)
return dict(self.__cpu.registers), self.__mem |
def on_episode_end(self, episode, logs):
""" Compute and print metrics at the end of each episode """
duration = timeit.default_timer() - self.starts[episode]
metrics = self.metrics[episode]
if np.isnan(metrics).all():
mean_metrics = np.array([np.nan for _ in self.metrics_names])
else:
mean_metrics = np.nanmean(metrics, axis=0)
assert len(mean_metrics) == len(self.metrics_names)
data = list(zip(self.metrics_names, mean_metrics))
data += list(logs.items())
data += [('episode', episode), ('duration', duration)]
for key, value in data:
if key not in self.data:
self.data[key] = []
self.data[key].append(value)
if self.interval is not None and episode % self.interval == 0:
self.save_data()
# Clean up.
del self.metrics[episode]
del self.starts[episode] | Compute and print metrics at the end of each episode | Below is the instruction that describes the task:
### Input:
Compute and print metrics at the end of each episode
### Response:
def on_episode_end(self, episode, logs):
""" Compute and print metrics at the end of each episode """
duration = timeit.default_timer() - self.starts[episode]
metrics = self.metrics[episode]
if np.isnan(metrics).all():
mean_metrics = np.array([np.nan for _ in self.metrics_names])
else:
mean_metrics = np.nanmean(metrics, axis=0)
assert len(mean_metrics) == len(self.metrics_names)
data = list(zip(self.metrics_names, mean_metrics))
data += list(logs.items())
data += [('episode', episode), ('duration', duration)]
for key, value in data:
if key not in self.data:
self.data[key] = []
self.data[key].append(value)
if self.interval is not None and episode % self.interval == 0:
self.save_data()
# Clean up.
del self.metrics[episode]
del self.starts[episode] |
def concatenate(vars, axis=-1):
"""
A utility function of concatenate.
"""
from deepy.core.neural_var import NeuralVariable
if isinstance(vars[0], NeuralVariable):
concat_var = Concatenate(axis=axis).compute(*vars)
if axis == -1 or axis == vars[0].tensor.ndim - 1:
concat_var.output_dim = sum([x.output_dim for x in vars], 0)
else:
concat_var = TT.concatenate(vars, axis)
return concat_var | A utility function of concatenate. | Below is the instruction that describes the task:
### Input:
A utility function of concatenate.
### Response:
def concatenate(vars, axis=-1):
"""
A utility function of concatenate.
"""
from deepy.core.neural_var import NeuralVariable
if isinstance(vars[0], NeuralVariable):
concat_var = Concatenate(axis=axis).compute(*vars)
if axis == -1 or axis == vars[0].tensor.ndim - 1:
concat_var.output_dim = sum([x.output_dim for x in vars], 0)
else:
concat_var = TT.concatenate(vars, axis)
return concat_var |
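A hedged sketch of the fallback path, assuming Theano and deepy are installed: plain Theano variables skip the NeuralVariable branch and go straight to TT.concatenate.

import theano.tensor as TT

a = TT.matrix('a')
b = TT.matrix('b')
c = concatenate([a, b], axis=1)  # equivalent to TT.concatenate([a, b], 1)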
def start(self):
'''
Start the actual proxy minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
'''
super(ProxyMinion, self).start()
try:
if check_user(self.config['user']):
self.action_log_info('The Proxy Minion is starting up')
self.verify_hash_type()
self.minion.tune_in()
if self.minion.restart:
raise SaltClientError('Proxy Minion could not connect to Master')
except (KeyboardInterrupt, SaltSystemExit) as exc:
self.action_log_info('Proxy Minion Stopping')
if isinstance(exc, KeyboardInterrupt):
log.warning('Exiting on Ctrl-c')
self.shutdown()
else:
log.error(exc)
self.shutdown(exc.code) | Start the actual proxy minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`. | Below is the instruction that describes the task:
### Input:
Start the actual proxy minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
### Response:
def start(self):
'''
Start the actual proxy minion.
If sub-classed, don't **ever** forget to run:
super(YourSubClass, self).start()
NOTE: Run any required code before calling `super()`.
'''
super(ProxyMinion, self).start()
try:
if check_user(self.config['user']):
self.action_log_info('The Proxy Minion is starting up')
self.verify_hash_type()
self.minion.tune_in()
if self.minion.restart:
raise SaltClientError('Proxy Minion could not connect to Master')
except (KeyboardInterrupt, SaltSystemExit) as exc:
self.action_log_info('Proxy Minion Stopping')
if isinstance(exc, KeyboardInterrupt):
log.warning('Exiting on Ctrl-c')
self.shutdown()
else:
log.error(exc)
self.shutdown(exc.code) |
def timed (log=sys.stderr, limit=2.0):
"""Decorator to run a function with timing info."""
return lambda func: timeit(func, log, limit) | Decorator to run a function with timing info. | Below is the instruction that describes the task:
### Input:
Decorator to run a function with timing info.
### Response:
def timed (log=sys.stderr, limit=2.0):
"""Decorator to run a function with timing info."""
return lambda func: timeit(func, log, limit) |
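A hedged usage sketch: `timed` is a decorator factory, so it is applied with (optional) arguments; the wrapping itself is delegated to the module's `timeit` helper, which is not shown here and is assumed importable.

import sys

@timed(log=sys.stderr, limit=0.5)
def fetch_pages():
    ...  # body omitted; timing info is written to the given log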
def _GetComparable(self, sub_comparable_string=''):
"""Retrieves the comparable representation.
This is a convenience function for constructing comparables.
Args:
sub_comparable_string (str): sub comparable string.
Returns:
str: comparable representation of the path specification.
"""
string_parts = []
string_parts.append(getattr(self.parent, 'comparable', ''))
string_parts.append('type: {0:s}'.format(self.type_indicator))
if sub_comparable_string:
string_parts.append(', {0:s}'.format(sub_comparable_string))
string_parts.append('\n')
return ''.join(string_parts) | Retrieves the comparable representation.
This is a convenience function for constructing comparables.
Args:
sub_comparable_string (str): sub comparable string.
Returns:
str: comparable representation of the path specification. | Below is the instruction that describes the task:
### Input:
Retrieves the comparable representation.
This is a convenience function for constructing comparables.
Args:
sub_comparable_string (str): sub comparable string.
Returns:
str: comparable representation of the path specification.
### Response:
def _GetComparable(self, sub_comparable_string=''):
"""Retrieves the comparable representation.
This is a convenience function for constructing comparables.
Args:
sub_comparable_string (str): sub comparable string.
Returns:
str: comparable representation of the path specification.
"""
string_parts = []
string_parts.append(getattr(self.parent, 'comparable', ''))
string_parts.append('type: {0:s}'.format(self.type_indicator))
if sub_comparable_string:
string_parts.append(', {0:s}'.format(sub_comparable_string))
string_parts.append('\n')
return ''.join(string_parts) |