code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars)
---|---|---|
def _get_vcap_services(vcap_services=None):
"""Retrieves the VCAP Services information from the `ConfigParams.VCAP_SERVICES` field in the config object. If
`vcap_services` is not specified, it takes the information from VCAP_SERVICES environment variable.
Args:
vcap_services (str): Try to parse as a JSON string, otherwise, try to open it as a file.
vcap_services (dict): Return the dict as is.
Returns:
dict: A dict representation of the VCAP Services information.
Raises:
ValueError:
* if neither `vcap_services` nor the VCAP_SERVICES environment variable is specified.
* if `vcap_services` cannot be parsed as a JSON string or opened as a file.
"""
vcap_services = vcap_services or os.environ.get('VCAP_SERVICES')
if not vcap_services:
raise ValueError(
"VCAP_SERVICES information must be supplied as a parameter or as environment variable 'VCAP_SERVICES'")
# If it was passed to config as a dict, simply return it
if isinstance(vcap_services, dict):
return vcap_services
try:
# Otherwise, if it's a string, try to load it as json
vcap_services = json.loads(vcap_services)
except json.JSONDecodeError:
# If that doesn't work, attempt to open it as a file path to the json config.
try:
with open(vcap_services) as vcap_json_data:
vcap_services = json.load(vcap_json_data)
except Exception:
raise ValueError("VCAP_SERVICES information is not JSON or a file containing JSON:", vcap_services)
return vcap_services | Retrieves the VCAP Services information from the `ConfigParams.VCAP_SERVICES` field in the config object. If
`vcap_services` is not specified, it takes the information from VCAP_SERVICES environment variable.
Args:
vcap_services (str): Try to parse as a JSON string, otherwise, try to open it as a file.
vcap_services (dict): Return the dict as is.
Returns:
dict: A dict representation of the VCAP Services information.
Raises:
ValueError:
* if neither `vcap_services` nor the VCAP_SERVICES environment variable is specified.
* if `vcap_services` cannot be parsed as a JSON string or opened as a file. | Below is the instruction that describes the task:
### Input:
Retrieves the VCAP Services information from the `ConfigParams.VCAP_SERVICES` field in the config object. If
`vcap_services` is not specified, it takes the information from VCAP_SERVICES environment variable.
Args:
vcap_services (str): Try to parse as a JSON string, otherwise, try to open it as a file.
vcap_services (dict): Return the dict as is.
Returns:
dict: A dict representation of the VCAP Services information.
Raises:
ValueError:
* if neither `vcap_services` nor the VCAP_SERVICES environment variable is specified.
* if `vcap_services` cannot be parsed as a JSON string or opened as a file.
### Response:
def _get_vcap_services(vcap_services=None):
"""Retrieves the VCAP Services information from the `ConfigParams.VCAP_SERVICES` field in the config object. If
`vcap_services` is not specified, it takes the information from VCAP_SERVICES environment variable.
Args:
vcap_services (str): Try to parse as a JSON string, otherwise, try to open it as a file.
vcap_services (dict): Return the dict as is.
Returns:
dict: A dict representation of the VCAP Services information.
Raises:
ValueError:
* if neither `vcap_services` nor the VCAP_SERVICES environment variable is specified.
* if `vcap_services` cannot be parsed as a JSON string or opened as a file.
"""
vcap_services = vcap_services or os.environ.get('VCAP_SERVICES')
if not vcap_services:
raise ValueError(
"VCAP_SERVICES information must be supplied as a parameter or as environment variable 'VCAP_SERVICES'")
# If it was passed to config as a dict, simply return it
if isinstance(vcap_services, dict):
return vcap_services
try:
# Otherwise, if it's a string, try to load it as json
vcap_services = json.loads(vcap_services)
except json.JSONDecodeError:
# If that doesn't work, attempt to open it as a file path to the json config.
try:
with open(vcap_services) as vcap_json_data:
vcap_services = json.load(vcap_json_data)
except Exception:
raise ValueError("VCAP_SERVICES information is not JSON or a file containing JSON:", vcap_services)
return vcap_services |
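A minimal usage sketch for the function above (the surrounding module is assumed to provide it; only `os` and `json` from the standard library are needed):

    import json
    import os

    # With no argument, the function falls back to the VCAP_SERVICES environment variable.
    os.environ['VCAP_SERVICES'] = json.dumps({'user-provided': [{'name': 'my-service'}]})
    services = _get_vcap_services()

    # A dict argument is returned unchanged; a str is parsed as JSON or treated as a path to a JSON file.
    services = _get_vcap_services({'user-provided': []})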
def _parse_args():
"""Parse and return command line arguments."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=_CliFormatter)
parser.add_argument('-v', '--verbose', action='store_true',
help='Enable verbose output.')
fb_group = parser.add_argument_group('FogBugz arguments')
fb_group.add_argument(
'-u', '--url', help=(
'URL for bugzscout requests to be sent. Should be something '
'like .../scoutSubmit.asp.'))
fb_group.add_argument(
'--user', help='User to designate when submitting via bugzscout.')
fb_group.add_argument(
'--project', help='Fogbugz project to file cases under.')
fb_group.add_argument(
'--area', help='Fogbugz area to file cases under.')
error_group = parser.add_argument_group('error arguments')
error_group.add_argument('-e', '--extra',
help='Extra data to send with error.')
error_group.add_argument('--default-message',
help='Set default message if case is new.')
error_group.add_argument('description',
help=('Description of error. Will be matched '
'against existing cases.'))
parser.set_defaults(**_defaults())
return parser.parse_args() | Parse and return command line arguments. | Below is the instruction that describes the task:
### Input:
Parse and return command line arguments.
### Response:
def _parse_args():
"""Parse and return command line arguments."""
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=_CliFormatter)
parser.add_argument('-v', '--verbose', action='store_true',
help='Enable verbose output.')
fb_group = parser.add_argument_group('FogBugz arguments')
fb_group.add_argument(
'-u', '--url', help=(
'URL for bugzscout requests to be sent. Should be something '
'like .../scoutSubmit.asp.'))
fb_group.add_argument(
'--user', help='User to designate when submitting via bugzscout.')
fb_group.add_argument(
'--project', help='Fogbugz project to file cases under.')
fb_group.add_argument(
'--area', help='Fogbugz area to file cases under.')
error_group = parser.add_argument_group('error arguments')
error_group.add_argument('-e', '--extra',
help='Extra data to send with error.')
error_group.add_argument('--default-message',
help='Set default message if case is new.')
error_group.add_argument('description',
help=('Description of error. Will be matched '
'against existing cases.'))
parser.set_defaults(**_defaults())
return parser.parse_args() |
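For reference, a hypothetical invocation of the command-line tool this parser backs; the script name is an assumption, while the flags come from the definitions above:

    # bugzscout-cli -u https://example.fogbugz.com/scoutSubmit.asp --user errors \
    #     --project MyProject --area Backend 'Unhandled exception in worker loop'
    args = _parse_args()  # parses sys.argv using the parser built above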
def _union_with_dsis(self, dsis):
"""
Union with another DiscreteStridedIntervalSet.
:param dsis:
:return:
"""
copied = self.copy()
for a in dsis._si_set:
copied = copied.union(a)
if isinstance(copied, DiscreteStridedIntervalSet):
copied._update_bounds(dsis)
return copied.normalize() | Union with another DiscreteStridedIntervalSet.
:param dsis:
:return: | Below is the instruction that describes the task:
### Input:
Union with another DiscreteStridedIntervalSet.
:param dsis:
:return:
### Response:
def _union_with_dsis(self, dsis):
"""
Union with another DiscreteStridedIntervalSet.
:param dsis:
:return:
"""
copied = self.copy()
for a in dsis._si_set:
copied = copied.union(a)
if isinstance(copied, DiscreteStridedIntervalSet):
copied._update_bounds(dsis)
return copied.normalize() |
def silent(duration=1000, frame_rate=11025):
"""
Creates an AudioSegment object of the specified duration/frame_rate filled with digital silence.
:param duration: The duration of the returned object in ms.
:param frame_rate: The samples per second of the returned object.
:returns: AudioSegment object filled with pure digital silence.
"""
seg = pydub.AudioSegment.silent(duration=duration, frame_rate=frame_rate)
return AudioSegment(seg, "") | Creates an AudioSegment object of the specified duration/frame_rate filled with digital silence.
:param duration: The duration of the returned object in ms.
:param frame_rate: The samples per second of the returned object.
:returns: AudioSegment object filled with pure digital silence. | Below is the the instruction that describes the task:
### Input:
Creates an AudioSegment object of the specified duration/frame_rate filled with digital silence.
:param duration: The duration of the returned object in ms.
:param frame_rate: The samples per second of the returned object.
:returns: AudioSegment object filled with pure digital silence.
### Response:
def silent(duration=1000, frame_rate=11025):
"""
Creates an AudioSegment object of the specified duration/frame_rate filled with digital silence.
:param duration: The duration of the returned object in ms.
:param frame_rate: The samples per second of the returned object.
:returns: AudioSegment object filled with pure digital silence.
"""
seg = pydub.AudioSegment.silent(duration=duration, frame_rate=frame_rate)
return AudioSegment(seg, "") |
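A quick usage sketch (requires pydub to be installed; `AudioSegment` here is the wrapper class defined by this module, not pydub's own class):

    seg = silent(duration=500, frame_rate=16000)  # 500 ms of digital silence at 16 kHz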
def get_attrs_by_path(self, field_path, stop_first=False):
"""
It returns list of values looked up by field path.
Field path is dot-formatted string path: ``parent_field.child_field``.
:param field_path: field path. It allows ``*`` as wildcard.
:type field_path: list or None.
:param stop_first: Stop iteration on first value looked up. Default: False.
:type stop_first: bool
:return: value
"""
index_list, next_field = self._get_indexes_by_path(field_path)
values = []
for idx in index_list:
if next_field:
try:
res = self[idx].get_attrs_by_path(next_field, stop_first=stop_first)
if res is None:
continue
values.extend(res)
if stop_first and len(values):
break
except AttributeError:
pass
else:
if stop_first:
return [self[idx], ]
values.append(self[idx])
return values if len(values) else None | It returns list of values looked up by field path.
Field path is dot-formatted string path: ``parent_field.child_field``.
:param field_path: field path. It allows ``*`` as wildcard.
:type field_path: list or None.
:param stop_first: Stop iteration on first value looked up. Default: False.
:type stop_first: bool
:return: value | Below is the instruction that describes the task:
### Input:
It returns list of values looked up by field path.
Field path is dot-formatted string path: ``parent_field.child_field``.
:param field_path: field path. It allows ``*`` as wildcard.
:type field_path: list or None.
:param stop_first: Stop iteration on first value looked up. Default: False.
:type stop_first: bool
:return: value
### Response:
def get_attrs_by_path(self, field_path, stop_first=False):
"""
It returns list of values looked up by field path.
Field path is dot-formatted string path: ``parent_field.child_field``.
:param field_path: field path. It allows ``*`` as wildcard.
:type field_path: list or None.
:param stop_first: Stop iteration on first value looked up. Default: False.
:type stop_first: bool
:return: value
"""
index_list, next_field = self._get_indexes_by_path(field_path)
values = []
for idx in index_list:
if next_field:
try:
res = self[idx].get_attrs_by_path(next_field, stop_first=stop_first)
if res is None:
continue
values.extend(res)
if stop_first and len(values):
break
except AttributeError:
pass
else:
if stop_first:
return [self[idx], ]
values.append(self[idx])
return values if len(values) else None |
def three_sum(array):
"""
:param array: List[int]
:return: Set[ Tuple[int, int, int] ]
"""
res = set()
array.sort()
for i in range(len(array) - 2):
if i > 0 and array[i] == array[i - 1]:
continue
l, r = i + 1, len(array) - 1
while l < r:
s = array[i] + array[l] + array[r]
if s > 0:
r -= 1
elif s < 0:
l += 1
else:
# found three sum
res.add((array[i], array[l], array[r]))
# remove duplicates
while l < r and array[l] == array[l + 1]:
l += 1
while l < r and array[r] == array[r - 1]:
r -= 1
l += 1
r -= 1
return res | :param array: List[int]
:return: Set[ Tuple[int, int, int] ] | Below is the instruction that describes the task:
### Input:
:param array: List[int]
:return: Set[ Tuple[int, int, int] ]
### Response:
def three_sum(array):
"""
:param array: List[int]
:return: Set[ Tuple[int, int, int] ]
"""
res = set()
array.sort()
for i in range(len(array) - 2):
if i > 0 and array[i] == array[i - 1]:
continue
l, r = i + 1, len(array) - 1
while l < r:
s = array[i] + array[l] + array[r]
if s > 0:
r -= 1
elif s < 0:
l += 1
else:
# found three sum
res.add((array[i], array[l], array[r]))
# remove duplicates
while l < r and array[l] == array[l + 1]:
l += 1
while l < r and array[r] == array[r - 1]:
r -= 1
l += 1
r -= 1
return res |
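A worked example for the function above; note that the input list is sorted in place, and the result is a set of tuples drawn from the sorted array, so duplicate triples collapse automatically:

    result = three_sum([-1, 0, 1, 2, -1, -4])
    assert result == {(-1, -1, 2), (-1, 0, 1)}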
def search_response(self, request):
"""
creates a key from the request and searches the cache with it
:param request:
:return CacheElement: returns None if there's a cache miss
"""
logger.debug("Cache Search Response")
if self.cache.is_empty() is True:
logger.debug("Empty Cache")
return None
"""
create a new cache key from the request
"""
if self.mode == defines.FORWARD_PROXY:
search_key = CacheKey(request)
else:
search_key = ReverseCacheKey(request)
response = self.cache.get(search_key)
return response | creates a key from the request and searches the cache with it
:param request:
:return CacheElement: returns None if there's a cache miss | Below is the instruction that describes the task:
### Input:
creates a key from the request and searches the cache with it
:param request:
:return CacheElement: returns None if there's a cache miss
### Response:
def search_response(self, request):
"""
creates a key from the request and searches the cache with it
:param request:
:return CacheElement: returns None if there's a cache miss
"""
logger.debug("Cache Search Response")
if self.cache.is_empty() is True:
logger.debug("Empty Cache")
return None
"""
create a new cache key from the request
"""
if self.mode == defines.FORWARD_PROXY:
search_key = CacheKey(request)
else:
search_key = ReverseCacheKey(request)
response = self.cache.get(search_key)
return response |
def isUserCert(self, name):
'''
Checks if a user certificate exists.
Args:
name (str): The name of the user keypair.
Examples:
Check if the user cert "myuser" exists:
exists = cdir.isUserCert('myuser')
Returns:
bool: True if the certificate is present, False otherwise.
'''
crtpath = self._getPathJoin('users', '%s.crt' % name)
return os.path.isfile(crtpath) | Checks if a user certificate exists.
Args:
name (str): The name of the user keypair.
Examples:
Check if the user cert "myuser" exists:
exists = cdir.isUserCert('myuser')
Returns:
bool: True if the certificate is present, False otherwise. | Below is the instruction that describes the task:
### Input:
Checks if a user certificate exists.
Args:
name (str): The name of the user keypair.
Examples:
Check if the user cert "myuser" exists:
exists = cdir.isUserCert('myuser')
Returns:
bool: True if the certificate is present, False otherwise.
### Response:
def isUserCert(self, name):
'''
Checks if a user certificate exists.
Args:
name (str): The name of the user keypair.
Examples:
Check if the user cert "myuser" exists:
exists = cdir.isUserCert('myuser')
Returns:
bool: True if the certificate is present, False otherwise.
'''
crtpath = self._getPathJoin('users', '%s.crt' % name)
return os.path.isfile(crtpath) |
def check_apartment_number(self, token):
"""
Finds apartment, unit, #, etc, regardless of spot in string. This needs to come after everything else has been ruled out,
because it has a lot of false positives.
"""
apartment_regexes = [r'#\w+ & \w+', r'#\w+ rm \w+', r"#\w+-\w", r'apt #{0,1}\w+', r'apartment #{0,1}\w+', r'#\w+',
r'# \w+', r'rm \w+', r'unit #?\w+', r'units #?\w+', r'- #{0,1}\w+', r'no\s?\d+\w*',
r'style\s\w{1,2}', r'\d{1,4}/\d{1,4}', r'\d{1,4}', r'\w{1,2}']
for regex in apartment_regexes:
if re.match(regex, token.lower()):
self.apartment = self._clean(token)
return True
# if self.apartment is None and re.match(apartment_regex_number, token.lower()):
## print "Apt regex"
# self.apartment = token
# return True
## If we come on apt or apartment and already have an apartment number, add apt or apartment to the front
if self.apartment and token.lower() in ['apt', 'apartment']:
# print "Apt in a_n"
self.apartment = self._clean(token + ' ' + self.apartment)
return True
if not self.street_suffix and not self.street and not self.apartment:
# print "Searching for unmatched term: ", token, token.lower(),
if re.match(r'\d?\w?', token.lower()):
self.apartment = self._clean(token)
return True
return False | Finds apartment, unit, #, etc, regardless of spot in string. This needs to come after everything else has been ruled out,
because it has a lot of false positives. | Below is the instruction that describes the task:
### Input:
Finds apartment, unit, #, etc, regardless of spot in string. This needs to come after everything else has been ruled out,
because it has a lot of false positives.
### Response:
def check_apartment_number(self, token):
"""
Finds apartment, unit, #, etc, regardless of spot in string. This needs to come after everything else has been ruled out,
because it has a lot of false positives.
"""
apartment_regexes = [r'#\w+ & \w+', r'#\w+ rm \w+', r"#\w+-\w", r'apt #{0,1}\w+', r'apartment #{0,1}\w+', r'#\w+',
r'# \w+', r'rm \w+', r'unit #?\w+', r'units #?\w+', r'- #{0,1}\w+', r'no\s?\d+\w*',
r'style\s\w{1,2}', r'\d{1,4}/\d{1,4}', r'\d{1,4}', r'\w{1,2}']
for regex in apartment_regexes:
if re.match(regex, token.lower()):
self.apartment = self._clean(token)
return True
# if self.apartment is None and re.match(apartment_regex_number, token.lower()):
## print "Apt regex"
# self.apartment = token
# return True
## If we come on apt or apartment and already have an apartment number, add apt or apartment to the front
if self.apartment and token.lower() in ['apt', 'apartment']:
# print "Apt in a_n"
self.apartment = self._clean(token + ' ' + self.apartment)
return True
if not self.street_suffix and not self.street and not self.apartment:
# print "Searching for unmatched term: ", token, token.lower(),
if re.match(r'\d?\w?', token.lower()):
self.apartment = self._clean(token)
return True
return False |
def video_bitwise_bottom(x, model_hparams, vocab_size):
"""Bottom transformation for embedding video bitwise."""
pixel_embedding_size = 64
inputs = x
with tf.variable_scope("video_modality_bitwise", reuse=tf.AUTO_REUSE):
common_layers.summarize_video(inputs, "bottom")
# Embed bitwise.
assert vocab_size == 256
embedded = discretization.int_to_bit_embed(inputs, 8,
pixel_embedding_size)
# Project.
return tf.layers.dense(
embedded,
model_hparams.hidden_size,
name="merge_pixel_embedded_frames") | Bottom transformation for embedding video bitwise. | Below is the the instruction that describes the task:
### Input:
Bottom transformation for embedding video bitwise.
### Response:
def video_bitwise_bottom(x, model_hparams, vocab_size):
"""Bottom transformation for embedding video bitwise."""
pixel_embedding_size = 64
inputs = x
with tf.variable_scope("video_modality_bitwise", reuse=tf.AUTO_REUSE):
common_layers.summarize_video(inputs, "bottom")
# Embed bitwise.
assert vocab_size == 256
embedded = discretization.int_to_bit_embed(inputs, 8,
pixel_embedding_size)
# Project.
return tf.layers.dense(
embedded,
model_hparams.hidden_size,
name="merge_pixel_embedded_frames") |
def _build_loop(self, lexer):
"""Build saveframe loop.
:param lexer: instance of lexical analyzer.
:type lexer: :func:`~nmrstarlib.bmrblex.bmrblex`
:return: Fields and values of the loop.
:rtype: :py:class:`tuple`
"""
fields = []
values = []
token = next(lexer)
while token[0] == u"_":
fields.append(token[1:])
token = next(lexer)
while token != u"stop_":
values.append(token)
token = next(lexer)
assert float(len(values) / len(fields)).is_integer(), \
"Error in loop construction: number of fields must be equal to number of values."
values = [OrderedDict(zip(fields, values[i:i + len(fields)])) for i in range(0, len(values), len(fields))]
return fields, values | Build saveframe loop.
:param lexer: instance of lexical analyzer.
:type lexer: :func:`~nmrstarlib.bmrblex.bmrblex`
:return: Fields and values of the loop.
:rtype: :py:class:`tuple` | Below is the instruction that describes the task:
### Input:
Build saveframe loop.
:param lexer: instance of lexical analyzer.
:type lexer: :func:`~nmrstarlib.bmrblex.bmrblex`
:return: Fields and values of the loop.
:rtype: :py:class:`tuple`
### Response:
def _build_loop(self, lexer):
"""Build saveframe loop.
:param lexer: instance of lexical analyzer.
:type lexer: :func:`~nmrstarlib.bmrblex.bmrblex`
:return: Fields and values of the loop.
:rtype: :py:class:`tuple`
"""
fields = []
values = []
token = next(lexer)
while token[0] == u"_":
fields.append(token[1:])
token = next(lexer)
while token != u"stop_":
values.append(token)
token = next(lexer)
assert float(len(values) / len(fields)).is_integer(), \
"Error in loop construction: number of fields must be equal to number of values."
values = [OrderedDict(zip(fields, values[i:i + len(fields)])) for i in range(0, len(values), len(fields))]
return fields, values |
def find_packages(path: str) -> List[str]:
"""
A better version of find_packages than what setuptools offers
This function needs to be deterministic.
:param path:
:return:
"""
ret = []
for root, _dir, files in os.walk(path):
if '__init__.py' in files:
ret.append(root.replace("/", "."))
return sorted(ret) | A better version of find_packages than what setuptools offers
This function needs to be deterministic.
:param path:
:return: | Below is the instruction that describes the task:
### Input:
A better version of find_packages than what setuptools offers
This function needs to be deterministic.
:param path:
:return:
### Response:
def find_packages(path: str) -> List[str]:
"""
A better version of find_packages than what setuptools offers
This function needs to be deterministic.
:param path:
:return:
"""
ret = []
for root, _dir, files in os.walk(path):
if '__init__.py' in files:
ret.append(root.replace("/", "."))
return sorted(ret) |
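A small self-contained check of the walker above, built in a temporary directory (standard library only):

    import os, tempfile

    with tempfile.TemporaryDirectory() as tmp:
        os.makedirs(os.path.join(tmp, 'sub'))
        for d in (tmp, os.path.join(tmp, 'sub')):
            open(os.path.join(d, '__init__.py'), 'w').close()
        found = find_packages(tmp)
        assert len(found) == 2  # the root package and its 'sub' package, with '/' replaced by '.'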
def list_remote(local_root):
"""Get remote branch/tag latest SHAs.
:raise GitError: When git ls-remote fails.
:param str local_root: Local path to git root directory.
:return: List of tuples containing strings. Each tuple is sha, name, kind.
:rtype: list
"""
command = ['git', 'ls-remote', '--heads', '--tags']
try:
output = run_command(local_root, command)
except CalledProcessError as exc:
raise GitError('Git failed to list remote refs.', exc.output)
# Dereference annotated tags if any. No need to fetch annotations.
if '^{}' in output:
parsed = list()
for group in (m.groupdict() for m in RE_REMOTE.finditer(output)):
dereferenced, name, kind = group['name'].endswith('^{}'), group['name'][:-3], group['kind']
if dereferenced and parsed and kind == parsed[-1]['kind'] == 'tags' and name == parsed[-1]['name']:
parsed[-1]['sha'] = group['sha']
else:
parsed.append(group)
else:
parsed = [m.groupdict() for m in RE_REMOTE.finditer(output)]
return [[i['sha'], i['name'], i['kind']] for i in parsed] | Get remote branch/tag latest SHAs.
:raise GitError: When git ls-remote fails.
:param str local_root: Local path to git root directory.
:return: List of tuples containing strings. Each tuple is sha, name, kind.
:rtype: list | Below is the instruction that describes the task:
### Input:
Get remote branch/tag latest SHAs.
:raise GitError: When git ls-remote fails.
:param str local_root: Local path to git root directory.
:return: List of tuples containing strings. Each tuple is sha, name, kind.
:rtype: list
### Response:
def list_remote(local_root):
"""Get remote branch/tag latest SHAs.
:raise GitError: When git ls-remote fails.
:param str local_root: Local path to git root directory.
:return: List of tuples containing strings. Each tuple is sha, name, kind.
:rtype: list
"""
command = ['git', 'ls-remote', '--heads', '--tags']
try:
output = run_command(local_root, command)
except CalledProcessError as exc:
raise GitError('Git failed to list remote refs.', exc.output)
# Dereference annotated tags if any. No need to fetch annotations.
if '^{}' in output:
parsed = list()
for group in (m.groupdict() for m in RE_REMOTE.finditer(output)):
dereferenced, name, kind = group['name'].endswith('^{}'), group['name'][:-3], group['kind']
if dereferenced and parsed and kind == parsed[-1]['kind'] == 'tags' and name == parsed[-1]['name']:
parsed[-1]['sha'] = group['sha']
else:
parsed.append(group)
else:
parsed = [m.groupdict() for m in RE_REMOTE.finditer(output)]
return [[i['sha'], i['name'], i['kind']] for i in parsed] |
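A usage sketch (hedged: `run_command`, `RE_REMOTE` and `GitError` are defined elsewhere in the same module):

    refs = list_remote('/path/to/local/checkout')
    # e.g. [['1a2b3c...', 'master', 'heads'], ['4d5e6f...', 'v1.0.0', 'tags']]
    tag_names = [name for sha, name, kind in refs if kind == 'tags']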
def read(self, size=-1):
"""Read data from RAW file.
Args:
size: Number of bytes to read as integer. Actual number of bytes
read is always equal to size unless EOF is reached. If size is
negative or unspecified, read the entire file.
Returns:
data read as str.
Raises:
IOError: When this buffer is closed.
"""
self._check_open()
if not self._remaining():
return ''
data_list = []
while True:
remaining = self._buffer.remaining()
if size >= 0 and size < remaining:
data_list.append(self._buffer.read(size))
self._offset += size
break
else:
size -= remaining
self._offset += remaining
data_list.append(self._buffer.read())
if self._buffer_future is None:
if size < 0 or size >= self._remaining():
needs = self._remaining()
else:
needs = size
data_list.extend(self._get_segments(self._offset, needs))
self._offset += needs
break
if self._buffer_future:
self._buffer.reset(self._buffer_future.get_result())
self._buffer_future = None
if self._buffer_future is None:
self._request_next_buffer()
return ''.join(data_list) | Read data from RAW file.
Args:
size: Number of bytes to read as integer. Actual number of bytes
read is always equal to size unless EOF is reached. If size is
negative or unspecified, read the entire file.
Returns:
data read as str.
Raises:
IOError: When this buffer is closed. | Below is the instruction that describes the task:
### Input:
Read data from RAW file.
Args:
size: Number of bytes to read as integer. Actual number of bytes
read is always equal to size unless EOF is reached. If size is
negative or unspecified, read the entire file.
Returns:
data read as str.
Raises:
IOError: When this buffer is closed.
### Response:
def read(self, size=-1):
"""Read data from RAW file.
Args:
size: Number of bytes to read as integer. Actual number of bytes
read is always equal to size unless EOF is reached. If size is
negative or unspecified, read the entire file.
Returns:
data read as str.
Raises:
IOError: When this buffer is closed.
"""
self._check_open()
if not self._remaining():
return ''
data_list = []
while True:
remaining = self._buffer.remaining()
if size >= 0 and size < remaining:
data_list.append(self._buffer.read(size))
self._offset += size
break
else:
size -= remaining
self._offset += remaining
data_list.append(self._buffer.read())
if self._buffer_future is None:
if size < 0 or size >= self._remaining():
needs = self._remaining()
else:
needs = size
data_list.extend(self._get_segments(self._offset, needs))
self._offset += needs
break
if self._buffer_future:
self._buffer.reset(self._buffer_future.get_result())
self._buffer_future = None
if self._buffer_future is None:
self._request_next_buffer()
return ''.join(data_list) |
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter)) | required method to auto register this checker | Below is the instruction that describes the task:
### Input:
required method to auto register this checker
### Response:
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(BasicErrorChecker(linter))
linter.register_checker(BasicChecker(linter))
linter.register_checker(NameChecker(linter))
linter.register_checker(DocStringChecker(linter))
linter.register_checker(PassChecker(linter))
linter.register_checker(ComparisonChecker(linter)) |
def view_structure(self, only_chains=None, opacity=1.0, recolor=False, gui=False):
"""Use NGLviewer to display a structure in a Jupyter notebook
Args:
only_chains (str, list): Chain ID or IDs to display
opacity (float): Opacity of the structure
recolor (bool): If structure should be cleaned and recolored to silver
gui (bool): If the NGLview GUI should show up
Returns:
NGLviewer object
"""
# TODO: show_structure_file does not work for MMTF files - need to check for that and load accordingly
if ssbio.utils.is_ipynb():
import nglview as nv
else:
raise EnvironmentError('Unable to display structure - not running in a Jupyter notebook environment')
if not self.structure_file:
raise ValueError("Structure file not loaded")
only_chains = ssbio.utils.force_list(only_chains)
to_show_chains = '( '
for c in only_chains:
to_show_chains += ':{} or'.format(c)
to_show_chains = to_show_chains.strip(' or ')
to_show_chains += ' )'
if self.file_type == 'mmtf' or self.file_type == 'mmtf.gz':
view = nv.NGLWidget()
view.add_component(self.structure_path)
else:
view = nv.show_structure_file(self.structure_path, gui=gui)
if recolor:
view.clear_representations()
if only_chains:
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)
else:
view.add_cartoon(selection='protein', color='silver', opacity=opacity)
elif only_chains:
view.clear_representations()
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)
return view | Use NGLviewer to display a structure in a Jupyter notebook
Args:
only_chains (str, list): Chain ID or IDs to display
opacity (float): Opacity of the structure
recolor (bool): If structure should be cleaned and recolored to silver
gui (bool): If the NGLview GUI should show up
Returns:
NGLviewer object | Below is the instruction that describes the task:
### Input:
Use NGLviewer to display a structure in a Jupyter notebook
Args:
only_chains (str, list): Chain ID or IDs to display
opacity (float): Opacity of the structure
recolor (bool): If structure should be cleaned and recolored to silver
gui (bool): If the NGLview GUI should show up
Returns:
NGLviewer object
### Response:
def view_structure(self, only_chains=None, opacity=1.0, recolor=False, gui=False):
"""Use NGLviewer to display a structure in a Jupyter notebook
Args:
only_chains (str, list): Chain ID or IDs to display
opacity (float): Opacity of the structure
recolor (bool): If structure should be cleaned and recolored to silver
gui (bool): If the NGLview GUI should show up
Returns:
NGLviewer object
"""
# TODO: show_structure_file does not work for MMTF files - need to check for that and load accordingly
if ssbio.utils.is_ipynb():
import nglview as nv
else:
raise EnvironmentError('Unable to display structure - not running in a Jupyter notebook environment')
if not self.structure_file:
raise ValueError("Structure file not loaded")
only_chains = ssbio.utils.force_list(only_chains)
to_show_chains = '( '
for c in only_chains:
to_show_chains += ':{} or'.format(c)
to_show_chains = to_show_chains.strip(' or ')
to_show_chains += ' )'
if self.file_type == 'mmtf' or self.file_type == 'mmtf.gz':
view = nv.NGLWidget()
view.add_component(self.structure_path)
else:
view = nv.show_structure_file(self.structure_path, gui=gui)
if recolor:
view.clear_representations()
if only_chains:
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)
else:
view.add_cartoon(selection='protein', color='silver', opacity=opacity)
elif only_chains:
view.clear_representations()
view.add_cartoon(selection='{} and (not hydrogen)'.format(to_show_chains), color='silver', opacity=opacity)
return view |
def _len_tube(Flow, Diam, HeadLoss, conc_chem, temp, en_chem, KMinor):
"""Length of tube required to get desired head loss at maximum flow based on
the Hagen-Poiseuille equation."""
num1 = pc.gravity.magnitude * HeadLoss * np.pi * (Diam**4)
denom1 = 128 * viscosity_kinematic_chem(conc_chem, temp, en_chem) * Flow
num2 = Flow * KMinor
denom2 = 16 * np.pi * viscosity_kinematic_chem(conc_chem, temp, en_chem)
len = ((num1/denom1) - (num2/denom2))
return len.magnitude | Length of tube required to get desired head loss at maximum flow based on
the Hagen-Poiseuille equation. | Below is the instruction that describes the task:
### Input:
Length of tube required to get desired head loss at maximum flow based on
the Hagen-Poiseuille equation.
### Response:
def _len_tube(Flow, Diam, HeadLoss, conc_chem, temp, en_chem, KMinor):
"""Length of tube required to get desired head loss at maximum flow based on
the Hagen-Poiseuille equation."""
num1 = pc.gravity.magnitude * HeadLoss * np.pi * (Diam**4)
denom1 = 128 * viscosity_kinematic_chem(conc_chem, temp, en_chem) * Flow
num2 = Flow * KMinor
denom2 = 16 * np.pi * viscosity_kinematic_chem(conc_chem, temp, en_chem)
len = ((num1/denom1) - (num2/denom2))
return len.magnitude |
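Written out, the relation the function evaluates is the Hagen-Poiseuille head-loss equation rearranged for length, with the minor-loss head folded into a second term (here ν is the kinematic viscosity returned by viscosity_kinematic_chem):

    L = \frac{g \, h_L \, \pi D^4}{128 \, \nu \, Q} - \frac{Q \, K_{minor}}{16 \, \pi \, \nu}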
def get(self, index, id, fields=None, doc_type=EsConst.ALL_VALUES, **query_params):
"""
Retrieve specific record by id
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_
:param index: the index name to query
:param id: the id of the record
:param fields: the fields you want to fetch from the record (str separated by commas)
:param doc_type: the doc type to search in
:param query_params: params
:return:
"""
if fields:
query_params[EsConst.FIELDS] = fields
path = self._es_parser.make_path(index, doc_type, id)
result = yield self._perform_request(HttpMethod.GET, path, params=query_params)
returnValue(result) | Retrieve specific record by id
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_
:param index: the index name to query
:param id: the id of the record
:param fields: the fields you want to fetch from the record (str separated by commas)
:param doc_type: the doc type to search in
:param query_params: params
:return: | Below is the instruction that describes the task:
### Input:
Retrieve specific record by id
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_
:param index: the index name to query
:param id: the id of the record
:param fields: the fields you want to fetch from the record (str separated by commas)
:param doc_type: the doc type to search in
:param query_params: params
:return:
### Response:
def get(self, index, id, fields=None, doc_type=EsConst.ALL_VALUES, **query_params):
"""
Retrieve specific record by id
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/docs-get.html>`_
:param index: the index name to query
:param id: the id of the record
:param fields: the fields you want to fetch from the record (str separated by commas)
:param doc_type: the doc type to search in
:param query_params: params
:return:
"""
if fields:
query_params[EsConst.FIELDS] = fields
path = self._es_parser.make_path(index, doc_type, id)
result = yield self._perform_request(HttpMethod.GET, path, params=query_params)
returnValue(result) |
def pair_visual(*args, **kwargs):
"""Deprecation wrapper"""
warnings.warn("`pair_visual` has moved to `cleverhans.plot.pyplot_image`. "
"cleverhans.utils.pair_visual may be removed on or after "
"2019-04-24.")
from cleverhans.plot.pyplot_image import pair_visual as new_pair_visual
return new_pair_visual(*args, **kwargs) | Deprecation wrapper | Below is the instruction that describes the task:
### Input:
Deprecation wrapper
### Response:
def pair_visual(*args, **kwargs):
"""Deprecation wrapper"""
warnings.warn("`pair_visual` has moved to `cleverhans.plot.pyplot_image`. "
"cleverhans.utils.pair_visual may be removed on or after "
"2019-04-24.")
from cleverhans.plot.pyplot_image import pair_visual as new_pair_visual
return new_pair_visual(*args, **kwargs) |
def _HandleHomepage(self, request):
"""Renders GRR home page by rendering base.html Jinja template."""
_ = request
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(config.CONFIG["AdminUI.template_root"]),
autoescape=True)
create_time = psutil.Process(os.getpid()).create_time()
context = {
"heading":
config.CONFIG["AdminUI.heading"],
"report_url":
config.CONFIG["AdminUI.report_url"],
"help_url":
config.CONFIG["AdminUI.help_url"],
"timestamp":
utils.SmartStr(create_time),
"use_precompiled_js":
config.CONFIG["AdminUI.use_precompiled_js"],
# Used in conjunction with FirebaseWebAuthManager.
"firebase_api_key":
config.CONFIG["AdminUI.firebase_api_key"],
"firebase_auth_domain":
config.CONFIG["AdminUI.firebase_auth_domain"],
"firebase_auth_provider":
config.CONFIG["AdminUI.firebase_auth_provider"],
"grr_version":
config.CONFIG["Source.version_string"]
}
template = env.get_template("base.html")
response = werkzeug_wrappers.Response(
template.render(context), mimetype="text/html")
# For a redirect-based Firebase authentication scheme we won't have any
# user information at this point - therefore checking if the user is
# present.
try:
StoreCSRFCookie(request.user, response)
except RequestHasNoUser:
pass
return response | Renders GRR home page by rendering base.html Jinja template. | Below is the instruction that describes the task:
### Input:
Renders GRR home page by rendering base.html Jinja template.
### Response:
def _HandleHomepage(self, request):
"""Renders GRR home page by rendering base.html Jinja template."""
_ = request
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(config.CONFIG["AdminUI.template_root"]),
autoescape=True)
create_time = psutil.Process(os.getpid()).create_time()
context = {
"heading":
config.CONFIG["AdminUI.heading"],
"report_url":
config.CONFIG["AdminUI.report_url"],
"help_url":
config.CONFIG["AdminUI.help_url"],
"timestamp":
utils.SmartStr(create_time),
"use_precompiled_js":
config.CONFIG["AdminUI.use_precompiled_js"],
# Used in conjunction with FirebaseWebAuthManager.
"firebase_api_key":
config.CONFIG["AdminUI.firebase_api_key"],
"firebase_auth_domain":
config.CONFIG["AdminUI.firebase_auth_domain"],
"firebase_auth_provider":
config.CONFIG["AdminUI.firebase_auth_provider"],
"grr_version":
config.CONFIG["Source.version_string"]
}
template = env.get_template("base.html")
response = werkzeug_wrappers.Response(
template.render(context), mimetype="text/html")
# For a redirect-based Firebase authentication scheme we won't have any
# user information at this point - therefore checking if the user is
# present.
try:
StoreCSRFCookie(request.user, response)
except RequestHasNoUser:
pass
return response |
def environment_schedule_unset(self, name):
"""Schedules unsetting (removing) an environment variable when creating
the next guest process. This affects the
:py:func:`IGuestSession.environment_changes` attribute.
in name of type str
Name of the environment variable to unset. This cannot be empty
nor can it contain any equal signs.
"""
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
self._call("environmentScheduleUnset",
in_p=[name]) | Schedules unsetting (removing) an environment variable when creating
the next guest process. This affects the
:py:func:`IGuestSession.environment_changes` attribute.
in name of type str
Name of the environment variable to unset. This cannot be empty
nor can it contain any equal signs. | Below is the instruction that describes the task:
### Input:
Schedules unsetting (removing) an environment variable when creating
the next guest process. This affects the
:py:func:`IGuestSession.environment_changes` attribute.
in name of type str
Name of the environment variable to unset. This cannot be empty
nor can it contain any equal signs.
### Response:
def environment_schedule_unset(self, name):
"""Schedules unsetting (removing) an environment variable when creating
the next guest process. This affects the
:py:func:`IGuestSession.environment_changes` attribute.
in name of type str
Name of the environment variable to unset. This cannot be empty
nor can it contain any equal signs.
"""
if not isinstance(name, basestring):
raise TypeError("name can only be an instance of type basestring")
self._call("environmentScheduleUnset",
in_p=[name]) |
def write_lock(self):
"""Context manager that grants a write lock.
Will wait until no active readers. Blocks readers after acquiring.
Guaranteed for locks to be processed in fair order (FIFO).
Raises a ``RuntimeError`` if an active reader attempts to acquire
a lock.
"""
me = self._current_thread()
i_am_writer = self.is_writer(check_pending=False)
if self.is_reader() and not i_am_writer:
raise RuntimeError("Reader %s to writer privilege"
" escalation not allowed" % me)
if i_am_writer:
# Already the writer; this allows for basic reentrancy.
yield self
else:
with self._cond:
self._pending_writers.append(me)
while True:
# No readers, and no active writer, am I next??
if len(self._readers) == 0 and self._writer is None:
if self._pending_writers[0] == me:
self._writer = self._pending_writers.popleft()
break
self._cond.wait()
try:
yield self
finally:
with self._cond:
self._writer = None
self._cond.notify_all() | Context manager that grants a write lock.
Will wait until no active readers. Blocks readers after acquiring.
Guaranteed for locks to be processed in fair order (FIFO).
Raises a ``RuntimeError`` if an active reader attempts to acquire
a lock. | Below is the instruction that describes the task:
### Input:
Context manager that grants a write lock.
Will wait until no active readers. Blocks readers after acquiring.
Guaranteed for locks to be processed in fair order (FIFO).
Raises a ``RuntimeError`` if an active reader attempts to acquire
a lock.
### Response:
def write_lock(self):
"""Context manager that grants a write lock.
Will wait until no active readers. Blocks readers after acquiring.
Guaranteed for locks to be processed in fair order (FIFO).
Raises a ``RuntimeError`` if an active reader attempts to acquire
a lock.
"""
me = self._current_thread()
i_am_writer = self.is_writer(check_pending=False)
if self.is_reader() and not i_am_writer:
raise RuntimeError("Reader %s to writer privilege"
" escalation not allowed" % me)
if i_am_writer:
# Already the writer; this allows for basic reentrancy.
yield self
else:
with self._cond:
self._pending_writers.append(me)
while True:
# No readers, and no active writer, am I next??
if len(self._readers) == 0 and self._writer is None:
if self._pending_writers[0] == me:
self._writer = self._pending_writers.popleft()
break
self._cond.wait()
try:
yield self
finally:
with self._cond:
self._writer = None
self._cond.notify_all() |
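A usage sketch, assuming the method lives on a reader/writer lock object and (since the body is a generator) is wrapped with contextlib.contextmanager elsewhere in the class definition:

    with rw_lock.write_lock():        # rw_lock: an instance of the surrounding lock class
        shared_state['counter'] += 1  # exclusive section: readers and other writers wait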
def list_cache_subnet_groups(name=None, region=None, key=None,
keyid=None, profile=None):
'''
Return a list of all cache subnet group names
CLI example::
salt myminion boto_elasticache.list_subnet_groups region=us-east-1
'''
return [g['CacheSubnetGroupName'] for g in
get_all_cache_subnet_groups(name, region, key, keyid, profile)] | Return a list of all cache subnet group names
CLI example::
salt myminion boto_elasticache.list_subnet_groups region=us-east-1 | Below is the instruction that describes the task:
### Input:
Return a list of all cache subnet group names
CLI example::
salt myminion boto_elasticache.list_subnet_groups region=us-east-1
### Response:
def list_cache_subnet_groups(name=None, region=None, key=None,
keyid=None, profile=None):
'''
Return a list of all cache subnet group names
CLI example::
salt myminion boto_elasticache.list_subnet_groups region=us-east-1
'''
return [g['CacheSubnetGroupName'] for g in
get_all_cache_subnet_groups(name, region, key, keyid, profile)] |
def create_mongo_db(database_name, collection_name, initial_document):
"""Create a new database and collection by inserting one document."""
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url,document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
d = json.loads(initial_document, object_pairs_hook=OrderedDict)
collection.save(d)
except Exception:
# error connecting to mongodb
response_dict['error'] = str(sys.exc_info())
return response_dict | Create a new database and collection by inserting one document. | Below is the instruction that describes the task:
### Input:
Create a new database and collection by inserting one document.
### Response:
def create_mongo_db(database_name, collection_name, initial_document):
"""Create a new database and collection by inserting one document."""
response_dict = {}
try:
mongodb_client_url = getattr(settings, 'MONGODB_CLIENT',
'mongodb://localhost:27017/')
mc = MongoClient(mongodb_client_url,document_class=OrderedDict)
db = mc[str(database_name)]
collection = db[str(collection_name)]
d = json.loads(initial_document, object_pairs_hook=OrderedDict)
collection.save(d)
except Exception:
# error connecting to mongodb
response_dict['error'] = str(sys.exc_info())
return response_dict |
def from_learner(cls, learn: Learner, ds_type:DatasetType=DatasetType.Valid):
"Create an instance of `ClassificationInterpretation`"
preds = learn.get_preds(ds_type=ds_type, with_loss=True)
return cls(learn, *preds) | Create an instance of `ClassificationInterpretation` | Below is the instruction that describes the task:
### Input:
Create an instance of `ClassificationInterpretation`
### Response:
def from_learner(cls, learn: Learner, ds_type:DatasetType=DatasetType.Valid):
"Create an instance of `ClassificationInterpretation`"
preds = learn.get_preds(ds_type=ds_type, with_loss=True)
return cls(learn, *preds) |
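Typical usage in the fastai style (assumes a trained `learn` Learner; the plotting call is just one example of what the returned interpretation object exposes):

    interp = ClassificationInterpretation.from_learner(learn)
    interp.plot_confusion_matrix()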
def update_entity(self, entity, agent=None, metadata=None):
"""
Updates the specified entity's values with the supplied parameters.
"""
body = {}
if agent:
body["agent_id"] = utils.get_id(agent)
if metadata:
body["metadata"] = metadata
if body:
uri = "/%s/%s" % (self.uri_base, utils.get_id(entity))
resp, body = self.api.method_put(uri, body=body) | Updates the specified entity's values with the supplied parameters. | Below is the instruction that describes the task:
### Input:
Updates the specified entity's values with the supplied parameters.
### Response:
def update_entity(self, entity, agent=None, metadata=None):
"""
Updates the specified entity's values with the supplied parameters.
"""
body = {}
if agent:
body["agent_id"] = utils.get_id(agent)
if metadata:
body["metadata"] = metadata
if body:
uri = "/%s/%s" % (self.uri_base, utils.get_id(entity))
resp, body = self.api.method_put(uri, body=body) |
def filter_defragment(self, threshold, mode='include', filt=True, samples=None, subset=None):
"""
Remove 'fragments' from the calculated filter
Parameters
----------
threshold : int
Contiguous data regions that contain this number
or fewer points are considered 'fragments'
mode : str
Specifies whether to 'include' or 'exclude' the identified
fragments.
filt : bool or filt string
Which filter to apply the defragmenter to. Defaults to True
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
f = self.data[s].filt.grab_filt(filt)
self.data[s].filt.add(name='defrag_{:s}_{:.0f}'.format(mode, threshold),
filt=filters.defrag(f, threshold, mode),
info='Defrag {:s} filter with threshold {:.0f}'.format(mode, threshold),
params=(threshold, mode, filt, samples, subset)) | Remove 'fragments' from the calculated filter
Parameters
----------
threshold : int
Contiguous data regions that contain this number
or fewer points are considered 'fragments'
mode : str
Specifies whether to 'include' or 'exclude' the identified
fragments.
filt : bool or filt string
Which filter to apply the defragmenter to. Defaults to True
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None | Below is the instruction that describes the task:
### Input:
Remove 'fragments' from the calculated filter
Parameters
----------
threshold : int
Contiguous data regions that contain this number
or fewer points are considered 'fragments'
mode : str
Specifies whether to 'include' or 'exclude' the identified
fragments.
filt : bool or filt string
Which filter to apply the defragmenter to. Defaults to True
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
### Response:
def filter_defragment(self, threshold, mode='include', filt=True, samples=None, subset=None):
"""
Remove 'fragments' from the calculated filter
Parameters
----------
threshold : int
Contiguous data regions that contain this number
or fewer points are considered 'fragments'
mode : str
Specifies whether to 'include' or 'exclude' the identified
fragments.
filt : bool or filt string
Which filter to apply the defragmenter to. Defaults to True
samples : array_like or None
Which samples to apply this filter to. If None, applies to all
samples.
subset : str or number
The subset of samples (defined by make_subset) you want to apply
the filter to.
Returns
-------
None
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
for s in samples:
f = self.data[s].filt.grab_filt(filt)
self.data[s].filt.add(name='defrag_{:s}_{:.0f}'.format(mode, threshold),
filt=filters.defrag(f, threshold, mode),
info='Defrag {:s} filter with threshold {:.0f}'.format(mode, threshold),
params=(threshold, mode, filt, samples, subset)) |
def deregisterevent(self, event_name):
"""
Remove callback of registered event
@param event_name: Event name in at-spi format.
@type event_name: string
@return: 1 if registration was successful, 0 if not.
@rtype: integer
"""
if event_name in self._pollEvents._callback:
del self._pollEvents._callback[event_name]
return self._remote_deregisterevent(event_name) | Remove callback of registered event
@param event_name: Event name in at-spi format.
@type event_name: string
@return: 1 if registration was successful, 0 if not.
@rtype: integer | Below is the instruction that describes the task:
### Input:
Remove callback of registered event
@param event_name: Event name in at-spi format.
@type event_name: string
@return: 1 if registration was successful, 0 if not.
@rtype: integer
### Response:
def deregisterevent(self, event_name):
"""
Remove callback of registered event
@param event_name: Event name in at-spi format.
@type event_name: string
@return: 1 if registration was successful, 0 if not.
@rtype: integer
"""
if event_name in self._pollEvents._callback:
del self._pollEvents._callback[event_name]
return self._remote_deregisterevent(event_name) |
def get_carrier_concentration(self):
"""
gives the carrier concentration (in cm^-3)
Returns
a dictionary {temp:[]} with an array of carrier concentration
(in cm^-3) at each temperature
The array relates to each step of electron chemical potential
"""
return {temp: [1e24 * i / self.vol for i in self._carrier_conc[temp]]
for temp in self._carrier_conc} | gives the carrier concentration (in cm^-3)
Returns
a dictionary {temp:[]} with an array of carrier concentration
(in cm^-3) at each temperature
The array relates to each step of electron chemical potential | Below is the instruction that describes the task:
### Input:
gives the carrier concentration (in cm^-3)
Returns
a dictionary {temp:[]} with an array of carrier concentration
(in cm^-3) at each temperature
The array relates to each step of electron chemical potential
### Response:
def get_carrier_concentration(self):
"""
gives the carrier concentration (in cm^-3)
Returns
a dictionary {temp:[]} with an array of carrier concentration
(in cm^-3) at each temperature
The array relates to each step of electron chemical potential
"""
return {temp: [1e24 * i / self.vol for i in self._carrier_conc[temp]]
for temp in self._carrier_conc} |
def _page(q, chunk=1000):
""" Quick utility to page a query, 1000 items at a time.
We need this so we don't OOM (out of memory) ourselves loading the world.
"""
offset = 0
while True:
r = False
for elem in q.limit(chunk).offset(offset):
r = True
yield elem
offset += chunk
if not r:
break | Quick utility to page a query, 1000 items at a time.
We need this so we don't OOM (out of memory) ourselves loading the world. | Below is the instruction that describes the task:
### Input:
Quick utility to page a query, 1000 items at a time.
We need this so we don't OOM (out of memory) ourselves loading the world.
### Response:
def _page(q, chunk=1000):
""" Quick utility to page a query, 1000 items at a time.
We need this so we don't OOM (out of memory) ourselves loading the world.
"""
offset = 0
while True:
r = False
for elem in q.limit(chunk).offset(offset):
r = True
yield elem
offset += chunk
if not r:
break |
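A usage sketch with a SQLAlchemy-style query; `session` and `User` are placeholder names:

    for user in _page(session.query(User), chunk=500):
        process(user)  # rows are yielded in pages of 500 instead of loading the whole table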
def permissions(self, addr, permissions=None):
"""
Retrieve the permissions of the page at address `addr`.
:param addr: address to get the page permissions
:param permissions: Integer or BVV to optionally set page permissions to
:return: AST representing the permissions on the page
"""
out = self.mem.permissions(addr, permissions)
# if unicorn is in play and we've marked a page writable, it must be uncached
if permissions is not None and self.state.solver.is_true(permissions & 2 == 2):
if self.state.has_plugin('unicorn'):
self.state.unicorn.uncache_page(addr)
return out | Retrieve the permissions of the page at address `addr`.
:param addr: address to get the page permissions
:param permissions: Integer or BVV to optionally set page permissions to
:return: AST representing the permissions on the page | Below is the instruction that describes the task:
### Input:
Retrieve the permissions of the page at address `addr`.
:param addr: address to get the page permissions
:param permissions: Integer or BVV to optionally set page permissions to
:return: AST representing the permissions on the page
### Response:
def permissions(self, addr, permissions=None):
"""
Retrieve the permissions of the page at address `addr`.
:param addr: address to get the page permissions
:param permissions: Integer or BVV to optionally set page permissions to
:return: AST representing the permissions on the page
"""
out = self.mem.permissions(addr, permissions)
# if unicorn is in play and we've marked a page writable, it must be uncached
if permissions is not None and self.state.solver.is_true(permissions & 2 == 2):
if self.state.has_plugin('unicorn'):
self.state.unicorn.uncache_page(addr)
return out |
def commit_channel(self, channel_id):
""" commit_channel: commits channel to Kolibri Studio
Args:
channel_id (str): channel's id on Kolibri Studio
Returns: channel id and link to uploaded channel
"""
payload = {
"channel_id":channel_id,
"stage": config.STAGE,
}
response = config.SESSION.post(config.finish_channel_url(), data=json.dumps(payload))
if response.status_code != 200:
config.LOGGER.error("\n\nCould not activate channel: {}\n".format(response._content.decode('utf-8')))
if response.status_code == 403:
config.LOGGER.error("Channel can be viewed at {}\n\n".format(config.open_channel_url(channel_id, staging=True)))
sys.exit()
response.raise_for_status()
new_channel = json.loads(response._content.decode("utf-8"))
channel_link = config.open_channel_url(new_channel['new_channel'])
return channel_id, channel_link | commit_channel: commits channel to Kolibri Studio
Args:
channel_id (str): channel's id on Kolibri Studio
Returns: channel id and link to uploaded channel | Below is the instruction that describes the task:
### Input:
commit_channel: commits channel to Kolibri Studio
Args:
channel_id (str): channel's id on Kolibri Studio
Returns: channel id and link to uploaded channel
### Response:
def commit_channel(self, channel_id):
""" commit_channel: commits channel to Kolibri Studio
Args:
channel_id (str): channel's id on Kolibri Studio
Returns: channel id and link to uploadedchannel
"""
payload = {
"channel_id":channel_id,
"stage": config.STAGE,
}
response = config.SESSION.post(config.finish_channel_url(), data=json.dumps(payload))
if response.status_code != 200:
config.LOGGER.error("\n\nCould not activate channel: {}\n".format(response._content.decode('utf-8')))
if response.status_code == 403:
config.LOGGER.error("Channel can be viewed at {}\n\n".format(config.open_channel_url(channel_id, staging=True)))
sys.exit()
response.raise_for_status()
new_channel = json.loads(response._content.decode("utf-8"))
channel_link = config.open_channel_url(new_channel['new_channel'])
return channel_id, channel_link |
def send_msg_to_clients(client_ids, msg, error=False):
"""Send message to all clients"""
if error:
stream = "stderr"
else:
stream = "stdout"
response = [{"message": None, "type": "console", "payload": msg, "stream": stream}]
for client_id in client_ids:
logger.info("emiting message to websocket client id " + client_id)
socketio.emit(
"gdb_response", response, namespace="/gdb_listener", room=client_id
) | Send message to all clients | Below is the the instruction that describes the task:
### Input:
Send message to all clients
### Response:
def send_msg_to_clients(client_ids, msg, error=False):
"""Send message to all clients"""
if error:
stream = "stderr"
else:
stream = "stdout"
response = [{"message": None, "type": "console", "payload": msg, "stream": stream}]
for client_id in client_ids:
logger.info("emiting message to websocket client id " + client_id)
socketio.emit(
"gdb_response", response, namespace="/gdb_listener", room=client_id
) |
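A minimal usage sketch for send_msg_to_clients, assuming it lives in the module that already defines the Flask-SocketIO ``socketio`` instance and ``logger`` used above; the client ids are placeholders for tracked websocket connections.
.. code:: python
    # Hypothetical ids of connected browser clients.
    connected_clients = ["a1b2c3", "d4e5f6"]
    send_msg_to_clients(connected_clients, "gdb session started")
    # error=True routes the payload to the stderr stream so the frontend can style it.
    send_msg_to_clients(connected_clients, "gdb exited unexpectedly", error=True)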
def getAllowedInstruments(self):
"""Returns the allowed instruments for this analysis, either if the
instrument was assigned directly (by using "Allows instrument entry of
results") or indirectly via Method (by using "Allows manual entry of
results") in Analysis Service edit view.
:return: A list of instruments allowed for this Analysis
:rtype: list of instruments
"""
service = self.getAnalysisService()
if not service:
return []
instruments = []
if self.getInstrumentEntryOfResults():
instruments = service.getInstruments()
if self.getManualEntryOfResults():
for meth in self.getAllowedMethods():
instruments += meth.getInstruments()
return list(set(instruments)) | Returns the allowed instruments for this analysis, either if the
instrument was assigned directly (by using "Allows instrument entry of
results") or indirectly via Method (by using "Allows manual entry of
results") in Analysis Service edit view.
:return: A list of instruments allowed for this Analysis
:rtype: list of instruments | Below is the the instruction that describes the task:
### Input:
Returns the allowed instruments for this analysis, either if the
instrument was assigned directly (by using "Allows instrument entry of
results") or indirectly via Method (by using "Allows manual entry of
results") in Analysis Service edit view.
:return: A list of instruments allowed for this Analysis
:rtype: list of instruments
### Response:
def getAllowedInstruments(self):
"""Returns the allowed instruments for this analysis, either if the
instrument was assigned directly (by using "Allows instrument entry of
results") or indirectly via Method (by using "Allows manual entry of
results") in Analysis Service edit view.
:return: A list of instruments allowed for this Analysis
:rtype: list of instruments
"""
service = self.getAnalysisService()
if not service:
return []
instruments = []
if self.getInstrumentEntryOfResults():
instruments = service.getInstruments()
if self.getManualEntryOfResults():
for meth in self.getAllowedMethods():
instruments += meth.getInstruments()
return list(set(instruments)) |
def normalize_rgb(r, g, b, a):
"""Transform a rgb[a] color to #hex[a].
"""
r = int(r, 10)
g = int(g, 10)
b = int(b, 10)
if a:
a = float(a) * 256
if r > 255 or g > 255 or b > 255 or (a and a > 255):
return None
color = '#%02x%02x%02x' % (r, g, b)
if a:
color += '%02x' % int(a)
return color | Transform a rgb[a] color to #hex[a]. | Below is the the instruction that describes the task:
### Input:
Transform a rgb[a] color to #hex[a].
### Response:
def normalize_rgb(r, g, b, a):
"""Transform a rgb[a] color to #hex[a].
"""
r = int(r, 10)
g = int(g, 10)
b = int(b, 10)
if a:
a = float(a) * 256
if r > 255 or g > 255 or b > 255 or (a and a > 255):
return None
color = '#%02x%02x%02x' % (r, g, b)
if a:
color += '%02x' % int(a)
return color |
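A short worked example of the conversion above; channels arrive as decimal strings, alpha as a 0-1 float string, and any value that scales past 255 makes the function return None.
.. code:: python
    assert normalize_rgb('255', '128', '0', None) == '#ff8000'
    assert normalize_rgb('255', '128', '0', '0.5') == '#ff800080'  # 0.5 * 256 -> 0x80
    assert normalize_rgb('300', '0', '0', None) is None            # out of range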
def close(self):
"""
Unclaim the PEP node and unregister the registered features.
It is not necessary to call close if this claim is managed by
:class:`~aioxmpp.pep.register_pep_node`.
"""
if self._closed:
return
self._closed = True
self._pep_service._unclaim(self.node_namespace)
self._unregister() | Unclaim the PEP node and unregister the registered features.
It is not necessary to call close if this claim is managed by
:class:`~aioxmpp.pep.register_pep_node`. | Below is the the instruction that describes the task:
### Input:
Unclaim the PEP node and unregister the registered features.
It is not necessary to call close if this claim is managed by
:class:`~aioxmpp.pep.register_pep_node`.
### Response:
def close(self):
"""
Unclaim the PEP node and unregister the registered features.
It is not necessary to call close if this claim is managed by
:class:`~aioxmpp.pep.register_pep_node`.
"""
if self._closed:
return
self._closed = True
self._pep_service._unclaim(self.node_namespace)
self._unregister() |
def incrementSub(self, amount=1):
"""
Increments the sub-progress bar by amount.
"""
self._subProgressBar.setValue(self.subValue() + amount)
QApplication.instance().processEvents() | Increments the sub-progress bar by amount. | Below is the the instruction that describes the task:
### Input:
Increments the sub-progress bar by amount.
### Response:
def incrementSub(self, amount=1):
"""
Increments the sub-progress bar by amount.
"""
self._subProgressBar.setValue(self.subValue() + amount)
QApplication.instance().processEvents() |
def get_frames_singleimage(self):
"""
Get current left and right frames from a single image,
by splitting the image in half.
"""
frame = self.captures[0].read()[1]
height, width, colors = frame.shape
        left_frame = frame[:, :width // 2, :]
        right_frame = frame[:, width // 2:, :]
return [left_frame, right_frame] | Get current left and right frames from a single image,
by splitting the image in half. | Below is the the instruction that describes the task:
### Input:
Get current left and right frames from a single image,
by splitting the image in half.
### Response:
def get_frames_singleimage(self):
"""
Get current left and right frames from a single image,
by splitting the image in half.
"""
frame = self.captures[0].read()[1]
height, width, colors = frame.shape
        left_frame = frame[:, :width // 2, :]
        right_frame = frame[:, width // 2:, :]
return [left_frame, right_frame] |
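A rough sketch of the same split on a plain NumPy array, which is what the OpenCV capture above returns; the frame shape is illustrative.
.. code:: python
    import numpy as np
    frame = np.zeros((480, 1280, 3), dtype=np.uint8)  # fake side-by-side stereo frame
    height, width, colors = frame.shape
    left = frame[:, :width // 2, :]    # columns 0..639
    right = frame[:, width // 2:, :]   # columns 640..1279
    assert left.shape == right.shape == (480, 640, 3)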
def weight_from_comm(self, v, comm):
""" The total number of edges (or sum of weights) to node ``v`` from
community ``comm``.
See Also
--------
:func:`~VertexPartition.MutableVertexPartition.weight_to_comm`
"""
return _c_leiden._MutableVertexPartition_weight_from_comm(self._partition, v, comm) | The total number of edges (or sum of weights) to node ``v`` from
community ``comm``.
See Also
--------
:func:`~VertexPartition.MutableVertexPartition.weight_to_comm` | Below is the the instruction that describes the task:
### Input:
The total number of edges (or sum of weights) to node ``v`` from
community ``comm``.
See Also
--------
:func:`~VertexPartition.MutableVertexPartition.weight_to_comm`
### Response:
def weight_from_comm(self, v, comm):
""" The total number of edges (or sum of weights) to node ``v`` from
community ``comm``.
See Also
--------
:func:`~VertexPartition.MutableVertexPartition.weight_to_comm`
"""
return _c_leiden._MutableVertexPartition_weight_from_comm(self._partition, v, comm) |
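A minimal usage sketch with the leidenalg/python-igraph pairing this method appears to belong to; the example graph and partition type are only illustrative.
.. code:: python
    import igraph as ig
    import leidenalg
    graph = ig.Graph.Famous('Zachary')
    partition = leidenalg.find_partition(graph, leidenalg.ModularityVertexPartition)
    # Weight flowing into vertex 0 from its own community.
    print(partition.weight_from_comm(0, partition.membership[0]))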
def _compile_schema(self, schema):
""" Compile another schema """
assert self.matcher == schema.matcher
self.name = schema.name
self.compiled_type = schema.compiled_type
return schema.compiled | Compile another schema | Below is the the instruction that describes the task:
### Input:
Compile another schema
### Response:
def _compile_schema(self, schema):
""" Compile another schema """
assert self.matcher == schema.matcher
self.name = schema.name
self.compiled_type = schema.compiled_type
return schema.compiled |
def register_result(self, job, skip_sanity_checks=False):
"""
function to register the result of a job
This function is called from HB_master, don't call this from
your script.
"""
if self.is_finished:
raise RuntimeError("This HB iteration is finished, you can't register more results!")
config_id = job.id
config = job.kwargs['config']
budget = job.kwargs['budget']
timestamps = job.timestamps
result = job.result
exception = job.exception
d = self.data[config_id]
if not skip_sanity_checks:
assert d.config == config, 'Configurations differ!'
assert d.status == 'RUNNING', "Configuration wasn't scheduled for a run."
assert d.budget == budget, 'Budgets differ (%f != %f)!'%(self.data[config_id]['budget'], budget)
d.time_stamps[budget] = timestamps
d.results[budget] = result
        if job.result is not None and np.isfinite(result['loss']):
d.status = 'REVIEW'
else:
d.status = 'CRASHED'
d.exceptions[budget] = exception
self.num_running -= 1 | function to register the result of a job
This function is called from HB_master, don't call this from
your script. | Below is the the instruction that describes the task:
### Input:
function to register the result of a job
This function is called from HB_master, don't call this from
your script.
### Response:
def register_result(self, job, skip_sanity_checks=False):
"""
function to register the result of a job
This function is called from HB_master, don't call this from
your script.
"""
if self.is_finished:
raise RuntimeError("This HB iteration is finished, you can't register more results!")
config_id = job.id
config = job.kwargs['config']
budget = job.kwargs['budget']
timestamps = job.timestamps
result = job.result
exception = job.exception
d = self.data[config_id]
if not skip_sanity_checks:
assert d.config == config, 'Configurations differ!'
assert d.status == 'RUNNING', "Configuration wasn't scheduled for a run."
assert d.budget == budget, 'Budgets differ (%f != %f)!'%(self.data[config_id]['budget'], budget)
d.time_stamps[budget] = timestamps
d.results[budget] = result
        if job.result is not None and np.isfinite(result['loss']):
d.status = 'REVIEW'
else:
d.status = 'CRASHED'
d.exceptions[budget] = exception
self.num_running -= 1 |
def run(self, rapid_namelist_file=""):
"""
Run RAPID program and generate file based on inputs
This will generate your rapid_namelist file and run RAPID from wherever
you call this script (your working directory).
Parameters
----------
rapid_namelist_file: str, optional
Path of namelist file to use in the simulation.
It will be updated with any parameters added to the RAPID manager.
Linux Example:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
                rapid_executable_location='~/work/rapid/src/rapid',
use_all_processors=True,
)
rapid_manager.update_parameters(
rapid_connect_file='../rapid-io/input/rapid_connect.csv',
Vlat_file='../rapid-io/input/m3_riv.nc',
riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
k_file='../rapid-io/input/k.csv',
x_file='../rapid-io/input/x.csv',
Qout_file='../rapid-io/output/Qout.nc',
)
rapid_manager.update_reach_number_data()
rapid_manager.update_simulation_runtime()
rapid_manager.run(
rapid_namelist_file='../rapid-io/input/rapid_namelist')
Linux Reservoir Forcing Flows Example:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
rapid_executable_location='~/work/rapid/src/rapid',
num_processors=4,
IS_for_tot=4,
IS_for_use=4,
for_tot_id_file='../rapid-io/input/dam_id.csv',
for_use_id_file='../rapid-io/input/dam_id.csv',
Qfor_file='../rapid-io/input/qout_dams.csv',
ZS_dtF=86400,
BS_opt_for=True,
)
rapid_manager.run(
rapid_namelist_file='../rapid-io/input/rapid_namelist_regular')
Windows with Cygwin Example:
.. code:: python
from RAPIDpy import RAPID
from os import path
            rapid_exe_path = 'C:/cygwin64/home/username/rapid/run/rapid'
rapid_manager = RAPID(
rapid_executable_location=rapid_exe_path,
cygwin_bin_location='C:/cygwin64/bin',
use_all_processors=True,
ZS_TauR=24*3600,
ZS_dtR=15*60,
ZS_TauM=365*24*3600,
ZS_dtM=24*3600
)
rapid_input = 'C:/cygwin64/home/username/rapid-io/input'
rapid_output = 'C:/cygwin64/home/username/rapid-io/output'
rapid_manager.update_parameters(
rapid_connect_file=path.join(rapid_input, 'rapid_connect.csv'),
Vlat_file=path.join(rapid_input, 'm3_riv.nc'),
riv_bas_id_file=path.join(rapid_input, 'riv_bas_id.csv'),
k_file=path.join(rapid_input, 'k.csv'),
x_file=path.join(rapid_input, 'x.csv'),
Qout_file=path.join(rapid_output, 'Qout.nc'),
)
rapid_manager.update_reach_number_data()
rapid_manager.update_simulation_runtime()
rapid_manager.run()
"""
if not self._rapid_executable_location:
log("Missing rapid_executable_location. "
"Please set before running this function ...",
"ERROR")
time_start = datetime.datetime.utcnow()
temp_rapid_namelist_file = os.path.join(os.getcwd(), "rapid_namelist")
if not rapid_namelist_file or not os.path.exists(rapid_namelist_file):
# generate input file if it does not exist
self.generate_namelist_file(temp_rapid_namelist_file)
else:
# update existing file
self.update_namelist_file(rapid_namelist_file,
temp_rapid_namelist_file)
local_rapid_executable_location = \
os.path.join(os.path.dirname(temp_rapid_namelist_file),
"rapid_exe_symlink")
def rapid_cleanup(*args):
"""
Cleans up the rapid files generated by the process
"""
for arg in args:
# remove files
try:
os.remove(arg)
except OSError:
pass
# create link to RAPID if needed
temp_link_to_rapid = ""
# pylint: disable=no-member
if self._rapid_executable_location != \
local_rapid_executable_location:
rapid_cleanup(local_rapid_executable_location)
if os.name == "nt":
self._create_symlink_cygwin(self._rapid_executable_location,
local_rapid_executable_location)
else:
os.symlink(self._rapid_executable_location,
local_rapid_executable_location)
temp_link_to_rapid = local_rapid_executable_location
# run RAPID
log("Running RAPID ...",
"INFO")
if os.name == "nt":
local_rapid_executable_location = \
self._get_cygwin_path(local_rapid_executable_location)
# htcondor will not allow mpiexec for single processor jobs
# this was added for that purpose
run_rapid_command = [local_rapid_executable_location,
"-ksp_type", self._ksp_type]
if self._num_processors > 1:
run_rapid_command = [self._mpiexec_command,
"-n", str(self._num_processors)] \
+ run_rapid_command
process = Popen(run_rapid_command,
stdout=PIPE, stderr=PIPE, shell=False)
out, err = process.communicate()
if err:
rapid_cleanup(temp_link_to_rapid, temp_rapid_namelist_file)
raise Exception(err)
else:
log('RAPID output:',
"INFO")
for line in out.split(b'\n'):
print(line)
rapid_cleanup(temp_link_to_rapid, temp_rapid_namelist_file)
log("Time to run RAPID: %s" % (datetime.datetime.utcnow()-time_start),
"INFO") | Run RAPID program and generate file based on inputs
This will generate your rapid_namelist file and run RAPID from wherever
you call this script (your working directory).
Parameters
----------
rapid_namelist_file: str, optional
Path of namelist file to use in the simulation.
It will be updated with any parameters added to the RAPID manager.
Linux Example:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
        rapid_executable_location='~/work/rapid/src/rapid',
use_all_processors=True,
)
rapid_manager.update_parameters(
rapid_connect_file='../rapid-io/input/rapid_connect.csv',
Vlat_file='../rapid-io/input/m3_riv.nc',
riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
k_file='../rapid-io/input/k.csv',
x_file='../rapid-io/input/x.csv',
Qout_file='../rapid-io/output/Qout.nc',
)
rapid_manager.update_reach_number_data()
rapid_manager.update_simulation_runtime()
rapid_manager.run(
rapid_namelist_file='../rapid-io/input/rapid_namelist')
Linux Reservoir Forcing Flows Example:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
rapid_executable_location='~/work/rapid/src/rapid',
num_processors=4,
IS_for_tot=4,
IS_for_use=4,
for_tot_id_file='../rapid-io/input/dam_id.csv',
for_use_id_file='../rapid-io/input/dam_id.csv',
Qfor_file='../rapid-io/input/qout_dams.csv',
ZS_dtF=86400,
BS_opt_for=True,
)
rapid_manager.run(
rapid_namelist_file='../rapid-io/input/rapid_namelist_regular')
Windows with Cygwin Example:
.. code:: python
from RAPIDpy import RAPID
from os import path
    rapid_exe_path = 'C:/cygwin64/home/username/rapid/run/rapid'
rapid_manager = RAPID(
rapid_executable_location=rapid_exe_path,
cygwin_bin_location='C:/cygwin64/bin',
use_all_processors=True,
ZS_TauR=24*3600,
ZS_dtR=15*60,
ZS_TauM=365*24*3600,
ZS_dtM=24*3600
)
rapid_input = 'C:/cygwin64/home/username/rapid-io/input'
rapid_output = 'C:/cygwin64/home/username/rapid-io/output'
rapid_manager.update_parameters(
rapid_connect_file=path.join(rapid_input, 'rapid_connect.csv'),
Vlat_file=path.join(rapid_input, 'm3_riv.nc'),
riv_bas_id_file=path.join(rapid_input, 'riv_bas_id.csv'),
k_file=path.join(rapid_input, 'k.csv'),
x_file=path.join(rapid_input, 'x.csv'),
Qout_file=path.join(rapid_output, 'Qout.nc'),
)
rapid_manager.update_reach_number_data()
rapid_manager.update_simulation_runtime()
rapid_manager.run() | Below is the the instruction that describes the task:
### Input:
Run RAPID program and generate file based on inputs
This will generate your rapid_namelist file and run RAPID from wherever
you call this script (your working directory).
Parameters
----------
rapid_namelist_file: str, optional
Path of namelist file to use in the simulation.
It will be updated with any parameters added to the RAPID manager.
Linux Example:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
        rapid_executable_location='~/work/rapid/src/rapid',
use_all_processors=True,
)
rapid_manager.update_parameters(
rapid_connect_file='../rapid-io/input/rapid_connect.csv',
Vlat_file='../rapid-io/input/m3_riv.nc',
riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
k_file='../rapid-io/input/k.csv',
x_file='../rapid-io/input/x.csv',
Qout_file='../rapid-io/output/Qout.nc',
)
rapid_manager.update_reach_number_data()
rapid_manager.update_simulation_runtime()
rapid_manager.run(
rapid_namelist_file='../rapid-io/input/rapid_namelist')
Linux Reservoir Forcing Flows Example:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
rapid_executable_location='~/work/rapid/src/rapid',
num_processors=4,
IS_for_tot=4,
IS_for_use=4,
for_tot_id_file='../rapid-io/input/dam_id.csv',
for_use_id_file='../rapid-io/input/dam_id.csv',
Qfor_file='../rapid-io/input/qout_dams.csv',
ZS_dtF=86400,
BS_opt_for=True,
)
rapid_manager.run(
rapid_namelist_file='../rapid-io/input/rapid_namelist_regular')
Windows with Cygwin Example:
.. code:: python
from RAPIDpy import RAPID
from os import path
    rapid_exe_path = 'C:/cygwin64/home/username/rapid/run/rapid'
rapid_manager = RAPID(
rapid_executable_location=rapid_exe_path,
cygwin_bin_location='C:/cygwin64/bin',
use_all_processors=True,
ZS_TauR=24*3600,
ZS_dtR=15*60,
ZS_TauM=365*24*3600,
ZS_dtM=24*3600
)
rapid_input = 'C:/cygwin64/home/username/rapid-io/input'
rapid_output = 'C:/cygwin64/home/username/rapid-io/output'
rapid_manager.update_parameters(
rapid_connect_file=path.join(rapid_input, 'rapid_connect.csv'),
Vlat_file=path.join(rapid_input, 'm3_riv.nc'),
riv_bas_id_file=path.join(rapid_input, 'riv_bas_id.csv'),
k_file=path.join(rapid_input, 'k.csv'),
x_file=path.join(rapid_input, 'x.csv'),
Qout_file=path.join(rapid_output, 'Qout.nc'),
)
rapid_manager.update_reach_number_data()
rapid_manager.update_simulation_runtime()
rapid_manager.run()
### Response:
def run(self, rapid_namelist_file=""):
"""
Run RAPID program and generate file based on inputs
This will generate your rapid_namelist file and run RAPID from wherever
you call this script (your working directory).
Parameters
----------
rapid_namelist_file: str, optional
Path of namelist file to use in the simulation.
It will be updated with any parameters added to the RAPID manager.
Linux Example:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
                rapid_executable_location='~/work/rapid/src/rapid',
use_all_processors=True,
)
rapid_manager.update_parameters(
rapid_connect_file='../rapid-io/input/rapid_connect.csv',
Vlat_file='../rapid-io/input/m3_riv.nc',
riv_bas_id_file='../rapid-io/input/riv_bas_id.csv',
k_file='../rapid-io/input/k.csv',
x_file='../rapid-io/input/x.csv',
Qout_file='../rapid-io/output/Qout.nc',
)
rapid_manager.update_reach_number_data()
rapid_manager.update_simulation_runtime()
rapid_manager.run(
rapid_namelist_file='../rapid-io/input/rapid_namelist')
Linux Reservoir Forcing Flows Example:
.. code:: python
from RAPIDpy import RAPID
rapid_manager = RAPID(
rapid_executable_location='~/work/rapid/src/rapid',
num_processors=4,
IS_for_tot=4,
IS_for_use=4,
for_tot_id_file='../rapid-io/input/dam_id.csv',
for_use_id_file='../rapid-io/input/dam_id.csv',
Qfor_file='../rapid-io/input/qout_dams.csv',
ZS_dtF=86400,
BS_opt_for=True,
)
rapid_manager.run(
rapid_namelist_file='../rapid-io/input/rapid_namelist_regular')
Windows with Cygwin Example:
.. code:: python
from RAPIDpy import RAPID
from os import path
            rapid_exe_path = 'C:/cygwin64/home/username/rapid/run/rapid'
rapid_manager = RAPID(
rapid_executable_location=rapid_exe_path,
cygwin_bin_location='C:/cygwin64/bin',
use_all_processors=True,
ZS_TauR=24*3600,
ZS_dtR=15*60,
ZS_TauM=365*24*3600,
ZS_dtM=24*3600
)
rapid_input = 'C:/cygwin64/home/username/rapid-io/input'
rapid_output = 'C:/cygwin64/home/username/rapid-io/output'
rapid_manager.update_parameters(
rapid_connect_file=path.join(rapid_input, 'rapid_connect.csv'),
Vlat_file=path.join(rapid_input, 'm3_riv.nc'),
riv_bas_id_file=path.join(rapid_input, 'riv_bas_id.csv'),
k_file=path.join(rapid_input, 'k.csv'),
x_file=path.join(rapid_input, 'x.csv'),
Qout_file=path.join(rapid_output, 'Qout.nc'),
)
rapid_manager.update_reach_number_data()
rapid_manager.update_simulation_runtime()
rapid_manager.run()
"""
if not self._rapid_executable_location:
log("Missing rapid_executable_location. "
"Please set before running this function ...",
"ERROR")
time_start = datetime.datetime.utcnow()
temp_rapid_namelist_file = os.path.join(os.getcwd(), "rapid_namelist")
if not rapid_namelist_file or not os.path.exists(rapid_namelist_file):
# generate input file if it does not exist
self.generate_namelist_file(temp_rapid_namelist_file)
else:
# update existing file
self.update_namelist_file(rapid_namelist_file,
temp_rapid_namelist_file)
local_rapid_executable_location = \
os.path.join(os.path.dirname(temp_rapid_namelist_file),
"rapid_exe_symlink")
def rapid_cleanup(*args):
"""
Cleans up the rapid files generated by the process
"""
for arg in args:
# remove files
try:
os.remove(arg)
except OSError:
pass
# create link to RAPID if needed
temp_link_to_rapid = ""
# pylint: disable=no-member
if self._rapid_executable_location != \
local_rapid_executable_location:
rapid_cleanup(local_rapid_executable_location)
if os.name == "nt":
self._create_symlink_cygwin(self._rapid_executable_location,
local_rapid_executable_location)
else:
os.symlink(self._rapid_executable_location,
local_rapid_executable_location)
temp_link_to_rapid = local_rapid_executable_location
# run RAPID
log("Running RAPID ...",
"INFO")
if os.name == "nt":
local_rapid_executable_location = \
self._get_cygwin_path(local_rapid_executable_location)
# htcondor will not allow mpiexec for single processor jobs
# this was added for that purpose
run_rapid_command = [local_rapid_executable_location,
"-ksp_type", self._ksp_type]
if self._num_processors > 1:
run_rapid_command = [self._mpiexec_command,
"-n", str(self._num_processors)] \
+ run_rapid_command
process = Popen(run_rapid_command,
stdout=PIPE, stderr=PIPE, shell=False)
out, err = process.communicate()
if err:
rapid_cleanup(temp_link_to_rapid, temp_rapid_namelist_file)
raise Exception(err)
else:
log('RAPID output:',
"INFO")
for line in out.split(b'\n'):
print(line)
rapid_cleanup(temp_link_to_rapid, temp_rapid_namelist_file)
log("Time to run RAPID: %s" % (datetime.datetime.utcnow()-time_start),
"INFO") |
def publish(self, event_type, events):
"""Publish events."""
assert event_type in self.events
current_queues.queues['stats-{}'.format(event_type)].publish(events) | Publish events. | Below is the the instruction that describes the task:
### Input:
Publish events.
### Response:
def publish(self, event_type, events):
"""Publish events."""
assert event_type in self.events
current_queues.queues['stats-{}'.format(event_type)].publish(events) |
def _SkipGroup(buffer, pos, end):
"""Skip sub-group. Returns the new position."""
while 1:
(tag_bytes, pos) = ReadTag(buffer, pos)
new_pos = SkipField(buffer, pos, end, tag_bytes)
if new_pos == -1:
return pos
pos = new_pos | Skip sub-group. Returns the new position. | Below is the the instruction that describes the task:
### Input:
Skip sub-group. Returns the new position.
### Response:
def _SkipGroup(buffer, pos, end):
"""Skip sub-group. Returns the new position."""
while 1:
(tag_bytes, pos) = ReadTag(buffer, pos)
new_pos = SkipField(buffer, pos, end, tag_bytes)
if new_pos == -1:
return pos
pos = new_pos |
def read(afile):
"""Read a file into setup"""
the_relative_file = os.path.join(HERE, afile)
with codecs.open(the_relative_file, 'r', 'utf-8') as opened_file:
content = filter_out_test_code(opened_file)
content = "".join(list(content))
return content | Read a file into setup | Below is the the instruction that describes the task:
### Input:
Read a file into setup
### Response:
def read(afile):
"""Read a file into setup"""
the_relative_file = os.path.join(HERE, afile)
with codecs.open(the_relative_file, 'r', 'utf-8') as opened_file:
content = filter_out_test_code(opened_file)
content = "".join(list(content))
return content |
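A typical setup.py-style call, assuming the module also defines the HERE constant and filter_out_test_code helper referenced above, and that a README.rst sits next to it.
.. code:: python
    # Hypothetical use when building the package metadata.
    long_description = read('README.rst')
    print(long_description[:200])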
def _get_host_details(self):
"""Get the system details."""
# Assuming only one system present as part of collection,
# as we are dealing with iLO's here.
status, headers, system = self._rest_get('/rest/v1/Systems/1')
if status < 300:
stype = self._get_type(system)
if stype not in ['ComputerSystem.0', 'ComputerSystem.1']:
msg = "%s is not a valid system type " % stype
raise exception.IloError(msg)
else:
msg = self._get_extended_error(system)
raise exception.IloError(msg)
return system | Get the system details. | Below is the the instruction that describes the task:
### Input:
Get the system details.
### Response:
def _get_host_details(self):
"""Get the system details."""
# Assuming only one system present as part of collection,
# as we are dealing with iLO's here.
status, headers, system = self._rest_get('/rest/v1/Systems/1')
if status < 300:
stype = self._get_type(system)
if stype not in ['ComputerSystem.0', 'ComputerSystem.1']:
msg = "%s is not a valid system type " % stype
raise exception.IloError(msg)
else:
msg = self._get_extended_error(system)
raise exception.IloError(msg)
return system |
def authenticate(self):
"""
Handles authentication, and persists the X-APPLE-WEB-KB cookie so that
subsequent logins will not cause additional e-mails from Apple.
"""
logger.info("Authenticating as %s", self.user['apple_id'])
data = dict(self.user)
# We authenticate every time, so "remember me" is not needed
data.update({'extended_login': False})
try:
req = self.session.post(
self._base_login_url,
params=self.params,
data=json.dumps(data)
)
except PyiCloudAPIResponseError as error:
msg = 'Invalid email/password combination.'
raise PyiCloudFailedLoginException(msg, error)
resp = req.json()
self.params.update({'dsid': resp['dsInfo']['dsid']})
if not os.path.exists(self._cookie_directory):
os.mkdir(self._cookie_directory)
self.session.cookies.save()
logger.debug("Cookies saved to %s", self._get_cookiejar_path())
self.data = resp
self.webservices = self.data['webservices']
logger.info("Authentication completed successfully")
logger.debug(self.params) | Handles authentication, and persists the X-APPLE-WEB-KB cookie so that
subsequent logins will not cause additional e-mails from Apple. | Below is the the instruction that describes the task:
### Input:
Handles authentication, and persists the X-APPLE-WEB-KB cookie so that
subsequent logins will not cause additional e-mails from Apple.
### Response:
def authenticate(self):
"""
Handles authentication, and persists the X-APPLE-WEB-KB cookie so that
subsequent logins will not cause additional e-mails from Apple.
"""
logger.info("Authenticating as %s", self.user['apple_id'])
data = dict(self.user)
# We authenticate every time, so "remember me" is not needed
data.update({'extended_login': False})
try:
req = self.session.post(
self._base_login_url,
params=self.params,
data=json.dumps(data)
)
except PyiCloudAPIResponseError as error:
msg = 'Invalid email/password combination.'
raise PyiCloudFailedLoginException(msg, error)
resp = req.json()
self.params.update({'dsid': resp['dsInfo']['dsid']})
if not os.path.exists(self._cookie_directory):
os.mkdir(self._cookie_directory)
self.session.cookies.save()
logger.debug("Cookies saved to %s", self._get_cookiejar_path())
self.data = resp
self.webservices = self.data['webservices']
logger.info("Authentication completed successfully")
logger.debug(self.params) |
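A sketch of how this is normally reached through pyicloud; the credentials are placeholders, and the service constructor is assumed to call authenticate() itself, so the explicit call only refreshes an existing session.
.. code:: python
    from pyicloud import PyiCloudService
    api = PyiCloudService('user@example.com', 'app-specific-password')  # placeholders
    api.authenticate()  # re-authenticate if the session has gone stale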
def to_html(self, table_width=5):
"""Write the program information to HTML code, which can be saved,
printed and brought to the gym.
Parameters
----------
table_width
            The table width of the HTML code.
Returns
-------
string
HTML code.
"""
env = self.jinja2_environment
template = env.get_template(self.TEMPLATE_NAMES['html'])
return template.render(program=self, table_width=table_width) | Write the program information to HTML code, which can be saved,
printed and brought to the gym.
Parameters
----------
table_width
        The table width of the HTML code.
Returns
-------
string
HTML code. | Below is the the instruction that describes the task:
### Input:
Write the program information to HTML code, which can be saved,
printed and brought to the gym.
Parameters
----------
table_width
        The table width of the HTML code.
Returns
-------
string
HTML code.
### Response:
def to_html(self, table_width=5):
"""Write the program information to HTML code, which can be saved,
printed and brought to the gym.
Parameters
----------
table_width
            The table width of the HTML code.
Returns
-------
string
HTML code.
"""
env = self.jinja2_environment
template = env.get_template(self.TEMPLATE_NAMES['html'])
return template.render(program=self, table_width=table_width) |
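A minimal usage sketch; ``program`` stands for whatever training-program object exposes this method, with its Jinja2 environment and templates already configured.
.. code:: python
    # Render the program and write it to disk so it can be printed.
    html = program.to_html(table_width=4)
    with open('program.html', 'w', encoding='utf-8') as fh:
        fh.write(html)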
def open_external_editor(filename=None, sql=None):
"""Open external editor, wait for the user to type in their query, return
the query.
    :return: tuple of (query, message); message is an error string or None.
"""
message = None
filename = filename.strip().split(' ', 1)[0] if filename else None
sql = sql or ''
MARKER = '# Type your query above this line.\n'
# Populate the editor buffer with the partial sql (if available) and a
# placeholder comment.
query = click.edit(u'{sql}\n\n{marker}'.format(sql=sql, marker=MARKER),
filename=filename, extension='.sql')
if filename:
try:
with open(filename, encoding='utf-8') as f:
query = f.read()
except IOError:
message = 'Error reading file: %s.' % filename
if query is not None:
query = query.split(MARKER, 1)[0].rstrip('\n')
else:
# Don't return None for the caller to deal with.
# Empty string is ok.
query = sql
return (query, message) | Open external editor, wait for the user to type in their query, return
the query.
    :return: tuple of (query, message); message is an error string or None. | Below is the the instruction that describes the task:
### Input:
Open external editor, wait for the user to type in their query, return
the query.
    :return: tuple of (query, message); message is an error string or None.
### Response:
def open_external_editor(filename=None, sql=None):
"""Open external editor, wait for the user to type in their query, return
the query.
    :return: tuple of (query, message); message is an error string or None.
"""
message = None
filename = filename.strip().split(' ', 1)[0] if filename else None
sql = sql or ''
MARKER = '# Type your query above this line.\n'
# Populate the editor buffer with the partial sql (if available) and a
# placeholder comment.
query = click.edit(u'{sql}\n\n{marker}'.format(sql=sql, marker=MARKER),
filename=filename, extension='.sql')
if filename:
try:
with open(filename, encoding='utf-8') as f:
query = f.read()
except IOError:
message = 'Error reading file: %s.' % filename
if query is not None:
query = query.split(MARKER, 1)[0].rstrip('\n')
else:
# Don't return None for the caller to deal with.
# Empty string is ok.
query = sql
return (query, message) |
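A minimal sketch of the two call styles, assuming click is installed and $EDITOR is configured; both return the edited query plus an optional error message.
.. code:: python
    # Edit an ad-hoc statement starting from partial SQL.
    query, message = open_external_editor(sql='SELECT * FROM users')
    # Or edit and read back a named .sql file.
    query, message = open_external_editor(filename='my_query.sql')
    if message:
        print(message)
    else:
        print(query)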
def load_blind(self, item):
"""Load blind from JSON."""
blind = Blind.from_config(self.pyvlx, item)
self.add(blind) | Load blind from JSON. | Below is the the instruction that describes the task:
### Input:
Load blind from JSON.
### Response:
def load_blind(self, item):
"""Load blind from JSON."""
blind = Blind.from_config(self.pyvlx, item)
self.add(blind) |
def set_scalebar_for_all(self, row_column_list=None,
location='lower right'):
"""Show marker area scale for subplots.
:param row_column_list: a list containing (row, column) tuples to
specify the subplots, or None to indicate *all* subplots.
:param location: the location of the label inside the plot. May
be one of 'center', 'upper right', 'lower right', 'upper
left', 'lower left'.
"""
if row_column_list is None:
for subplot in self.subplots:
subplot.set_scalebar(location)
else:
for row, column in row_column_list:
subplot = self.get_subplot_at(row, column)
subplot.set_scalebar(location) | Show marker area scale for subplots.
:param row_column_list: a list containing (row, column) tuples to
specify the subplots, or None to indicate *all* subplots.
:param location: the location of the label inside the plot. May
be one of 'center', 'upper right', 'lower right', 'upper
left', 'lower left'. | Below is the the instruction that describes the task:
### Input:
Show marker area scale for subplots.
:param row_column_list: a list containing (row, column) tuples to
specify the subplots, or None to indicate *all* subplots.
:param location: the location of the label inside the plot. May
be one of 'center', 'upper right', 'lower right', 'upper
left', 'lower left'.
### Response:
def set_scalebar_for_all(self, row_column_list=None,
location='lower right'):
"""Show marker area scale for subplots.
:param row_column_list: a list containing (row, column) tuples to
specify the subplots, or None to indicate *all* subplots.
:param location: the location of the label inside the plot. May
be one of 'center', 'upper right', 'lower right', 'upper
left', 'lower left'.
"""
if row_column_list is None:
for subplot in self.subplots:
subplot.set_scalebar(location)
else:
for row, column in row_column_list:
subplot = self.get_subplot_at(row, column)
subplot.set_scalebar(location) |
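A minimal usage sketch; ``plot`` stands for the multi-subplot figure object this method belongs to, with its grid of subplots already created.
.. code:: python
    # Scale marker on every subplot ...
    plot.set_scalebar_for_all(location='upper left')
    # ... or only on two specific panels, addressed as (row, column).
    plot.set_scalebar_for_all([(0, 0), (1, 2)], location='lower right')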
def endStep(self,key):
"""
Record the end time for the step.
If key==None, simply record ptime as end time for class to represent
the overall runtime since the initialization of the class.
"""
ptime = _ptime()
if key is not None:
self.steps[key]['end'] = ptime
self.steps[key]['elapsed'] = ptime[1] - self.steps[key]['start'][1]
self.end = ptime
print('==== Processing Step ',key,' finished at ',ptime[0])
print('') | Record the end time for the step.
If key==None, simply record ptime as end time for class to represent
the overall runtime since the initialization of the class. | Below is the the instruction that describes the task:
### Input:
Record the end time for the step.
If key==None, simply record ptime as end time for class to represent
the overall runtime since the initialization of the class.
### Response:
def endStep(self,key):
"""
Record the end time for the step.
If key==None, simply record ptime as end time for class to represent
the overall runtime since the initialization of the class.
"""
ptime = _ptime()
if key is not None:
self.steps[key]['end'] = ptime
self.steps[key]['elapsed'] = ptime[1] - self.steps[key]['start'][1]
self.end = ptime
print('==== Processing Step ',key,' finished at ',ptime[0])
print('') |
def get_new_addresses(
self,
index=0,
count=1,
security_level=AddressGenerator.DEFAULT_SECURITY_LEVEL,
checksum=False,
):
# type: (int, Optional[int], int, bool) -> dict
"""
Generates one or more new addresses from the seed.
:param index:
The key index of the first new address to generate (must be
>= 1).
:param count:
Number of addresses to generate (must be >= 1).
.. tip::
This is more efficient than calling ``get_new_address``
inside a loop.
If ``None``, this method will progressively generate
addresses and scan the Tangle until it finds one that has no
transactions referencing it.
:param security_level:
Number of iterations to use when generating new addresses.
Larger values take longer, but the resulting signatures are
more secure.
This value must be between 1 and 3, inclusive.
:param checksum:
Specify whether to return the address with the checksum.
Defaults to ``False``.
:return:
Dict with the following structure::
{
'addresses': List[Address],
Always a list, even if only one address was
generated.
}
References:
- https://github.com/iotaledger/wiki/blob/master/api-proposal.md#getnewaddress
"""
return extended.GetNewAddressesCommand(self.adapter)(
count=count,
index=index,
securityLevel=security_level,
checksum=checksum,
seed=self.seed,
) | Generates one or more new addresses from the seed.
:param index:
The key index of the first new address to generate (must be
>= 1).
:param count:
Number of addresses to generate (must be >= 1).
.. tip::
This is more efficient than calling ``get_new_address``
inside a loop.
If ``None``, this method will progressively generate
addresses and scan the Tangle until it finds one that has no
transactions referencing it.
:param security_level:
Number of iterations to use when generating new addresses.
Larger values take longer, but the resulting signatures are
more secure.
This value must be between 1 and 3, inclusive.
:param checksum:
Specify whether to return the address with the checksum.
Defaults to ``False``.
:return:
Dict with the following structure::
{
'addresses': List[Address],
Always a list, even if only one address was
generated.
}
References:
- https://github.com/iotaledger/wiki/blob/master/api-proposal.md#getnewaddress | Below is the the instruction that describes the task:
### Input:
Generates one or more new addresses from the seed.
:param index:
The key index of the first new address to generate (must be
>= 1).
:param count:
Number of addresses to generate (must be >= 1).
.. tip::
This is more efficient than calling ``get_new_address``
inside a loop.
If ``None``, this method will progressively generate
addresses and scan the Tangle until it finds one that has no
transactions referencing it.
:param security_level:
Number of iterations to use when generating new addresses.
Larger values take longer, but the resulting signatures are
more secure.
This value must be between 1 and 3, inclusive.
:param checksum:
Specify whether to return the address with the checksum.
Defaults to ``False``.
:return:
Dict with the following structure::
{
'addresses': List[Address],
Always a list, even if only one address was
generated.
}
References:
- https://github.com/iotaledger/wiki/blob/master/api-proposal.md#getnewaddress
### Response:
def get_new_addresses(
self,
index=0,
count=1,
security_level=AddressGenerator.DEFAULT_SECURITY_LEVEL,
checksum=False,
):
# type: (int, Optional[int], int, bool) -> dict
"""
Generates one or more new addresses from the seed.
:param index:
The key index of the first new address to generate (must be
>= 1).
:param count:
Number of addresses to generate (must be >= 1).
.. tip::
This is more efficient than calling ``get_new_address``
inside a loop.
If ``None``, this method will progressively generate
addresses and scan the Tangle until it finds one that has no
transactions referencing it.
:param security_level:
Number of iterations to use when generating new addresses.
Larger values take longer, but the resulting signatures are
more secure.
This value must be between 1 and 3, inclusive.
:param checksum:
Specify whether to return the address with the checksum.
Defaults to ``False``.
:return:
Dict with the following structure::
{
'addresses': List[Address],
Always a list, even if only one address was
generated.
}
References:
- https://github.com/iotaledger/wiki/blob/master/api-proposal.md#getnewaddress
"""
return extended.GetNewAddressesCommand(self.adapter)(
count=count,
index=index,
securityLevel=security_level,
checksum=checksum,
seed=self.seed,
) |
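A PyOTA-style usage sketch; the node URL and seed are placeholders, and the keyword names simply mirror the signature shown above.
.. code:: python
    from iota import Iota
    api = Iota('https://nodes.devnet.iota.org:443', seed=b'YOURSEED9HERE')  # placeholders
    result = api.get_new_addresses(index=0, count=3, security_level=2, checksum=True)
    for address in result['addresses']:
        print(address)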
def uridefrag(uristring):
"""Remove an existing fragment component from a URI reference string.
"""
if isinstance(uristring, bytes):
parts = uristring.partition(b'#')
else:
parts = uristring.partition(u'#')
return DefragResult(parts[0], parts[2] if parts[1] else None) | Remove an existing fragment component from a URI reference string. | Below is the the instruction that describes the task:
### Input:
Remove an existing fragment component from a URI reference string.
### Response:
def uridefrag(uristring):
"""Remove an existing fragment component from a URI reference string.
"""
if isinstance(uristring, bytes):
parts = uristring.partition(b'#')
else:
parts = uristring.partition(u'#')
return DefragResult(parts[0], parts[2] if parts[1] else None) |
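A short worked example; DefragResult is assumed to be the named-tuple-style result type this module pairs with the function, so it unpacks and compares like a plain tuple.
.. code:: python
    uri, fragment = uridefrag('http://example.com/page#section-2')
    assert uri == 'http://example.com/page'
    assert fragment == 'section-2'
    # No '#': the fragment comes back as None, not an empty string.
    assert uridefrag(b'http://example.com/page') == (b'http://example.com/page', None)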
def route(vertices_resources, nets, machine, constraints, placements,
allocations={}, core_resource=Cores, radius=20):
"""Routing algorithm based on Neighbour Exploring Routing (NER).
    Algorithm reference: J. Navaridas et al. SpiNNaker: Enhanced multicast
routing, Parallel Computing (2014).
http://dx.doi.org/10.1016/j.parco.2015.01.002
This algorithm attempts to use NER to generate routing trees for all nets
and routes around broken links using A* graph search. If the system is
fully connected, this algorithm will always succeed though no consideration
of congestion or routing-table usage is attempted.
Parameters
----------
radius : int
Radius of area to search from each node. 20 is arbitrarily selected in
the paper and shown to be acceptable in practice. If set to zero, this
        method becomes longest dimension first routing.
"""
wrap_around = machine.has_wrap_around_links()
# Vertices constrained to route to a specific link. {vertex: route}
route_to_endpoint = {}
for constraint in constraints:
if isinstance(constraint, RouteEndpointConstraint):
route_to_endpoint[constraint.vertex] = constraint.route
routes = {}
for net in nets:
# Generate routing tree (assuming a perfect machine)
root, lookup = ner_net(placements[net.source],
set(placements[sink] for sink in net.sinks),
machine.width, machine.height,
wrap_around, radius)
# Fix routes to avoid dead chips/links
if route_has_dead_links(root, machine):
root, lookup = avoid_dead_links(root, machine, wrap_around)
# Add the sinks in the net to the RoutingTree
for sink in net.sinks:
tree_node = lookup[placements[sink]]
if sink in route_to_endpoint:
# Sinks with route-to-endpoint constraints must be routed
                # in the corresponding directions.
tree_node.children.append((route_to_endpoint[sink], sink))
else:
cores = allocations.get(sink, {}).get(core_resource, None)
if cores is not None:
# Sinks with the core_resource resource specified must be
# routed to that set of cores.
for core in range(cores.start, cores.stop):
tree_node.children.append((Routes.core(core), sink))
else:
# Sinks without that resource are simply included without
# an associated route
tree_node.children.append((None, sink))
routes[net] = root
return routes | Routing algorithm based on Neighbour Exploring Routing (NER).
    Algorithm reference: J. Navaridas et al. SpiNNaker: Enhanced multicast
routing, Parallel Computing (2014).
http://dx.doi.org/10.1016/j.parco.2015.01.002
This algorithm attempts to use NER to generate routing trees for all nets
and routes around broken links using A* graph search. If the system is
fully connected, this algorithm will always succeed though no consideration
of congestion or routing-table usage is attempted.
Parameters
----------
radius : int
Radius of area to search from each node. 20 is arbitrarily selected in
the paper and shown to be acceptable in practice. If set to zero, this
        method becomes longest dimension first routing. | Below is the the instruction that describes the task:
### Input:
Routing algorithm based on Neighbour Exploring Routing (NER).
    Algorithm reference: J. Navaridas et al. SpiNNaker: Enhanced multicast
routing, Parallel Computing (2014).
http://dx.doi.org/10.1016/j.parco.2015.01.002
This algorithm attempts to use NER to generate routing trees for all nets
and routes around broken links using A* graph search. If the system is
fully connected, this algorithm will always succeed though no consideration
of congestion or routing-table usage is attempted.
Parameters
----------
radius : int
Radius of area to search from each node. 20 is arbitrarily selected in
the paper and shown to be acceptable in practice. If set to zero, this
        method becomes longest dimension first routing.
### Response:
def route(vertices_resources, nets, machine, constraints, placements,
allocations={}, core_resource=Cores, radius=20):
"""Routing algorithm based on Neighbour Exploring Routing (NER).
    Algorithm reference: J. Navaridas et al. SpiNNaker: Enhanced multicast
routing, Parallel Computing (2014).
http://dx.doi.org/10.1016/j.parco.2015.01.002
This algorithm attempts to use NER to generate routing trees for all nets
and routes around broken links using A* graph search. If the system is
fully connected, this algorithm will always succeed though no consideration
of congestion or routing-table usage is attempted.
Parameters
----------
radius : int
Radius of area to search from each node. 20 is arbitrarily selected in
the paper and shown to be acceptable in practice. If set to zero, this
        method becomes longest dimension first routing.
"""
wrap_around = machine.has_wrap_around_links()
# Vertices constrained to route to a specific link. {vertex: route}
route_to_endpoint = {}
for constraint in constraints:
if isinstance(constraint, RouteEndpointConstraint):
route_to_endpoint[constraint.vertex] = constraint.route
routes = {}
for net in nets:
# Generate routing tree (assuming a perfect machine)
root, lookup = ner_net(placements[net.source],
set(placements[sink] for sink in net.sinks),
machine.width, machine.height,
wrap_around, radius)
# Fix routes to avoid dead chips/links
if route_has_dead_links(root, machine):
root, lookup = avoid_dead_links(root, machine, wrap_around)
# Add the sinks in the net to the RoutingTree
for sink in net.sinks:
tree_node = lookup[placements[sink]]
if sink in route_to_endpoint:
# Sinks with route-to-endpoint constraints must be routed
                # in the corresponding directions.
tree_node.children.append((route_to_endpoint[sink], sink))
else:
cores = allocations.get(sink, {}).get(core_resource, None)
if cores is not None:
# Sinks with the core_resource resource specified must be
# routed to that set of cores.
for core in range(cores.start, cores.stop):
tree_node.children.append((Routes.core(core), sink))
else:
# Sinks without that resource are simply included without
# an associated route
tree_node.children.append((None, sink))
routes[net] = root
return routes |
def facts(self, **kwargs):
"""Get all facts of this node. Additional arguments may also be
specified that will be passed to the query function.
"""
return self.__api.facts(query=EqualsOperator("certname", self.name),
**kwargs) | Get all facts of this node. Additional arguments may also be
specified that will be passed to the query function. | Below is the the instruction that describes the task:
### Input:
Get all facts of this node. Additional arguments may also be
specified that will be passed to the query function.
### Response:
def facts(self, **kwargs):
"""Get all facts of this node. Additional arguments may also be
specified that will be passed to the query function.
"""
return self.__api.facts(query=EqualsOperator("certname", self.name),
**kwargs) |
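A pypuppetdb-style usage sketch; the host and node names are placeholders, and connect()/node() are assumed to come from the surrounding API object.
.. code:: python
    from pypuppetdb import connect
    db = connect(host='puppetdb.example.com')   # placeholder host
    node = db.node('web01.example.com')         # placeholder certname
    for fact in node.facts():
        print(fact.name, fact.value)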
def system(cmd, data=None):
'''
pipes the output of a program
'''
import subprocess
s = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
out, err = s.communicate(data)
return out.decode('utf8') | pipes the output of a program | Below is the the instruction that describes the task:
### Input:
pipes the output of a program
### Response:
def system(cmd, data=None):
'''
pipes the output of a program
'''
import subprocess
s = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
out, err = s.communicate(data)
return out.decode('utf8') |
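A short worked example on a POSIX shell; note that ``data`` must be bytes because it is written straight to the child's stdin.
.. code:: python
    out = system('tr a-z A-Z', data=b'hello pipes\n')
    assert out == 'HELLO PIPES\n'
    print(system('echo hi'))  # just capture a command's stdout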
def load_pricing_adjustments(self, columns, dts, assets):
"""
Returns
-------
adjustments : list[dict[int -> Adjustment]]
A list, where each element corresponds to the `columns`, of
mappings from index to adjustment objects to apply at that index.
"""
out = [None] * len(columns)
for i, column in enumerate(columns):
adjs = {}
for asset in assets:
adjs.update(self._get_adjustments_in_range(
asset, dts, column))
out[i] = adjs
return out | Returns
-------
adjustments : list[dict[int -> Adjustment]]
A list, where each element corresponds to the `columns`, of
mappings from index to adjustment objects to apply at that index. | Below is the the instruction that describes the task:
### Input:
Returns
-------
adjustments : list[dict[int -> Adjustment]]
A list, where each element corresponds to the `columns`, of
mappings from index to adjustment objects to apply at that index.
### Response:
def load_pricing_adjustments(self, columns, dts, assets):
"""
Returns
-------
adjustments : list[dict[int -> Adjustment]]
A list, where each element corresponds to the `columns`, of
mappings from index to adjustment objects to apply at that index.
"""
out = [None] * len(columns)
for i, column in enumerate(columns):
adjs = {}
for asset in assets:
adjs.update(self._get_adjustments_in_range(
asset, dts, column))
out[i] = adjs
return out |
def get_region_vcf(self, case_obj, chrom=None, start=None, end=None,
gene_obj=None, variant_type='clinical', category='snv',
rank_threshold=None):
"""Produce a reduced vcf with variants from the specified coordinates
This is used for the alignment viewer.
Args:
case_obj(dict): A case from the scout database
variant_type(str): 'clinical' or 'research'. Default: 'clinical'
category(str): 'snv' or 'sv'. Default: 'snv'
rank_threshold(float): Only load variants above this score. Default: 5
chrom(str): Load variants from a certain chromosome
start(int): Specify the start position
end(int): Specify the end position
gene_obj(dict): A gene object from the database
Returns:
file_name(str): Path to the temporary file
"""
rank_threshold = rank_threshold or -100
variant_file = None
if variant_type == 'clinical':
if category == 'snv':
variant_file = case_obj['vcf_files'].get('vcf_snv')
elif category == 'sv':
variant_file = case_obj['vcf_files'].get('vcf_sv')
elif category == 'str':
variant_file = case_obj['vcf_files'].get('vcf_str')
elif variant_type == 'research':
if category == 'snv':
variant_file = case_obj['vcf_files'].get('vcf_snv_research')
elif category == 'sv':
variant_file = case_obj['vcf_files'].get('vcf_sv_research')
if not variant_file:
raise SyntaxError("Vcf file does not seem to exist")
vcf_obj = VCF(variant_file)
region = ""
if gene_obj:
chrom = gene_obj['chromosome']
start = gene_obj['start']
end = gene_obj['end']
if chrom:
if (start and end):
region = "{0}:{1}-{2}".format(chrom, start, end)
else:
region = "{0}".format(chrom)
else:
rank_threshold = rank_threshold or 5
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp:
file_name = str(pathlib.Path(temp.name))
for header_line in vcf_obj.raw_header.split('\n'):
if len(header_line) > 3:
temp.write(header_line + '\n')
for variant in vcf_obj(region):
temp.write(str(variant))
return file_name | Produce a reduced vcf with variants from the specified coordinates
This is used for the alignment viewer.
Args:
case_obj(dict): A case from the scout database
variant_type(str): 'clinical' or 'research'. Default: 'clinical'
category(str): 'snv' or 'sv'. Default: 'snv'
rank_threshold(float): Only load variants above this score. Default: 5
chrom(str): Load variants from a certain chromosome
start(int): Specify the start position
end(int): Specify the end position
gene_obj(dict): A gene object from the database
Returns:
file_name(str): Path to the temporary file | Below is the the instruction that describes the task:
### Input:
Produce a reduced vcf with variants from the specified coordinates
This is used for the alignment viewer.
Args:
case_obj(dict): A case from the scout database
variant_type(str): 'clinical' or 'research'. Default: 'clinical'
category(str): 'snv' or 'sv'. Default: 'snv'
rank_threshold(float): Only load variants above this score. Default: 5
chrom(str): Load variants from a certain chromosome
start(int): Specify the start position
end(int): Specify the end position
gene_obj(dict): A gene object from the database
Returns:
file_name(str): Path to the temporary file
### Response:
def get_region_vcf(self, case_obj, chrom=None, start=None, end=None,
gene_obj=None, variant_type='clinical', category='snv',
rank_threshold=None):
"""Produce a reduced vcf with variants from the specified coordinates
This is used for the alignment viewer.
Args:
case_obj(dict): A case from the scout database
variant_type(str): 'clinical' or 'research'. Default: 'clinical'
category(str): 'snv' or 'sv'. Default: 'snv'
rank_threshold(float): Only load variants above this score. Default: 5
chrom(str): Load variants from a certain chromosome
start(int): Specify the start position
end(int): Specify the end position
gene_obj(dict): A gene object from the database
Returns:
file_name(str): Path to the temporary file
"""
rank_threshold = rank_threshold or -100
variant_file = None
if variant_type == 'clinical':
if category == 'snv':
variant_file = case_obj['vcf_files'].get('vcf_snv')
elif category == 'sv':
variant_file = case_obj['vcf_files'].get('vcf_sv')
elif category == 'str':
variant_file = case_obj['vcf_files'].get('vcf_str')
elif variant_type == 'research':
if category == 'snv':
variant_file = case_obj['vcf_files'].get('vcf_snv_research')
elif category == 'sv':
variant_file = case_obj['vcf_files'].get('vcf_sv_research')
if not variant_file:
raise SyntaxError("Vcf file does not seem to exist")
vcf_obj = VCF(variant_file)
region = ""
if gene_obj:
chrom = gene_obj['chromosome']
start = gene_obj['start']
end = gene_obj['end']
if chrom:
if (start and end):
region = "{0}:{1}-{2}".format(chrom, start, end)
else:
region = "{0}".format(chrom)
else:
rank_threshold = rank_threshold or 5
with tempfile.NamedTemporaryFile(mode='w', delete=False) as temp:
file_name = str(pathlib.Path(temp.name))
for header_line in vcf_obj.raw_header.split('\n'):
if len(header_line) > 3:
temp.write(header_line + '\n')
for variant in vcf_obj(region):
temp.write(str(variant))
return file_name |
def request(self, endpoint):
"""Perform a request for the APIRequest instance 'endpoint'.
Parameters
----------
endpoint : APIRequest
The endpoint parameter contains an instance of an APIRequest
containing the endpoint, method and optionally other parameters
or body data.
Raises
------
V20Error in case of HTTP response code >= 400
"""
method = endpoint.method
method = method.lower()
params = None
try:
params = getattr(endpoint, "params")
except AttributeError:
# request does not have params
params = {}
headers = {}
if hasattr(endpoint, "HEADERS"):
headers = getattr(endpoint, "HEADERS")
request_args = {}
if method == 'get':
request_args['params'] = params
elif hasattr(endpoint, "data") and endpoint.data:
request_args['json'] = endpoint.data
# if any parameter for request then merge them
request_args.update(self._request_params)
# which API to access ?
if not (hasattr(endpoint, "STREAM") and
getattr(endpoint, "STREAM") is True):
url = "{}/{}".format(
TRADING_ENVIRONMENTS[self.environment]["api"],
endpoint)
response = self.__request(method, url,
request_args, headers=headers)
content = response.content.decode('utf-8')
content = json.loads(content)
# update endpoint
endpoint.response = content
endpoint.status_code = response.status_code
return content
else:
url = "{}/{}".format(
TRADING_ENVIRONMENTS[self.environment]["stream"],
endpoint)
endpoint.response = self.__stream_request(method,
url,
request_args,
headers=headers)
return endpoint.response | Perform a request for the APIRequest instance 'endpoint'.
Parameters
----------
endpoint : APIRequest
The endpoint parameter contains an instance of an APIRequest
containing the endpoint, method and optionally other parameters
or body data.
Raises
------
V20Error in case of HTTP response code >= 400 | Below is the the instruction that describes the task:
### Input:
Perform a request for the APIRequest instance 'endpoint'.
Parameters
----------
endpoint : APIRequest
The endpoint parameter contains an instance of an APIRequest
containing the endpoint, method and optionally other parameters
or body data.
Raises
------
V20Error in case of HTTP response code >= 400
### Response:
def request(self, endpoint):
"""Perform a request for the APIRequest instance 'endpoint'.
Parameters
----------
endpoint : APIRequest
The endpoint parameter contains an instance of an APIRequest
containing the endpoint, method and optionally other parameters
or body data.
Raises
------
V20Error in case of HTTP response code >= 400
"""
method = endpoint.method
method = method.lower()
params = None
try:
params = getattr(endpoint, "params")
except AttributeError:
# request does not have params
params = {}
headers = {}
if hasattr(endpoint, "HEADERS"):
headers = getattr(endpoint, "HEADERS")
request_args = {}
if method == 'get':
request_args['params'] = params
elif hasattr(endpoint, "data") and endpoint.data:
request_args['json'] = endpoint.data
# if any parameter for request then merge them
request_args.update(self._request_params)
# which API to access ?
if not (hasattr(endpoint, "STREAM") and
getattr(endpoint, "STREAM") is True):
url = "{}/{}".format(
TRADING_ENVIRONMENTS[self.environment]["api"],
endpoint)
response = self.__request(method, url,
request_args, headers=headers)
content = response.content.decode('utf-8')
content = json.loads(content)
# update endpoint
endpoint.response = content
endpoint.status_code = response.status_code
return content
else:
url = "{}/{}".format(
TRADING_ENVIRONMENTS[self.environment]["stream"],
endpoint)
endpoint.response = self.__stream_request(method,
url,
request_args,
headers=headers)
return endpoint.response |
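A minimal sketch of the duck-typed endpoint object that the `request` method above works with. Only attributes the method actually reads are defined (`method`, `params`, `data`, `HEADERS`, `STREAM`, plus `__str__` for the URL path); the class name `DummyOrders`, the path and the parameter values are illustrative assumptions, not part of the original library.
class DummyOrders(object):
    # illustrative stand-in for an APIRequest subclass
    HEADERS = {"Content-Type": "application/json"}
    STREAM = False            # False -> plain REST branch, not the streaming one
    method = "GET"            # lower-cased by request()
    params = {"count": 10}    # sent as query params because the method is 'get'
    data = None

    def __str__(self):
        # request() formats the endpoint object itself into the URL
        return "v3/accounts/101-001/orders"

# assuming `api` is an instance of the surrounding client class:
# content = api.request(DummyOrders())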
def tweet(tweet_text_func):
'''
A decorator to make a function Tweet
Parameters
- `tweet_text_func` is a function that takes no parameters and returns a tweetable string
For example::
@tweet
def total_deposits_this_week():
# ...
@tweet
def not_an_interesting_tweet():
return 'This tweet is not data-driven.'
'''
def tweet_func():
api = _connect_to_twitter()
tweet = tweet_text_func()
print "Tweeting: %s" % tweet
api.update_status(tweet)
return tweet
return tweet_func | A decorator to make a function Tweet
Parameters
- `tweet_text_func` is a function that takes no parameters and returns a tweetable string
For example::
@tweet
def total_deposits_this_week():
# ...
@tweet
def not_an_interesting_tweet():
return 'This tweet is not data-driven.' | Below is the the instruction that describes the task:
### Input:
A decorator to make a function Tweet
Parameters
- `tweet_text_func` is a function that takes no parameters and returns a tweetable string
For example::
@tweet
def total_deposits_this_week():
# ...
@tweet
def not_an_interesting_tweet():
return 'This tweet is not data-driven.'
### Response:
def tweet(tweet_text_func):
'''
A decorator to make a function Tweet
Parameters
- `tweet_text_func` is a function that takes no parameters and returns a tweetable string
For example::
@tweet
def total_deposits_this_week():
# ...
@tweet
def not_an_interesting_tweet():
return 'This tweet is not data-driven.'
'''
def tweet_func():
api = _connect_to_twitter()
tweet = tweet_text_func()
print "Tweeting: %s" % tweet
api.update_status(tweet)
return tweet
return tweet_func |
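A usage sketch for the `tweet` decorator above. The decorated function name and its text are illustrative; `_connect_to_twitter` (and the credentials behind it) belong to the surrounding module and are assumed to be configured, so the actual call is left commented out.
@tweet
def weekly_summary():
    # any zero-argument function returning a tweetable string will do
    return 'Deposits this week: 42'

# posted = weekly_summary()   # connects, posts the status, returns the text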
def add_char(self, char):
"""
Process the next character of input through the translation finite
        state machine (FSM). There are two possible states, buffer pending
and not pending, but those are hidden behind the ``.flush()`` method
which must be called at the end of text to ensure any pending
``<w:t>`` element is written.
"""
if char == '\t':
self.flush()
self._r.add_tab()
elif char in '\r\n':
self.flush()
self._r.add_br()
else:
self._bfr.append(char) | Process the next character of input through the translation finite
        state machine (FSM). There are two possible states, buffer pending
and not pending, but those are hidden behind the ``.flush()`` method
which must be called at the end of text to ensure any pending
``<w:t>`` element is written. | Below is the the instruction that describes the task:
### Input:
Process the next character of input through the translation finite
    state machine (FSM). There are two possible states, buffer pending
and not pending, but those are hidden behind the ``.flush()`` method
which must be called at the end of text to ensure any pending
``<w:t>`` element is written.
### Response:
def add_char(self, char):
"""
Process the next character of input through the translation finite
        state machine (FSM). There are two possible states, buffer pending
and not pending, but those are hidden behind the ``.flush()`` method
which must be called at the end of text to ensure any pending
``<w:t>`` element is written.
"""
if char == '\t':
self.flush()
self._r.add_tab()
elif char in '\r\n':
self.flush()
self._r.add_br()
else:
self._bfr.append(char) |
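A sketch of how a caller might drive `add_char` above: feed the text one character at a time, then call `flush()` once so a pending `<w:t>` element is not lost. Here `translator` stands for an instance of the surrounding translator class, already bound to its run element; the sample string is illustrative.
def translate_text(translator, text):
    # tabs and line breaks are emitted immediately, everything else is
    # buffered until the next flush
    for ch in text:
        translator.add_char(ch)
    translator.flush()   # write any buffered characters as a final <w:t>

# translate_text(translator, "col A\tcol B\nsecond line")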
def pipeline(stages, run=True, stride=1, chunksize=None):
r""" Data analysis pipeline.
Constructs a data analysis :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` and parametrizes it
(unless prevented).
If this function takes too long, consider loading data in memory.
    Alternatively if the data is too large to be loaded into memory make use
of the stride parameter.
Parameters
----------
stages : data input or list of pipeline stages
If given a single pipeline stage this must be a data input constructed
by :py:func:`source`. If a list of pipelining stages are given, the
first stage must be a data input constructed by :py:func:`source`.
run : bool, optional, default = True
If True, the pipeline will be parametrized immediately with the given
stages. If only an input stage is given, the run flag has no effect at
this time. True also means that the pipeline will be immediately
re-parametrized when further stages are added to it.
*Attention* True means this function may take a long time to compute.
If False, the pipeline will be passive, i.e. it will not do any
computations before you call parametrize()
stride : int, optional, default = 1
If set to 1, all input data will be used throughout the pipeline to
parametrize its stages. Note that this could cause the parametrization
step to be very slow for large data sets. Since molecular dynamics data
is usually correlated at short timescales, it is often sufficient to
parametrize the pipeline at a longer stride.
See also stride option in the output functions of the pipeline.
chunksize: int, default=None
Number of data frames to process at once. Choose a higher value here,
to optimize thread usage and gain processing speed. If None is passed,
use the default value of the underlying reader/data source. Choose zero to
disable chunking at all.
Returns
-------
pipe : :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>`
A pipeline object that is able to conduct big data analysis with
limited memory in streaming mode.
Examples
--------
>>> import numpy as np
>>> from pyemma.coordinates import source, tica, assign_to_centers, pipeline
Create some random data and cluster centers:
>>> data = np.random.random((1000, 3))
>>> centers = data[np.random.choice(1000, 10)]
>>> reader = source(data)
Define a TICA transformation with lag time 10:
>>> tica_obj = tica(lag=10)
Assign any input to given centers:
>>> assign = assign_to_centers(centers=centers)
>>> pipe = pipeline([reader, tica_obj, assign])
>>> pipe.parametrize()
.. autoclass:: pyemma.coordinates.pipelines.Pipeline
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.pipelines.Pipeline
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.pipelines.Pipeline
:attributes:
"""
from pyemma.coordinates.pipelines import Pipeline
if not isinstance(stages, list):
stages = [stages]
p = Pipeline(stages, param_stride=stride, chunksize=chunksize)
if run:
p.parametrize()
return p | r""" Data analysis pipeline.
Constructs a data analysis :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` and parametrizes it
(unless prevented).
If this function takes too long, consider loading data in memory.
    Alternatively if the data is too large to be loaded into memory make use
of the stride parameter.
Parameters
----------
stages : data input or list of pipeline stages
If given a single pipeline stage this must be a data input constructed
by :py:func:`source`. If a list of pipelining stages are given, the
first stage must be a data input constructed by :py:func:`source`.
run : bool, optional, default = True
If True, the pipeline will be parametrized immediately with the given
stages. If only an input stage is given, the run flag has no effect at
this time. True also means that the pipeline will be immediately
re-parametrized when further stages are added to it.
*Attention* True means this function may take a long time to compute.
If False, the pipeline will be passive, i.e. it will not do any
computations before you call parametrize()
stride : int, optional, default = 1
If set to 1, all input data will be used throughout the pipeline to
parametrize its stages. Note that this could cause the parametrization
step to be very slow for large data sets. Since molecular dynamics data
is usually correlated at short timescales, it is often sufficient to
parametrize the pipeline at a longer stride.
See also stride option in the output functions of the pipeline.
chunksize: int, default=None
Number of data frames to process at once. Choose a higher value here,
to optimize thread usage and gain processing speed. If None is passed,
use the default value of the underlying reader/data source. Choose zero to
disable chunking at all.
Returns
-------
pipe : :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>`
A pipeline object that is able to conduct big data analysis with
limited memory in streaming mode.
Examples
--------
>>> import numpy as np
>>> from pyemma.coordinates import source, tica, assign_to_centers, pipeline
Create some random data and cluster centers:
>>> data = np.random.random((1000, 3))
>>> centers = data[np.random.choice(1000, 10)]
>>> reader = source(data)
Define a TICA transformation with lag time 10:
>>> tica_obj = tica(lag=10)
Assign any input to given centers:
>>> assign = assign_to_centers(centers=centers)
>>> pipe = pipeline([reader, tica_obj, assign])
>>> pipe.parametrize()
.. autoclass:: pyemma.coordinates.pipelines.Pipeline
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.pipelines.Pipeline
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.pipelines.Pipeline
:attributes: | Below is the the instruction that describes the task:
### Input:
r""" Data analysis pipeline.
Constructs a data analysis :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` and parametrizes it
(unless prevented).
If this function takes too long, consider loading data in memory.
    Alternatively if the data is too large to be loaded into memory make use
of the stride parameter.
Parameters
----------
stages : data input or list of pipeline stages
If given a single pipeline stage this must be a data input constructed
by :py:func:`source`. If a list of pipelining stages are given, the
first stage must be a data input constructed by :py:func:`source`.
run : bool, optional, default = True
If True, the pipeline will be parametrized immediately with the given
stages. If only an input stage is given, the run flag has no effect at
this time. True also means that the pipeline will be immediately
re-parametrized when further stages are added to it.
*Attention* True means this function may take a long time to compute.
If False, the pipeline will be passive, i.e. it will not do any
computations before you call parametrize()
stride : int, optional, default = 1
If set to 1, all input data will be used throughout the pipeline to
parametrize its stages. Note that this could cause the parametrization
step to be very slow for large data sets. Since molecular dynamics data
is usually correlated at short timescales, it is often sufficient to
parametrize the pipeline at a longer stride.
See also stride option in the output functions of the pipeline.
chunksize: int, default=None
Number of data frames to process at once. Choose a higher value here,
to optimize thread usage and gain processing speed. If None is passed,
use the default value of the underlying reader/data source. Choose zero to
disable chunking at all.
Returns
-------
pipe : :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>`
A pipeline object that is able to conduct big data analysis with
limited memory in streaming mode.
Examples
--------
>>> import numpy as np
>>> from pyemma.coordinates import source, tica, assign_to_centers, pipeline
Create some random data and cluster centers:
>>> data = np.random.random((1000, 3))
>>> centers = data[np.random.choice(1000, 10)]
>>> reader = source(data)
Define a TICA transformation with lag time 10:
>>> tica_obj = tica(lag=10)
Assign any input to given centers:
>>> assign = assign_to_centers(centers=centers)
>>> pipe = pipeline([reader, tica_obj, assign])
>>> pipe.parametrize()
.. autoclass:: pyemma.coordinates.pipelines.Pipeline
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.pipelines.Pipeline
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.pipelines.Pipeline
:attributes:
### Response:
def pipeline(stages, run=True, stride=1, chunksize=None):
r""" Data analysis pipeline.
Constructs a data analysis :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>` and parametrizes it
(unless prevented).
If this function takes too long, consider loading data in memory.
    Alternatively if the data is too large to be loaded into memory make use
of the stride parameter.
Parameters
----------
stages : data input or list of pipeline stages
If given a single pipeline stage this must be a data input constructed
by :py:func:`source`. If a list of pipelining stages are given, the
first stage must be a data input constructed by :py:func:`source`.
run : bool, optional, default = True
If True, the pipeline will be parametrized immediately with the given
stages. If only an input stage is given, the run flag has no effect at
this time. True also means that the pipeline will be immediately
re-parametrized when further stages are added to it.
*Attention* True means this function may take a long time to compute.
If False, the pipeline will be passive, i.e. it will not do any
computations before you call parametrize()
stride : int, optional, default = 1
If set to 1, all input data will be used throughout the pipeline to
parametrize its stages. Note that this could cause the parametrization
step to be very slow for large data sets. Since molecular dynamics data
is usually correlated at short timescales, it is often sufficient to
parametrize the pipeline at a longer stride.
See also stride option in the output functions of the pipeline.
chunksize: int, default=None
Number of data frames to process at once. Choose a higher value here,
to optimize thread usage and gain processing speed. If None is passed,
use the default value of the underlying reader/data source. Choose zero to
disable chunking at all.
Returns
-------
pipe : :class:`Pipeline <pyemma.coordinates.pipelines.Pipeline>`
A pipeline object that is able to conduct big data analysis with
limited memory in streaming mode.
Examples
--------
>>> import numpy as np
>>> from pyemma.coordinates import source, tica, assign_to_centers, pipeline
Create some random data and cluster centers:
>>> data = np.random.random((1000, 3))
>>> centers = data[np.random.choice(1000, 10)]
>>> reader = source(data)
Define a TICA transformation with lag time 10:
>>> tica_obj = tica(lag=10)
Assign any input to given centers:
>>> assign = assign_to_centers(centers=centers)
>>> pipe = pipeline([reader, tica_obj, assign])
>>> pipe.parametrize()
.. autoclass:: pyemma.coordinates.pipelines.Pipeline
:members:
:undoc-members:
.. rubric:: Methods
.. autoautosummary:: pyemma.coordinates.pipelines.Pipeline
:methods:
.. rubric:: Attributes
.. autoautosummary:: pyemma.coordinates.pipelines.Pipeline
:attributes:
"""
from pyemma.coordinates.pipelines import Pipeline
if not isinstance(stages, list):
stages = [stages]
p = Pipeline(stages, param_stride=stride, chunksize=chunksize)
if run:
p.parametrize()
return p |
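Complementing the doctest in the docstring above, a sketch of the passive mode it describes: build the pipeline with `run=False` and a coarser `stride`, then parametrize explicitly later. Only functions already named in the record are used; the array sizes are the same illustrative ones.
import numpy as np
from pyemma.coordinates import source, tica, assign_to_centers, pipeline

data = np.random.random((1000, 3))
centers = data[np.random.choice(1000, 10)]

# run=False keeps the pipeline passive; nothing is computed yet
pipe = pipeline([source(data), tica(lag=10), assign_to_centers(centers=centers)],
                run=False, stride=2)
pipe.parametrize()   # explicit parametrization, using every 2nd frame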
def _det_inference(self):
"""
Internal method for determining the inference method
"""
# 2 random effects with complete design -> gp2KronSum
# TODO: add check for low-rankness, use GP3KronSumLR and GP2KronSumLR when possible
if (self.n_randEffs==2) and (~sp.isnan(self.Y).any()):
rv = 'GP2KronSum'
else:
rv = 'GP'
return rv | Internal method for determining the inference method | Below is the the instruction that describes the task:
### Input:
Internal method for determining the inference method
### Response:
def _det_inference(self):
"""
Internal method for determining the inference method
"""
# 2 random effects with complete design -> gp2KronSum
# TODO: add check for low-rankness, use GP3KronSumLR and GP2KronSumLR when possible
if (self.n_randEffs==2) and (~sp.isnan(self.Y).any()):
rv = 'GP2KronSum'
else:
rv = 'GP'
return rv |
def getLocalDeformationEnergy(self, bp, complexDna, freeDnaFrames=None, boundDnaFrames=None, helical=False,
unit='kT', which='all', outFile=None):
r"""Deformation energy of the input DNA using local elastic properties
The deformation energy of a base-step/s for probe DNA object with reference to
the same base-step/s DNA present in the current DNA object.
The deformation free energy is calculated using elastic matrix as follows
.. math::
G = \frac{1}{2}\mathbf{xKx^T}
When ``helical='False'``
.. math::
\mathbf{K} = \mathbf{K}_{base-step}
.. math::
\mathbf{x} = \begin{bmatrix}
(Dx_{i}-Dx_0) & (Dy_i - Dy_0) & (Dz_i - Dz_0) & (\tau_i - \tau_0) &
(\rho_i - \rho_0) & (\omega_i - \omega_0)
\end{bmatrix}
When ``helical='True'``
.. math::
\mathbf{K} = \mathbf{K}_{helical-base-step}
.. math::
\mathbf{x} = \begin{bmatrix}
(dx_{i}-dx_0) & (dy_i - dy_0) & (h_i - h_0) & (\eta_i - \eta_0) &
(\theta_i - \theta_0) & (\Omega_i - \Omega_0)
\end{bmatrix}
.. currentmodule:: dnaMD
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
complexDna : :class:`dnaMD.DNA`
Input :class:`dnaMD.DNA` instance for which deformation energy will be calculated.
freeDnaFrames : list
To select a trajectory segment of current (free) DNA data.
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
boundDnaFrames : list
To select a trajectory segment of input (bound) DNA data.
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
which : str or list
For which motions (degrees of freedom), energy should be calculated. It should be either a list containing
            terms listed below or "all" for all energy terms.
Following keywords are available:
* ``'full'`` : Use entire elastic matrix -- all parameters with their coupling
* ``'diag'`` : Use diagonal of elastic matrix -- all motions but no coupling
* ``'shift'`` or ``'x-disp'``
            * ``'slide'`` or ``'y-disp'``
* ``'rise'`` or ``'h-rise'``
* ``'tilt'`` or ``'inclination'``
* ``'roll'`` or ``'tip'``
* ``'twist'`` or ``'h-twist'``
outFile : str
Output file in csv format.
Returns
-------
time : numpy.ndarray
1D array containing time values.
energy : dict of numpy.ndarray
Dictionary of 1D array of shape (nframes) containing energy terms requested for DNA.
"""
if helical:
energyTerms = ['full', 'diag', 'x-disp', 'y-disp', 'h-rise', 'inclination', 'tip', 'h-twist']
else:
energyTerms = ['full', 'diag', 'shift', 'slide', 'rise', 'tilt', 'roll', 'twist']
if isinstance(which, str):
if which != 'all':
raise ValueError('Either use "all" or use list of terms from this {0} list \n.'.format(energyTerms))
else:
which = energyTerms
elif isinstance(which, list):
for key in which:
if key not in energyTerms:
raise ValueError('{0} is not a supported keyword.\n Use from the following list: \n{1}'.format(
which, energyTerms))
else:
raise ValueError('Either use "all" or use list of terms from this {0} list \n.'.format(energyTerms))
means, esMatrix = self.calculateLocalElasticity(bp, frames=freeDnaFrames, helical=helical, unit=unit)
time, array = self.extractLocalParameters(complexDna, bp, frames=boundDnaFrames, helical=helical)
# Initialize energy dictionary
energyOut = OrderedDict()
for key in which:
energyOut[key] = []
for i in range(array[0].shape[0]):
vec = array[:, i]
diff = vec - means
for key in which:
t_energy = self._calcLocalEnergy(diff, esMatrix, key)
energyOut[key].append(t_energy)
for key in which:
energyOut[key] = np.asarray(energyOut[key])
# Write output file
if outFile is not None:
with open(outFile, 'w') as fout:
fout.write('#Time')
for name in which:
fout.write(', {0}'.format(name))
fout.write('\n')
for t in range(len(time)):
fout.write('{0:.3f}'.format(time[t]))
for name in which:
fout.write(', {0:.5f}'.format(energyOut[name][t]))
fout.write('\n')
return time, energyOut | r"""Deformation energy of the input DNA using local elastic properties
The deformation energy of a base-step/s for probe DNA object with reference to
the same base-step/s DNA present in the current DNA object.
The deformation free energy is calculated using elastic matrix as follows
.. math::
G = \frac{1}{2}\mathbf{xKx^T}
When ``helical='False'``
.. math::
\mathbf{K} = \mathbf{K}_{base-step}
.. math::
\mathbf{x} = \begin{bmatrix}
(Dx_{i}-Dx_0) & (Dy_i - Dy_0) & (Dz_i - Dz_0) & (\tau_i - \tau_0) &
(\rho_i - \rho_0) & (\omega_i - \omega_0)
\end{bmatrix}
When ``helical='True'``
.. math::
\mathbf{K} = \mathbf{K}_{helical-base-step}
.. math::
\mathbf{x} = \begin{bmatrix}
(dx_{i}-dx_0) & (dy_i - dy_0) & (h_i - h_0) & (\eta_i - \eta_0) &
(\theta_i - \theta_0) & (\Omega_i - \Omega_0)
\end{bmatrix}
.. currentmodule:: dnaMD
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
complexDna : :class:`dnaMD.DNA`
Input :class:`dnaMD.DNA` instance for which deformation energy will be calculated.
freeDnaFrames : list
To select a trajectory segment of current (free) DNA data.
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
boundDnaFrames : list
To select a trajectory segment of input (bound) DNA data.
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
which : str or list
For which motions (degrees of freedom), energy should be calculated. It should be either a list containing
            terms listed below or "all" for all energy terms.
Following keywords are available:
* ``'full'`` : Use entire elastic matrix -- all parameters with their coupling
* ``'diag'`` : Use diagonal of elastic matrix -- all motions but no coupling
* ``'shift'`` or ``'x-disp'``
            * ``'slide'`` or ``'y-disp'``
* ``'rise'`` or ``'h-rise'``
* ``'tilt'`` or ``'inclination'``
* ``'roll'`` or ``'tip'``
* ``'twist'`` or ``'h-twist'``
outFile : str
Output file in csv format.
Returns
-------
time : numpy.ndarray
1D array containing time values.
energy : dict of numpy.ndarray
Dictionary of 1D array of shape (nframes) containing energy terms requested for DNA. | Below is the the instruction that describes the task:
### Input:
r"""Deformation energy of the input DNA using local elastic properties
The deformation energy of a base-step/s for probe DNA object with reference to
the same base-step/s DNA present in the current DNA object.
The deformation free energy is calculated using elastic matrix as follows
.. math::
G = \frac{1}{2}\mathbf{xKx^T}
When ``helical='False'``
.. math::
\mathbf{K} = \mathbf{K}_{base-step}
.. math::
\mathbf{x} = \begin{bmatrix}
(Dx_{i}-Dx_0) & (Dy_i - Dy_0) & (Dz_i - Dz_0) & (\tau_i - \tau_0) &
(\rho_i - \rho_0) & (\omega_i - \omega_0)
\end{bmatrix}
When ``helical='True'``
.. math::
\mathbf{K} = \mathbf{K}_{helical-base-step}
.. math::
\mathbf{x} = \begin{bmatrix}
(dx_{i}-dx_0) & (dy_i - dy_0) & (h_i - h_0) & (\eta_i - \eta_0) &
(\theta_i - \theta_0) & (\Omega_i - \Omega_0)
\end{bmatrix}
.. currentmodule:: dnaMD
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
complexDna : :class:`dnaMD.DNA`
Input :class:`dnaMD.DNA` instance for which deformation energy will be calculated.
freeDnaFrames : list
To select a trajectory segment of current (free) DNA data.
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
boundDnaFrames : list
To select a trajectory segment of input (bound) DNA data.
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
which : str or list
For which motions (degrees of freedom), energy should be calculated. It should be either a list containing
            terms listed below or "all" for all energy terms.
Following keywords are available:
* ``'full'`` : Use entire elastic matrix -- all parameters with their coupling
* ``'diag'`` : Use diagonal of elastic matrix -- all motions but no coupling
* ``'shift'`` or ``'x-disp'``
            * ``'slide'`` or ``'y-disp'``
* ``'rise'`` or ``'h-rise'``
* ``'tilt'`` or ``'inclination'``
* ``'roll'`` or ``'tip'``
* ``'twist'`` or ``'h-twist'``
outFile : str
Output file in csv format.
Returns
-------
time : numpy.ndarray
1D array containing time values.
energy : dict of numpy.ndarray
Dictionary of 1D array of shape (nframes) containing energy terms requested for DNA.
### Response:
def getLocalDeformationEnergy(self, bp, complexDna, freeDnaFrames=None, boundDnaFrames=None, helical=False,
unit='kT', which='all', outFile=None):
r"""Deformation energy of the input DNA using local elastic properties
The deformation energy of a base-step/s for probe DNA object with reference to
the same base-step/s DNA present in the current DNA object.
The deformation free energy is calculated using elastic matrix as follows
.. math::
G = \frac{1}{2}\mathbf{xKx^T}
When ``helical='False'``
.. math::
\mathbf{K} = \mathbf{K}_{base-step}
.. math::
\mathbf{x} = \begin{bmatrix}
(Dx_{i}-Dx_0) & (Dy_i - Dy_0) & (Dz_i - Dz_0) & (\tau_i - \tau_0) &
(\rho_i - \rho_0) & (\omega_i - \omega_0)
\end{bmatrix}
When ``helical='True'``
.. math::
\mathbf{K} = \mathbf{K}_{helical-base-step}
.. math::
\mathbf{x} = \begin{bmatrix}
(dx_{i}-dx_0) & (dy_i - dy_0) & (h_i - h_0) & (\eta_i - \eta_0) &
(\theta_i - \theta_0) & (\Omega_i - \Omega_0)
\end{bmatrix}
.. currentmodule:: dnaMD
Parameters
----------
bp : list
List of two base-steps forming the DNA segment.
For example: with ``bp=[5, 50]``, 5-50 base-step segment will be considered.
complexDna : :class:`dnaMD.DNA`
Input :class:`dnaMD.DNA` instance for which deformation energy will be calculated.
freeDnaFrames : list
To select a trajectory segment of current (free) DNA data.
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
boundDnaFrames : list
To select a trajectory segment of input (bound) DNA data.
List of two trajectory frames between which parameters will be extracted. It can be used to select portions
of the trajectory. For example, with ``frames=[100, 1000]``, 100th to 1000th frame of the trajectory will be
considered.
helical : bool
If ``helical=True``, elastic matrix for **helical base-step** parameters are calculated. Otherwise,
by default, elastic matrix for **base-step** parameters are calculated.
unit : str
Unit of energy. Allowed units are: ``'kT', 'kJ/mol' and 'kcal/mol'``.
which : str or list
For which motions (degrees of freedom), energy should be calculated. It should be either a list containing
            terms listed below or "all" for all energy terms.
Following keywords are available:
* ``'full'`` : Use entire elastic matrix -- all parameters with their coupling
* ``'diag'`` : Use diagonal of elastic matrix -- all motions but no coupling
* ``'shift'`` or ``'x-disp'``
            * ``'slide'`` or ``'y-disp'``
* ``'rise'`` or ``'h-rise'``
* ``'tilt'`` or ``'inclination'``
* ``'roll'`` or ``'tip'``
* ``'twist'`` or ``'h-twist'``
outFile : str
Output file in csv format.
Returns
-------
time : numpy.ndarray
1D array containing time values.
energy : dict of numpy.ndarray
Dictionary of 1D array of shape (nframes) containing energy terms requested for DNA.
"""
if helical:
energyTerms = ['full', 'diag', 'x-disp', 'y-disp', 'h-rise', 'inclination', 'tip', 'h-twist']
else:
energyTerms = ['full', 'diag', 'shift', 'slide', 'rise', 'tilt', 'roll', 'twist']
if isinstance(which, str):
if which != 'all':
raise ValueError('Either use "all" or use list of terms from this {0} list \n.'.format(energyTerms))
else:
which = energyTerms
elif isinstance(which, list):
for key in which:
if key not in energyTerms:
raise ValueError('{0} is not a supported keyword.\n Use from the following list: \n{1}'.format(
which, energyTerms))
else:
raise ValueError('Either use "all" or use list of terms from this {0} list \n.'.format(energyTerms))
means, esMatrix = self.calculateLocalElasticity(bp, frames=freeDnaFrames, helical=helical, unit=unit)
time, array = self.extractLocalParameters(complexDna, bp, frames=boundDnaFrames, helical=helical)
# Initialize energy dictionary
energyOut = OrderedDict()
for key in which:
energyOut[key] = []
for i in range(array[0].shape[0]):
vec = array[:, i]
diff = vec - means
for key in which:
t_energy = self._calcLocalEnergy(diff, esMatrix, key)
energyOut[key].append(t_energy)
for key in which:
energyOut[key] = np.asarray(energyOut[key])
# Write output file
if outFile is not None:
with open(outFile, 'w') as fout:
fout.write('#Time')
for name in which:
fout.write(', {0}'.format(name))
fout.write('\n')
for t in range(len(time)):
fout.write('{0:.3f}'.format(time[t]))
for name in which:
fout.write(', {0:.5f}'.format(energyOut[name][t]))
fout.write('\n')
return time, energyOut |
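A call sketch for `getLocalDeformationEnergy` above. `freeDna` stands for an already-initialized instance of the class this method belongs to, and `boundDna` for the `dnaMD.DNA` object documented as `complexDna` (their construction is outside this record); the base-step range, frame windows, energy terms and output filename are illustrative values that follow the documented signature.
# deformation energy of base-steps 5-50 in the bound DNA, relative to the free DNA
time, energy = freeDna.getLocalDeformationEnergy(
    [5, 50], boundDna,
    freeDnaFrames=[100, 1000], boundDnaFrames=[100, 1000],
    helical=False, unit='kJ/mol',
    which=['full', 'diag', 'twist'],
    outFile='local_energy.csv')

# energy['full'] is a 1D numpy array with one value per analysed frame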
def prep_db_parallel(samples, parallel_fn):
"""Prepares gemini databases in parallel, handling jointly called populations.
"""
batch_groups, singles, out_retrieve, extras = _group_by_batches(samples, _has_variant_calls)
to_process = []
has_batches = False
for (name, caller), info in batch_groups.items():
fnames = [x[0] for x in info]
to_process.append([fnames, (str(name), caller, True), [x[1] for x in info], extras])
has_batches = True
for name, caller, data, fname in singles:
to_process.append([[fname], (str(name), caller, False), [data], extras])
output = parallel_fn("prep_gemini_db", to_process)
out_fetch = {}
for batch_id, out_file in output:
out_fetch[tuple(batch_id)] = out_file
out = []
for batch_name, data in out_retrieve:
out_variants = []
for vrn in data["variants"]:
use_population = vrn.pop("population", True)
if use_population:
vrn["population"] = out_fetch[(batch_name, vrn["variantcaller"])]
out_variants.append(vrn)
data["variants"] = out_variants
out.append([data])
for x in extras:
out.append([x])
return out | Prepares gemini databases in parallel, handling jointly called populations. | Below is the the instruction that describes the task:
### Input:
Prepares gemini databases in parallel, handling jointly called populations.
### Response:
def prep_db_parallel(samples, parallel_fn):
"""Prepares gemini databases in parallel, handling jointly called populations.
"""
batch_groups, singles, out_retrieve, extras = _group_by_batches(samples, _has_variant_calls)
to_process = []
has_batches = False
for (name, caller), info in batch_groups.items():
fnames = [x[0] for x in info]
to_process.append([fnames, (str(name), caller, True), [x[1] for x in info], extras])
has_batches = True
for name, caller, data, fname in singles:
to_process.append([[fname], (str(name), caller, False), [data], extras])
output = parallel_fn("prep_gemini_db", to_process)
out_fetch = {}
for batch_id, out_file in output:
out_fetch[tuple(batch_id)] = out_file
out = []
for batch_name, data in out_retrieve:
out_variants = []
for vrn in data["variants"]:
use_population = vrn.pop("population", True)
if use_population:
vrn["population"] = out_fetch[(batch_name, vrn["variantcaller"])]
out_variants.append(vrn)
data["variants"] = out_variants
out.append([data])
for x in extras:
out.append([x])
return out |
def get_assessment_ids(self):
"""Gets the Ids of any assessments associated with this activity.
return: (osid.id.IdList) - list of assessment Ids
raise: IllegalState - is_assessment_based_activity() is false
compliance: mandatory - This method must be implemented.
"""
if not self.is_assessment_based_activity():
raise IllegalState()
else:
return [Id(a) for a in self._my_map['assessmentIds']] | Gets the Ids of any assessments associated with this activity.
return: (osid.id.IdList) - list of assessment Ids
raise: IllegalState - is_assessment_based_activity() is false
compliance: mandatory - This method must be implemented. | Below is the the instruction that describes the task:
### Input:
Gets the Ids of any assessments associated with this activity.
return: (osid.id.IdList) - list of assessment Ids
raise: IllegalState - is_assessment_based_activity() is false
compliance: mandatory - This method must be implemented.
### Response:
def get_assessment_ids(self):
"""Gets the Ids of any assessments associated with this activity.
return: (osid.id.IdList) - list of assessment Ids
raise: IllegalState - is_assessment_based_activity() is false
compliance: mandatory - This method must be implemented.
"""
if not self.is_assessment_based_activity():
raise IllegalState()
else:
return [Id(a) for a in self._my_map['assessmentIds']] |
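A small guard-first usage sketch for `get_assessment_ids` above, checking `is_assessment_based_activity()` so the `IllegalState` branch is never triggered; `activity` is assumed to be an instance of the surrounding Activity class.
if activity.is_assessment_based_activity():
    for assessment_id in activity.get_assessment_ids():
        print(assessment_id)
else:
    print('activity has no associated assessments')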
def preview(src_path):
''' Generates a preview of src_path in the requested format.
:returns: A list of preview paths, one for each page.
'''
previews = []
if sketch.is_sketchfile(src_path):
previews = sketch.preview(src_path)
if not previews:
previews = quicklook.preview(src_path)
previews = [safely_decode(preview) for preview in previews]
return previews | Generates a preview of src_path in the requested format.
:returns: A list of preview paths, one for each page. | Below is the the instruction that describes the task:
### Input:
Generates a preview of src_path in the requested format.
:returns: A list of preview paths, one for each page.
### Response:
def preview(src_path):
''' Generates a preview of src_path in the requested format.
:returns: A list of preview paths, one for each page.
'''
previews = []
if sketch.is_sketchfile(src_path):
previews = sketch.preview(src_path)
if not previews:
previews = quicklook.preview(src_path)
previews = [safely_decode(preview) for preview in previews]
return previews |
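A usage sketch for `preview` above; the file path is a placeholder, and the Sketch-first, Quick Look-fallback behaviour is exactly what the function body implements.
pages = preview('/tmp/example.sketch')   # hypothetical input file
for page_path in pages:
    print(page_path)                     # one preview file per page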
def parse_rpm_output(output, tags=None, separator=';'):
"""
Parse output of the rpm query.
:param output: list, decoded output (str) from the rpm subprocess
:param tags: list, str fields used for query output
:return: list, dicts describing each rpm package
"""
if tags is None:
tags = image_component_rpm_tags
def field(tag):
"""
Get a field value by name
"""
try:
value = fields[tags.index(tag)]
except ValueError:
return None
if value == '(none)':
return None
return value
components = []
sigmarker = 'Key ID '
for rpm in output:
fields = rpm.rstrip('\n').split(separator)
if len(fields) < len(tags):
continue
signature = field('SIGPGP:pgpsig') or field('SIGGPG:pgpsig')
if signature:
parts = signature.split(sigmarker, 1)
if len(parts) > 1:
signature = parts[1]
component_rpm = {
'type': 'rpm',
'name': field('NAME'),
'version': field('VERSION'),
'release': field('RELEASE'),
'arch': field('ARCH'),
'sigmd5': field('SIGMD5'),
'signature': signature,
}
# Special handling for epoch as it must be an integer or None
epoch = field('EPOCH')
if epoch is not None:
epoch = int(epoch)
component_rpm['epoch'] = epoch
if component_rpm['name'] != 'gpg-pubkey':
components.append(component_rpm)
return components | Parse output of the rpm query.
:param output: list, decoded output (str) from the rpm subprocess
:param tags: list, str fields used for query output
:return: list, dicts describing each rpm package | Below is the the instruction that describes the task:
### Input:
Parse output of the rpm query.
:param output: list, decoded output (str) from the rpm subprocess
:param tags: list, str fields used for query output
:return: list, dicts describing each rpm package
### Response:
def parse_rpm_output(output, tags=None, separator=';'):
"""
Parse output of the rpm query.
:param output: list, decoded output (str) from the rpm subprocess
:param tags: list, str fields used for query output
:return: list, dicts describing each rpm package
"""
if tags is None:
tags = image_component_rpm_tags
def field(tag):
"""
Get a field value by name
"""
try:
value = fields[tags.index(tag)]
except ValueError:
return None
if value == '(none)':
return None
return value
components = []
sigmarker = 'Key ID '
for rpm in output:
fields = rpm.rstrip('\n').split(separator)
if len(fields) < len(tags):
continue
signature = field('SIGPGP:pgpsig') or field('SIGGPG:pgpsig')
if signature:
parts = signature.split(sigmarker, 1)
if len(parts) > 1:
signature = parts[1]
component_rpm = {
'type': 'rpm',
'name': field('NAME'),
'version': field('VERSION'),
'release': field('RELEASE'),
'arch': field('ARCH'),
'sigmd5': field('SIGMD5'),
'signature': signature,
}
# Special handling for epoch as it must be an integer or None
epoch = field('EPOCH')
if epoch is not None:
epoch = int(epoch)
component_rpm['epoch'] = epoch
if component_rpm['name'] != 'gpg-pubkey':
components.append(component_rpm)
return components |
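A self-contained sketch of the input `parse_rpm_output` above expects: one line per package, fields joined by the separator in the same order as the query tags. The tag list is passed explicitly because the module-level default `image_component_rpm_tags` is not shown in this record, and the sample package values are invented.
tags = ['NAME', 'VERSION', 'RELEASE', 'ARCH', 'EPOCH',
        'SIGMD5', 'SIGPGP:pgpsig', 'SIGGPG:pgpsig']

# e.g. produced by an rpm query whose --queryformat joins these tags with ';'
output = [
    'bash;4.4.19;7.el8;x86_64;(none);abc123;RSA/SHA256, Key ID 199e2f91fd431d51;(none)\n',
    'gpg-pubkey;fd431d51;519601ca;(none);(none);def456;(none);(none)\n',   # dropped by the name filter
]

components = parse_rpm_output(output, tags=tags, separator=';')
# -> a single dict for bash; 'epoch' is None and 'signature' is '199e2f91fd431d51'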
async def delete(self, device, remove=True):
"""
Detach the loop device.
:param device: device object, block device path or mount path
:param bool remove: whether to unmount the partition etc.
:returns: whether the loop device is deleted
"""
device = self._find_device(device)
if not self.is_handleable(device) or not device.is_loop:
self._log.warn(_('not deleting {0}: unhandled device', device))
return False
if remove:
await self.auto_remove(device, force=True)
self._log.debug(_('deleting {0}', device))
await device.delete()
self._log.info(_('deleted {0}', device))
return True | Detach the loop device.
:param device: device object, block device path or mount path
:param bool remove: whether to unmount the partition etc.
:returns: whether the loop device is deleted | Below is the the instruction that describes the task:
### Input:
Detach the loop device.
:param device: device object, block device path or mount path
:param bool remove: whether to unmount the partition etc.
:returns: whether the loop device is deleted
### Response:
async def delete(self, device, remove=True):
"""
Detach the loop device.
:param device: device object, block device path or mount path
:param bool remove: whether to unmount the partition etc.
:returns: whether the loop device is deleted
"""
device = self._find_device(device)
if not self.is_handleable(device) or not device.is_loop:
self._log.warn(_('not deleting {0}: unhandled device', device))
return False
if remove:
await self.auto_remove(device, force=True)
self._log.debug(_('deleting {0}', device))
await device.delete()
self._log.info(_('deleted {0}', device))
return True |
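A call sketch for the coroutine `delete` above. It must be awaited from an event loop; `mounter` stands for an instance of the enclosing class with its udisks machinery already set up, and the device path is a placeholder.
import asyncio

async def detach_loop(mounter, path='/dev/loop0'):
    # remove=True unmounts/cleans up first, then detaches the loop device
    return await mounter.delete(path, remove=True)

# asyncio.get_event_loop().run_until_complete(detach_loop(mounter))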
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
Unlike ``dougrain.Document.add_link``, this method does not detect
equivalence between relationship types with different representations.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``dougrain.Document`` object, a link is added with
        ``target``'s URL as its ``href`` property and other properties from the
keyword arguments.
If ``target`` is a ``Builder`` object, a link is added with
        ``target``'s URL as its ``href`` property and other properties from the
keyword arguments.
This method returns self, allowing it to be chained with additional
method calls.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
          object should be initially wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if isinstance(target, bytes):
target = target.decode('utf-8')
if isinstance(target, str) or isinstance(target, unicode):
new_link = dict(href=target, **kwargs)
else:
new_link = dict(href=target.url(), **kwargs)
self._add_rel('_links', rel, new_link, wrap)
return self | Adds a link to the document.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
Unlike ``dougrain.Document.add_link``, this method does not detect
equivalence between relationship types with different representations.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``dougrain.Document`` object, a link is added with
        ``target``'s URL as its ``href`` property and other properties from the
keyword arguments.
If ``target`` is a ``Builder`` object, a link is added with
        ``target``'s URL as its ``href`` property and other properties from the
keyword arguments.
This method returns self, allowing it to be chained with additional
method calls.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
          object should be initially wrapped in a JSON array even if it is the
first link for the given ``rel``. | Below is the the instruction that describes the task:
### Input:
Adds a link to the document.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
Unlike ``dougrain.Document.add_link``, this method does not detect
equivalence between relationship types with different representations.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``dougrain.Document`` object, a link is added with
    ``target``'s URL as its ``href`` property and other properties from the
keyword arguments.
If ``target`` is a ``Builder`` object, a link is added with
    ``target``'s URL as its ``href`` property and other properties from the
keyword arguments.
This method returns self, allowing it to be chained with additional
method calls.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
      object should be initially wrapped in a JSON array even if it is the
first link for the given ``rel``.
### Response:
def add_link(self, rel, target, wrap=False, **kwargs):
"""Adds a link to the document.
This method adds a link to the given ``target`` to the document with
the given ``rel``. If one or more links are already present for that
link relationship type, the new link will be added to the existing
links for that link relationship type.
Unlike ``dougrain.Document.add_link``, this method does not detect
equivalence between relationship types with different representations.
If ``target`` is a string, a link is added with ``target`` as its
``href`` property and other properties from the keyword arguments.
If ``target`` is a ``dougrain.Document`` object, a link is added with
        ``target``'s URL as its ``href`` property and other properties from the
keyword arguments.
If ``target`` is a ``Builder`` object, a link is added with
        ``target``'s URL as its ``href`` property and other properties from the
keyword arguments.
This method returns self, allowing it to be chained with additional
method calls.
Arguments:
- ``rel``: a string specifying the link relationship type of the link.
It should be a well-known link relation name from the IANA registry
(http://www.iana.org/assignments/link-relations/link-relations.xml),
a full URI, or a CURIE.
- ``target``: the destination of the link.
- ``wrap``: Defaults to False, but if True, specifies that the link
          object should be initially wrapped in a JSON array even if it is the
first link for the given ``rel``.
"""
if isinstance(target, bytes):
target = target.decode('utf-8')
if isinstance(target, str) or isinstance(target, unicode):
new_link = dict(href=target, **kwargs)
else:
new_link = dict(href=target.url(), **kwargs)
self._add_rel('_links', rel, new_link, wrap)
return self |
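A chained-usage sketch for `add_link` above. `b` is assumed to be an already-constructed `Builder` for some resource; the link relations, URIs and the extra `title` keyword are illustrative and end up as additional link properties exactly as described.
(b.add_link('self', '/orders/123')
  .add_link('next', '/orders/124', title='Next order')
  .add_link('item', '/orders/123/items/1', wrap=True))   # wrap=True forces a JSON array

# a second 'item' link is appended to the existing links for that rel
b.add_link('item', '/orders/123/items/2')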
def byte_adaptor(fbuffer):
""" provides py3 compatibility by converting byte based
file stream to string based file stream
Arguments:
fbuffer: file like objects containing bytes
Returns:
string buffer
"""
if six.PY3:
strings = fbuffer.read().decode('latin-1')
fbuffer = six.StringIO(strings)
return fbuffer
else:
return fbuffer | provides py3 compatibility by converting byte based
file stream to string based file stream
Arguments:
fbuffer: file like objects containing bytes
Returns:
string buffer | Below is the the instruction that describes the task:
### Input:
provides py3 compatibility by converting byte based
file stream to string based file stream
Arguments:
fbuffer: file like objects containing bytes
Returns:
string buffer
### Response:
def byte_adaptor(fbuffer):
""" provides py3 compatibility by converting byte based
file stream to string based file stream
Arguments:
fbuffer: file like objects containing bytes
Returns:
string buffer
"""
if six.PY3:
strings = fbuffer.read().decode('latin-1')
fbuffer = six.StringIO(strings)
return fbuffer
else:
return fbuffer |
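A self-contained sketch for `byte_adaptor` above, wrapping an in-memory byte stream: under Python 3 the bytes are decoded as latin-1 into a `StringIO`, under Python 2 the original buffer is returned untouched. The CSV-ish payload is illustrative.
import io

raw = io.BytesIO(b'SYMBOL,PRICE\nACME,101.5\n')
text_stream = byte_adaptor(raw)
print(text_stream.read())   # behaves like a str-based file object either way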
def list_by_instance(self, instance_id):
"""Gets security groups of an instance.
:returns: List of SecurityGroup objects associated with the instance
"""
ports = port_list(self.request, device_id=instance_id)
sg_ids = []
for p in ports:
sg_ids += p.security_groups
return self._list(id=set(sg_ids)) if sg_ids else [] | Gets security groups of an instance.
:returns: List of SecurityGroup objects associated with the instance | Below is the the instruction that describes the task:
### Input:
Gets security groups of an instance.
:returns: List of SecurityGroup objects associated with the instance
### Response:
def list_by_instance(self, instance_id):
"""Gets security groups of an instance.
:returns: List of SecurityGroup objects associated with the instance
"""
ports = port_list(self.request, device_id=instance_id)
sg_ids = []
for p in ports:
sg_ids += p.security_groups
return self._list(id=set(sg_ids)) if sg_ids else [] |
def add_doc(self, doc):
""" Simple dict of {'name': '@filename.pdf'}"""
if isinstance(doc, HelloDoc) and doc.validate():
self.docs.append(doc)
else:
if not doc.validate():
raise Exception("HelloDoc Errors %s" % (doc.errors,))
else:
raise Exception("add_doc doc must be an instance of class HelloDoc") | Simple dict of {'name': '@filename.pdf'} | Below is the the instruction that describes the task:
### Input:
Simple dict of {'name': '@filename.pdf'}
### Response:
def add_doc(self, doc):
""" Simple dict of {'name': '@filename.pdf'}"""
if isinstance(doc, HelloDoc) and doc.validate():
self.docs.append(doc)
else:
if not doc.validate():
raise Exception("HelloDoc Errors %s" % (doc.errors,))
else:
raise Exception("add_doc doc must be an instance of class HelloDoc") |
def open_acqdata(filename, user='unknown', filemode='w-'):
    """Opens and returns the correct AcquisitionData object according to filename extension.
    Supported extensions:
* .hdf5, .h5 for sparkle data
* .pst, .raw for batlab data. Both the .pst and .raw file must be co-located and share the same base file name, but only one should be provided to this function
see :class:`AcquisitionData<sparkle.data.acqdata.AcquisitionData>`
examples (if data file already exists)::
data = open_acqdata('myexperiment.hdf5', filemode='r')
print data.dataset_names()
for batlab data::
data = open('mouse666.raw', filemode='r')
print data.dataset_names()
"""
if filename.lower().endswith((".hdf5", ".h5")):
return HDF5Data(filename, user, filemode)
elif filename.lower().endswith((".pst", ".raw")):
return BatlabData(filename, user, filemode)
else:
        print "File format not supported: ", filename | Opens and returns the correct AcquisitionData object according to filename extension.
    Supported extensions:
* .hdf5, .h5 for sparkle data
* .pst, .raw for batlab data. Both the .pst and .raw file must be co-located and share the same base file name, but only one should be provided to this function
see :class:`AcquisitionData<sparkle.data.acqdata.AcquisitionData>`
examples (if data file already exists)::
data = open_acqdata('myexperiment.hdf5', filemode='r')
print data.dataset_names()
for batlab data::
data = open('mouse666.raw', filemode='r')
print data.dataset_names() | Below is the the instruction that describes the task:
### Input:
    Opens and returns the correct AcquisitionData object according to filename extension.
    Supported extensions:
* .hdf5, .h5 for sparkle data
* .pst, .raw for batlab data. Both the .pst and .raw file must be co-located and share the same base file name, but only one should be provided to this function
see :class:`AcquisitionData<sparkle.data.acqdata.AcquisitionData>`
examples (if data file already exists)::
data = open_acqdata('myexperiment.hdf5', filemode='r')
print data.dataset_names()
for batlab data::
data = open('mouse666.raw', filemode='r')
print data.dataset_names()
### Response:
def open_acqdata(filename, user='unknown', filemode='w-'):
    """Opens and returns the correct AcquisitionData object according to filename extension.
    Supported extensions:
* .hdf5, .h5 for sparkle data
* .pst, .raw for batlab data. Both the .pst and .raw file must be co-located and share the same base file name, but only one should be provided to this function
see :class:`AcquisitionData<sparkle.data.acqdata.AcquisitionData>`
examples (if data file already exists)::
data = open_acqdata('myexperiment.hdf5', filemode='r')
print data.dataset_names()
for batlab data::
data = open('mouse666.raw', filemode='r')
print data.dataset_names()
"""
if filename.lower().endswith((".hdf5", ".h5")):
return HDF5Data(filename, user, filemode)
elif filename.lower().endswith((".pst", ".raw")):
return BatlabData(filename, user, filemode)
else:
print "File format not supported: ", filename |
def compute_rotsym(self, threshold=1e-3*angstrom):
"""Compute the rotational symmetry number.
Optional argument:
| ``threshold`` -- only when a rotation results in an rmsd below the given
threshold, the rotation is considered to transform the
molecule onto itself.
"""
# Generate a graph with a more permissive threshold for bond lengths:
# (is convenient in case of transition state geometries)
graph = MolecularGraph.from_geometry(self, scaling=1.5)
try:
return compute_rotsym(self, graph, threshold)
except ValueError:
raise ValueError("The rotational symmetry number can only be computed when the graph is fully connected.") | Compute the rotational symmetry number.
Optional argument:
| ``threshold`` -- only when a rotation results in an rmsd below the given
threshold, the rotation is considered to transform the
molecule onto itself. | Below is the the instruction that describes the task:
### Input:
Compute the rotational symmetry number.
Optional argument:
| ``threshold`` -- only when a rotation results in an rmsd below the given
threshold, the rotation is considered to transform the
molecule onto itself.
### Response:
def compute_rotsym(self, threshold=1e-3*angstrom):
"""Compute the rotational symmetry number.
Optional argument:
| ``threshold`` -- only when a rotation results in an rmsd below the given
threshold, the rotation is considered to transform the
molecule onto itself.
"""
# Generate a graph with a more permissive threshold for bond lengths:
# (is convenient in case of transition state geometries)
graph = MolecularGraph.from_geometry(self, scaling=1.5)
try:
return compute_rotsym(self, graph, threshold)
except ValueError:
raise ValueError("The rotational symmetry number can only be computed when the graph is fully connected.") |
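A call sketch for `compute_rotsym` above; `mol` stands for a molmod `Molecule` whose coordinates are already loaded (its construction is outside this record), and the looser threshold mirrors the transition-state remark in the code. The `molmod.units` import path is an assumption based on the `angstrom` constant used in the signature.
from molmod.units import angstrom   # assumed location of the unit constant

# slightly looser threshold, e.g. for a transition-state geometry
sigma = mol.compute_rotsym(threshold=5e-3 * angstrom)
print(sigma)   # the rotational symmetry number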
def build_global(self, global_node):
"""parse `global` section, and return the config.Global
Args:
global_node (TreeNode): `global` section treenode
Returns:
config.Global: an object
"""
config_block_lines = self.__build_config_block(
global_node.config_block)
return config.Global(config_block=config_block_lines) | parse `global` section, and return the config.Global
Args:
global_node (TreeNode): `global` section treenode
Returns:
config.Global: an object | Below is the the instruction that describes the task:
### Input:
parse `global` section, and return the config.Global
Args:
global_node (TreeNode): `global` section treenode
Returns:
config.Global: an object
### Response:
def build_global(self, global_node):
"""parse `global` section, and return the config.Global
Args:
global_node (TreeNode): `global` section treenode
Returns:
config.Global: an object
"""
config_block_lines = self.__build_config_block(
global_node.config_block)
return config.Global(config_block=config_block_lines) |
def upload(self, filepaths, enable_matching=False, transcode_quality='320k', delete_on_success=False):
"""Upload local songs to Google Music.
Parameters:
filepaths (list or str): Filepath(s) to upload.
enable_matching (bool): If ``True`` attempt to use `scan and match
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=2920799&topic=2450455>`__.
            This requires ffmpeg or avconv.
transcode_quality (str or int): If int, pass to ffmpeg/avconv ``-q:a`` for libmp3lame `VBR quality
<http://trac.ffmpeg.org/wiki/Encode/MP3#VBREncoding>'__.
If string, pass to ffmpeg/avconv ``-b:a`` for libmp3lame `CBR quality
<http://trac.ffmpeg.org/wiki/Encode/MP3#CBREncoding>'__.
Default: ``320k``
delete_on_success (bool): Delete successfully uploaded local files. Default: ``False``
Returns:
A list of result dictionaries.
::
[
{'result': 'uploaded', 'filepath': <filepath>, 'id': <song_id>}, # uploaded
{'result': 'matched', 'filepath': <filepath>, 'id': <song_id>}, # matched
{'result': 'error', 'filepath': <filepath>, 'message': <error_message>}, # error
{'result': 'not_uploaded', 'filepath': <filepath>, 'id': <song_id>, 'message': <reason_message>}, # not_uploaded ALREADY_EXISTS
{'result': 'not_uploaded', 'filepath': <filepath>, 'message': <reason_message>} # not_uploaded
]
"""
filenum = 0
total = len(filepaths)
results = []
errors = {}
pad = len(str(total))
exist_strings = ["ALREADY_EXISTS", "this song is already uploaded"]
for result in self._upload(filepaths, enable_matching=enable_matching, transcode_quality=transcode_quality):
filepath = filepaths[filenum]
filenum += 1
uploaded, matched, not_uploaded, error = result
if uploaded:
logger.info(
"({num:>{pad}}/{total}) Successfully uploaded -- {file} ({song_id})".format(
num=filenum, pad=pad, total=total, file=filepath, song_id=uploaded[filepath]
)
)
results.append({'result': 'uploaded', 'filepath': filepath, 'id': uploaded[filepath]})
elif matched:
logger.info(
"({num:>{pad}}/{total}) Successfully scanned and matched -- {file} ({song_id})".format(
num=filenum, pad=pad, total=total, file=filepath, song_id=matched[filepath]
)
)
results.append({'result': 'matched', 'filepath': filepath, 'id': matched[filepath]})
elif error:
logger.warning("({num:>{pad}}/{total}) Error on upload -- {file}".format(num=filenum, pad=pad, total=total, file=filepath))
results.append({'result': 'error', 'filepath': filepath, 'message': error[filepath]})
errors.update(error)
else:
if any(exist_string in not_uploaded[filepath] for exist_string in exist_strings):
response = "ALREADY EXISTS"
song_id = GM_ID_RE.search(not_uploaded[filepath]).group(0)
logger.info(
"({num:>{pad}}/{total}) Failed to upload -- {file} ({song_id}) | {response}".format(
num=filenum, pad=pad, total=total, file=filepath, response=response, song_id=song_id
)
)
results.append({'result': 'not_uploaded', 'filepath': filepath, 'id': song_id, 'message': not_uploaded[filepath]})
else:
response = not_uploaded[filepath]
logger.info(
"({num:>{pad}}/{total}) Failed to upload -- {file} | {response}".format(
num=filenum, pad=pad, total=total, file=filepath, response=response
)
)
results.append({'result': 'not_uploaded', 'filepath': filepath, 'message': not_uploaded[filepath]})
success = (uploaded or matched) or (not_uploaded and 'ALREADY_EXISTS' in not_uploaded[filepath])
if success and delete_on_success:
try:
os.remove(filepath)
except (OSError, PermissionError):
logger.warning("Failed to remove {} after successful upload".format(filepath))
if errors:
logger.info("\n\nThe following errors occurred:\n")
for filepath, e in errors.items():
logger.info("{file} | {error}".format(file=filepath, error=e))
logger.info("\nThese filepaths may need to be synced again.\n")
return results | Upload local songs to Google Music.
Parameters:
filepaths (list or str): Filepath(s) to upload.
enable_matching (bool): If ``True`` attempt to use `scan and match
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=2920799&topic=2450455>`__.
This requires ffmpeg or avconv.
transcode_quality (str or int): If int, pass to ffmpeg/avconv ``-q:a`` for libmp3lame `VBR quality
<http://trac.ffmpeg.org/wiki/Encode/MP3#VBREncoding>`__.
If string, pass to ffmpeg/avconv ``-b:a`` for libmp3lame `CBR quality
<http://trac.ffmpeg.org/wiki/Encode/MP3#CBREncoding>`__.
Default: ``320k``
delete_on_success (bool): Delete successfully uploaded local files. Default: ``False``
Returns:
A list of result dictionaries.
::
[
{'result': 'uploaded', 'filepath': <filepath>, 'id': <song_id>}, # uploaded
{'result': 'matched', 'filepath': <filepath>, 'id': <song_id>}, # matched
{'result': 'error', 'filepath': <filepath>, 'message': <error_message>}, # error
{'result': 'not_uploaded', 'filepath': <filepath>, 'id': <song_id>, 'message': <reason_message>}, # not_uploaded ALREADY_EXISTS
{'result': 'not_uploaded', 'filepath': <filepath>, 'message': <reason_message>} # not_uploaded
] | Below is the instruction that describes the task:
### Input:
Upload local songs to Google Music.
Parameters:
filepaths (list or str): Filepath(s) to upload.
enable_matching (bool): If ``True`` attempt to use `scan and match
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=2920799&topic=2450455>`__.
This requires ffmpeg or avconv.
transcode_quality (str or int): If int, pass to ffmpeg/avconv ``-q:a`` for libmp3lame `VBR quality
<http://trac.ffmpeg.org/wiki/Encode/MP3#VBREncoding>`__.
If string, pass to ffmpeg/avconv ``-b:a`` for libmp3lame `CBR quality
<http://trac.ffmpeg.org/wiki/Encode/MP3#CBREncoding>`__.
Default: ``320k``
delete_on_success (bool): Delete successfully uploaded local files. Default: ``False``
Returns:
A list of result dictionaries.
::
[
{'result': 'uploaded', 'filepath': <filepath>, 'id': <song_id>}, # uploaded
{'result': 'matched', 'filepath': <filepath>, 'id': <song_id>}, # matched
{'result': 'error', 'filepath': <filepath>, 'message': <error_message>}, # error
{'result': 'not_uploaded', 'filepath': <filepath>, 'id': <song_id>, 'message': <reason_message>}, # not_uploaded ALREADY_EXISTS
{'result': 'not_uploaded', 'filepath': <filepath>, 'message': <reason_message>} # not_uploaded
]
### Response:
def upload(self, filepaths, enable_matching=False, transcode_quality='320k', delete_on_success=False):
"""Upload local songs to Google Music.
Parameters:
filepaths (list or str): Filepath(s) to upload.
enable_matching (bool): If ``True`` attempt to use `scan and match
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=2920799&topic=2450455>`__.
This requires ffmpeg or avconv.
transcode_quality (str or int): If int, pass to ffmpeg/avconv ``-q:a`` for libmp3lame `VBR quality
<http://trac.ffmpeg.org/wiki/Encode/MP3#VBREncoding>`__.
If string, pass to ffmpeg/avconv ``-b:a`` for libmp3lame `CBR quality
<http://trac.ffmpeg.org/wiki/Encode/MP3#CBREncoding>`__.
Default: ``320k``
delete_on_success (bool): Delete successfully uploaded local files. Default: ``False``
Returns:
A list of result dictionaries.
::
[
{'result': 'uploaded', 'filepath': <filepath>, 'id': <song_id>}, # uploaded
{'result': 'matched', 'filepath': <filepath>, 'id': <song_id>}, # matched
{'result': 'error', 'filepath': <filepath>, 'message': <error_message>}, # error
{'result': 'not_uploaded', 'filepath': <filepath>, 'id': <song_id>, 'message': <reason_message>}, # not_uploaded ALREADY_EXISTS
{'result': 'not_uploaded', 'filepath': <filepath>, 'message': <reason_message>} # not_uploaded
]
"""
filenum = 0
total = len(filepaths)
results = []
errors = {}
pad = len(str(total))
exist_strings = ["ALREADY_EXISTS", "this song is already uploaded"]
for result in self._upload(filepaths, enable_matching=enable_matching, transcode_quality=transcode_quality):
filepath = filepaths[filenum]
filenum += 1
uploaded, matched, not_uploaded, error = result
if uploaded:
logger.info(
"({num:>{pad}}/{total}) Successfully uploaded -- {file} ({song_id})".format(
num=filenum, pad=pad, total=total, file=filepath, song_id=uploaded[filepath]
)
)
results.append({'result': 'uploaded', 'filepath': filepath, 'id': uploaded[filepath]})
elif matched:
logger.info(
"({num:>{pad}}/{total}) Successfully scanned and matched -- {file} ({song_id})".format(
num=filenum, pad=pad, total=total, file=filepath, song_id=matched[filepath]
)
)
results.append({'result': 'matched', 'filepath': filepath, 'id': matched[filepath]})
elif error:
logger.warning("({num:>{pad}}/{total}) Error on upload -- {file}".format(num=filenum, pad=pad, total=total, file=filepath))
results.append({'result': 'error', 'filepath': filepath, 'message': error[filepath]})
errors.update(error)
else:
if any(exist_string in not_uploaded[filepath] for exist_string in exist_strings):
response = "ALREADY EXISTS"
song_id = GM_ID_RE.search(not_uploaded[filepath]).group(0)
logger.info(
"({num:>{pad}}/{total}) Failed to upload -- {file} ({song_id}) | {response}".format(
num=filenum, pad=pad, total=total, file=filepath, response=response, song_id=song_id
)
)
results.append({'result': 'not_uploaded', 'filepath': filepath, 'id': song_id, 'message': not_uploaded[filepath]})
else:
response = not_uploaded[filepath]
logger.info(
"({num:>{pad}}/{total}) Failed to upload -- {file} | {response}".format(
num=filenum, pad=pad, total=total, file=filepath, response=response
)
)
results.append({'result': 'not_uploaded', 'filepath': filepath, 'message': not_uploaded[filepath]})
success = (uploaded or matched) or (not_uploaded and 'ALREADY_EXISTS' in not_uploaded[filepath])
if success and delete_on_success:
try:
os.remove(filepath)
except (OSError, PermissionError):
logger.warning("Failed to remove {} after successful upload".format(filepath))
if errors:
logger.info("\n\nThe following errors occurred:\n")
for filepath, e in errors.items():
logger.info("{file} | {error}".format(file=filepath, error=e))
logger.info("\nThese filepaths may need to be synced again.\n")
return results |
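A short, hedged example of calling the upload method above; it assumes mm is an already-authenticated client instance exposing this method (client construction is not shown in the source), and the filenames are placeholders.
results = mm.upload(['a.mp3', 'b.flac'], enable_matching=False,
                    transcode_quality='320k', delete_on_success=False)
for r in results:
    # every result dict carries 'result' and 'filepath'; 'id' or 'message' depend on the outcome
    print(r['result'], r['filepath'], r.get('id') or r.get('message'))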
def _switch_charset_list(characters, target=''):
'''
Switches the character set of a list. If a character does not have
an equivalent in the target script (e.g. ヹ when converting to hiragana),
the original character is kept.
'''
# Copy the list to avoid modifying the existing one.
characters = characters[:]
offset = block_offset * offsets[target]['direction']
for n in range(len(characters)):
chars = list(characters[n])
for m in range(len(chars)):
char = chars[m]
char_offset = ord(char) + offset
# Verify that the offset character is within the valid range.
if in_range(char_offset, target):
chars[m] = chr(char_offset)
else:
chars[m] = char
characters[n] = ''.join(chars)
return characters | Switches the character set of a list. If a character does not have
an equivalent in the target script (e.g. ヹ when converting to hiragana),
the original character is kept. | Below is the instruction that describes the task:
### Input:
Switches the character set of a list. If a character does not have
an equivalent in the target script (e.g. ヹ when converting to hiragana),
the original character is kept.
### Response:
def _switch_charset_list(characters, target=''):
'''
Switches the character set of a list. If a character does not have
an equivalent in the target script (e.g. ヹ when converting to hiragana),
the original character is kept.
'''
# Copy the list to avoid modifying the existing one.
characters = characters[:]
offset = block_offset * offsets[target]['direction']
for n in range(len(characters)):
chars = list(characters[n])
for m in range(len(chars)):
char = chars[m]
char_offset = ord(char) + offset
# Verify that the offset character is within the valid range.
if in_range(char_offset, target):
chars[m] = chr(char_offset)
else:
chars[m] = char
characters[n] = ''.join(chars)
return characters |
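An illustrative call for the helper above, assuming the module-level offsets, block_offset and in_range objects it references are configured for hiragana/katakana conversion (they are not shown in this record).
# Convert katakana strings toward hiragana; characters without an equivalent
# (such as the ヹ mentioned in the docstring) are expected to pass through unchanged.
converted = _switch_charset_list(['カタカナ', 'ヹ'], target='hiragana')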
def get_plot(self, subplot=False, width=None, height=None, xmin=-6.,
xmax=6., yscale=1, colours=None, plot_total=True,
legend_on=True, num_columns=2, legend_frame_on=False,
legend_cutoff=3, xlabel='Energy (eV)', ylabel='Arb. units',
zero_to_efermi=True, dpi=400, fonts=None, plt=None,
style=None, no_base_style=False):
"""Get a :obj:`matplotlib.pyplot` object of the density of states.
Args:
subplot (:obj:`bool`, optional): Plot the density of states for
each element on separate subplots. Defaults to ``False``.
width (:obj:`float`, optional): The width of the plot.
height (:obj:`float`, optional): The height of the plot.
xmin (:obj:`float`, optional): The minimum energy on the x-axis.
xmax (:obj:`float`, optional): The maximum energy on the x-axis.
yscale (:obj:`float`, optional): Scaling factor for the y-axis.
colours (:obj:`dict`, optional): Use custom colours for specific
element and orbital combinations. Specified as a :obj:`dict` of
:obj:`dict` of the colours. For example::
{
'Sn': {'s': 'r', 'p': 'b'},
'O': {'s': '#000000'}
}
The colour can be a hex code, series of rgb value, or any other
format supported by matplotlib.
plot_total (:obj:`bool`, optional): Plot the total density of
states. Defaults to ``True``.
legend_on (:obj:`bool`, optional): Plot the graph legend. Defaults
to ``True``.
num_columns (:obj:`int`, optional): The number of columns in the
legend.
legend_frame_on (:obj:`bool`, optional): Plot a frame around the
graph legend. Defaults to ``False``.
legend_cutoff (:obj:`float`, optional): The cut-off (in % of the
maximum density of states within the plotting range) for an
elemental orbital to be labelled in the legend. This prevents
the legend from containing labels for orbitals that have very
little contribution in the plotting range.
xlabel (:obj:`str`, optional): Label/units for x-axis (i.e. energy)
ylabel (:obj:`str`, optional): Label/units for y-axis (i.e. DOS)
zero_to_efermi (:obj:`bool`, optional): Normalise the plot such
that the Fermi level is set as 0 eV.
dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
the image.
fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a
single font, specified as a :obj:`str`, or several fonts,
specified as a :obj:`list` of :obj:`str`.
plt (:obj:`matplotlib.pyplot`, optional): A
:obj:`matplotlib.pyplot` object to use for plotting.
style (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib
style specifications, to be composed on top of Sumo base
style.
no_base_style (:obj:`bool`, optional): Prevent use of sumo base
style. This can make alternative styles behave more
predictably.
Returns:
:obj:`matplotlib.pyplot`: The density of states plot.
"""
plot_data = self.dos_plot_data(yscale=yscale, xmin=xmin, xmax=xmax,
colours=colours, plot_total=plot_total,
legend_cutoff=legend_cutoff,
subplot=subplot,
zero_to_efermi=zero_to_efermi)
if subplot:
nplots = len(plot_data['lines'])
plt = pretty_subplot(nplots, 1, width=width, height=height,
dpi=dpi, plt=plt)
else:
plt = pretty_plot(width=width, height=height, dpi=dpi, plt=plt)
mask = plot_data['mask']
energies = plot_data['energies'][mask]
fig = plt.gcf()
lines = plot_data['lines']
spins = [Spin.up] if len(lines[0][0]['dens']) == 1 else \
[Spin.up, Spin.down]
for i, line_set in enumerate(plot_data['lines']):
if subplot:
ax = fig.axes[i]
else:
ax = plt.gca()
for line, spin in itertools.product(line_set, spins):
if spin == Spin.up:
label = line['label']
densities = line['dens'][spin][mask]
elif spin == Spin.down:
label = ""
densities = -line['dens'][spin][mask]
ax.fill_between(energies, densities, lw=0,
facecolor=line['colour'],
alpha=line['alpha'])
ax.plot(energies, densities, label=label,
color=line['colour'])
ax.set_ylim(plot_data['ymin'], plot_data['ymax'])
ax.set_xlim(xmin, xmax)
ax.tick_params(axis='y', labelleft='off')
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.xaxis.set_minor_locator(AutoMinorLocator(2))
loc = 'upper right' if subplot else 'best'
ncol = 1 if subplot else num_columns
if legend_on:
ax.legend(loc=loc, frameon=legend_frame_on, ncol=ncol)
# now add axis labels and sort out ticks
if subplot:
ax.set_xlabel(xlabel)
fig.subplots_adjust(hspace=0)
plt.setp([a.get_xticklabels() for a in fig.axes[:-1]],
visible=False)
if 'axes.labelcolor' in matplotlib.rcParams:
ylabelcolor = matplotlib.rcParams['axes.labelcolor']
else:
ylabelcolor = None
fig.text(0.08, 0.5, ylabel, ha='left', color=ylabelcolor,
va='center', rotation='vertical', transform=ax.transAxes)
else:
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return plt | Get a :obj:`matplotlib.pyplot` object of the density of states.
Args:
subplot (:obj:`bool`, optional): Plot the density of states for
each element on separate subplots. Defaults to ``False``.
width (:obj:`float`, optional): The width of the plot.
height (:obj:`float`, optional): The height of the plot.
xmin (:obj:`float`, optional): The minimum energy on the x-axis.
xmax (:obj:`float`, optional): The maximum energy on the x-axis.
yscale (:obj:`float`, optional): Scaling factor for the y-axis.
colours (:obj:`dict`, optional): Use custom colours for specific
element and orbital combinations. Specified as a :obj:`dict` of
:obj:`dict` of the colours. For example::
{
'Sn': {'s': 'r', 'p': 'b'},
'O': {'s': '#000000'}
}
The colour can be a hex code, series of rgb value, or any other
format supported by matplotlib.
plot_total (:obj:`bool`, optional): Plot the total density of
states. Defaults to ``True``.
legend_on (:obj:`bool`, optional): Plot the graph legend. Defaults
to ``True``.
num_columns (:obj:`int`, optional): The number of columns in the
legend.
legend_frame_on (:obj:`bool`, optional): Plot a frame around the
graph legend. Defaults to ``False``.
legend_cutoff (:obj:`float`, optional): The cut-off (in % of the
maximum density of states within the plotting range) for an
elemental orbital to be labelled in the legend. This prevents
the legend from containing labels for orbitals that have very
little contribution in the plotting range.
xlabel (:obj:`str`, optional): Label/units for x-axis (i.e. energy)
ylabel (:obj:`str`, optional): Label/units for y-axis (i.e. DOS)
zero_to_efermi (:obj:`bool`, optional): Normalise the plot such
that the Fermi level is set as 0 eV.
dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
the image.
fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a
single font, specified as a :obj:`str`, or several fonts,
specified as a :obj:`list` of :obj:`str`.
plt (:obj:`matplotlib.pyplot`, optional): A
:obj:`matplotlib.pyplot` object to use for plotting.
style (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib
style specifications, to be composed on top of Sumo base
style.
no_base_style (:obj:`bool`, optional): Prevent use of sumo base
style. This can make alternative styles behave more
predictably.
Returns:
:obj:`matplotlib.pyplot`: The density of states plot. | Below is the instruction that describes the task:
### Input:
Get a :obj:`matplotlib.pyplot` object of the density of states.
Args:
subplot (:obj:`bool`, optional): Plot the density of states for
each element on separate subplots. Defaults to ``False``.
width (:obj:`float`, optional): The width of the plot.
height (:obj:`float`, optional): The height of the plot.
xmin (:obj:`float`, optional): The minimum energy on the x-axis.
xmax (:obj:`float`, optional): The maximum energy on the x-axis.
yscale (:obj:`float`, optional): Scaling factor for the y-axis.
colours (:obj:`dict`, optional): Use custom colours for specific
element and orbital combinations. Specified as a :obj:`dict` of
:obj:`dict` of the colours. For example::
{
'Sn': {'s': 'r', 'p': 'b'},
'O': {'s': '#000000'}
}
The colour can be a hex code, series of rgb value, or any other
format supported by matplotlib.
plot_total (:obj:`bool`, optional): Plot the total density of
states. Defaults to ``True``.
legend_on (:obj:`bool`, optional): Plot the graph legend. Defaults
to ``True``.
num_columns (:obj:`int`, optional): The number of columns in the
legend.
legend_frame_on (:obj:`bool`, optional): Plot a frame around the
graph legend. Defaults to ``False``.
legend_cutoff (:obj:`float`, optional): The cut-off (in % of the
maximum density of states within the plotting range) for an
elemental orbital to be labelled in the legend. This prevents
the legend from containing labels for orbitals that have very
little contribution in the plotting range.
xlabel (:obj:`str`, optional): Label/units for x-axis (i.e. energy)
ylabel (:obj:`str`, optional): Label/units for y-axis (i.e. DOS)
zero_to_efermi (:obj:`bool`, optional): Normalise the plot such
that the Fermi level is set as 0 eV.
dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
the image.
fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a
single font, specified as a :obj:`str`, or several fonts,
specified as a :obj:`list` of :obj:`str`.
plt (:obj:`matplotlib.pyplot`, optional): A
:obj:`matplotlib.pyplot` object to use for plotting.
style (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib
style specifications, to be composed on top of Sumo base
style.
no_base_style (:obj:`bool`, optional): Prevent use of sumo base
style. This can make alternative styles behave more
predictably.
Returns:
:obj:`matplotlib.pyplot`: The density of states plot.
### Response:
def get_plot(self, subplot=False, width=None, height=None, xmin=-6.,
xmax=6., yscale=1, colours=None, plot_total=True,
legend_on=True, num_columns=2, legend_frame_on=False,
legend_cutoff=3, xlabel='Energy (eV)', ylabel='Arb. units',
zero_to_efermi=True, dpi=400, fonts=None, plt=None,
style=None, no_base_style=False):
"""Get a :obj:`matplotlib.pyplot` object of the density of states.
Args:
subplot (:obj:`bool`, optional): Plot the density of states for
each element on separate subplots. Defaults to ``False``.
width (:obj:`float`, optional): The width of the plot.
height (:obj:`float`, optional): The height of the plot.
xmin (:obj:`float`, optional): The minimum energy on the x-axis.
xmax (:obj:`float`, optional): The maximum energy on the x-axis.
yscale (:obj:`float`, optional): Scaling factor for the y-axis.
colours (:obj:`dict`, optional): Use custom colours for specific
element and orbital combinations. Specified as a :obj:`dict` of
:obj:`dict` of the colours. For example::
{
'Sn': {'s': 'r', 'p': 'b'},
'O': {'s': '#000000'}
}
The colour can be a hex code, series of rgb value, or any other
format supported by matplotlib.
plot_total (:obj:`bool`, optional): Plot the total density of
states. Defaults to ``True``.
legend_on (:obj:`bool`, optional): Plot the graph legend. Defaults
to ``True``.
num_columns (:obj:`int`, optional): The number of columns in the
legend.
legend_frame_on (:obj:`bool`, optional): Plot a frame around the
graph legend. Defaults to ``False``.
legend_cutoff (:obj:`float`, optional): The cut-off (in % of the
maximum density of states within the plotting range) for an
elemental orbital to be labelled in the legend. This prevents
the legend from containing labels for orbitals that have very
little contribution in the plotting range.
xlabel (:obj:`str`, optional): Label/units for x-axis (i.e. energy)
ylabel (:obj:`str`, optional): Label/units for y-axis (i.e. DOS)
zero_to_efermi (:obj:`bool`, optional): Normalise the plot such
that the Fermi level is set as 0 eV.
dpi (:obj:`int`, optional): The dots-per-inch (pixel density) for
the image.
fonts (:obj:`list`, optional): Fonts to use in the plot. Can be a
single font, specified as a :obj:`str`, or several fonts,
specified as a :obj:`list` of :obj:`str`.
plt (:obj:`matplotlib.pyplot`, optional): A
:obj:`matplotlib.pyplot` object to use for plotting.
style (:obj:`list`, :obj:`str`, or :obj:`dict`): Any matplotlib
style specifications, to be composed on top of Sumo base
style.
no_base_style (:obj:`bool`, optional): Prevent use of sumo base
style. This can make alternative styles behave more
predictably.
Returns:
:obj:`matplotlib.pyplot`: The density of states plot.
"""
plot_data = self.dos_plot_data(yscale=yscale, xmin=xmin, xmax=xmax,
colours=colours, plot_total=plot_total,
legend_cutoff=legend_cutoff,
subplot=subplot,
zero_to_efermi=zero_to_efermi)
if subplot:
nplots = len(plot_data['lines'])
plt = pretty_subplot(nplots, 1, width=width, height=height,
dpi=dpi, plt=plt)
else:
plt = pretty_plot(width=width, height=height, dpi=dpi, plt=plt)
mask = plot_data['mask']
energies = plot_data['energies'][mask]
fig = plt.gcf()
lines = plot_data['lines']
spins = [Spin.up] if len(lines[0][0]['dens']) == 1 else \
[Spin.up, Spin.down]
for i, line_set in enumerate(plot_data['lines']):
if subplot:
ax = fig.axes[i]
else:
ax = plt.gca()
for line, spin in itertools.product(line_set, spins):
if spin == Spin.up:
label = line['label']
densities = line['dens'][spin][mask]
elif spin == Spin.down:
label = ""
densities = -line['dens'][spin][mask]
ax.fill_between(energies, densities, lw=0,
facecolor=line['colour'],
alpha=line['alpha'])
ax.plot(energies, densities, label=label,
color=line['colour'])
ax.set_ylim(plot_data['ymin'], plot_data['ymax'])
ax.set_xlim(xmin, xmax)
ax.tick_params(axis='y', labelleft='off')
ax.yaxis.set_minor_locator(AutoMinorLocator(2))
ax.xaxis.set_minor_locator(AutoMinorLocator(2))
loc = 'upper right' if subplot else 'best'
ncol = 1 if subplot else num_columns
if legend_on:
ax.legend(loc=loc, frameon=legend_frame_on, ncol=ncol)
# now add axis labels and sort out ticks
if subplot:
ax.set_xlabel(xlabel)
fig.subplots_adjust(hspace=0)
plt.setp([a.get_xticklabels() for a in fig.axes[:-1]],
visible=False)
if 'axes.labelcolor' in matplotlib.rcParams:
ylabelcolor = matplotlib.rcParams['axes.labelcolor']
else:
ylabelcolor = None
fig.text(0.08, 0.5, ylabel, ha='left', color=ylabelcolor,
va='center', rotation='vertical', transform=ax.transAxes)
else:
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
return plt |
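A hedged usage sketch for get_plot above: it assumes plotter is an instance of the DOS plotter class this method belongs to, constructed elsewhere from a density-of-states object; the output filename is arbitrary.
plt = plotter.get_plot(xmin=-8, xmax=4, plot_total=True,
                       colours={'Sn': {'s': 'r', 'p': 'b'}})
plt.savefig('dos.png', dpi=400)  # get_plot returns the pyplot module, so savefig works directly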
def strip_context_items(self, a_string):
"""Strip PaloAlto-specific output.
PaloAlto will also put a configuration context:
[edit]
This method removes those lines.
"""
strings_to_strip = [r"\[edit.*\]"]
response_list = a_string.split(self.RESPONSE_RETURN)
last_line = response_list[-1]
for pattern in strings_to_strip:
if re.search(pattern, last_line):
return self.RESPONSE_RETURN.join(response_list[:-1])
return a_string | Strip PaloAlto-specific output.
PaloAlto will also put a configuration context:
[edit]
This method removes those lines. | Below is the instruction that describes the task:
### Input:
Strip PaloAlto-specific output.
PaloAlto will also put a configuration context:
[edit]
This method removes those lines.
### Response:
def strip_context_items(self, a_string):
"""Strip PaloAlto-specific output.
PaloAlto will also put a configuration context:
[edit]
This method removes those lines.
"""
strings_to_strip = [r"\[edit.*\]"]
response_list = a_string.split(self.RESPONSE_RETURN)
last_line = response_list[-1]
for pattern in strings_to_strip:
if re.search(pattern, last_line):
return self.RESPONSE_RETURN.join(response_list[:-1])
return a_string |
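A small illustration of the stripping behaviour, assuming conn is an instance of the PaloAlto connection class and that RESPONSE_RETURN is a newline; both are assumptions not shown in this record.
raw = "set deviceconfig system hostname fw01\n[edit]"
cleaned = conn.strip_context_items(raw)
# expected: the trailing "[edit]" context line is dropped, the rest is returned unchanged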
def find_packages(path):
""" Find all java files matching the "*Package.java" pattern within
the given enaml package directory relative to the java source path.
"""
matches = []
root = join(path, 'src', 'main', 'java')
for folder, dirnames, filenames in os.walk(root):
for filename in fnmatch.filter(filenames, '*Package.java'):
#: Open and make sure it's an EnamlPackage somewhere
with open(join(folder, filename)) as f:
if "implements EnamlPackage" in f.read():
package = os.path.relpath(folder, root)
matches.append(os.path.join(package, filename))
return matches | Find all java files matching the "*Package.java" pattern within
the given enaml package directory relative to the java source path. | Below is the instruction that describes the task:
### Input:
Find all java files matching the "*Package.java" pattern within
the given enaml package directory relative to the java source path.
### Response:
def find_packages(path):
""" Find all java files matching the "*Package.java" pattern within
the given enaml package directory relative to the java source path.
"""
matches = []
root = join(path, 'src', 'main', 'java')
for folder, dirnames, filenames in os.walk(root):
for filename in fnmatch.filter(filenames, '*Package.java'):
#: Open and make sure it's an EnamlPackage somewhere
with open(join(folder, filename)) as f:
if "implements EnamlPackage" in f.read():
package = os.path.relpath(folder, root)
matches.append(os.path.join(package, filename))
return matches |
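An illustrative call to find_packages; the checkout path is hypothetical and simply needs to contain a src/main/java tree as described above.
for java_file in find_packages('packages/enaml-native-example'):
    # relative paths of *Package.java files whose source implements EnamlPackage
    print(java_file)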
def train(sess, loss, x_train, y_train,
init_all=False, evaluate=None, feed=None, args=None,
rng=None, var_list=None, fprop_args=None, optimizer=None,
devices=None, x_batch_preprocessor=None, use_ema=False,
ema_decay=.998, run_canary=None,
loss_threshold=1e5, dataset_train=None, dataset_size=None):
"""
Run (optionally multi-replica, synchronous) training to minimize `loss`
:param sess: TF session to use when training the graph
:param loss: tensor, the loss to minimize
:param x_train: numpy array with training inputs or tf Dataset
:param y_train: numpy array with training outputs or tf Dataset
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
:param rng: Instance of numpy.random.RandomState
:param var_list: Optional list of parameters to train.
:param fprop_args: dict, extra arguments to pass to fprop (loss and model).
:param optimizer: Optimizer to be used for training
:param devices: list of device names to use for training
If None, defaults to: all GPUs, if GPUs are available
all devices, if no GPUs are available
:param x_batch_preprocessor: callable
Takes a single tensor containing an x_train batch as input
Returns a single tensor containing an x_train batch as output
Called to preprocess the data before passing the data to the Loss
:param use_ema: bool
If true, uses an exponential moving average of the model parameters
:param ema_decay: float or callable
The decay parameter for EMA, if EMA is used
If a callable rather than a float, this is a callable that takes
the epoch and batch as arguments and returns the ema_decay for
the current batch.
:param loss_threshold: float
Raise an exception if the loss exceeds this value.
This is intended to rapidly detect numerical problems.
Sometimes the loss may legitimately be higher than this value. In
such cases, raise the value. If needed it can be np.inf.
:param dataset_train: tf Dataset instance.
Used as a replacement for x_train, y_train for faster performance.
:param dataset_size: integer, the size of the dataset_train.
:return: True if model trained
"""
# Check whether the hardware is working correctly
canary.run_canary()
if run_canary is not None:
warnings.warn("The `run_canary` argument is deprecated. The canary "
"is now much cheaper and thus runs all the time. The "
"canary now uses its own loss function so it is not "
"necessary to turn off the canary when training with "
" a stochastic loss. Simply quit passing `run_canary`."
"Passing `run_canary` may become an error on or after "
"2019-10-16.")
args = _ArgsWrapper(args or {})
fprop_args = fprop_args or {}
# Check that necessary arguments were given (see doc above)
# Be sure to support 0 epochs for debugging purposes
if args.nb_epochs is None:
raise ValueError("`args` must specify number of epochs")
if optimizer is None:
if args.learning_rate is None:
raise ValueError("Learning rate was not given in args dict")
assert args.batch_size, "Batch size was not given in args dict"
if rng is None:
rng = np.random.RandomState()
if optimizer is None:
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
else:
if not isinstance(optimizer, tf.train.Optimizer):
raise ValueError("optimizer object must be from a child class of "
"tf.train.Optimizer")
grads = []
xs = []
preprocessed_xs = []
ys = []
if dataset_train is not None:
assert x_train is None and y_train is None and x_batch_preprocessor is None
if dataset_size is None:
raise ValueError("You must provide a dataset size")
data_iterator = dataset_train.make_one_shot_iterator().get_next()
x_train, y_train = sess.run(data_iterator)
devices = infer_devices(devices)
for device in devices:
with tf.device(device):
x = tf.placeholder(x_train.dtype, (None,) + x_train.shape[1:])
y = tf.placeholder(y_train.dtype, (None,) + y_train.shape[1:])
xs.append(x)
ys.append(y)
if x_batch_preprocessor is not None:
x = x_batch_preprocessor(x)
# We need to keep track of these so that the canary can feed
# preprocessed values. If the canary had to feed raw values,
# stochastic preprocessing could make the canary fail.
preprocessed_xs.append(x)
loss_value = loss.fprop(x, y, **fprop_args)
grads.append(optimizer.compute_gradients(
loss_value, var_list=var_list))
num_devices = len(devices)
print("num_devices: ", num_devices)
grad = avg_grads(grads)
# Trigger update operations within the default graph (such as batch_norm).
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = optimizer.apply_gradients(grad)
epoch_tf = tf.placeholder(tf.int32, [])
batch_tf = tf.placeholder(tf.int32, [])
if use_ema:
if callable(ema_decay):
ema_decay = ema_decay(epoch_tf, batch_tf)
ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
with tf.control_dependencies([train_step]):
train_step = ema.apply(var_list)
# Get pointers to the EMA's running average variables
avg_params = [ema.average(param) for param in var_list]
# Make temporary buffers used for swapping the live and running average
# parameters
tmp_params = [tf.Variable(param, trainable=False)
for param in var_list]
# Define the swapping operation
param_to_tmp = [tf.assign(tmp, param)
for tmp, param in safe_zip(tmp_params, var_list)]
with tf.control_dependencies(param_to_tmp):
avg_to_param = [tf.assign(param, avg)
for param, avg in safe_zip(var_list, avg_params)]
with tf.control_dependencies(avg_to_param):
tmp_to_avg = [tf.assign(avg, tmp)
for avg, tmp in safe_zip(avg_params, tmp_params)]
swap = tmp_to_avg
batch_size = args.batch_size
assert batch_size % num_devices == 0
device_batch_size = batch_size // num_devices
if init_all:
sess.run(tf.global_variables_initializer())
else:
initialize_uninitialized_global_variables(sess)
for epoch in xrange(args.nb_epochs):
if dataset_train is not None:
nb_batches = int(math.ceil(float(dataset_size) / batch_size))
else:
# Indices to shuffle training set
index_shuf = list(range(len(x_train)))
# Randomly repeat a few training examples each epoch to avoid
# having a too-small batch
while len(index_shuf) % batch_size != 0:
index_shuf.append(rng.randint(len(x_train)))
nb_batches = len(index_shuf) // batch_size
rng.shuffle(index_shuf)
# Shuffling here versus inside the loop doesn't seem to affect
# timing very much, but shuffling here makes the code slightly
# easier to read
x_train_shuffled = x_train[index_shuf]
y_train_shuffled = y_train[index_shuf]
prev = time.time()
for batch in range(nb_batches):
if dataset_train is not None:
x_train_shuffled, y_train_shuffled = sess.run(data_iterator)
start, end = 0, batch_size
else:
# Compute batch start and end indices
start = batch * batch_size
end = (batch + 1) * batch_size
# Perform one training step
diff = end - start
assert diff == batch_size
feed_dict = {epoch_tf: epoch, batch_tf: batch}
for dev_idx in xrange(num_devices):
cur_start = start + dev_idx * device_batch_size
cur_end = start + (dev_idx + 1) * device_batch_size
feed_dict[xs[dev_idx]] = x_train_shuffled[cur_start:cur_end]
feed_dict[ys[dev_idx]] = y_train_shuffled[cur_start:cur_end]
if cur_end != end and dataset_train is None:
msg = ("batch_size (%d) must be a multiple of num_devices "
"(%d).\nCUDA_VISIBLE_DEVICES: %s"
"\ndevices: %s")
args = (batch_size, num_devices,
os.environ['CUDA_VISIBLE_DEVICES'],
str(devices))
raise ValueError(msg % args)
if feed is not None:
feed_dict.update(feed)
_, loss_numpy = sess.run(
[train_step, loss_value], feed_dict=feed_dict)
if np.abs(loss_numpy) > loss_threshold:
raise ValueError("Extreme loss during training: ", loss_numpy)
if np.isnan(loss_numpy) or np.isinf(loss_numpy):
raise ValueError("NaN/Inf loss during training")
assert (dataset_train is not None or
end == len(index_shuf)) # Check that all examples were used
cur = time.time()
_logger.info("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
if use_ema:
# Before running evaluation, load the running average
# parameters into the live slot, so we can see how well
# the EMA parameters are performing
sess.run(swap)
evaluate()
if use_ema:
# Swap the parameters back, so that we continue training
# on the live parameters
sess.run(swap)
if use_ema:
# When training is done, swap the running average parameters into
# the live slot, so that we use them when we deploy the model
sess.run(swap)
return True | Run (optionally multi-replica, synchronous) training to minimize `loss`
:param sess: TF session to use when training the graph
:param loss: tensor, the loss to minimize
:param x_train: numpy array with training inputs or tf Dataset
:param y_train: numpy array with training outputs or tf Dataset
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
:param rng: Instance of numpy.random.RandomState
:param var_list: Optional list of parameters to train.
:param fprop_args: dict, extra arguments to pass to fprop (loss and model).
:param optimizer: Optimizer to be used for training
:param devices: list of device names to use for training
If None, defaults to: all GPUs, if GPUs are available
all devices, if no GPUs are available
:param x_batch_preprocessor: callable
Takes a single tensor containing an x_train batch as input
Returns a single tensor containing an x_train batch as output
Called to preprocess the data before passing the data to the Loss
:param use_ema: bool
If true, uses an exponential moving average of the model parameters
:param ema_decay: float or callable
The decay parameter for EMA, if EMA is used
If a callable rather than a float, this is a callable that takes
the epoch and batch as arguments and returns the ema_decay for
the current batch.
:param loss_threshold: float
Raise an exception if the loss exceeds this value.
This is intended to rapidly detect numerical problems.
Sometimes the loss may legitimately be higher than this value. In
such cases, raise the value. If needed it can be np.inf.
:param dataset_train: tf Dataset instance.
Used as a replacement for x_train, y_train for faster performance.
:param dataset_size: integer, the size of the dataset_train.
:return: True if model trained | Below is the instruction that describes the task:
### Input:
Run (optionally multi-replica, synchronous) training to minimize `loss`
:param sess: TF session to use when training the graph
:param loss: tensor, the loss to minimize
:param x_train: numpy array with training inputs or tf Dataset
:param y_train: numpy array with training outputs or tf Dataset
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
:param rng: Instance of numpy.random.RandomState
:param var_list: Optional list of parameters to train.
:param fprop_args: dict, extra arguments to pass to fprop (loss and model).
:param optimizer: Optimizer to be used for training
:param devices: list of device names to use for training
If None, defaults to: all GPUs, if GPUs are available
all devices, if no GPUs are available
:param x_batch_preprocessor: callable
Takes a single tensor containing an x_train batch as input
Returns a single tensor containing an x_train batch as output
Called to preprocess the data before passing the data to the Loss
:param use_ema: bool
If true, uses an exponential moving average of the model parameters
:param ema_decay: float or callable
The decay parameter for EMA, if EMA is used
If a callable rather than a float, this is a callable that takes
the epoch and batch as arguments and returns the ema_decay for
the current batch.
:param loss_threshold: float
Raise an exception if the loss exceeds this value.
This is intended to rapidly detect numerical problems.
Sometimes the loss may legitimately be higher than this value. In
such cases, raise the value. If needed it can be np.inf.
:param dataset_train: tf Dataset instance.
Used as a replacement for x_train, y_train for faster performance.
:param dataset_size: integer, the size of the dataset_train.
:return: True if model trained
### Response:
def train(sess, loss, x_train, y_train,
init_all=False, evaluate=None, feed=None, args=None,
rng=None, var_list=None, fprop_args=None, optimizer=None,
devices=None, x_batch_preprocessor=None, use_ema=False,
ema_decay=.998, run_canary=None,
loss_threshold=1e5, dataset_train=None, dataset_size=None):
"""
Run (optionally multi-replica, synchronous) training to minimize `loss`
:param sess: TF session to use when training the graph
:param loss: tensor, the loss to minimize
:param x_train: numpy array with training inputs or tf Dataset
:param y_train: numpy array with training outputs or tf Dataset
:param init_all: (boolean) If set to true, all TF variables in the session
are (re)initialized, otherwise only previously
uninitialized variables are initialized before training.
:param evaluate: function that is run after each training iteration
(typically to display the test/validation accuracy).
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param args: dict or argparse `Namespace` object.
Should contain `nb_epochs`, `learning_rate`,
`batch_size`
:param rng: Instance of numpy.random.RandomState
:param var_list: Optional list of parameters to train.
:param fprop_args: dict, extra arguments to pass to fprop (loss and model).
:param optimizer: Optimizer to be used for training
:param devices: list of device names to use for training
If None, defaults to: all GPUs, if GPUs are available
all devices, if no GPUs are available
:param x_batch_preprocessor: callable
Takes a single tensor containing an x_train batch as input
Returns a single tensor containing an x_train batch as output
Called to preprocess the data before passing the data to the Loss
:param use_ema: bool
If true, uses an exponential moving average of the model parameters
:param ema_decay: float or callable
The decay parameter for EMA, if EMA is used
If a callable rather than a float, this is a callable that takes
the epoch and batch as arguments and returns the ema_decay for
the current batch.
:param loss_threshold: float
Raise an exception if the loss exceeds this value.
This is intended to rapidly detect numerical problems.
Sometimes the loss may legitimately be higher than this value. In
such cases, raise the value. If needed it can be np.inf.
:param dataset_train: tf Dataset instance.
Used as a replacement for x_train, y_train for faster performance.
:param dataset_size: integer, the size of the dataset_train.
:return: True if model trained
"""
# Check whether the hardware is working correctly
canary.run_canary()
if run_canary is not None:
warnings.warn("The `run_canary` argument is deprecated. The canary "
"is now much cheaper and thus runs all the time. The "
"canary now uses its own loss function so it is not "
"necessary to turn off the canary when training with "
" a stochastic loss. Simply quit passing `run_canary`."
"Passing `run_canary` may become an error on or after "
"2019-10-16.")
args = _ArgsWrapper(args or {})
fprop_args = fprop_args or {}
# Check that necessary arguments were given (see doc above)
# Be sure to support 0 epochs for debugging purposes
if args.nb_epochs is None:
raise ValueError("`args` must specify number of epochs")
if optimizer is None:
if args.learning_rate is None:
raise ValueError("Learning rate was not given in args dict")
assert args.batch_size, "Batch size was not given in args dict"
if rng is None:
rng = np.random.RandomState()
if optimizer is None:
optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)
else:
if not isinstance(optimizer, tf.train.Optimizer):
raise ValueError("optimizer object must be from a child class of "
"tf.train.Optimizer")
grads = []
xs = []
preprocessed_xs = []
ys = []
if dataset_train is not None:
assert x_train is None and y_train is None and x_batch_preprocessor is None
if dataset_size is None:
raise ValueError("You must provide a dataset size")
data_iterator = dataset_train.make_one_shot_iterator().get_next()
x_train, y_train = sess.run(data_iterator)
devices = infer_devices(devices)
for device in devices:
with tf.device(device):
x = tf.placeholder(x_train.dtype, (None,) + x_train.shape[1:])
y = tf.placeholder(y_train.dtype, (None,) + y_train.shape[1:])
xs.append(x)
ys.append(y)
if x_batch_preprocessor is not None:
x = x_batch_preprocessor(x)
# We need to keep track of these so that the canary can feed
# preprocessed values. If the canary had to feed raw values,
# stochastic preprocessing could make the canary fail.
preprocessed_xs.append(x)
loss_value = loss.fprop(x, y, **fprop_args)
grads.append(optimizer.compute_gradients(
loss_value, var_list=var_list))
num_devices = len(devices)
print("num_devices: ", num_devices)
grad = avg_grads(grads)
# Trigger update operations within the default graph (such as batch_norm).
with tf.control_dependencies(tf.get_collection(tf.GraphKeys.UPDATE_OPS)):
train_step = optimizer.apply_gradients(grad)
epoch_tf = tf.placeholder(tf.int32, [])
batch_tf = tf.placeholder(tf.int32, [])
if use_ema:
if callable(ema_decay):
ema_decay = ema_decay(epoch_tf, batch_tf)
ema = tf.train.ExponentialMovingAverage(decay=ema_decay)
with tf.control_dependencies([train_step]):
train_step = ema.apply(var_list)
# Get pointers to the EMA's running average variables
avg_params = [ema.average(param) for param in var_list]
# Make temporary buffers used for swapping the live and running average
# parameters
tmp_params = [tf.Variable(param, trainable=False)
for param in var_list]
# Define the swapping operation
param_to_tmp = [tf.assign(tmp, param)
for tmp, param in safe_zip(tmp_params, var_list)]
with tf.control_dependencies(param_to_tmp):
avg_to_param = [tf.assign(param, avg)
for param, avg in safe_zip(var_list, avg_params)]
with tf.control_dependencies(avg_to_param):
tmp_to_avg = [tf.assign(avg, tmp)
for avg, tmp in safe_zip(avg_params, tmp_params)]
swap = tmp_to_avg
batch_size = args.batch_size
assert batch_size % num_devices == 0
device_batch_size = batch_size // num_devices
if init_all:
sess.run(tf.global_variables_initializer())
else:
initialize_uninitialized_global_variables(sess)
for epoch in xrange(args.nb_epochs):
if dataset_train is not None:
nb_batches = int(math.ceil(float(dataset_size) / batch_size))
else:
# Indices to shuffle training set
index_shuf = list(range(len(x_train)))
# Randomly repeat a few training examples each epoch to avoid
# having a too-small batch
while len(index_shuf) % batch_size != 0:
index_shuf.append(rng.randint(len(x_train)))
nb_batches = len(index_shuf) // batch_size
rng.shuffle(index_shuf)
# Shuffling here versus inside the loop doesn't seem to affect
# timing very much, but shuffling here makes the code slightly
# easier to read
x_train_shuffled = x_train[index_shuf]
y_train_shuffled = y_train[index_shuf]
prev = time.time()
for batch in range(nb_batches):
if dataset_train is not None:
x_train_shuffled, y_train_shuffled = sess.run(data_iterator)
start, end = 0, batch_size
else:
# Compute batch start and end indices
start = batch * batch_size
end = (batch + 1) * batch_size
# Perform one training step
diff = end - start
assert diff == batch_size
feed_dict = {epoch_tf: epoch, batch_tf: batch}
for dev_idx in xrange(num_devices):
cur_start = start + dev_idx * device_batch_size
cur_end = start + (dev_idx + 1) * device_batch_size
feed_dict[xs[dev_idx]] = x_train_shuffled[cur_start:cur_end]
feed_dict[ys[dev_idx]] = y_train_shuffled[cur_start:cur_end]
if cur_end != end and dataset_train is None:
msg = ("batch_size (%d) must be a multiple of num_devices "
"(%d).\nCUDA_VISIBLE_DEVICES: %s"
"\ndevices: %s")
args = (batch_size, num_devices,
os.environ['CUDA_VISIBLE_DEVICES'],
str(devices))
raise ValueError(msg % args)
if feed is not None:
feed_dict.update(feed)
_, loss_numpy = sess.run(
[train_step, loss_value], feed_dict=feed_dict)
if np.abs(loss_numpy) > loss_threshold:
raise ValueError("Extreme loss during training: ", loss_numpy)
if np.isnan(loss_numpy) or np.isinf(loss_numpy):
raise ValueError("NaN/Inf loss during training")
assert (dataset_train is not None or
end == len(index_shuf)) # Check that all examples were used
cur = time.time()
_logger.info("Epoch " + str(epoch) + " took " +
str(cur - prev) + " seconds")
if evaluate is not None:
if use_ema:
# Before running evaluation, load the running average
# parameters into the live slot, so we can see how well
# the EMA parameters are performing
sess.run(swap)
evaluate()
if use_ema:
# Swap the parameters back, so that we continue training
# on the live parameters
sess.run(swap)
if use_ema:
# When training is done, swap the running average parameters into
# the live slot, so that we use them when we deploy the model
sess.run(swap)
return True |
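A minimal sketch of a call to train(), assuming a TF session sess, a cleverhans-style loss object exposing fprop(x, y), numpy arrays x_train/y_train, and a model exposing get_params(); all of these surrounding objects are assumptions, not shown in this record.
train_params = {'nb_epochs': 6, 'batch_size': 128, 'learning_rate': 1e-3}
train(sess, loss, x_train, y_train, args=train_params,
      rng=np.random.RandomState([2017, 8, 30]),
      var_list=model.get_params(),            # assumed helper on the model object
      evaluate=lambda: print('run eval here'))  # placeholder evaluation callback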
def export(self, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=False, sort=None, ascending=True):
"""Exports the DataFrame to a file written with arrow
:param DataFrameLocal df: DataFrame to export
:param str path: path for file
:param list[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian (not supported for fits)
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue,
or a default progress bar when progress=True
:param bool virtual: When True, export virtual columns
:param str sort: expression used for sorting the output
:param bool ascending: sort ascending (True) or descending
:return:
"""
if path.endswith('.arrow'):
self.export_arrow(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
elif path.endswith('.hdf5'):
self.export_hdf5(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
elif path.endswith('.fits'):
self.export_fits(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
if path.endswith('.parquet'):
self.export_parquet(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending) | Exports the DataFrame to a file written with arrow
:param DataFrameLocal df: DataFrame to export
:param str path: path for file
:param list[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian (not supported for fits)
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue,
or a default progress bar when progress=True
:param bool virtual: When True, export virtual columns
:param str sort: expression used for sorting the output
:param bool ascending: sort ascending (True) or descending
:return: | Below is the instruction that describes the task:
### Input:
Exports the DataFrame to a file written with arrow
:param DataFrameLocal df: DataFrame to export
:param str path: path for file
:param list[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian (not supported for fits)
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue,
or a default progress bar when progress=True
:param bool virtual: When True, export virtual columns
:param str sort: expression used for sorting the output
:param bool ascending: sort ascending (True) or descending
:return:
### Response:
def export(self, path, column_names=None, byteorder="=", shuffle=False, selection=False, progress=None, virtual=False, sort=None, ascending=True):
"""Exports the DataFrame to a file written with arrow
:param DataFrameLocal df: DataFrame to export
:param str path: path for file
:param list[str] column_names: list of column names to export or None for all columns
:param str byteorder: = for native, < for little endian and > for big endian (not supported for fits)
:param bool shuffle: export rows in random order
:param bool selection: export selection or not
:param progress: progress callback that gets a progress fraction as argument and should return True to continue,
or a default progress bar when progress=True
:param bool virtual: When True, export virtual columns
:param str sort: expression used for sorting the output
:param bool ascending: sort ascending (True) or descending
:return:
"""
if path.endswith('.arrow'):
self.export_arrow(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
elif path.endswith('.hdf5'):
self.export_hdf5(path, column_names, byteorder, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
elif path.endswith('.fits'):
self.export_fits(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending)
if path.endswith('.parquet'):
self.export_parquet(path, column_names, shuffle, selection, progress=progress, virtual=virtual, sort=sort, ascending=ascending) |
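A hedged example of the export dispatcher above: the output format is chosen from the file extension, so the same call works for .arrow, .hdf5, .fits or .parquet targets. df is assumed to be an instance of the DataFrame class this method belongs to.
df.export('subset.hdf5', column_names=['x', 'y'],
          shuffle=False, selection=False, sort='x', ascending=True)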
def save(self):
"""
Send out a password reset if the provided data is valid.
If the provided email address exists and is verified, a reset
email is sent to the address.
Returns:
The password reset token if it was returned and ``None``
otherwise.
"""
try:
email = models.EmailAddress.objects.get(
email=self.validated_data["email"], is_verified=True
)
except models.EmailAddress.DoesNotExist:
return None
token = models.PasswordResetToken.objects.create(email=email)
token.send()
return token | Send out a password reset if the provided data is valid.
If the provided email address exists and is verified, a reset
email is sent to the address.
Returns:
The password reset token if it was returned and ``None``
otherwise. | Below is the instruction that describes the task:
### Input:
Send out a password reset if the provided data is valid.
If the provided email address exists and is verified, a reset
email is sent to the address.
Returns:
The password reset token if it was returned and ``None``
otherwise.
### Response:
def save(self):
"""
Send out a password reset if the provided data is valid.
If the provided email address exists and is verified, a reset
email is sent to the address.
Returns:
The password reset token if it was returned and ``None``
otherwise.
"""
try:
email = models.EmailAddress.objects.get(
email=self.validated_data["email"], is_verified=True
)
except models.EmailAddress.DoesNotExist:
return None
token = models.PasswordResetToken.objects.create(email=email)
token.send()
return token |
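A sketch of typical Django REST Framework usage around the save() above; the serializer class name is a placeholder, since only the method body appears in this record.
serializer = PasswordResetSerializer(data={'email': 'user@example.com'})  # hypothetical class name
if serializer.is_valid():
    token = serializer.save()  # None when the address is unknown or unverified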
def process(self, metrics, config):
"""Processes metrics.
This method is called by the Snap daemon during the process phase
of the execution of a Snap workflow. Examples of processing metrics
include applying filtering, max, min, average functions as well as
adding additional context to the metrics to name just a few.
In this example we are adding a tag called 'instance-id' to every metric.
Args:
metrics (obj:`list` of `snap_plugin.v1.Metric`):
List of metrics to be processed.
Returns:
:obj:`list` of `snap_plugin.v1.Metric`:
List of processed metrics.
"""
LOG.debug("Process called")
for metric in metrics:
metric.tags["instance-id"] = config["instance-id"]
return metrics | Processes metrics.
This method is called by the Snap daemon during the process phase
of the execution of a Snap workflow. Examples of processing metrics
include applying filtering, max, min, average functions as well as
adding additional context to the metrics to name just a few.
In this example we are adding a tag called 'instance-id' to every metric.
Args:
metrics (obj:`list` of `snap_plugin.v1.Metric`):
List of metrics to be processed.
Returns:
:obj:`list` of `snap_plugin.v1.Metric`:
List of processed metrics. | Below is the instruction that describes the task:
### Input:
Processes metrics.
This method is called by the Snap daemon during the process phase
of the execution of a Snap workflow. Examples of processing metrics
include applying filtering, max, min, average functions as well as
adding additional context to the metrics to name just a few.
In this example we are adding a tag called 'instance-id' to every metric.
Args:
metrics (obj:`list` of `snap_plugin.v1.Metric`):
List of metrics to be processed.
Returns:
:obj:`list` of `snap_plugin.v1.Metric`:
List of processed metrics.
### Response:
def process(self, metrics, config):
"""Processes metrics.
This method is called by the Snap daemon during the process phase
of the execution of a Snap workflow. Examples of processing metrics
include applying filtering, max, min, average functions as well as
adding additional context to the metrics to name just a few.
In this example we are adding a tag called 'instance-id' to every metric.
Args:
metrics (obj:`list` of `snap_plugin.v1.Metric`):
List of metrics to be processed.
Returns:
:obj:`list` of `snap_plugin.v1.Metric`:
List of processed metrics.
"""
LOG.debug("Process called")
for metric in metrics:
metric.tags["instance-id"] = config["instance-id"]
return metrics |
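A sketch of what the Snap daemon effectively does with this processor; the metric list, the processor instance and the config dict here are stand-ins for the real snap_plugin.v1 objects.
processed = processor.process(metrics, {"instance-id": "i-0123456789abcdef0"})
# every returned metric should now carry the instance-id tag set above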
def getAsKmlPngAnimation(self, session, projectFile=None, path=None, documentName=None, colorRamp=None, alpha=1.0,
noDataValue=0, drawOrder=0, cellSize=None, resampleMethod='NearestNeighbour'):
"""
Retrieve the WMS dataset as a PNG time stamped KMZ
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to the file extension of the WMS dataset file.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
drawOrder (int, optional): Set the draw order of the images. Defaults to 0.
cellSize (float, optional): Define the cell size in the units of the project projection at which to resample
the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the
original raster cell size. It is generally better to set this to a size smaller than the original cell
size to obtain a higher resolution image. However, computation time increases exponentially as the cell
size is decreased.
resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid
values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to
NearestNeighbour.
Returns:
(str, list): Returns a KML string and a list of binary strings that are the PNG images.
"""
# Prepare rasters
timeStampedRasters = self._assembleRasterParams(projectFile, self.rasters)
# Make sure the raster field is valid
converter = RasterConverter(sqlAlchemyEngineOrSession=session)
# Configure color ramp
if isinstance(colorRamp, dict):
converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints'])
else:
converter.setDefaultColorRamp(colorRamp)
if documentName is None:
documentName = self.fileExtension
kmlString, binaryPngStrings = converter.getAsKmlPngAnimation(tableName=WMSDatasetRaster.tableName,
timeStampedRasters=timeStampedRasters,
rasterIdFieldName='id',
rasterFieldName='raster',
documentName=documentName,
alpha=alpha,
drawOrder=drawOrder,
cellSize=cellSize,
noDataValue=noDataValue,
resampleMethod=resampleMethod)
if path:
directory = os.path.dirname(path)
archiveName = (os.path.split(path)[1]).split('.')[0]
kmzPath = os.path.join(directory, (archiveName + '.kmz'))
with ZipFile(kmzPath, 'w') as kmz:
kmz.writestr(archiveName + '.kml', kmlString)
for index, binaryPngString in enumerate(binaryPngStrings):
kmz.writestr('raster{0}.png'.format(index), binaryPngString)
return kmlString, binaryPngStrings | Retrieve the WMS dataset as a PNG time stamped KMZ
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to the file extension of the WMS dataset file.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
drawOrder (int, optional): Set the draw order of the images. Defaults to 0.
cellSize (float, optional): Define the cell size in the units of the project projection at which to resample
the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the
original raster cell size. It is generally better to set this to a size smaller than the original cell
size to obtain a higher resolution image. However, computation time increases exponentially as the cell
size is decreased.
resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid
values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to
NearestNeighbour.
Returns:
(str, list): Returns a KML string and a list of binary strings that are the PNG images. | Below is the the instruction that describes the task:
### Input:
Retrieve the WMS dataset as a PNG time stamped KMZ
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to the file extension of the WMS dataset file.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
drawOrder (int, optional): Set the draw order of the images. Defaults to 0.
cellSize (float, optional): Define the cell size in the units of the project projection at which to resample
the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the
original raster cell size. It is generally better to set this to a size smaller than the original cell
size to obtain a higher resolution image. However, computation time increases exponentially as the cell
size is decreased.
resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid
values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to
NearestNeighbour.
Returns:
(str, list): Returns a KML string and a list of binary strings that are the PNG images.
### Response:
def getAsKmlPngAnimation(self, session, projectFile=None, path=None, documentName=None, colorRamp=None, alpha=1.0,
noDataValue=0, drawOrder=0, cellSize=None, resampleMethod='NearestNeighbour'):
"""
Retrieve the WMS dataset as a PNG time stamped KMZ
Args:
session (:mod:`sqlalchemy.orm.session.Session`): SQLAlchemy session object bound to PostGIS enabled database.
projectFile(:class:`gsshapy.orm.ProjectFile`): Project file object for the GSSHA project to which the WMS dataset belongs.
path (str, optional): Path to file where KML file will be written. Defaults to None.
documentName (str, optional): Name of the KML document. This will be the name that appears in the legend.
Defaults to the file extension of the WMS dataset file.
colorRamp (:mod:`mapkit.ColorRampGenerator.ColorRampEnum` or dict, optional): Use ColorRampEnum to select a
default color ramp or a dictionary with keys 'colors' and 'interpolatedPoints' to specify a custom color
ramp. The 'colors' key must be a list of RGB integer tuples (e.g.: (255, 0, 0)) and the
'interpolatedPoints' must be an integer representing the number of points to interpolate between each
color given in the colors list.
alpha (float, optional): Set transparency of visualization. Value between 0.0 and 1.0 where 1.0 is 100%
opaque and 0.0 is 100% transparent. Defaults to 1.0.
noDataValue (float, optional): The value to treat as no data when generating visualizations of rasters.
Defaults to 0.0.
drawOrder (int, optional): Set the draw order of the images. Defaults to 0.
cellSize (float, optional): Define the cell size in the units of the project projection at which to resample
the raster to generate the PNG. Defaults to None which will cause the PNG to be generated with the
original raster cell size. It is generally better to set this to a size smaller than the original cell
size to obtain a higher resolution image. However, computation time increases exponentially as the cell
size is decreased.
resampleMethod (str, optional): If cellSize is set, this method will be used to resample the raster. Valid
values include: NearestNeighbour, Bilinear, Cubic, CubicSpline, and Lanczos. Defaults to
NearestNeighbour.
Returns:
(str, list): Returns a KML string and a list of binary strings that are the PNG images.
"""
# Prepare rasters
timeStampedRasters = self._assembleRasterParams(projectFile, self.rasters)
# Make sure the raster field is valid
converter = RasterConverter(sqlAlchemyEngineOrSession=session)
# Configure color ramp
if isinstance(colorRamp, dict):
converter.setCustomColorRamp(colorRamp['colors'], colorRamp['interpolatedPoints'])
else:
converter.setDefaultColorRamp(colorRamp)
if documentName is None:
documentName = self.fileExtension
kmlString, binaryPngStrings = converter.getAsKmlPngAnimation(tableName=WMSDatasetRaster.tableName,
timeStampedRasters=timeStampedRasters,
rasterIdFieldName='id',
rasterFieldName='raster',
documentName=documentName,
alpha=alpha,
drawOrder=drawOrder,
cellSize=cellSize,
noDataValue=noDataValue,
resampleMethod=resampleMethod)
if path:
directory = os.path.dirname(path)
archiveName = (os.path.split(path)[1]).split('.')[0]
kmzPath = os.path.join(directory, (archiveName + '.kmz'))
with ZipFile(kmzPath, 'w') as kmz:
kmz.writestr(archiveName + '.kml', kmlString)
for index, binaryPngString in enumerate(binaryPngStrings):
kmz.writestr('raster{0}.png'.format(index), binaryPngString)
return kmlString, binaryPngStrings |
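A hedged call sketch for the method above; the session/query setup is assumed to have produced a WMS dataset object (`wms_dataset`) and its project file (`project_file`) beforehand, and the output path is a placeholder.
kml_string, png_frames = wms_dataset.getAsKmlPngAnimation(
    session=session,
    projectFile=project_file,
    path='/tmp/depth_animation.kml',  # a .kmz with the same base name is written
    documentName='Flood Depth',
    alpha=0.8,                        # 80% opaque
    drawOrder=1,
    cellSize=10.0,                    # resample to 10 map units per cell
    resampleMethod='Bilinear',
)
print('{0} PNG frames packed into the KMZ'.format(len(png_frames)))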
def get_usertag(email, *tags):
"""Get buglists by usertags.
Parameters
----------
email : str
tags : tuple of strings
If tags are given the dictionary is limited to the matching
tags, if no tags are given all available tags are returned.
Returns
-------
mapping : dict
a mapping of usertag -> buglist
"""
reply = _soap_client_call('get_usertag', email, *tags)
map_el = reply('s-gensym3')
mapping = {}
# element <s-gensym3> in response can have standard type
# xsi:type=apachens:Map (example, for email [email protected])
# OR no type, in this case keys are the names of child elements and
# the array is contained in the child elements
type_attr = map_el.attributes().get('xsi:type')
if type_attr and type_attr.value == 'apachens:Map':
for usertag_el in map_el.children() or []:
tag = _uc(str(usertag_el('key')))
buglist_el = usertag_el('value')
mapping[tag] = [int(bug) for bug in buglist_el.children() or []]
else:
for usertag_el in map_el.children() or []:
tag = _uc(usertag_el.get_name())
mapping[tag] = [int(bug) for bug in usertag_el.children() or []]
return mapping | Get buglists by usertags.
Parameters
----------
email : str
tags : tuple of strings
If tags are given the dictionary is limited to the matching
tags, if no tags are given all available tags are returned.
Returns
-------
mapping : dict
a mapping of usertag -> buglist | Below is the the instruction that describes the task:
### Input:
Get buglists by usertags.
Parameters
----------
email : str
tags : tuple of strings
If tags are given the dictionary is limited to the matching
tags, if no tags are given all available tags are returned.
Returns
-------
mapping : dict
a mapping of usertag -> buglist
### Response:
def get_usertag(email, *tags):
"""Get buglists by usertags.
Parameters
----------
email : str
tags : tuple of strings
If tags are given the dictionary is limited to the matching
tags, if no tags are given all available tags are returned.
Returns
-------
mapping : dict
a mapping of usertag -> buglist
"""
reply = _soap_client_call('get_usertag', email, *tags)
map_el = reply('s-gensym3')
mapping = {}
# element <s-gensym3> in response can have standard type
# xsi:type=apachens:Map (example, for email [email protected])
# OR no type, in this case keys are the names of child elements and
# the array is contained in the child elements
type_attr = map_el.attributes().get('xsi:type')
if type_attr and type_attr.value == 'apachens:Map':
for usertag_el in map_el.children() or []:
tag = _uc(str(usertag_el('key')))
buglist_el = usertag_el('value')
mapping[tag] = [int(bug) for bug in buglist_el.children() or []]
else:
for usertag_el in map_el.children() or []:
tag = _uc(usertag_el.get_name())
mapping[tag] = [int(bug) for bug in usertag_el.children() or []]
return mapping |
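Usage follows the signature directly; the email address and tag name below are placeholders rather than values from the original.
all_tags = get_usertag('[email protected]')               # every usertag for that address
subset = get_usertag('[email protected]', 'autopkgtest')  # restrict to one tag
for tag, bugs in subset.items():
    print('{0}: {1} bugs'.format(tag, len(bugs)))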
def rpc_fix_code_with_yapf(self, source, directory):
"""Formats Python code to conform to the PEP 8 style guide.
"""
source = get_source(source)
return fix_code_with_yapf(source, directory) | Formats Python code to conform to the PEP 8 style guide. | Below is the the instruction that describes the task:
### Input:
Formats Python code to conform to the PEP 8 style guide.
### Response:
def rpc_fix_code_with_yapf(self, source, directory):
"""Formats Python code to conform to the PEP 8 style guide.
"""
source = get_source(source)
return fix_code_with_yapf(source, directory) |
def stop(self):
"""
Stop all threads and modules of the client.
:return:
"""
super(BitfinexWSS, self).stop()
log.info("BitfinexWSS.stop(): Stopping client..")
log.info("BitfinexWSS.stop(): Joining receiver thread..")
try:
self.receiver_thread.join()
if self.receiver_thread.is_alive():
time.sleep(1)
except AttributeError:
log.debug("BitfinexWSS.stop(): Receiver thread was not running!")
log.info("BitfinexWSS.stop(): Joining processing thread..")
try:
self.processing_thread.join()
if self.processing_thread.is_alive():
time.sleep(1)
except AttributeError:
log.debug("BitfinexWSS.stop(): Processing thread was not running!")
log.info("BitfinexWSS.stop(): Closing websocket conection..")
try:
self.conn.close()
except WebSocketConnectionClosedException:
pass
except AttributeError:
# Connection is None
pass
self.conn = None
self.processing_thread = None
self.receiver_thread = None
log.info("BitfinexWSS.stop(): Done!") | Stop all threads and modules of the client.
:return: | Below is the the instruction that describes the task:
### Input:
Stop all threads and modules of the client.
:return:
### Response:
def stop(self):
"""
Stop all threads and modules of the client.
:return:
"""
super(BitfinexWSS, self).stop()
log.info("BitfinexWSS.stop(): Stopping client..")
log.info("BitfinexWSS.stop(): Joining receiver thread..")
try:
self.receiver_thread.join()
if self.receiver_thread.is_alive():
time.sleep(1)
except AttributeError:
log.debug("BitfinexWSS.stop(): Receiver thread was not running!")
log.info("BitfinexWSS.stop(): Joining processing thread..")
try:
self.processing_thread.join()
if self.processing_thread.is_alive():
time.sleep(1)
except AttributeError:
log.debug("BitfinexWSS.stop(): Processing thread was not running!")
log.info("BitfinexWSS.stop(): Closing websocket conection..")
try:
self.conn.close()
except WebSocketConnectionClosedException:
pass
except AttributeError:
# Connection is None
pass
self.conn = None
self.processing_thread = None
self.receiver_thread = None
log.info("BitfinexWSS.stop(): Done!") |
def boto_fix_security_token_in_profile(self, connect_args):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + self.boto_profile
if boto.config.has_option(profile, 'aws_security_token'):
connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
return connect_args | monkey patch for boto issue boto/boto#2100 | Below is the the instruction that describes the task:
### Input:
monkey patch for boto issue boto/boto#2100
### Response:
def boto_fix_security_token_in_profile(self, connect_args):
''' monkey patch for boto issue boto/boto#2100 '''
profile = 'profile ' + self.boto_profile
if boto.config.has_option(profile, 'aws_security_token'):
connect_args['security_token'] = boto.config.get(profile, 'aws_security_token')
return connect_args |
def to_df(self, varnames=None, ranefs=False, transformed=False,
chains=None):
'''
Returns the MCMC samples in a nice, neat pandas DataFrame with all MCMC chains
concatenated.
Args:
varnames (list): List of variable names to include; if None
(default), all eligible variables are included.
ranefs (bool): Whether or not to include random effects in the
returned DataFrame. Default is False.
transformed (bool): Whether or not to include internally
transformed variables in the result. Default is False.
chains (int, list): Index, or list of indexes, of chains to
concatenate. E.g., [1, 3] would concatenate the first and
third chains, and ignore any others. If None (default),
concatenates all available chains.
'''
# filter out unwanted variables
names = self._filter_names(varnames, ranefs, transformed)
# concatenate the (pre-sliced) chains
if chains is None:
chains = list(range(self.n_chains))
chains = listify(chains)
data = [self.data[:, i, :] for i in chains]
data = np.concatenate(data, axis=0)
# construct the trace DataFrame
df = sum([self.level_dict[x] for x in names], [])
df = pd.DataFrame({x: data[:, self.levels.index(x)] for x in df})
return df | Returns the MCMC samples in a nice, neat pandas DataFrame with all MCMC chains
concatenated.
Args:
varnames (list): List of variable names to include; if None
(default), all eligible variables are included.
ranefs (bool): Whether or not to include random effects in the
returned DataFrame. Default is False.
transformed (bool): Whether or not to include internally
transformed variables in the result. Default is False.
chains (int, list): Index, or list of indexes, of chains to
concatenate. E.g., [1, 3] would concatenate the first and
third chains, and ignore any others. If None (default),
concatenates all available chains. | Below is the the instruction that describes the task:
### Input:
Returns the MCMC samples in a nice, neat pandas DataFrame with all MCMC chains
concatenated.
Args:
varnames (list): List of variable names to include; if None
(default), all eligible variables are included.
ranefs (bool): Whether or not to include random effects in the
returned DataFrame. Default is True.
transformed (bool): Whether or not to include internally
transformed variables in the result. Default is False.
chains (int, list): Index, or list of indexes, of chains to
concatenate. E.g., [1, 3] would concatenate the first and
third chains, and ignore any others. If None (default),
concatenates all available chains.
### Response:
def to_df(self, varnames=None, ranefs=False, transformed=False,
chains=None):
'''
Returns the MCMC samples in a nice, neat pandas DataFrame with all MCMC chains
concatenated.
Args:
varnames (list): List of variable names to include; if None
(default), all eligible variables are included.
ranefs (bool): Whether or not to include random effects in the
returned DataFrame. Default is False.
transformed (bool): Whether or not to include internally
transformed variables in the result. Default is False.
chains (int, list): Index, or list of indexes, of chains to
concatenate. E.g., [1, 3] would concatenate the first and
third chains, and ignore any others. If None (default),
concatenates all available chains.
'''
# filter out unwanted variables
names = self._filter_names(varnames, ranefs, transformed)
# concatenate the (pre-sliced) chains
if chains is None:
chains = list(range(self.n_chains))
chains = listify(chains)
data = [self.data[:, i, :] for i in chains]
data = np.concatenate(data, axis=0)
# construct the trace DataFrame
df = sum([self.level_dict[x] for x in names], [])
df = pd.DataFrame({x: data[:, self.levels.index(x)] for x in df})
return df |
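A usage sketch, assuming `results` is the fitted MCMC results object this method is defined on and that at least three chains were run:
# Pool the first and third chains, keep random effects, drop transformed terms.
df = results.to_df(ranefs=True, transformed=False, chains=[0, 2])
print(df.shape)    # (samples_per_chain * 2, number_of_selected_parameters)
print(df.mean())   # quick posterior means, one per column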
def gen_hot_url(hot_index, page=1):
"""拼接 首页热门文章 URL
Parameters
----------
hot_index : WechatSogouConst.hot_index
首页热门文章的分类(常量):WechatSogouConst.hot_index.xxx
page : int
页数
Returns
-------
str
热门文章分类的url
"""
assert hasattr(WechatSogouConst.hot_index, hot_index)
assert isinstance(page, int) and page > 0
index_urls = {
WechatSogouConst.hot_index.hot: 0, # hot
WechatSogouConst.hot_index.gaoxiao: 1, # funny
WechatSogouConst.hot_index.health: 2, # health
WechatSogouConst.hot_index.sifanghua: 3, # intimate talk
WechatSogouConst.hot_index.gossip: 4, # gossip
WechatSogouConst.hot_index.technology: 5, # technology
WechatSogouConst.hot_index.finance: 6, # finance
WechatSogouConst.hot_index.car: 7, # cars
WechatSogouConst.hot_index.life: 8, # life
WechatSogouConst.hot_index.fashion: 9, # fashion
WechatSogouConst.hot_index.mummy: 10, # hot moms / parenting
WechatSogouConst.hot_index.travel: 11, # travel
WechatSogouConst.hot_index.job: 12, # career
WechatSogouConst.hot_index.food: 13, # food
WechatSogouConst.hot_index.history: 14, # history
WechatSogouConst.hot_index.study: 15, # top students / education
WechatSogouConst.hot_index.constellation: 16, # horoscope
WechatSogouConst.hot_index.sport: 17, # sports
WechatSogouConst.hot_index.military: 18, # military
WechatSogouConst.hot_index.game: 19, # games
WechatSogouConst.hot_index.pet: 20, # cute pets
}
return 'http://weixin.sogou.com/wapindex/wap/0612/wap_{}/{}.html'.format(index_urls[hot_index], page - 1) | Build the URL for a homepage hot-article category
Parameters
----------
hot_index : WechatSogouConst.hot_index
Category of homepage hot articles (constant): WechatSogouConst.hot_index.xxx
page : int
Page number
Returns
-------
str
URL of the hot-article category | Below is the the instruction that describes the task:
### Input:
Build the URL for a homepage hot-article category
Parameters
----------
hot_index : WechatSogouConst.hot_index
Category of homepage hot articles (constant): WechatSogouConst.hot_index.xxx
page : int
Page number
Returns
-------
str
URL of the hot-article category
### Response:
def gen_hot_url(hot_index, page=1):
"""拼接 首页热门文章 URL
Parameters
----------
hot_index : WechatSogouConst.hot_index
首页热门文章的分类(常量):WechatSogouConst.hot_index.xxx
page : int
页数
Returns
-------
str
热门文章分类的url
"""
assert hasattr(WechatSogouConst.hot_index, hot_index)
assert isinstance(page, int) and page > 0
index_urls = {
WechatSogouConst.hot_index.hot: 0, # hot
WechatSogouConst.hot_index.gaoxiao: 1, # funny
WechatSogouConst.hot_index.health: 2, # health
WechatSogouConst.hot_index.sifanghua: 3, # intimate talk
WechatSogouConst.hot_index.gossip: 4, # gossip
WechatSogouConst.hot_index.technology: 5, # technology
WechatSogouConst.hot_index.finance: 6, # finance
WechatSogouConst.hot_index.car: 7, # cars
WechatSogouConst.hot_index.life: 8, # life
WechatSogouConst.hot_index.fashion: 9, # fashion
WechatSogouConst.hot_index.mummy: 10, # hot moms / parenting
WechatSogouConst.hot_index.travel: 11, # travel
WechatSogouConst.hot_index.job: 12, # career
WechatSogouConst.hot_index.food: 13, # food
WechatSogouConst.hot_index.history: 14, # history
WechatSogouConst.hot_index.study: 15, # top students / education
WechatSogouConst.hot_index.constellation: 16, # horoscope
WechatSogouConst.hot_index.sport: 17, # sports
WechatSogouConst.hot_index.military: 18, # military
WechatSogouConst.hot_index.game: 19, # games
WechatSogouConst.hot_index.pet: 20, # cute pets
}
return 'http://weixin.sogou.com/wapindex/wap/0612/wap_{}/{}.html'.format(index_urls[hot_index], page - 1) |
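Usage follows directly from the signature; the category and page below are arbitrary examples.
url = gen_hot_url(WechatSogouConst.hot_index.technology, page=2)
# -> 'http://weixin.sogou.com/wapindex/wap/0612/wap_5/1.html'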
def diff_lorenz(value_array, sigma, beta, rho):
"""The Lorenz attractor differential equation
:param value_array: 3d array containing the x,y, and z component values.
:param sigma: Constant attractor parameter
:param beta: Constant attractor parameter
:param rho: Constant attractor parameter
:return: 3d array of the Lorenz system evaluated at `value_array`
"""
diff_array = np.zeros(3)
diff_array[0] = sigma * (value_array[1]-value_array[0])
diff_array[1] = value_array[0] * (rho - value_array[2]) - value_array[1]
diff_array[2] = value_array[0] * value_array[1] - beta * value_array[2]
return diff_array | The Lorenz attractor differential equation
:param value_array: 3d array containing the x,y, and z component values.
:param sigma: Constant attractor parameter
:param beta: Constant attractor parameter
:param rho: Constant attractor parameter
:return: 3d array of the Lorenz system evaluated at `value_array` | Below is the the instruction that describes the task:
### Input:
The Lorenz attractor differential equation
:param value_array: 3d array containing the x,y, and z component values.
:param sigma: Constant attractor parameter
:param beta: Constant attractor parameter
:param rho: Constant attractor parameter
:return: 3d array of the Lorenz system evaluated at `value_array`
### Response:
def diff_lorenz(value_array, sigma, beta, rho):
"""The Lorenz attractor differential equation
:param value_array: 3d array containing the x,y, and z component values.
:param sigma: Constant attractor parameter
:param beta: Constant attractor parameter
:param rho: Constant attractor parameter
:return: 3d array of the Lorenz system evaluated at `value_array`
"""
diff_array = np.zeros(3)
diff_array[0] = sigma * (value_array[1]-value_array[0])
diff_array[1] = value_array[0] * (rho - value_array[2]) - value_array[1]
diff_array[2] = value_array[0] * value_array[1] - beta * value_array[2]
return diff_array |
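Since the derivative does not depend on time, it drops straight into SciPy's initial-value solver with a one-line wrapper; the parameter values are the classic chaotic choice, not values taken from the original.
import numpy as np
from scipy.integrate import solve_ivp

sigma, beta, rho = 10.0, 8.0 / 3.0, 28.0

# solve_ivp expects f(t, y); diff_lorenz ignores time, so wrap it in a lambda.
sol = solve_ivp(lambda t, y: diff_lorenz(y, sigma, beta, rho),
                t_span=(0.0, 40.0),
                y0=np.array([1.0, 1.0, 1.0]),
                max_step=0.01)
x, y, z = sol.y            # three trajectories, one row per state component
print(sol.y.shape)         # (3, number_of_accepted_steps)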
def strict_parse(cls, query_str, *specs, extra_parameters=True):
""" Parse query and return :class:`.WStrictURIQuery` object
:param query_str: query component of URI to parse
:param specs: list of parameter specifications
:param extra_parameters: whether parameters that were not specified in "specs" are allowed
:return: WStrictURIQuery
"""
plain_result = cls.parse(query_str)
return WStrictURIQuery(plain_result, *specs, extra_parameters=extra_parameters) | Parse query and return :class:`.WStrictURIQuery` object
:param query_str: query component of URI to parse
:param specs: list of parameter specifications
:param extra_parameters: whether parameters that were not specified in "specs" are allowed
:return: WStrictURIQuery | Below is the the instruction that describes the task:
### Input:
Parse query and return :class:`.WStrictURIQuery` object
:param query_str: query component of URI to parse
:param specs: list of parameter specifications
:param extra_parameters: whether parameters that were not specified in "specs" are allowed
:return: WStrictURIQuery
### Response:
def strict_parse(cls, query_str, *specs, extra_parameters=True):
""" Parse query and return :class:`.WStrictURIQuery` object
:param query_str: query component of URI to parse
:param specs: list of parameter specifications
:param extra_parameters: whether parameters that were not specified in "specs" are allowed
:return: WStrictURIQuery
"""
plain_result = cls.parse(query_str)
return WStrictURIQuery(plain_result, *specs, extra_parameters=extra_parameters) |
def get_directory_relative_to_git_root(directory: str):
"""
Gets the path to the given directory relative to the git repository root in which it is a subdirectory.
:param directory: the directory within a git repository
:return: the path to the directory relative to the git repository root
"""
return os.path.relpath(os.path.realpath(directory), get_git_root_directory(directory)) | Gets the path to the given directory relative to the git repository root in which it is a subdirectory.
:param directory: the directory within a git repository
:return: the path to the directory relative to the git repository root | Below is the the instruction that describes the task:
### Input:
Gets the path to the given directory relative to the git repository root in which it is a subdirectory.
:param directory: the directory within a git repository
:return: the path to the directory relative to the git repository root
### Response:
def get_directory_relative_to_git_root(directory: str):
"""
Gets the path to the given directory relative to the git repository root in which it is a subdirectory.
:param directory: the directory within a git repository
:return: the path to the directory relative to the git repository root
"""
return os.path.relpath(os.path.realpath(directory), get_git_root_directory(directory)) |
def do_banner(self, arg, arguments):
"""
::
Usage:
banner [-c CHAR] [-n WIDTH] [-i INDENT] [-r COLOR] TEXT
Arguments:
TEXT The text message from which to create the banner
CHAR The character for the frame.
WIDTH Width of the banner
INDENT indentation of the banner
COLOR the color
Options:
-c CHAR The character for the frame. [default: #]
-n WIDTH The width of the banner. [default: 70]
-i INDENT The indentation of the banner. [default: 0]
-r COLOR The color of the banner. [default: BLACK]
Prints a banner from a one line text message.
"""
print(arguments)
n = int(arguments['-n'])
c = arguments['-c']
i = int(arguments['-i'])
color = arguments['-r'].upper()
Console._print(color, "", i * " " + (n-i) * c)
Console._print(color, "", i * " " + c + " " + arguments['TEXT'])
Console._print(color, "", i * " " + (n-i) * c) | ::
Usage:
banner [-c CHAR] [-n WIDTH] [-i INDENT] [-r COLOR] TEXT
Arguments:
TEXT The text message from which to create the banner
CHAR The character for the frame.
WIDTH Width of the banner
INDENT indentation of the banner
COLOR the color
Options:
-c CHAR The character for the frame. [default: #]
-n WIDTH The width of the banner. [default: 70]
-i INDENT The indentation of the banner. [default: 0]
-r COLOR The color of the banner. [default: BLACK]
Prints a banner from a one line text message. | Below is the the instruction that describes the task:
### Input:
::
Usage:
banner [-c CHAR] [-n WIDTH] [-i INDENT] [-r COLOR] TEXT
Arguments:
TEXT The text message from which to create the banner
CHAR The character for the frame.
WIDTH Width of the banner
INDENT indentation of the banner
COLOR the color
Options:
-c CHAR The character for the frame. [default: #]
-n WIDTH The width of the banner. [default: 70]
-i INDENT The indentation of the banner. [default: 0]
-r COLOR The color of the banner. [default: BLACK]
Prints a banner from a one line text message.
### Response:
def do_banner(self, arg, arguments):
"""
::
Usage:
banner [-c CHAR] [-n WIDTH] [-i INDENT] [-r COLOR] TEXT
Arguments:
TEXT The text message from which to create the banner
CHAR The character for the frame.
WIDTH Width of the banner
INDENT indentation of the banner
COLOR the color
Options:
-c CHAR The character for the frame. [default: #]
-n WIDTH The width of the banner. [default: 70]
-i INDENT The indentation of the banner. [default: 0]
-r COLOR The color of the banner. [default: BLACK]
Prints a banner from a one line text message.
"""
print(arguments)
n = int(arguments['-n'])
c = arguments['-c']
i = int(arguments['-i'])
color = arguments['-r'].upper()
Console._print(color, "", i * " " + (n-i) * c)
Console._print(color, "", i * " " + c + " " + arguments['TEXT'])
Console._print(color, "", i * " " + (n-i) * c) |
def set_grads(params, params_with_grad):
"""
Copies gradients from params_with_grad to params
:param params: dst parameters
:param params_with_grad: src parameters
"""
for param, param_w_grad in zip(params, params_with_grad):
if param.grad is None:
param.grad = torch.nn.Parameter(torch.empty_like(param))
param.grad.data.copy_(param_w_grad.grad.data) | Copies gradients from params_with_grad to params
:param params: dst parameters
:param params_with_grad: src parameters | Below is the the instruction that describes the task:
### Input:
Copies gradients from params_with_grad to params
:param params: dst parameters
:param params_with_grad: src parameters
### Response:
def set_grads(params, params_with_grad):
"""
Copies gradients from params_with_grad to params
:param params: dst parameters
:param params_with_grad: src parameters
"""
for param, param_w_grad in zip(params, params_with_grad):
if param.grad is None:
param.grad = torch.nn.Parameter(torch.empty_like(param))
param.grad.data.copy_(param_w_grad.grad.data) |
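A typical use is mixed-precision training with an FP32 master copy of the weights; the sketch below illustrates that pattern under stated assumptions and is not code from the original project.
import torch

model = torch.nn.Linear(4, 2).half()   # FP16 model whose backward fills .grad
master_params = [p.detach().clone().float() for p in model.parameters()]
for p in master_params:
    p.requires_grad_(True)

# ... forward/backward on the FP16 model populates .grad on model.parameters() ...

# Copy the FP16 gradients onto the FP32 master weights before the optimizer step;
# Tensor.copy_ casts float16 -> float32 during the copy.
set_grads(master_params, list(model.parameters()))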
def bind(self, database):
"""Associate the pool with a database.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
:param database: database used by the pool: used to create sessions
when needed.
"""
self._database = database
for _ in xrange(self.size):
session = self._new_session()
session.create()
self.put(session) | Associate the pool with a database.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
:param database: database used by the pool: used to create sessions
when needed. | Below is the the instruction that describes the task:
### Input:
Associate the pool with a database.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
:param database: database used by the pool: used to create sessions
when needed.
### Response:
def bind(self, database):
"""Associate the pool with a database.
:type database: :class:`~google.cloud.spanner_v1.database.Database`
:param database: database used by the pool: used to create sessions
when needed.
"""
self._database = database
for _ in xrange(self.size):
session = self._new_session()
session.create()
self.put(session) |
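A hedged sketch of putting this to use with the Cloud Spanner client; whether bind() is called explicitly or by the Database constructor can vary by client version, so only the direct call is shown and the IDs are placeholders.
from google.cloud import spanner

client = spanner.Client()
instance = client.instance('my-instance')       # placeholder instance ID
database = instance.database('my-database')     # placeholder database ID

pool = spanner.FixedSizePool(size=10)           # a pool type built on this bind()
pool.bind(database)                             # pre-creates `size` sessions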
def quantile(self, q=0.5, interpolation='linear'):
"""
Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
float or Series
If ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
See Also
--------
core.window.Rolling.quantile
numpy.percentile
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
"""
self._check_percentile(q)
# We dispatch to DataFrame so that core.internals only has to worry
# about 2D cases.
df = self.to_frame()
result = df.quantile(q=q, interpolation=interpolation,
numeric_only=False)
if result.ndim == 2:
result = result.iloc[:, 0]
if is_list_like(q):
result.name = self.name
return self._constructor(result,
index=Float64Index(q),
name=self.name)
else:
# scalar
return result.iloc[0] | Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
float or Series
If ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
See Also
--------
core.window.Rolling.quantile
numpy.percentile
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64 | Below is the the instruction that describes the task:
### Input:
Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
float or Series
If ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
See Also
--------
core.window.Rolling.quantile
numpy.percentile
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
### Response:
def quantile(self, q=0.5, interpolation='linear'):
"""
Return value at the given quantile.
Parameters
----------
q : float or array-like, default 0.5 (50% quantile)
0 <= q <= 1, the quantile(s) to compute.
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
.. versionadded:: 0.18.0
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
float or Series
If ``q`` is an array, a Series will be returned where the
index is ``q`` and the values are the quantiles, otherwise
a float will be returned.
See Also
--------
core.window.Rolling.quantile
numpy.percentile
Examples
--------
>>> s = pd.Series([1, 2, 3, 4])
>>> s.quantile(.5)
2.5
>>> s.quantile([.25, .5, .75])
0.25 1.75
0.50 2.50
0.75 3.25
dtype: float64
"""
self._check_percentile(q)
# We dispatch to DataFrame so that core.internals only has to worry
# about 2D cases.
df = self.to_frame()
result = df.quantile(q=q, interpolation=interpolation,
numeric_only=False)
if result.ndim == 2:
result = result.iloc[:, 0]
if is_list_like(q):
result.name = self.name
return self._constructor(result,
index=Float64Index(q),
name=self.name)
else:
# scalar
return result.iloc[0] |
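The interpolation argument only matters when the quantile falls between two observations; for the same series as in the docstring examples:
>>> s = pd.Series([1, 2, 3, 4])
>>> float(s.quantile(.5, interpolation='lower'))
2.0
>>> float(s.quantile(.5, interpolation='higher'))
3.0
>>> float(s.quantile(.5, interpolation='midpoint'))
2.5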