code | docstring | text |
---|---|---|
def xray_heartbeat_batch_handler(self, unused_channel, data):
"""Handle an xray heartbeat batch message from Redis."""
gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
data, 0)
heartbeat_data = gcs_entries.Entries(0)
message = (ray.gcs_utils.HeartbeatBatchTableData.
GetRootAsHeartbeatBatchTableData(heartbeat_data, 0))
for j in range(message.BatchLength()):
heartbeat_message = message.Batch(j)
num_resources = heartbeat_message.ResourcesAvailableLabelLength()
static_resources = {}
dynamic_resources = {}
for i in range(num_resources):
dyn = heartbeat_message.ResourcesAvailableLabel(i)
static = heartbeat_message.ResourcesTotalLabel(i)
dynamic_resources[dyn] = (
heartbeat_message.ResourcesAvailableCapacity(i))
static_resources[static] = (
heartbeat_message.ResourcesTotalCapacity(i))
# Update the load metrics for this raylet.
client_id = ray.utils.binary_to_hex(heartbeat_message.ClientId())
ip = self.raylet_id_to_ip_map.get(client_id)
if ip:
self.load_metrics.update(ip, static_resources,
dynamic_resources)
else:
logger.warning(
"Monitor: "
"could not find ip for client {}".format(client_id)) | Handle an xray heartbeat batch message from Redis. | Below is the the instruction that describes the task:
### Input:
Handle an xray heartbeat batch message from Redis.
### Response:
def xray_heartbeat_batch_handler(self, unused_channel, data):
"""Handle an xray heartbeat batch message from Redis."""
gcs_entries = ray.gcs_utils.GcsTableEntry.GetRootAsGcsTableEntry(
data, 0)
heartbeat_data = gcs_entries.Entries(0)
message = (ray.gcs_utils.HeartbeatBatchTableData.
GetRootAsHeartbeatBatchTableData(heartbeat_data, 0))
for j in range(message.BatchLength()):
heartbeat_message = message.Batch(j)
num_resources = heartbeat_message.ResourcesAvailableLabelLength()
static_resources = {}
dynamic_resources = {}
for i in range(num_resources):
dyn = heartbeat_message.ResourcesAvailableLabel(i)
static = heartbeat_message.ResourcesTotalLabel(i)
dynamic_resources[dyn] = (
heartbeat_message.ResourcesAvailableCapacity(i))
static_resources[static] = (
heartbeat_message.ResourcesTotalCapacity(i))
# Update the load metrics for this raylet.
client_id = ray.utils.binary_to_hex(heartbeat_message.ClientId())
ip = self.raylet_id_to_ip_map.get(client_id)
if ip:
self.load_metrics.update(ip, static_resources,
dynamic_resources)
else:
logger.warning(
"Monitor: "
"could not find ip for client {}".format(client_id)) |
def _match_maximum_decimal(self, match_key, decimal_value, match):
"""Matches a minimum decimal value"""
if decimal_value is None:
raise NullArgument()
if match is None:
match = True
if match:
ltegt = '$lte'
else:
ltegt = '$gt'
if match_key in self._query_terms:
self._query_terms[match_key][ltegt] = decimal_value
else:
self._query_terms[match_key] = {ltegt: decimal_value} | Matches a minimum decimal value | Below is the instruction that describes the task:
### Input:
Matches a minimum decimal value
### Response:
def _match_maximum_decimal(self, match_key, decimal_value, match):
"""Matches a minimum decimal value"""
if decimal_value is None:
raise NullArgument()
if match is None:
match = True
if match:
ltegt = '$lte'
else:
ltegt = '$gt'
if match_key in self._query_terms:
self._query_terms[match_key][ltegt] = decimal_value
else:
self._query_terms[match_key] = {ltegt: decimal_value} |
def variables(self, name):
"""Search for variable by name. Searches scope top down
Args:
name (string): Search term
Returns:
Variable object OR False
"""
if isinstance(name, tuple):
name = name[0]
if name.startswith('@{'):
name = '@' + name[2:-1]
i = len(self)
while i >= 0:
i -= 1
if name in self[i]['__variables__']:
return self[i]['__variables__'][name]
return False | Search for variable by name. Searches scope top down
Args:
name (string): Search term
Returns:
Variable object OR False | Below is the instruction that describes the task:
### Input:
Search for variable by name. Searches scope top down
Args:
name (string): Search term
Returns:
Variable object OR False
### Response:
def variables(self, name):
"""Search for variable by name. Searches scope top down
Args:
name (string): Search term
Returns:
Variable object OR False
"""
if isinstance(name, tuple):
name = name[0]
if name.startswith('@{'):
name = '@' + name[2:-1]
i = len(self)
while i >= 0:
i -= 1
if name in self[i]['__variables__']:
return self[i]['__variables__'][name]
return False |
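A minimal self-contained sketch of the scope layout this method walks (names here are hypothetical): `self` behaves like a list of scope dicts, each carrying a `__variables__` mapping, and the lookup runs from the innermost (last) scope outward.

```python
# Hypothetical scope stack; the real object is list-like with the same layout.
scopes = [
    {'__variables__': {'@base': 'outer-value'}},
    {'__variables__': {'@color': 'inner-value'}},
]

def find_variable(scopes, name):
    # Walk from the last (innermost) scope down to the first, as above.
    for scope in reversed(scopes):
        if name in scope['__variables__']:
            return scope['__variables__'][name]
    return False

print(find_variable(scopes, '@color'))    # 'inner-value'
print(find_variable(scopes, '@missing'))  # False
```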
def reg_incomplete_beta(a, b, x):
"""
Incomplete beta function; code translated from: Numerical Recipes in C.
:param a: a > 0
:param b: b > 0
:param x: 0 <= x <= 1.
"""
if (x == 0):
return 0
elif (x == 1):
return 1
else:
lbeta = (math.lgamma(a + b) - math.lgamma(a) - math.lgamma(b) +
a * math.log(x) + b * math.log(1 - x))
if (x < (a + 1) / (a + b + 2)):
return math.exp(lbeta) * __contfractbeta(a, b, x) / a
else:
return 1 - math.exp(lbeta) * __contfractbeta(b, a, 1 - x) / b | Incomplete beta function; code translated from: Numerical Recipes in C.
:param a: a > 0
:param b: b > 0
:param x: 0 <= x <= 1. | Below is the instruction that describes the task:
### Input:
Incomplete beta function; code translated from: Numerical Recipes in C.
:param a: a > 0
:param b: b > 0
:param x: 0 <= x <= 1.
### Response:
def reg_incomplete_beta(a, b, x):
"""
Incomplete beta function; code translated from: Numerical Recipes in C.
:param a: a > 0
:param b: b > 0
:param x: 0 <= x <= 1.
"""
if (x == 0):
return 0
elif (x == 1):
return 1
else:
lbeta = (math.lgamma(a + b) - math.lgamma(a) - math.lgamma(b) +
a * math.log(x) + b * math.log(1 - x))
if (x < (a + 1) / (a + b + 2)):
return math.exp(lbeta) * __contfractbeta(a, b, x) / a
else:
return 1 - math.exp(lbeta) * __contfractbeta(b, a, 1 - x) / b |
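For a quick sanity check of the value this computes (the `__contfractbeta` helper is not shown above), the regularized incomplete beta has a closed form for small integer parameters, and `scipy.special.betainc`, if SciPy is available, returns the same quantity.

```python
from scipy.special import betainc  # assumes SciPy is installed

# Closed form for a=2, b=3: I(x; 2, 3) = x**2 * (6 - 8*x + 3*x**2)
a, b, x = 2.0, 3.0, 0.4
print(x**2 * (6 - 8*x + 3*x**2))  # 0.5248
print(betainc(a, b, x))           # 0.5248 -- what reg_incomplete_beta(2, 3, 0.4) should return
```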
def add_input(cmd, immediate=False):
'''add some command input to be processed'''
if immediate:
process_stdin(cmd)
else:
mpstate.input_queue.put(cmd) | add some command input to be processed | Below is the instruction that describes the task:
### Input:
add some command input to be processed
### Response:
def add_input(cmd, immediate=False):
'''add some command input to be processed'''
if immediate:
process_stdin(cmd)
else:
mpstate.input_queue.put(cmd) |
def explode(self):
"""
If the current Line entity consists of multiple line
break it up into n Line entities.
Returns
----------
exploded: (n,) Line entities
"""
points = np.column_stack((
self.points,
self.points)).ravel()[1:-1].reshape((-1, 2))
exploded = [Line(i) for i in points]
return exploded | If the current Line entity consists of multiple line
break it up into n Line entities.
Returns
----------
exploded: (n,) Line entities | Below is the instruction that describes the task:
### Input:
If the current Line entity consists of multiple line
break it up into n Line entities.
Returns
----------
exploded: (n,) Line entities
### Response:
def explode(self):
"""
If the current Line entity consists of multiple line
break it up into n Line entities.
Returns
----------
exploded: (n,) Line entities
"""
points = np.column_stack((
self.points,
self.points)).ravel()[1:-1].reshape((-1, 2))
exploded = [Line(i) for i in points]
return exploded |
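The `column_stack`/`ravel` trick above turns a polyline's point indices into consecutive index pairs, one per output segment; a standalone illustration of just that step:

```python
import numpy as np

points = np.array([0, 1, 2, 3])  # indices referenced by a single Line entity
pairs = np.column_stack((points, points)).ravel()[1:-1].reshape((-1, 2))
print(pairs)
# [[0 1]
#  [1 2]
#  [2 3]]  -- each row becomes its own two-point Line
```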
def ingest(event):
'''Ingest a finished recording to the Opencast server.
'''
# Update status
set_service_status(Service.INGEST, ServiceStatus.BUSY)
notify.notify('STATUS=Uploading')
recording_state(event.uid, 'uploading')
update_event_status(event, Status.UPLOADING)
# Select ingest service
# The ingest service to use is selected at random from the available
# ingest services to ensure that not every capture agent uses the same
# service at the same time
service = config('service-ingest')
service = service[randrange(0, len(service))]
logger.info('Selecting ingest service to use: ' + service)
# create mediapackage
logger.info('Creating new mediapackage')
mediapackage = http_request(service + '/createMediaPackage')
# extract workflow_def, workflow_config and add DC catalogs
prop = 'org.opencastproject.capture.agent.properties'
dcns = 'http://www.opencastproject.org/xsd/1.0/dublincore/'
for attachment in event.get_data().get('attach'):
data = attachment.get('data')
if attachment.get('x-apple-filename') == prop:
workflow_def, workflow_config = get_config_params(data)
# Check for dublincore catalogs
elif attachment.get('fmttype') == 'application/xml' and dcns in data:
name = attachment.get('x-apple-filename', '').rsplit('.', 1)[0]
logger.info('Adding %s DC catalog' % name)
fields = [('mediaPackage', mediapackage),
('flavor', 'dublincore/%s' % name),
('dublinCore', data.encode('utf-8'))]
mediapackage = http_request(service + '/addDCCatalog', fields)
# add track
for (flavor, track) in event.get_tracks():
logger.info('Adding track ({0} -> {1})'.format(flavor, track))
track = track.encode('ascii', 'ignore')
fields = [('mediaPackage', mediapackage), ('flavor', flavor),
('BODY1', (pycurl.FORM_FILE, track))]
mediapackage = http_request(service + '/addTrack', fields)
# ingest
logger.info('Ingest recording')
fields = [('mediaPackage', mediapackage)]
if workflow_def:
fields.append(('workflowDefinitionId', workflow_def))
if event.uid:
fields.append(('workflowInstanceId',
event.uid.encode('ascii', 'ignore')))
fields += workflow_config
mediapackage = http_request(service + '/ingest', fields)
# Update status
recording_state(event.uid, 'upload_finished')
update_event_status(event, Status.FINISHED_UPLOADING)
notify.notify('STATUS=Running')
set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
logger.info('Finished ingest') | Ingest a finished recording to the Opencast server. | Below is the instruction that describes the task:
### Input:
Ingest a finished recording to the Opencast server.
### Response:
def ingest(event):
'''Ingest a finished recording to the Opencast server.
'''
# Update status
set_service_status(Service.INGEST, ServiceStatus.BUSY)
notify.notify('STATUS=Uploading')
recording_state(event.uid, 'uploading')
update_event_status(event, Status.UPLOADING)
# Select ingest service
# The ingest service to use is selected at random from the available
# ingest services to ensure that not every capture agent uses the same
# service at the same time
service = config('service-ingest')
service = service[randrange(0, len(service))]
logger.info('Selecting ingest service to use: ' + service)
# create mediapackage
logger.info('Creating new mediapackage')
mediapackage = http_request(service + '/createMediaPackage')
# extract workflow_def, workflow_config and add DC catalogs
prop = 'org.opencastproject.capture.agent.properties'
dcns = 'http://www.opencastproject.org/xsd/1.0/dublincore/'
for attachment in event.get_data().get('attach'):
data = attachment.get('data')
if attachment.get('x-apple-filename') == prop:
workflow_def, workflow_config = get_config_params(data)
# Check for dublincore catalogs
elif attachment.get('fmttype') == 'application/xml' and dcns in data:
name = attachment.get('x-apple-filename', '').rsplit('.', 1)[0]
logger.info('Adding %s DC catalog' % name)
fields = [('mediaPackage', mediapackage),
('flavor', 'dublincore/%s' % name),
('dublinCore', data.encode('utf-8'))]
mediapackage = http_request(service + '/addDCCatalog', fields)
# add track
for (flavor, track) in event.get_tracks():
logger.info('Adding track ({0} -> {1})'.format(flavor, track))
track = track.encode('ascii', 'ignore')
fields = [('mediaPackage', mediapackage), ('flavor', flavor),
('BODY1', (pycurl.FORM_FILE, track))]
mediapackage = http_request(service + '/addTrack', fields)
# ingest
logger.info('Ingest recording')
fields = [('mediaPackage', mediapackage)]
if workflow_def:
fields.append(('workflowDefinitionId', workflow_def))
if event.uid:
fields.append(('workflowInstanceId',
event.uid.encode('ascii', 'ignore')))
fields += workflow_config
mediapackage = http_request(service + '/ingest', fields)
# Update status
recording_state(event.uid, 'upload_finished')
update_event_status(event, Status.FINISHED_UPLOADING)
notify.notify('STATUS=Running')
set_service_status_immediate(Service.INGEST, ServiceStatus.IDLE)
logger.info('Finished ingest') |
def set_stderrthreshold(s):
"""Sets the stderr threshold to the value passed in.
Args:
s: str|int, valid strings values are case-insensitive 'debug',
'info', 'warning', 'error', and 'fatal'; valid integer values are
logging.DEBUG|INFO|WARNING|ERROR|FATAL.
Raises:
ValueError: Raised when s is an invalid value.
"""
if s in converter.ABSL_LEVELS:
FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
FLAGS.stderrthreshold = s
else:
raise ValueError(
'set_stderrthreshold only accepts integer absl logging level '
'from -3 to 1, or case-insensitive string values '
"'debug', 'info', 'warning', 'error', and 'fatal'. "
'But found "{}" ({}).'.format(s, type(s))) | Sets the stderr threshold to the value passed in.
Args:
s: str|int, valid strings values are case-insensitive 'debug',
'info', 'warning', 'error', and 'fatal'; valid integer values are
logging.DEBUG|INFO|WARNING|ERROR|FATAL.
Raises:
ValueError: Raised when s is an invalid value. | Below is the instruction that describes the task:
### Input:
Sets the stderr threshold to the value passed in.
Args:
s: str|int, valid strings values are case-insensitive 'debug',
'info', 'warning', 'error', and 'fatal'; valid integer values are
logging.DEBUG|INFO|WARNING|ERROR|FATAL.
Raises:
ValueError: Raised when s is an invalid value.
### Response:
def set_stderrthreshold(s):
"""Sets the stderr threshold to the value passed in.
Args:
s: str|int, valid strings values are case-insensitive 'debug',
'info', 'warning', 'error', and 'fatal'; valid integer values are
logging.DEBUG|INFO|WARNING|ERROR|FATAL.
Raises:
ValueError: Raised when s is an invalid value.
"""
if s in converter.ABSL_LEVELS:
FLAGS.stderrthreshold = converter.ABSL_LEVELS[s]
elif isinstance(s, str) and s.upper() in converter.ABSL_NAMES:
FLAGS.stderrthreshold = s
else:
raise ValueError(
'set_stderrthreshold only accepts integer absl logging level '
'from -3 to 1, or case-insensitive string values '
"'debug', 'info', 'warning', 'error', and 'fatal'. "
'But found "{}" ({}).'.format(s, type(s))) |
def sortkey(x):
"""Return '001002003' for (colorname, 1, 2, 3)"""
k = str(x[1]).zfill(3) + str(x[2]).zfill(3) + str(x[3]).zfill(3)
return k | Return '001002003' for (colorname, 1, 2, 3) | Below is the instruction that describes the task:
### Input:
Return '001002003' for (colorname, 1, 2, 3)
### Response:
def sortkey(x):
"""Return '001002003' for (colorname, 1, 2, 3)"""
k = str(x[1]).zfill(3) + str(x[2]).zfill(3) + str(x[3]).zfill(3)
return k |
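Hypothetical usage, sorting (colorname, r, g, b) tuples by the zero-padded key built above:

```python
colors = [("red", 255, 0, 0), ("navy", 0, 0, 128), ("grey", 128, 128, 128)]
print(sortkey(("navy", 0, 0, 128)))                  # '000000128'
print([c[0] for c in sorted(colors, key=sortkey)])   # ['navy', 'grey', 'red']
```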
def ServiceWorker_deliverPushMessage(self, origin, registrationId, data):
"""
Function path: ServiceWorker.deliverPushMessage
Domain: ServiceWorker
Method name: deliverPushMessage
Parameters:
Required arguments:
'origin' (type: string) -> No description
'registrationId' (type: string) -> No description
'data' (type: string) -> No description
No return value.
"""
assert isinstance(origin, (str,)
), "Argument 'origin' must be of type '['str']'. Received type: '%s'" % type(
origin)
assert isinstance(registrationId, (str,)
), "Argument 'registrationId' must be of type '['str']'. Received type: '%s'" % type(
registrationId)
assert isinstance(data, (str,)
), "Argument 'data' must be of type '['str']'. Received type: '%s'" % type(
data)
subdom_funcs = self.synchronous_command('ServiceWorker.deliverPushMessage',
origin=origin, registrationId=registrationId, data=data)
return subdom_funcs | Function path: ServiceWorker.deliverPushMessage
Domain: ServiceWorker
Method name: deliverPushMessage
Parameters:
Required arguments:
'origin' (type: string) -> No description
'registrationId' (type: string) -> No description
'data' (type: string) -> No description
No return value. | Below is the instruction that describes the task:
### Input:
Function path: ServiceWorker.deliverPushMessage
Domain: ServiceWorker
Method name: deliverPushMessage
Parameters:
Required arguments:
'origin' (type: string) -> No description
'registrationId' (type: string) -> No description
'data' (type: string) -> No description
No return value.
### Response:
def ServiceWorker_deliverPushMessage(self, origin, registrationId, data):
"""
Function path: ServiceWorker.deliverPushMessage
Domain: ServiceWorker
Method name: deliverPushMessage
Parameters:
Required arguments:
'origin' (type: string) -> No description
'registrationId' (type: string) -> No description
'data' (type: string) -> No description
No return value.
"""
assert isinstance(origin, (str,)
), "Argument 'origin' must be of type '['str']'. Received type: '%s'" % type(
origin)
assert isinstance(registrationId, (str,)
), "Argument 'registrationId' must be of type '['str']'. Received type: '%s'" % type(
registrationId)
assert isinstance(data, (str,)
), "Argument 'data' must be of type '['str']'. Received type: '%s'" % type(
data)
subdom_funcs = self.synchronous_command('ServiceWorker.deliverPushMessage',
origin=origin, registrationId=registrationId, data=data)
return subdom_funcs |
def sample_mgrid(self, mgrid: np.array) -> np.array:
"""Sample a mesh-grid array and return the result.
The :any:`sample_ogrid` method performs better as there is a lot of
overhead when working with large mesh-grids.
Args:
mgrid (numpy.ndarray): A mesh-grid array of points to sample.
A contiguous array of type `numpy.float32` is preferred.
Returns:
numpy.ndarray: An array of sampled points.
This array has the shape: ``mgrid.shape[:-1]``.
The ``dtype`` is `numpy.float32`.
"""
mgrid = np.ascontiguousarray(mgrid, np.float32)
if mgrid.shape[0] != self.dimensions:
raise ValueError(
"mgrid.shape[0] must equal self.dimensions, "
"%r[0] != %r" % (mgrid.shape, self.dimensions)
)
out = np.ndarray(mgrid.shape[1:], np.float32)
if mgrid.shape[1:] != out.shape:
raise ValueError(
"mgrid.shape[1:] must equal out.shape, "
"%r[1:] != %r" % (mgrid.shape, out.shape)
)
lib.NoiseSampleMeshGrid(
self._tdl_noise_c,
out.size,
ffi.cast("float*", mgrid.ctypes.data),
ffi.cast("float*", out.ctypes.data),
)
return out | Sample a mesh-grid array and return the result.
The :any:`sample_ogrid` method performs better as there is a lot of
overhead when working with large mesh-grids.
Args:
mgrid (numpy.ndarray): A mesh-grid array of points to sample.
A contiguous array of type `numpy.float32` is preferred.
Returns:
numpy.ndarray: An array of sampled points.
This array has the shape: ``mgrid.shape[:-1]``.
The ``dtype`` is `numpy.float32`. | Below is the instruction that describes the task:
### Input:
Sample a mesh-grid array and return the result.
The :any:`sample_ogrid` method performs better as there is a lot of
overhead when working with large mesh-grids.
Args:
mgrid (numpy.ndarray): A mesh-grid array of points to sample.
A contiguous array of type `numpy.float32` is preferred.
Returns:
numpy.ndarray: An array of sampled points.
This array has the shape: ``mgrid.shape[:-1]``.
The ``dtype`` is `numpy.float32`.
### Response:
def sample_mgrid(self, mgrid: np.array) -> np.array:
"""Sample a mesh-grid array and return the result.
The :any:`sample_ogrid` method performs better as there is a lot of
overhead when working with large mesh-grids.
Args:
mgrid (numpy.ndarray): A mesh-grid array of points to sample.
A contiguous array of type `numpy.float32` is preferred.
Returns:
numpy.ndarray: An array of sampled points.
This array has the shape: ``mgrid.shape[:-1]``.
The ``dtype`` is `numpy.float32`.
"""
mgrid = np.ascontiguousarray(mgrid, np.float32)
if mgrid.shape[0] != self.dimensions:
raise ValueError(
"mgrid.shape[0] must equal self.dimensions, "
"%r[0] != %r" % (mgrid.shape, self.dimensions)
)
out = np.ndarray(mgrid.shape[1:], np.float32)
if mgrid.shape[1:] != out.shape:
raise ValueError(
"mgrid.shape[1:] must equal out.shape, "
"%r[1:] != %r" % (mgrid.shape, out.shape)
)
lib.NoiseSampleMeshGrid(
self._tdl_noise_c,
out.size,
ffi.cast("float*", mgrid.ctypes.data),
ffi.cast("float*", out.ctypes.data),
)
return out |
def _compute_acq_withGradients(self, x):
"""
Integrated Expected Improvement and its derivative
"""
means, stds, dmdxs, dsdxs = self.model.predict_withGradients(x)
fmins = self.model.get_fmin()
f_acqu = None
df_acqu = None
for m, s, fmin, dmdx, dsdx in zip(means, stds, fmins, dmdxs, dsdxs):
phi, Phi, u = get_quantiles(self.jitter, fmin, m, s)
f = Phi
df = -(phi/s)* (dmdx + dsdx * u)
if f_acqu is None:
f_acqu = f
df_acqu = df
else:
f_acqu += f
df_acqu += df
return f_acqu/(len(means)), df_acqu/(len(means)) | Integrated Expected Improvement and its derivative | Below is the instruction that describes the task:
### Input:
Integrated Expected Improvement and its derivative
### Response:
def _compute_acq_withGradients(self, x):
"""
Integrated Expected Improvement and its derivative
"""
means, stds, dmdxs, dsdxs = self.model.predict_withGradients(x)
fmins = self.model.get_fmin()
f_acqu = None
df_acqu = None
for m, s, fmin, dmdx, dsdx in zip(means, stds, fmins, dmdxs, dsdxs):
phi, Phi, u = get_quantiles(self.jitter, fmin, m, s)
f = Phi
df = -(phi/s)* (dmdx + dsdx * u)
if f_acqu is None:
f_acqu = f
df_acqu = df
else:
f_acqu += f
df_acqu += df
return f_acqu/(len(means)), df_acqu/(len(means)) |
def _decode_v2(value):
"""
Decode ':' and '$' characters encoded by `_encode`.
"""
if re.search(r'(?<!\$):', value):
raise ValueError("Unescaped ':' in the encoded string")
decode_colons = value.replace('$:', ':')
if re.search(r'(?<!\$)(\$\$)*\$([^$]|\Z)', decode_colons):
raise ValueError("Unescaped '$' in encoded string")
return decode_colons.replace('$$', '$') | Decode ':' and '$' characters encoded by `_encode`. | Below is the instruction that describes the task:
### Input:
Decode ':' and '$' characters encoded by `_encode`.
### Response:
def _decode_v2(value):
"""
Decode ':' and '$' characters encoded by `_encode`.
"""
if re.search(r'(?<!\$):', value):
raise ValueError("Unescaped ':' in the encoded string")
decode_colons = value.replace('$:', ':')
if re.search(r'(?<!\$)(\$\$)*\$([^$]|\Z)', decode_colons):
raise ValueError("Unescaped '$' in encoded string")
return decode_colons.replace('$$', '$') |
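The matching `_encode` is not shown above; assuming it doubles '$' and then escapes ':' as '$:', a round trip would look like this (the encoder here is a guess, not the library's code):

```python
def _encode_v2_guess(value):
    # Assumed inverse of _decode_v2: escape '$' first, then ':'.
    return value.replace('$', '$$').replace(':', '$:')

raw = 'user:name$1'
encoded = _encode_v2_guess(raw)    # 'user$:name$$1'
print(_decode_v2(encoded) == raw)  # True
```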
def print_usage(actions):
"""Print the usage information. (Help screen)"""
actions = actions.items()
actions.sort()
print('usage: %s <action> [<options>]' % basename(sys.argv[0]))
print(' %s --help' % basename(sys.argv[0]))
print()
print('actions:')
for name, (func, doc, arguments) in actions:
print(' %s:' % name)
for line in doc.splitlines():
print(' %s' % line)
if arguments:
print()
for arg, shortcut, default, argtype in arguments:
if isinstance(default, bool):
print(' %s' % (
(shortcut and '-%s, ' % shortcut or '') + '--' + arg
))
else:
print(' %-30s%-10s%s' % (
(shortcut and '-%s, ' % shortcut or '') + '--' + arg,
argtype, default
))
print() | Print the usage information. (Help screen) | Below is the instruction that describes the task:
### Input:
Print the usage information. (Help screen)
### Response:
def print_usage(actions):
"""Print the usage information. (Help screen)"""
actions = actions.items()
actions.sort()
print('usage: %s <action> [<options>]' % basename(sys.argv[0]))
print(' %s --help' % basename(sys.argv[0]))
print()
print('actions:')
for name, (func, doc, arguments) in actions:
print(' %s:' % name)
for line in doc.splitlines():
print(' %s' % line)
if arguments:
print()
for arg, shortcut, default, argtype in arguments:
if isinstance(default, bool):
print(' %s' % (
(shortcut and '-%s, ' % shortcut or '') + '--' + arg
))
else:
print(' %-30s%-10s%s' % (
(shortcut and '-%s, ' % shortcut or '') + '--' + arg,
argtype, default
))
print() |
def push(self, buf):
""" Push a buffer into the source. """
self._src.emit('push-buffer', Gst.Buffer.new_wrapped(buf)) | Push a buffer into the source. | Below is the instruction that describes the task:
### Input:
Push a buffer into the source.
### Response:
def push(self, buf):
""" Push a buffer into the source. """
self._src.emit('push-buffer', Gst.Buffer.new_wrapped(buf)) |
def get_watermark_for_topic(
kafka_client,
topic,
):
"""This method:
* refreshes metadata for the kafka client
* fetches watermarks
:param kafka_client: KafkaToolClient instance
:param topic: the topic
:returns: dict <topic>: [ConsumerPartitionOffsets]
"""
# Refresh client metadata. We do not use the topic list, because we
# don't want to accidentally create the topic if it does not exist.
# If Kafka is unavailable, let's retry loading client metadata
try:
kafka_client.load_metadata_for_topics()
except KafkaUnavailableError:
kafka_client.load_metadata_for_topics()
watermarks = get_topics_watermarks(
kafka_client, [topic]
)
return watermarks | This method:
* refreshes metadata for the kafka client
* fetches watermarks
:param kafka_client: KafkaToolClient instance
:param topic: the topic
:returns: dict <topic>: [ConsumerPartitionOffsets] | Below is the instruction that describes the task:
### Input:
This method:
* refreshes metadata for the kafka client
* fetches watermarks
:param kafka_client: KafkaToolClient instance
:param topic: the topic
:returns: dict <topic>: [ConsumerPartitionOffsets]
### Response:
def get_watermark_for_topic(
kafka_client,
topic,
):
"""This method:
* refreshes metadata for the kafka client
* fetches watermarks
:param kafka_client: KafkaToolClient instance
:param topic: the topic
:returns: dict <topic>: [ConsumerPartitionOffsets]
"""
# Refresh client metadata. We do not use the topic list, because we
# don't want to accidentally create the topic if it does not exist.
# If Kafka is unavailable, let's retry loading client metadata
try:
kafka_client.load_metadata_for_topics()
except KafkaUnavailableError:
kafka_client.load_metadata_for_topics()
watermarks = get_topics_watermarks(
kafka_client, [topic]
)
return watermarks |
def save_task(self):
"""Transition to save the task and return to ``ASSIGNED`` state."""
task = self.request.activation.task
task.status = STATUS.ASSIGNED
task.save() | Transition to save the task and return to ``ASSIGNED`` state. | Below is the instruction that describes the task:
### Input:
Transition to save the task and return to ``ASSIGNED`` state.
### Response:
def save_task(self):
"""Transition to save the task and return to ``ASSIGNED`` state."""
task = self.request.activation.task
task.status = STATUS.ASSIGNED
task.save() |
def replace_print(fileobj=sys.stderr):
"""Sys.out replacer, by default with stderr.
Use it like this:
with replace_print_with(fileobj):
print "hello" # writes to the file
print "done" # prints to stdout
Args:
fileobj: a file object to replace stdout.
Yields:
The printer.
"""
printer = _Printer(fileobj)
previous_stdout = sys.stdout
sys.stdout = printer
try:
yield printer
finally:
sys.stdout = previous_stdout | Sys.out replacer, by default with stderr.
Use it like this:
with replace_print_with(fileobj):
print "hello" # writes to the file
print "done" # prints to stdout
Args:
fileobj: a file object to replace stdout.
Yields:
The printer. | Below is the instruction that describes the task:
### Input:
Sys.out replacer, by default with stderr.
Use it like this:
with replace_print_with(fileobj):
print "hello" # writes to the file
print "done" # prints to stdout
Args:
fileobj: a file object to replace stdout.
Yields:
The printer.
### Response:
def replace_print(fileobj=sys.stderr):
"""Sys.out replacer, by default with stderr.
Use it like this:
with replace_print_with(fileobj):
print "hello" # writes to the file
print "done" # prints to stdout
Args:
fileobj: a file object to replace stdout.
Yields:
The printer.
"""
printer = _Printer(fileobj)
previous_stdout = sys.stdout
sys.stdout = printer
try:
yield printer
finally:
sys.stdout = previous_stdout |
def run(self,
ipyclient=None,
quiet=False,
force=False,
block=False,
):
"""
Submits raxml job to run. If no ipyclient object is provided then
the function will block until the raxml run is finished. If an ipyclient
is provided then the job is sent to a remote engine and an asynchronous
result object is returned which can be queried or awaited until it finishes.
Parameters
-----------
ipyclient:
Not yet supported...
quiet:
suppress print statements
force:
overwrite existing results files with this job name.
block:
will block progress in notebook until job finishes, even if job
is running on a remote ipyclient.
"""
## stop before trying in raxml
if force:
for key, oldfile in self.trees:
if os.path.exists(oldfile):
os.remove(oldfile)
if os.path.exists(self.trees.info):
print("Error: set a new name for this job or use Force flag.\nFile exists: {}"\
.format(self.trees.info))
return
## TODO: add a progress bar tracker here. It could even read it from
## the info file that is being written.
## submit it
if not ipyclient:
proc = _call_raxml(self._command_list)
self.stdout = proc[0]
self.stderr = proc[1]
else:
## find all hosts and submit job to the host with most available engines
lbview = ipyclient.load_balanced_view()
self.async = lbview.apply(_call_raxml, self._command_list)
## initiate random seed
if not quiet:
if not ipyclient:
## look for errors
if "Overall execution time" not in self.stdout:
print("Error in raxml run\n" + self.stdout)
else:
print("job {} finished successfully".format(self.params.n))
else:
print("job {} submitted to cluster".format(self.params.n)) | Submits raxml job to run. If no ipyclient object is provided then
the function will block until the raxml run is finished. If an ipyclient
is provided then the job is sent to a remote engine and an asynchronous
result object is returned which can be queried or awaited until it finishes.
Parameters
-----------
ipyclient:
Not yet supported...
quiet:
suppress print statements
force:
overwrite existing results files with this job name.
block:
will block progress in notebook until job finishes, even if job
is running on a remote ipyclient. | Below is the instruction that describes the task:
### Input:
Submits raxml job to run. If no ipyclient object is provided then
the function will block until the raxml run is finished. If an ipyclient
is provided then the job is sent to a remote engine and an asynchronous
result object is returned which can be queried or awaited until it finishes.
Parameters
-----------
ipyclient:
Not yet supported...
quiet:
suppress print statements
force:
overwrite existing results files with this job name.
block:
will block progress in notebook until job finishes, even if job
is running on a remote ipyclient.
### Response:
def run(self,
ipyclient=None,
quiet=False,
force=False,
block=False,
):
"""
Submits raxml job to run. If no ipyclient object is provided then
the function will block until the raxml run is finished. If an ipyclient
is provided then the job is sent to a remote engine and an asynchronous
result object is returned which can be queried or awaited until it finishes.
Parameters
-----------
ipyclient:
Not yet supported...
quiet:
suppress print statements
force:
overwrite existing results files with this job name.
block:
will block progress in notebook until job finishes, even if job
is running on a remote ipyclient.
"""
## stop before trying in raxml
if force:
for key, oldfile in self.trees:
if os.path.exists(oldfile):
os.remove(oldfile)
if os.path.exists(self.trees.info):
print("Error: set a new name for this job or use Force flag.\nFile exists: {}"\
.format(self.trees.info))
return
## TODO: add a progress bar tracker here. It could even read it from
## the info file that is being written.
## submit it
if not ipyclient:
proc = _call_raxml(self._command_list)
self.stdout = proc[0]
self.stderr = proc[1]
else:
## find all hosts and submit job to the host with most available engines
lbview = ipyclient.load_balanced_view()
self.async = lbview.apply(_call_raxml, self._command_list)
## initiate random seed
if not quiet:
if not ipyclient:
## look for errors
if "Overall execution time" not in self.stdout:
print("Error in raxml run\n" + self.stdout)
else:
print("job {} finished successfully".format(self.params.n))
else:
print("job {} submitted to cluster".format(self.params.n)) |
def in6_getLocalUniquePrefix():
"""
Returns a pseudo-randomly generated Local Unique prefix. Function
follows recommandation of Section 3.2.2 of RFC 4193 for prefix
generation.
"""
# Extracted from RFC 1305 (NTP) :
# NTP timestamps are represented as a 64-bit unsigned fixed-point number,
# in seconds relative to 0h on 1 January 1900. The integer part is in the
# first 32 bits and the fraction part in the last 32 bits.
# epoch = (1900, 1, 1, 0, 0, 0, 5, 1, 0)
# x = time.time()
# from time import gmtime, strftime, gmtime, mktime
# delta = mktime(gmtime(0)) - mktime(self.epoch)
# x = x-delta
tod = time.time() # time of day. Will bother with epoch later
i = int(tod)
j = int((tod - i)*(2**32))
tod = struct.pack("!II", i,j)
# TODO: Add some check regarding system address gathering
rawmac = get_if_raw_hwaddr(conf.iface6)
mac = b":".join(map(lambda x: b"%.02x" % ord(x), list(rawmac)))
# construct modified EUI-64 ID
eui64 = inet_pton(socket.AF_INET6, '::' + in6_mactoifaceid(mac))[8:]
import sha
globalid = sha.new(tod+eui64).digest()[:5]
return inet_ntop(socket.AF_INET6, b'\xfd' + globalid + b'\x00'*10) | Returns a pseudo-randomly generated Local Unique prefix. Function
follows recommandation of Section 3.2.2 of RFC 4193 for prefix
generation. | Below is the instruction that describes the task:
### Input:
Returns a pseudo-randomly generated Local Unique prefix. Function
follows recommandation of Section 3.2.2 of RFC 4193 for prefix
generation.
### Response:
def in6_getLocalUniquePrefix():
"""
Returns a pseudo-randomly generated Local Unique prefix. Function
follows recommandation of Section 3.2.2 of RFC 4193 for prefix
generation.
"""
# Extracted from RFC 1305 (NTP) :
# NTP timestamps are represented as a 64-bit unsigned fixed-point number,
# in seconds relative to 0h on 1 January 1900. The integer part is in the
# first 32 bits and the fraction part in the last 32 bits.
# epoch = (1900, 1, 1, 0, 0, 0, 5, 1, 0)
# x = time.time()
# from time import gmtime, strftime, gmtime, mktime
# delta = mktime(gmtime(0)) - mktime(self.epoch)
# x = x-delta
tod = time.time() # time of day. Will bother with epoch later
i = int(tod)
j = int((tod - i)*(2**32))
tod = struct.pack("!II", i,j)
# TODO: Add some check regarding system address gathering
rawmac = get_if_raw_hwaddr(conf.iface6)
mac = b":".join(map(lambda x: b"%.02x" % ord(x), list(rawmac)))
# construct modified EUI-64 ID
eui64 = inet_pton(socket.AF_INET6, '::' + in6_mactoifaceid(mac))[8:]
import sha
globalid = sha.new(tod+eui64).digest()[:5]
return inet_ntop(socket.AF_INET6, b'\xfd' + globalid + b'\x00'*10) |
def collect_results(rule, max_results=500, result_stream_args=None):
"""
Utility function to quickly get a list of tweets from a ``ResultStream``
without keeping the object around. Requires your args to be configured
prior to using.
Args:
rule (str): valid powertrack rule for your account, preferably
generated by the `gen_rule_payload` function.
max_results (int): maximum number of tweets or counts to return from
the API / underlying ``ResultStream`` object.
result_stream_args (dict): configuration dict that has connection
information for a ``ResultStream`` object.
Returns:
list of results
Example:
>>> from searchtweets import collect_results
>>> tweets = collect_results(rule,
max_results=500,
result_stream_args=search_args)
"""
if result_stream_args is None:
logger.error("This function requires a configuration dict for the "
"inner ResultStream object.")
raise KeyError
rs = ResultStream(rule_payload=rule,
max_results=max_results,
**result_stream_args)
return list(rs.stream()) | Utility function to quickly get a list of tweets from a ``ResultStream``
without keeping the object around. Requires your args to be configured
prior to using.
Args:
rule (str): valid powertrack rule for your account, preferably
generated by the `gen_rule_payload` function.
max_results (int): maximum number of tweets or counts to return from
the API / underlying ``ResultStream`` object.
result_stream_args (dict): configuration dict that has connection
information for a ``ResultStream`` object.
Returns:
list of results
Example:
>>> from searchtweets import collect_results
>>> tweets = collect_results(rule,
max_results=500,
result_stream_args=search_args) | Below is the instruction that describes the task:
### Input:
Utility function to quickly get a list of tweets from a ``ResultStream``
without keeping the object around. Requires your args to be configured
prior to using.
Args:
rule (str): valid powertrack rule for your account, preferably
generated by the `gen_rule_payload` function.
max_results (int): maximum number of tweets or counts to return from
the API / underlying ``ResultStream`` object.
result_stream_args (dict): configuration dict that has connection
information for a ``ResultStream`` object.
Returns:
list of results
Example:
>>> from searchtweets import collect_results
>>> tweets = collect_results(rule,
max_results=500,
result_stream_args=search_args)
### Response:
def collect_results(rule, max_results=500, result_stream_args=None):
"""
Utility function to quickly get a list of tweets from a ``ResultStream``
without keeping the object around. Requires your args to be configured
prior to using.
Args:
rule (str): valid powertrack rule for your account, preferably
generated by the `gen_rule_payload` function.
max_results (int): maximum number of tweets or counts to return from
the API / underlying ``ResultStream`` object.
result_stream_args (dict): configuration dict that has connection
information for a ``ResultStream`` object.
Returns:
list of results
Example:
>>> from searchtweets import collect_results
>>> tweets = collect_results(rule,
max_results=500,
result_stream_args=search_args)
"""
if result_stream_args is None:
logger.error("This function requires a configuration dict for the "
"inner ResultStream object.")
raise KeyError
rs = ResultStream(rule_payload=rule,
max_results=max_results,
**result_stream_args)
return list(rs.stream()) |
def logical_lines(lines):
"""Merge lines into chunks according to q rules"""
if isinstance(lines, string_types):
lines = StringIO(lines)
buf = []
for line in lines:
if buf and not line.startswith(' '):
chunk = ''.join(buf).strip()
if chunk:
yield chunk
buf[:] = []
buf.append(line)
chunk = ''.join(buf).strip()
if chunk:
yield chunk | Merge lines into chunks according to q rules | Below is the instruction that describes the task:
### Input:
Merge lines into chunks according to q rules
### Response:
def logical_lines(lines):
"""Merge lines into chunks according to q rules"""
if isinstance(lines, string_types):
lines = StringIO(lines)
buf = []
for line in lines:
if buf and not line.startswith(' '):
chunk = ''.join(buf).strip()
if chunk:
yield chunk
buf[:] = []
buf.append(line)
chunk = ''.join(buf).strip()
if chunk:
yield chunk |
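Hypothetical usage: indented continuation lines are folded into the preceding chunk, mirroring how q treats leading whitespace (assumes the module's `string_types`/`StringIO` imports are in scope, as in the function above).

```python
src = "f:{x+y}\n  x+1\ng:{x*y}\n"
print(list(logical_lines(src)))
# ['f:{x+y}\n  x+1', 'g:{x*y}']
```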
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.client.ReqChannel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
finally:
channel.close() | Send mine data to the master | Below is the instruction that describes the task:
### Input:
Send mine data to the master
### Response:
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.client.ReqChannel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
finally:
channel.close() |
def dump(self, force=False):
"""
Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value
"""
if self._parsed is None:
self.parse()
return self._parsed[0].dump(force=force) | Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value | Below is the instruction that describes the task:
### Input:
Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value
### Response:
def dump(self, force=False):
"""
Encodes the value using DER
:param force:
If the encoded contents already exist, clear them and regenerate
to ensure they are in DER format instead of BER format
:return:
A byte string of the DER-encoded value
"""
if self._parsed is None:
self.parse()
return self._parsed[0].dump(force=force) |
def _get_style_id_from_style(self, style, style_type):
"""
Return the id of *style*, or |None| if it is the default style of
*style_type*. Raises |ValueError| if style is not of *style_type*.
"""
if style.type != style_type:
raise ValueError(
"assigned style is type %s, need type %s" %
(style.type, style_type)
)
if style == self.default(style_type):
return None
return style.style_id | Return the id of *style*, or |None| if it is the default style of
*style_type*. Raises |ValueError| if style is not of *style_type*. | Below is the instruction that describes the task:
### Input:
Return the id of *style*, or |None| if it is the default style of
*style_type*. Raises |ValueError| if style is not of *style_type*.
### Response:
def _get_style_id_from_style(self, style, style_type):
"""
Return the id of *style*, or |None| if it is the default style of
*style_type*. Raises |ValueError| if style is not of *style_type*.
"""
if style.type != style_type:
raise ValueError(
"assigned style is type %s, need type %s" %
(style.type, style_type)
)
if style == self.default(style_type):
return None
return style.style_id |
def alerts(self):
"""
:rtype: twilio.rest.monitor.v1.alert.AlertList
"""
if self._alerts is None:
self._alerts = AlertList(self)
return self._alerts | :rtype: twilio.rest.monitor.v1.alert.AlertList | Below is the instruction that describes the task:
### Input:
:rtype: twilio.rest.monitor.v1.alert.AlertList
### Response:
def alerts(self):
"""
:rtype: twilio.rest.monitor.v1.alert.AlertList
"""
if self._alerts is None:
self._alerts = AlertList(self)
return self._alerts |
def delete(self):
"""Remove this resource or collection (recursive).
See DAVResource.delete()
"""
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
shutil.rmtree(self._file_path, ignore_errors=False)
self.remove_all_properties(True)
self.remove_all_locks(True) | Remove this resource or collection (recursive).
See DAVResource.delete() | Below is the instruction that describes the task:
### Input:
Remove this resource or collection (recursive).
See DAVResource.delete()
### Response:
def delete(self):
"""Remove this resource or collection (recursive).
See DAVResource.delete()
"""
if self.provider.readonly:
raise DAVError(HTTP_FORBIDDEN)
shutil.rmtree(self._file_path, ignore_errors=False)
self.remove_all_properties(True)
self.remove_all_locks(True) |
def eq(self, r1, r2):
""" True if values of r1 and r2 registers are equal
"""
if not is_register(r1) or not is_register(r2):
return False
if self.regs[r1] is None or self.regs[r2] is None: # HINT: This's been never USED??
return False
return self.regs[r1] == self.regs[r2] | True if values of r1 and r2 registers are equal | Below is the instruction that describes the task:
### Input:
True if values of r1 and r2 registers are equal
### Response:
def eq(self, r1, r2):
""" True if values of r1 and r2 registers are equal
"""
if not is_register(r1) or not is_register(r2):
return False
if self.regs[r1] is None or self.regs[r2] is None: # HINT: This's been never USED??
return False
return self.regs[r1] == self.regs[r2] |
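A self-contained sketch of the comparison rule above (register names and values here are hypothetical): registers whose tracked value is unknown (None) never compare equal, not even to themselves.

```python
regs = {'a': 5, 'b': 5, 'c': None}

def regs_equal(r1, r2):
    if regs[r1] is None or regs[r2] is None:
        return False  # unknown values are never considered equal
    return regs[r1] == regs[r2]

print(regs_equal('a', 'b'))  # True
print(regs_equal('a', 'c'))  # False
print(regs_equal('c', 'c'))  # False -- unknown even against itself
```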
def _filter_with_hooks(self, svc_event, listeners):
"""
Filters listeners with EventListenerHooks
:param svc_event: ServiceEvent being triggered
:param listeners: Listeners to filter
:return: A list of listeners with hook references
"""
svc_ref = svc_event.get_service_reference()
# Get EventListenerHooks service refs from registry
hook_refs = self._registry.find_service_references(
SERVICE_EVENT_LISTENER_HOOK
)
# only do something if there are some hook_refs
if hook_refs:
# Associate bundle context to hooks
ctx_listeners = {}
for listener in listeners:
context = listener.bundle_context
ctx_listeners.setdefault(context, []).append(listener)
# Convert the dictionary to a shrinkable one,
# with shrinkable lists of listeners
shrinkable_ctx_listeners = ShrinkableMap(
{
context: ShrinkableList(value)
for context, value in ctx_listeners.items()
}
)
for hook_ref in hook_refs:
if not svc_ref == hook_ref:
# Get the bundle of the hook service
hook_bundle = hook_ref.get_bundle()
# lookup service from registry
hook_svc = self._registry.get_service(hook_bundle, hook_ref)
if hook_svc is not None:
# call event method of the hook service,
# pass in svc_event and shrinkable_ctx_listeners
# (which can be modified by hook)
try:
hook_svc.event(svc_event, shrinkable_ctx_listeners)
except:
self._logger.exception(
"Error calling EventListenerHook"
)
finally:
# Clean up the service
self._registry.unget_service(hook_bundle, hook_ref)
# Convert the shrinkable_ctx_listeners back to a list of listeners
# before returning
ret_listeners = set()
for bnd_listeners in shrinkable_ctx_listeners.values():
ret_listeners.update(bnd_listeners)
return ret_listeners
# No hook ref
return listeners | Filters listeners with EventListenerHooks
:param svc_event: ServiceEvent being triggered
:param listeners: Listeners to filter
:return: A list of listeners with hook references | Below is the instruction that describes the task:
### Input:
Filters listeners with EventListenerHooks
:param svc_event: ServiceEvent being triggered
:param listeners: Listeners to filter
:return: A list of listeners with hook references
### Response:
def _filter_with_hooks(self, svc_event, listeners):
"""
Filters listeners with EventListenerHooks
:param svc_event: ServiceEvent being triggered
:param listeners: Listeners to filter
:return: A list of listeners with hook references
"""
svc_ref = svc_event.get_service_reference()
# Get EventListenerHooks service refs from registry
hook_refs = self._registry.find_service_references(
SERVICE_EVENT_LISTENER_HOOK
)
# only do something if there are some hook_refs
if hook_refs:
# Associate bundle context to hooks
ctx_listeners = {}
for listener in listeners:
context = listener.bundle_context
ctx_listeners.setdefault(context, []).append(listener)
# Convert the dictionary to a shrinkable one,
# with shrinkable lists of listeners
shrinkable_ctx_listeners = ShrinkableMap(
{
context: ShrinkableList(value)
for context, value in ctx_listeners.items()
}
)
for hook_ref in hook_refs:
if not svc_ref == hook_ref:
# Get the bundle of the hook service
hook_bundle = hook_ref.get_bundle()
# lookup service from registry
hook_svc = self._registry.get_service(hook_bundle, hook_ref)
if hook_svc is not None:
# call event method of the hook service,
# pass in svc_event and shrinkable_ctx_listeners
# (which can be modified by hook)
try:
hook_svc.event(svc_event, shrinkable_ctx_listeners)
except:
self._logger.exception(
"Error calling EventListenerHook"
)
finally:
# Clean up the service
self._registry.unget_service(hook_bundle, hook_ref)
# Convert the shrinkable_ctx_listeners back to a list of listeners
# before returning
ret_listeners = set()
for bnd_listeners in shrinkable_ctx_listeners.values():
ret_listeners.update(bnd_listeners)
return ret_listeners
# No hook ref
return listeners |
def _prepend_row_index(rows, index):
"""Add a left-most index column."""
if index is None or index is False:
return rows
if len(index) != len(rows):
print('index=', index)
print('rows=', rows)
raise ValueError('index must be as long as the number of data rows')
rows = [[v] + list(row) for v, row in zip(index, rows)]
return rows | Add a left-most index column. | Below is the instruction that describes the task:
### Input:
Add a left-most index column.
### Response:
def _prepend_row_index(rows, index):
"""Add a left-most index column."""
if index is None or index is False:
return rows
if len(index) != len(rows):
print('index=', index)
print('rows=', rows)
raise ValueError('index must be as long as the number of data rows')
rows = [[v] + list(row) for v, row in zip(index, rows)]
return rows |
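Hypothetical usage: each index value becomes the left-most cell of its row, and a `None` or `False` index leaves the rows untouched.

```python
rows = [["a", 1], ["b", 2]]
print(_prepend_row_index(rows, ["r1", "r2"]))  # [['r1', 'a', 1], ['r2', 'b', 2]]
print(_prepend_row_index(rows, None))          # [['a', 1], ['b', 2]] -- unchanged
```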
def _unpublish(self):
"""
Process an unpublish action on the related object, returns a boolean if a change is made.
Only objects with a current active version will be updated.
"""
obj = self.content_object
actioned = False
# Only update if needed
if obj.current_version is not None:
obj.current_version = None
obj.save(update_fields=['current_version'])
actioned = True
return actioned | Process an unpublish action on the related object, returns a boolean if a change is made.
Only objects with a current active version will be updated. | Below is the instruction that describes the task:
### Input:
Process an unpublish action on the related object, returns a boolean if a change is made.
Only objects with a current active version will be updated.
### Response:
def _unpublish(self):
"""
Process an unpublish action on the related object, returns a boolean if a change is made.
Only objects with a current active version will be updated.
"""
obj = self.content_object
actioned = False
# Only update if needed
if obj.current_version is not None:
obj.current_version = None
obj.save(update_fields=['current_version'])
actioned = True
return actioned |
def validatePopElement(self, doc, elem, qname):
"""Pop the element end from the validation stack. """
if doc is None: doc__o = None
else: doc__o = doc._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlValidatePopElement(self._o, doc__o, elem__o, qname)
return ret | Pop the element end from the validation stack. | Below is the instruction that describes the task:
### Input:
Pop the element end from the validation stack.
### Response:
def validatePopElement(self, doc, elem, qname):
"""Pop the element end from the validation stack. """
if doc is None: doc__o = None
else: doc__o = doc._o
if elem is None: elem__o = None
else: elem__o = elem._o
ret = libxml2mod.xmlValidatePopElement(self._o, doc__o, elem__o, qname)
return ret |
def save_target_classes(self, filename):
"""Saves target classed for all dataset images into given file."""
with open(filename, 'w') as f:
for k, v in self._target_classes.items():
f.write('{0}.png,{1}\n'.format(k, v)) | Saves target classed for all dataset images into given file. | Below is the instruction that describes the task:
### Input:
Saves target classed for all dataset images into given file.
### Response:
def save_target_classes(self, filename):
"""Saves target classed for all dataset images into given file."""
with open(filename, 'w') as f:
for k, v in self._target_classes.items():
f.write('{0}.png,{1}\n'.format(k, v)) |
def insertBulkBlock(self):
"""
API to insert a bulk block
:param blockDump: Output of the block dump command
:type blockDump: dict
"""
try:
body = request.body.read()
indata = cjson.decode(body)
if (indata.get("file_parent_list", []) and indata.get("dataset_parent_list", [])):
dbsExceptionHandler("dbsException-invalid-input2", "insertBulkBlock: dataset and file parentages cannot be in the input at the same time",
self.logger.exception, "insertBulkBlock: datset and file parentages cannot be in the input at the same time.")
indata = validateJSONInputNoCopy("blockBulk", indata)
self.dbsBlockInsert.putBlock(indata)
except cjson.DecodeError as dc:
dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert BulkBlock input", self.logger.exception, str(dc))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except HTTPError as he:
raise he
except Exception as ex:
#illegal variable name/number
if str(ex).find("ORA-01036") != -1:
dbsExceptionHandler("dbsException-invalid-input2", "illegal variable name/number from input", self.logger.exception, str(ex))
else:
sError = "DBSWriterModel/insertBulkBlock. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) | API to insert a bulk block
:param blockDump: Output of the block dump command
:type blockDump: dict | Below is the instruction that describes the task:
### Input:
API to insert a bulk block
:param blockDump: Output of the block dump command
:type blockDump: dict
### Response:
def insertBulkBlock(self):
"""
API to insert a bulk block
:param blockDump: Output of the block dump command
:type blockDump: dict
"""
try:
body = request.body.read()
indata = cjson.decode(body)
if (indata.get("file_parent_list", []) and indata.get("dataset_parent_list", [])):
dbsExceptionHandler("dbsException-invalid-input2", "insertBulkBlock: dataset and file parentages cannot be in the input at the same time",
self.logger.exception, "insertBulkBlock: datset and file parentages cannot be in the input at the same time.")
indata = validateJSONInputNoCopy("blockBulk", indata)
self.dbsBlockInsert.putBlock(indata)
except cjson.DecodeError as dc:
dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert BulkBlock input", self.logger.exception, str(dc))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except HTTPError as he:
raise he
except Exception as ex:
#illegal variable name/number
if str(ex).find("ORA-01036") != -1:
dbsExceptionHandler("dbsException-invalid-input2", "illegal variable name/number from input", self.logger.exception, str(ex))
else:
sError = "DBSWriterModel/insertBulkBlock. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError) |
def create(name, **params):
'''
Function to create device in Server Density. For more info, see the `API
docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.create lama
salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768
'''
log.debug('Server Density params: %s', params)
params = _clean_salt_variables(params)
params['name'] = name
api_response = requests.post(
'https://api.serverdensity.io/inventory/devices/',
params={'token': get_sd_auth('api_token')},
data=params
)
log.debug('Server Density API Response: %s', api_response)
log.debug('Server Density API Response content: %s', api_response.content)
if api_response.status_code == 200:
try:
return salt.utils.json.loads(api_response.content)
except ValueError:
log.error('Could not parse API Response content: %s', api_response.content)
raise CommandExecutionError(
'Failed to create, API Response: {0}'.format(api_response)
)
else:
return None | Function to create device in Server Density. For more info, see the `API
docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.create lama
salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768 | Below is the instruction that describes the task:
### Input:
Function to create device in Server Density. For more info, see the `API
docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.create lama
salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768
### Response:
def create(name, **params):
'''
Function to create device in Server Density. For more info, see the `API
docs`__.
.. __: https://apidocs.serverdensity.com/Inventory/Devices/Creating
CLI Example:
.. code-block:: bash
salt '*' serverdensity_device.create lama
salt '*' serverdensity_device.create rich_lama group=lama_band installedRAM=32768
'''
log.debug('Server Density params: %s', params)
params = _clean_salt_variables(params)
params['name'] = name
api_response = requests.post(
'https://api.serverdensity.io/inventory/devices/',
params={'token': get_sd_auth('api_token')},
data=params
)
log.debug('Server Density API Response: %s', api_response)
log.debug('Server Density API Response content: %s', api_response.content)
if api_response.status_code == 200:
try:
return salt.utils.json.loads(api_response.content)
except ValueError:
log.error('Could not parse API Response content: %s', api_response.content)
raise CommandExecutionError(
'Failed to create, API Response: {0}'.format(api_response)
)
else:
return None |
def _evaluate(self,R,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
Phi(R,phi,t)
HISTORY:
2017-10-16 - Written - Bovy (UofT)
"""
return 0.5*R*R*(1.+2./3.*R*numpy.sin(3.*phi)) | NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
Phi(R,phi,t)
HISTORY:
2017-10-16 - Written - Bovy (UofT) | Below is the instruction that describes the task:
### Input:
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
Phi(R,phi,t)
HISTORY:
2017-10-16 - Written - Bovy (UofT)
### Response:
def _evaluate(self,R,phi=0.,t=0.):
"""
NAME:
_evaluate
PURPOSE:
evaluate the potential at R,phi,t
INPUT:
R - Galactocentric cylindrical radius
phi - azimuth
t - time
OUTPUT:
Phi(R,phi,t)
HISTORY:
2017-10-16 - Written - Bovy (UofT)
"""
return 0.5*R*R*(1.+2./3.*R*numpy.sin(3.*phi)) |
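A standalone numpy sketch of the same Phi(R, phi) expression evaluated on a small grid; the grid values are illustrative only:

import numpy

def potential(R, phi):
    # same m=3 perturbed potential as _evaluate above
    return 0.5 * R * R * (1. + 2. / 3. * R * numpy.sin(3. * phi))

R = numpy.linspace(0.5, 1.5, 5)
phi = numpy.linspace(0., 2. * numpy.pi, 7)
values = potential(R[:, None], phi[None, :])  # broadcast to a 5x7 grid
print(values.shape, values.min(), values.max())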
def draw_commands(self, surf):
"""Draw the list of available commands."""
past_abilities = {act.ability for act in self._past_actions if act.ability}
for y, cmd in enumerate(sorted(self._abilities(
lambda c: c.name != "Smart"), key=lambda c: c.name), start=2):
if self._queued_action and cmd == self._queued_action:
color = colors.green
elif self._queued_hotkey and cmd.hotkey.startswith(self._queued_hotkey):
color = colors.green * 0.75
elif cmd.ability_id in past_abilities:
color = colors.red
else:
color = colors.yellow
hotkey = cmd.hotkey[0:3] # truncate "escape" -> "esc"
surf.write_screen(self._font_large, color, (0.2, y), hotkey)
surf.write_screen(self._font_large, color, (3, y), cmd.name) | Draw the list of available commands. | Below is the instruction that describes the task:
### Input:
Draw the list of available commands.
### Response:
def draw_commands(self, surf):
"""Draw the list of available commands."""
past_abilities = {act.ability for act in self._past_actions if act.ability}
for y, cmd in enumerate(sorted(self._abilities(
lambda c: c.name != "Smart"), key=lambda c: c.name), start=2):
if self._queued_action and cmd == self._queued_action:
color = colors.green
elif self._queued_hotkey and cmd.hotkey.startswith(self._queued_hotkey):
color = colors.green * 0.75
elif cmd.ability_id in past_abilities:
color = colors.red
else:
color = colors.yellow
hotkey = cmd.hotkey[0:3] # truncate "escape" -> "esc"
surf.write_screen(self._font_large, color, (0.2, y), hotkey)
surf.write_screen(self._font_large, color, (3, y), cmd.name) |
def flip(f):
"""Flip the order of positonal arguments of given function."""
ensure_callable(f)
result = lambda *args, **kwargs: f(*reversed(args), **kwargs)
functools.update_wrapper(result, f, ('__name__', '__module__'))
return result | Flip the order of positional arguments of given function. | Below is the instruction that describes the task:
### Input:
Flip the order of positional arguments of given function.
### Response:
def flip(f):
"""Flip the order of positonal arguments of given function."""
ensure_callable(f)
result = lambda *args, **kwargs: f(*reversed(args), **kwargs)
functools.update_wrapper(result, f, ('__name__', '__module__'))
return result |
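A self-contained usage sketch of flip; ensure_callable is re-implemented here as a plain callable() check because the original helper is not shown, and sub is a made-up example function:

import functools

def ensure_callable(f):
    # stand-in for the helper used above (an assumption, not the original)
    if not callable(f):
        raise TypeError("expected a callable")

def flip(f):
    ensure_callable(f)
    result = lambda *args, **kwargs: f(*reversed(args), **kwargs)
    functools.update_wrapper(result, f, ('__name__', '__module__'))
    return result

def sub(a, b):
    return a - b

rsub = flip(sub)
print(sub(10, 3), rsub(10, 3))  # 7 -7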
def delete_secret(namespace, name, apiserver_url=None, force=True):
'''
.. versionadded:: 2016.3.0
Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name.
CLI Example:
.. code-block:: bash
salt '*' k8s.delete_secret namespace_name secret_name
salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
# Try to get kubernetes master
apiserver_url = _guess_apiserver(apiserver_url)
if apiserver_url is None:
return False
# we need namespace to delete secret in it
if not _get_namespaces(apiserver_url, namespace):
return {'name': name, 'result': False,
'comment': "Namespace doesn't exists, can't delete anything there",
'changes': {}}
url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url,
namespace, name)
res = http.query(url, method='DELETE')
if res.get('body'):
ret['comment'] = "Removed secret {0} in {1} namespace".format(name,
namespace)
return ret | .. versionadded:: 2016.3.0
Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name.
CLI Example:
.. code-block:: bash
salt '*' k8s.delete_secret namespace_name secret_name
salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2016.3.0
Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name.
CLI Example:
.. code-block:: bash
salt '*' k8s.delete_secret namespace_name secret_name
salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local
### Response:
def delete_secret(namespace, name, apiserver_url=None, force=True):
'''
.. versionadded:: 2016.3.0
Delete kubernetes secret in the defined namespace. Namespace is the mandatory parameter as well as name.
CLI Example:
.. code-block:: bash
salt '*' k8s.delete_secret namespace_name secret_name
salt '*' k8s.delete_secret namespace_name secret_name http://kube-master.cluster.local
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
# Try to get kubernetes master
apiserver_url = _guess_apiserver(apiserver_url)
if apiserver_url is None:
return False
# we need namespace to delete secret in it
if not _get_namespaces(apiserver_url, namespace):
return {'name': name, 'result': False,
'comment': "Namespace doesn't exists, can't delete anything there",
'changes': {}}
url = "{0}/api/v1/namespaces/{1}/secrets/{2}".format(apiserver_url,
namespace, name)
res = http.query(url, method='DELETE')
if res.get('body'):
ret['comment'] = "Removed secret {0} in {1} namespace".format(name,
namespace)
return ret |
def fcsp_sa_fcsp_auth_proto_auth_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcsp_sa = ET.SubElement(config, "fcsp-sa", xmlns="urn:brocade.com:mgmt:brocade-fc-auth")
fcsp = ET.SubElement(fcsp_sa, "fcsp")
auth = ET.SubElement(fcsp, "auth")
proto = ET.SubElement(auth, "proto")
auth_type = ET.SubElement(proto, "auth-type")
auth_type.text = kwargs.pop('auth_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def fcsp_sa_fcsp_auth_proto_auth_type(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
fcsp_sa = ET.SubElement(config, "fcsp-sa", xmlns="urn:brocade.com:mgmt:brocade-fc-auth")
fcsp = ET.SubElement(fcsp_sa, "fcsp")
auth = ET.SubElement(fcsp, "auth")
proto = ET.SubElement(auth, "proto")
auth_type = ET.SubElement(proto, "auth-type")
auth_type.text = kwargs.pop('auth_type')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
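The same ElementTree nesting pattern in a minimal, Brocade-independent form; the element names and namespace below are invented for illustration:

import xml.etree.ElementTree as ET

config = ET.Element("config")
outer = ET.SubElement(config, "outer", xmlns="urn:example:namespace")
inner = ET.SubElement(outer, "inner")
leaf = ET.SubElement(inner, "leaf")
leaf.text = "some-value"
# Serialise to see the nested document that would be handed to the callback
print(ET.tostring(config).decode())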
def getInstalledThemes(self, store):
"""
Collect themes from all offerings installed on this store, or (if called
multiple times) return the previously collected list.
"""
if not store in self._getInstalledThemesCache:
self._getInstalledThemesCache[store] = (self.
_realGetInstalledThemes(store))
return self._getInstalledThemesCache[store] | Collect themes from all offerings installed on this store, or (if called
multiple times) return the previously collected list. | Below is the instruction that describes the task:
### Input:
Collect themes from all offerings installed on this store, or (if called
multiple times) return the previously collected list.
### Response:
def getInstalledThemes(self, store):
"""
Collect themes from all offerings installed on this store, or (if called
multiple times) return the previously collected list.
"""
if not store in self._getInstalledThemesCache:
self._getInstalledThemesCache[store] = (self.
_realGetInstalledThemes(store))
return self._getInstalledThemesCache[store] |
async def insert(self, **kwargs):
"""
Accepts a request object, retrieves data from its body
and creates a new account.
"""
if kwargs:
# Create autoincrement for account
pk = await self.autoincrement()
kwargs.update({"id": pk})
# Create account with received data and autoincrement
await self.collection.insert_one(kwargs)
row = await self.collection.find_one({"id": pk})
else:
row = None
if row:
return {i:row[i] for i in row if i != "_id"}
else:
return {"error":500,
"reason":"Not created"} | Accepts request object, retrieves data from the one`s body
and creates new account. | Below is the the instruction that describes the task:
### Input:
Accepts a request object, retrieves data from its body
and creates a new account.
### Response:
async def insert(self, **kwargs):
"""
Accepts a request object, retrieves data from its body
and creates a new account.
"""
if kwargs:
# Create autoincrement for account
pk = await self.autoincrement()
kwargs.update({"id": pk})
# Create account with received data and autoincrement
await self.collection.insert_one(kwargs)
row = await self.collection.find_one({"id": pk})
else:
row = None
if row:
return {i:row[i] for i in row if i != "_id"}
else:
return {"error":500,
"reason":"Not created"} |
def in_op(self, other):
'''checks if self is in other'''
if not is_object(other):
raise MakeError(
'TypeError',
"You can\'t use 'in' operator to search in non-objects")
return other.has_property(to_string(self)) | checks if self is in other | Below is the instruction that describes the task:
### Input:
checks if self is in other
### Response:
def in_op(self, other):
'''checks if self is in other'''
if not is_object(other):
raise MakeError(
'TypeError',
"You can\'t use 'in' operator to search in non-objects")
return other.has_property(to_string(self)) |
def element_count(self):
"""Retrieve the number of elements in this type.
Returns an int.
If the Type is not an array or vector, this raises.
"""
result = conf.lib.clang_getNumElements(self)
if result < 0:
raise Exception('Type does not have elements.')
return result | Retrieve the number of elements in this type.
Returns an int.
If the Type is not an array or vector, this raises. | Below is the instruction that describes the task:
### Input:
Retrieve the number of elements in this type.
Returns an int.
If the Type is not an array or vector, this raises.
### Response:
def element_count(self):
"""Retrieve the number of elements in this type.
Returns an int.
If the Type is not an array or vector, this raises.
"""
result = conf.lib.clang_getNumElements(self)
if result < 0:
raise Exception('Type does not have elements.')
return result |
def calc_adc_params(self):
"""
Compute appropriate adc_gain and baseline parameters for adc
conversion, given the physical signal and the fmts.
Returns
-------
adc_gains : list
List of calculated `adc_gain` values for each channel.
baselines : list
List of calculated `baseline` values for each channel.
Notes
-----
This is the mapping equation:
`(digital - baseline) / adc_gain = physical`
`physical * adc_gain + baseline = digital`
The original WFDB library stores `baseline` as int32.
Constrain abs(adc_gain) <= 2**31 == 2147483648
This function does carefully deal with overflow for calculated
int32 `baseline` values, but does not consider over/underflow
for calculated float `adc_gain` values.
"""
adc_gains = []
baselines = []
if np.where(np.isinf(self.p_signal))[0].size:
raise ValueError('Signal contains inf. Cannot perform adc.')
# min and max ignoring nans, unless whole channel is nan.
# Should suppress warning message.
minvals = np.nanmin(self.p_signal, axis=0)
maxvals = np.nanmax(self.p_signal, axis=0)
for ch in range(np.shape(self.p_signal)[1]):
# Get the minimum and maximum (valid) storage values
dmin, dmax = _digi_bounds(self.fmt[ch])
# add 1 because the lowest value is used to store nans
dmin = dmin + 1
pmin = minvals[ch]
pmax = maxvals[ch]
# Figure out digital samples used to store physical samples
# If the entire signal is nan, gain/baseline won't be used
if np.isnan(pmin):
adc_gain = 1
baseline = 1
# If the signal is just one value, store one digital value.
elif pmin == pmax:
if pmin == 0:
adc_gain = 1
baseline = 1
else:
# All digital values are +1 or -1. Keep adc_gain > 0
adc_gain = abs(1 / pmin)
baseline = 0
# Regular varied signal case.
else:
# The equation is: p = (d - b) / g
# Approximately, pmax maps to dmax, and pmin maps to
# dmin. Gradient will be equal to, or close to
# delta(d) / delta(p), since intercept baseline has
# to be an integer.
# Constraint: baseline must be between +/- 2**31
adc_gain = (dmax-dmin) / (pmax-pmin)
baseline = dmin - adc_gain*pmin
# Make adjustments for baseline to be an integer
# This up/down round logic of baseline is to ensure
# there is no overshoot of dmax. Now pmax will map
# to dmax or dmax-1 which is also fine.
if pmin > 0:
baseline = int(np.ceil(baseline))
else:
baseline = int(np.floor(baseline))
# After baseline is set, adjust gain correspondingly.Set
# the gain to map pmin to dmin, and p==0 to baseline.
# In the case where pmin == 0 and dmin == baseline,
# adc_gain is already correct. Avoid dividing by 0.
if dmin != baseline:
adc_gain = (dmin - baseline) / pmin
# Remap signal if baseline exceeds boundaries.
# This may happen if pmax < 0
if baseline > MAX_I32:
# pmin maps to dmin, baseline maps to 2**31 - 1
# pmax will map to a lower value than before
adc_gain = (MAX_I32 - dmin) / abs(pmin)
baseline = MAX_I32
# This may happen if pmin > 0
elif baseline < MIN_I32:
# pmax maps to dmax, baseline maps to -2**31 + 1
adc_gain = (dmax - MIN_I32) / pmax
baseline = MIN_I32
adc_gains.append(adc_gain)
baselines.append(baseline)
return (adc_gains, baselines) | Compute appropriate adc_gain and baseline parameters for adc
conversion, given the physical signal and the fmts.
Returns
-------
adc_gains : list
List of calculated `adc_gain` values for each channel.
baselines : list
List of calculated `baseline` values for each channel.
Notes
-----
This is the mapping equation:
`(digital - baseline) / adc_gain = physical`
`physical * adc_gain + baseline = digital`
The original WFDB library stores `baseline` as int32.
Constrain abs(adc_gain) <= 2**31 == 2147483648
This function does carefully deal with overflow for calculated
int32 `baseline` values, but does not consider over/underflow
for calculated float `adc_gain` values. | Below is the instruction that describes the task:
### Input:
Compute appropriate adc_gain and baseline parameters for adc
conversion, given the physical signal and the fmts.
Returns
-------
adc_gains : list
List of calculated `adc_gain` values for each channel.
baselines : list
List of calculated `baseline` values for each channel.
Notes
-----
This is the mapping equation:
`(digital - baseline) / adc_gain = physical`
`physical * adc_gain + baseline = digital`
The original WFDB library stores `baseline` as int32.
Constrain abs(adc_gain) <= 2**31 == 2147483648
This function does carefully deal with overflow for calculated
int32 `baseline` values, but does not consider over/underflow
for calculated float `adc_gain` values.
### Response:
def calc_adc_params(self):
"""
Compute appropriate adc_gain and baseline parameters for adc
conversion, given the physical signal and the fmts.
Returns
-------
adc_gains : list
List of calculated `adc_gain` values for each channel.
baselines : list
List of calculated `baseline` values for each channel.
Notes
-----
This is the mapping equation:
`(digital - baseline) / adc_gain = physical`
`physical * adc_gain + baseline = digital`
The original WFDB library stores `baseline` as int32.
Constrain abs(adc_gain) <= 2**31 == 2147483648
This function does carefully deal with overflow for calculated
int32 `baseline` values, but does not consider over/underflow
for calculated float `adc_gain` values.
"""
adc_gains = []
baselines = []
if np.where(np.isinf(self.p_signal))[0].size:
raise ValueError('Signal contains inf. Cannot perform adc.')
# min and max ignoring nans, unless whole channel is nan.
# Should suppress warning message.
minvals = np.nanmin(self.p_signal, axis=0)
maxvals = np.nanmax(self.p_signal, axis=0)
for ch in range(np.shape(self.p_signal)[1]):
# Get the minimum and maximum (valid) storage values
dmin, dmax = _digi_bounds(self.fmt[ch])
# add 1 because the lowest value is used to store nans
dmin = dmin + 1
pmin = minvals[ch]
pmax = maxvals[ch]
# Figure out digital samples used to store physical samples
# If the entire signal is nan, gain/baseline won't be used
if np.isnan(pmin):
adc_gain = 1
baseline = 1
# If the signal is just one value, store one digital value.
elif pmin == pmax:
if pmin == 0:
adc_gain = 1
baseline = 1
else:
# All digital values are +1 or -1. Keep adc_gain > 0
adc_gain = abs(1 / pmin)
baseline = 0
# Regular varied signal case.
else:
# The equation is: p = (d - b) / g
# Approximately, pmax maps to dmax, and pmin maps to
# dmin. Gradient will be equal to, or close to
# delta(d) / delta(p), since intercept baseline has
# to be an integer.
# Constraint: baseline must be between +/- 2**31
adc_gain = (dmax-dmin) / (pmax-pmin)
baseline = dmin - adc_gain*pmin
# Make adjustments for baseline to be an integer
# This up/down round logic of baseline is to ensure
# there is no overshoot of dmax. Now pmax will map
# to dmax or dmax-1 which is also fine.
if pmin > 0:
baseline = int(np.ceil(baseline))
else:
baseline = int(np.floor(baseline))
# After baseline is set, adjust gain correspondingly.Set
# the gain to map pmin to dmin, and p==0 to baseline.
# In the case where pmin == 0 and dmin == baseline,
# adc_gain is already correct. Avoid dividing by 0.
if dmin != baseline:
adc_gain = (dmin - baseline) / pmin
# Remap signal if baseline exceeds boundaries.
# This may happen if pmax < 0
if baseline > MAX_I32:
# pmin maps to dmin, baseline maps to 2**31 - 1
# pmax will map to a lower value than before
adc_gain = (MAX_I32 - dmin) / abs(pmin)
baseline = MAX_I32
# This may happen if pmin > 0
elif baseline < MIN_I32:
# pmax maps to dmax, baseline maps to -2**31 + 1
adc_gain = (dmax - MIN_I32) / pmax
baseline = MIN_I32
adc_gains.append(adc_gain)
baselines.append(baseline)
return (adc_gains, baselines) |
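A small numeric round trip through the mapping equation described above; the gain and baseline values are arbitrary, not ones the method would necessarily compute:

import numpy as np

p_signal = np.array([-1.5, 0.0, 2.5])
adc_gain, baseline = 200.0, 1024
digital = np.round(p_signal * adc_gain + baseline)   # physical * adc_gain + baseline = digital
recovered = (digital - baseline) / adc_gain          # (digital - baseline) / adc_gain = physical
print(digital, recovered)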
def get_related_flat(self, content_id, min_strength=None):
'''Follow coreference relationships to get full related graph.
This differs from ``get_related_coref_relationships`` in that
it returns a flat list of all identifiers found through the
coreference layer of indirection.
:rtype: list of identifiers
'''
rel_id_to_idents = self.get_related_coref_relationships(
content_id, min_strength=min_strength)
flat_list = []
for val in rel_id_to_idents.values():
flat_list.extend(val)
return flat_list | Follow coreference relationships to get full related graph.
This differs from ``get_related_coref_relationships`` in that
it returns a flat list of all identifiers found through the
coreference layer of indirection.
:rtype: list of identifiers | Below is the instruction that describes the task:
### Input:
Follow coreference relationships to get full related graph.
This differs from ``get_related_coref_relationships`` in that
it returns a flat list of all identifiers found through the
coreference layer of indirection.
:rtype: list of identifiers
### Response:
def get_related_flat(self, content_id, min_strength=None):
'''Follow coreference relationships to get full related graph.
This differs from ``get_related_coref_relationships`` in that
it returns a flat list of all identifiers found through the
coreference layer of indirection.
:rtype: list of identifiers
'''
rel_id_to_idents = self.get_related_coref_relationships(
content_id, min_strength=min_strength)
flat_list = []
for val in rel_id_to_idents.values():
flat_list.extend(val)
return flat_list |
def create_http_monitor(self, topics, transport_url, transport_token=None, transport_method='PUT', connect_timeout=0,
response_timeout=0, batch_size=1, batch_duration=0, compression='none', format_type='json'):
"""Creates a HTTP Monitor instance in Device Cloud for a given list of topics
:param topics: a string list of topics (e.g. ['DeviceCore[U]',
'FileDataCore']).
:param transport_url: URL of the customer web server.
:param transport_token: Credentials for basic authentication in the following format: username:password
:param transport_method: HTTP method to use for sending data: PUT or POST. The default is PUT.
:param connect_timeout: A value of 0 means use the system default of 5000 (5 seconds).
:param response_timeout: A value of 0 means use the system default of 5000 (5 seconds).
:param batch_size: How many Msgs received before sending data.
:param batch_duration: How long to wait before sending batch if it
does not exceed batch_size.
:param compression: Compression value (i.e. 'gzip').
:param format_type: What format server should send data in (i.e. 'xml' or 'json').
Returns an object of the created Monitor
"""
monitor_xml = """\
<Monitor>
<monTopic>{topics}</monTopic>
<monBatchSize>{batch_size}</monBatchSize>
<monFormatType>{format_type}</monFormatType>
<monTransportType>http</monTransportType>
<monTransportUrl>{transport_url}</monTransportUrl>
<monTransportToken>{transport_token}</monTransportToken>
<monTransportMethod>{transport_method}</monTransportMethod>
<monConnectTimeout>{connect_timeout}</monConnectTimeout>
<monResponseTimeout>{response_timeout}</monResponseTimeout>
<monCompression>{compression}</monCompression>
</Monitor>
""".format(
topics=','.join(topics),
transport_url=transport_url,
transport_token=transport_token,
transport_method=transport_method,
connect_timeout=connect_timeout,
response_timeout=response_timeout,
batch_size=batch_size,
batch_duration=batch_duration,
format_type=format_type,
compression=compression,
)
monitor_xml = textwrap.dedent(monitor_xml)
response = self._conn.post("/ws/Monitor", monitor_xml)
location = ET.fromstring(response.text).find('.//location').text
monitor_id = int(location.split('/')[-1])
return HTTPDeviceCloudMonitor(self._conn, monitor_id) | Creates a HTTP Monitor instance in Device Cloud for a given list of topics
:param topics: a string list of topics (e.g. ['DeviceCore[U]',
'FileDataCore']).
:param transport_url: URL of the customer web server.
:param transport_token: Credentials for basic authentication in the following format: username:password
:param transport_method: HTTP method to use for sending data: PUT or POST. The default is PUT.
:param connect_timeout: A value of 0 means use the system default of 5000 (5 seconds).
:param response_timeout: A value of 0 means use the system default of 5000 (5 seconds).
:param batch_size: How many Msgs received before sending data.
:param batch_duration: How long to wait before sending batch if it
does not exceed batch_size.
:param compression: Compression value (i.e. 'gzip').
:param format_type: What format server should send data in (i.e. 'xml' or 'json').
Returns an object of the created Monitor | Below is the instruction that describes the task:
### Input:
Creates a HTTP Monitor instance in Device Cloud for a given list of topics
:param topics: a string list of topics (e.g. ['DeviceCore[U]',
'FileDataCore']).
:param transport_url: URL of the customer web server.
:param transport_token: Credentials for basic authentication in the following format: username:password
:param transport_method: HTTP method to use for sending data: PUT or POST. The default is PUT.
:param connect_timeout: A value of 0 means use the system default of 5000 (5 seconds).
:param response_timeout: A value of 0 means use the system default of 5000 (5 seconds).
:param batch_size: How many Msgs received before sending data.
:param batch_duration: How long to wait before sending batch if it
does not exceed batch_size.
:param compression: Compression value (i.e. 'gzip').
:param format_type: What format server should send data in (i.e. 'xml' or 'json').
Returns an object of the created Monitor
### Response:
def create_http_monitor(self, topics, transport_url, transport_token=None, transport_method='PUT', connect_timeout=0,
response_timeout=0, batch_size=1, batch_duration=0, compression='none', format_type='json'):
"""Creates a HTTP Monitor instance in Device Cloud for a given list of topics
:param topics: a string list of topics (e.g. ['DeviceCore[U]',
'FileDataCore']).
:param transport_url: URL of the customer web server.
:param transport_token: Credentials for basic authentication in the following format: username:password
:param transport_method: HTTP method to use for sending data: PUT or POST. The default is PUT.
:param connect_timeout: A value of 0 means use the system default of 5000 (5 seconds).
:param response_timeout: A value of 0 means use the system default of 5000 (5 seconds).
:param batch_size: How many Msgs received before sending data.
:param batch_duration: How long to wait before sending batch if it
does not exceed batch_size.
:param compression: Compression value (i.e. 'gzip').
:param format_type: What format server should send data in (i.e. 'xml' or 'json').
Returns an object of the created Monitor
"""
monitor_xml = """\
<Monitor>
<monTopic>{topics}</monTopic>
<monBatchSize>{batch_size}</monBatchSize>
<monFormatType>{format_type}</monFormatType>
<monTransportType>http</monTransportType>
<monTransportUrl>{transport_url}</monTransportUrl>
<monTransportToken>{transport_token}</monTransportToken>
<monTransportMethod>{transport_method}</monTransportMethod>
<monConnectTimeout>{connect_timeout}</monConnectTimeout>
<monResponseTimeout>{response_timeout}</monResponseTimeout>
<monCompression>{compression}</monCompression>
</Monitor>
""".format(
topics=','.join(topics),
transport_url=transport_url,
transport_token=transport_token,
transport_method=transport_method,
connect_timeout=connect_timeout,
response_timeout=response_timeout,
batch_size=batch_size,
batch_duration=batch_duration,
format_type=format_type,
compression=compression,
)
monitor_xml = textwrap.dedent(monitor_xml)
response = self._conn.post("/ws/Monitor", monitor_xml)
location = ET.fromstring(response.text).find('.//location').text
monitor_id = int(location.split('/')[-1])
return HTTPDeviceCloudMonitor(self._conn, monitor_id) |
def tile(self, bbox, z=0, format=None, clip=True):
"""Returns a GeoQuerySet intersecting a tile boundary.
Arguments:
bbox -- tile extent as geometry
Keyword args:
z -- tile zoom level used as basis for geometry simplification
format -- vector tile format as str (pbf, geojson)
clip -- clip geometries to tile boundary as boolean
"""
# Tile grid uses 3857, but GeoJSON coordinates should be in 4326.
tile_srid = 3857
bbox = getattr(bbox, 'geos', bbox)
clone = filter_geometry(self, intersects=bbox)
field = clone.geo_field
srid = field.srid
sql = field.name
try:
tilew = self.tilewidths[z]
except IndexError:
tilew = self.tilewidths[-1]
if bbox.srid != srid:
bbox = bbox.transform(srid, clone=True)
# Estimate tile width in degrees instead of meters.
if bbox.srs.geographic:
p = geos.Point(tilew, tilew, srid=tile_srid)
p.transform(srid)
tilew = p.x
if clip:
bufbox = bbox.buffer(tilew)
sql = geofn.Intersection(sql, bufbox.envelope)
sql = SimplifyPreserveTopology(sql, tilew)
if format == 'pbf':
return clone.pbf(bbox, geo_col=sql)
sql = geofn.Transform(sql, 4326)
return clone.annotate(**{format: sql}) | Returns a GeoQuerySet intersecting a tile boundary.
Arguments:
bbox -- tile extent as geometry
Keyword args:
z -- tile zoom level used as basis for geometry simplification
format -- vector tile format as str (pbf, geojson)
clip -- clip geometries to tile boundary as boolean | Below is the instruction that describes the task:
### Input:
Returns a GeoQuerySet intersecting a tile boundary.
Arguments:
bbox -- tile extent as geometry
Keyword args:
z -- tile zoom level used as basis for geometry simplification
format -- vector tile format as str (pbf, geojson)
clip -- clip geometries to tile boundary as boolean
### Response:
def tile(self, bbox, z=0, format=None, clip=True):
"""Returns a GeoQuerySet intersecting a tile boundary.
Arguments:
bbox -- tile extent as geometry
Keyword args:
z -- tile zoom level used as basis for geometry simplification
format -- vector tile format as str (pbf, geojson)
clip -- clip geometries to tile boundary as boolean
"""
# Tile grid uses 3857, but GeoJSON coordinates should be in 4326.
tile_srid = 3857
bbox = getattr(bbox, 'geos', bbox)
clone = filter_geometry(self, intersects=bbox)
field = clone.geo_field
srid = field.srid
sql = field.name
try:
tilew = self.tilewidths[z]
except IndexError:
tilew = self.tilewidths[-1]
if bbox.srid != srid:
bbox = bbox.transform(srid, clone=True)
# Estimate tile width in degrees instead of meters.
if bbox.srs.geographic:
p = geos.Point(tilew, tilew, srid=tile_srid)
p.transform(srid)
tilew = p.x
if clip:
bufbox = bbox.buffer(tilew)
sql = geofn.Intersection(sql, bufbox.envelope)
sql = SimplifyPreserveTopology(sql, tilew)
if format == 'pbf':
return clone.pbf(bbox, geo_col=sql)
sql = geofn.Transform(sql, 4326)
return clone.annotate(**{format: sql}) |
def check_required_fields(self, ignore_fields=list(), allow_no_resources=False):
# type: (List[str], bool) -> None
"""Check that metadata for dataset and its resources is complete. The parameter ignore_fields
should be set if required to any fields that should be ignored for the particular operation.
Args:
ignore_fields (List[str]): Fields to ignore. Default is [].
allow_no_resources (bool): Whether to allow no resources. Defaults to False.
Returns:
None
"""
if self.is_requestable():
self._check_required_fields('dataset-requestable', ignore_fields)
else:
self._check_required_fields('dataset', ignore_fields)
if len(self.resources) == 0 and not allow_no_resources:
raise HDXError('There are no resources! Please add at least one resource!')
for resource in self.resources:
ignore_fields = ['package_id']
resource.check_required_fields(ignore_fields=ignore_fields) | Check that metadata for dataset and its resources is complete. The parameter ignore_fields
should be set if required to any fields that should be ignored for the particular operation.
Args:
ignore_fields (List[str]): Fields to ignore. Default is [].
allow_no_resources (bool): Whether to allow no resources. Defaults to False.
Returns:
None | Below is the instruction that describes the task:
### Input:
Check that metadata for dataset and its resources is complete. The parameter ignore_fields
should be set if required to any fields that should be ignored for the particular operation.
Args:
ignore_fields (List[str]): Fields to ignore. Default is [].
allow_no_resources (bool): Whether to allow no resources. Defaults to False.
Returns:
None
### Response:
def check_required_fields(self, ignore_fields=list(), allow_no_resources=False):
# type: (List[str], bool) -> None
"""Check that metadata for dataset and its resources is complete. The parameter ignore_fields
should be set if required to any fields that should be ignored for the particular operation.
Args:
ignore_fields (List[str]): Fields to ignore. Default is [].
allow_no_resources (bool): Whether to allow no resources. Defaults to False.
Returns:
None
"""
if self.is_requestable():
self._check_required_fields('dataset-requestable', ignore_fields)
else:
self._check_required_fields('dataset', ignore_fields)
if len(self.resources) == 0 and not allow_no_resources:
raise HDXError('There are no resources! Please add at least one resource!')
for resource in self.resources:
ignore_fields = ['package_id']
resource.check_required_fields(ignore_fields=ignore_fields) |
def make_session(self):
"""Authenticate and get the name of assigned SFDC data server"""
with connect_lock:
if self._sf_session is None:
sf_session = requests.Session()
# TODO configurable class Salesforce***Auth
sf_session.auth = SalesforcePasswordAuth(db_alias=self.alias,
settings_dict=self.settings_dict)
sf_instance_url = sf_session.auth.instance_url
sf_requests_adapter = HTTPAdapter(max_retries=get_max_retries())
sf_session.mount(sf_instance_url, sf_requests_adapter)
# Additional headers work, but the same are added automatically by the 'requests' package.
# sf_session.header = {'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive'} # TODO
self._sf_session = sf_session | Authenticate and get the name of assigned SFDC data server | Below is the instruction that describes the task:
### Input:
Authenticate and get the name of assigned SFDC data server
### Response:
def make_session(self):
"""Authenticate and get the name of assigned SFDC data server"""
with connect_lock:
if self._sf_session is None:
sf_session = requests.Session()
# TODO configurable class Salesforce***Auth
sf_session.auth = SalesforcePasswordAuth(db_alias=self.alias,
settings_dict=self.settings_dict)
sf_instance_url = sf_session.auth.instance_url
sf_requests_adapter = HTTPAdapter(max_retries=get_max_retries())
sf_session.mount(sf_instance_url, sf_requests_adapter)
# Additional headers work, but the same are added automatically by "requests' package.
# sf_session.header = {'accept-encoding': 'gzip, deflate', 'connection': 'keep-alive'} # TODO
self._sf_session = sf_session |
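A generic sketch of the Session/HTTPAdapter mounting pattern used above, without the Salesforce-specific authenticator; the retry count and URL prefix are placeholders:

import requests
from requests.adapters import HTTPAdapter

session = requests.Session()
adapter = HTTPAdapter(max_retries=3)
# Requests whose URL starts with this prefix are routed through the adapter
session.mount("https://example.my.salesforce.com", adapter)
print(session.adapters)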
def _load_params_of(self, effect):
"""
Called only when an effect has been created
Param changes call :meth:`~pluginsmanager.observer.host_observer.host_observer.HostObserver.on_param_value_changed()`
"""
for param in effect.params:
if param.value != param.default:
self._set_param_value(param) | Called only when an effect has been created
Param changes call :meth:`~pluginsmanager.observer.host_observer.host_observer.HostObserver.on_param_value_changed()` | Below is the instruction that describes the task:
### Input:
Called only when an effect has been created
Param changes call :meth:`~pluginsmanager.observer.host_observer.host_observer.HostObserver.on_param_value_changed()`
### Response:
def _load_params_of(self, effect):
"""
Called only when an effect has been created
Param changes call :meth:`~pluginsmanager.observer.host_observer.host_observer.HostObserver.on_param_value_changed()`
"""
for param in effect.params:
if param.value != param.default:
self._set_param_value(param) |
def get_object(self, name, obj):
"""
:param name: -- string name of backend
:type name: str
:param obj: -- model object
:type obj: django.db.models.Model
:return: backend object
:rtype: object
"""
return self[name](obj, **self.opts(name)) | :param name: -- string name of backend
:type name: str
:param obj: -- model object
:type obj: django.db.models.Model
:return: backend object
:rtype: object | Below is the instruction that describes the task:
### Input:
:param name: -- string name of backend
:type name: str
:param obj: -- model object
:type obj: django.db.models.Model
:return: backend object
:rtype: object
### Response:
def get_object(self, name, obj):
"""
:param name: -- string name of backend
:type name: str
:param obj: -- model object
:type obj: django.db.models.Model
:return: backend object
:rtype: object
"""
return self[name](obj, **self.opts(name)) |
def run_loop(leds=all_leds):
"""
Start the loop.
:param `leds`: Which LEDs to light up upon switch press.
:type `leds`: sequence of LED objects
"""
print('Loop started.\nPress Ctrl+C to break out of the loop.')
while 1:
try:
if switch():
[led.on() for led in leds]
else:
[led.off() for led in leds]
except OSError: # VCPInterrupt # Ctrl+C in interpreter mode.
break | Start the loop.
:param `leds`: Which LEDs to light up upon switch press.
:type `leds`: sequence of LED objects | Below is the instruction that describes the task:
### Input:
Start the loop.
:param `leds`: Which LEDs to light up upon switch press.
:type `leds`: sequence of LED objects
### Response:
def run_loop(leds=all_leds):
"""
Start the loop.
:param `leds`: Which LEDs to light up upon switch press.
:type `leds`: sequence of LED objects
"""
print('Loop started.\nPress Ctrl+C to break out of the loop.')
while 1:
try:
if switch():
[led.on() for led in leds]
else:
[led.off() for led in leds]
except OSError: # VCPInterrupt # Ctrl+C in interpreter mode.
break |
def translate(script):
'''translate zipline script into pylivetrader script.
'''
tree = ast.parse(script)
ZiplineImportVisitor().visit(tree)
return astor.to_source(tree) | translate zipline script into pylivetrader script. | Below is the instruction that describes the task:
### Input:
translate zipline script into pylivetrader script.
### Response:
def translate(script):
'''translate zipline script into pylivetrader script.
'''
tree = ast.parse(script)
ZiplineImportVisitor().visit(tree)
return astor.to_source(tree) |
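A tiny sketch of the parse -> visit -> regenerate round trip, with a no-op visitor standing in for ZiplineImportVisitor (astor must be installed; the sample source line is made up):

import ast
import astor

class NoopVisitor(ast.NodeVisitor):
    # placeholder for ZiplineImportVisitor, which rewrites zipline imports
    pass

source = "from zipline.api import order_target\n"
tree = ast.parse(source)
NoopVisitor().visit(tree)
print(astor.to_source(tree))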
def SetupDisplayDevice(self, type, state, percentage, energy, energy_full,
energy_rate, time_to_empty, time_to_full, is_present,
icon_name, warning_level):
'''Convenience method to configure DisplayDevice properties
This calls Set() for all properties that the DisplayDevice is defined to
have, and is shorter if you have to completely set it up instead of
changing just one or two properties.
This is only available when mocking the 1.0 API.
'''
if not self.api1:
raise dbus.exceptions.DBusException(
'SetupDisplayDevice() can only be used with the 1.0 API',
name=MOCK_IFACE + '.APIVersion')
display_props = mockobject.objects[self.p_display_dev]
display_props.Set(DEVICE_IFACE, 'Type',
dbus.UInt32(type))
display_props.Set(DEVICE_IFACE, 'State',
dbus.UInt32(state))
display_props.Set(DEVICE_IFACE, 'Percentage',
percentage)
display_props.Set(DEVICE_IFACE, 'Energy', energy)
display_props.Set(DEVICE_IFACE, 'EnergyFull',
energy_full)
display_props.Set(DEVICE_IFACE, 'EnergyRate',
energy_rate)
display_props.Set(DEVICE_IFACE, 'TimeToEmpty',
dbus.Int64(time_to_empty))
display_props.Set(DEVICE_IFACE, 'TimeToFull',
dbus.Int64(time_to_full))
display_props.Set(DEVICE_IFACE, 'IsPresent',
is_present)
display_props.Set(DEVICE_IFACE, 'IconName',
icon_name)
display_props.Set(DEVICE_IFACE, 'WarningLevel',
dbus.UInt32(warning_level)) | Convenience method to configure DisplayDevice properties
This calls Set() for all properties that the DisplayDevice is defined to
have, and is shorter if you have to completely set it up instead of
changing just one or two properties.
This is only available when mocking the 1.0 API. | Below is the instruction that describes the task:
### Input:
Convenience method to configure DisplayDevice properties
This calls Set() for all properties that the DisplayDevice is defined to
have, and is shorter if you have to completely set it up instead of
changing just one or two properties.
This is only available when mocking the 1.0 API.
### Response:
def SetupDisplayDevice(self, type, state, percentage, energy, energy_full,
energy_rate, time_to_empty, time_to_full, is_present,
icon_name, warning_level):
'''Convenience method to configure DisplayDevice properties
This calls Set() for all properties that the DisplayDevice is defined to
have, and is shorter if you have to completely set it up instead of
changing just one or two properties.
This is only available when mocking the 1.0 API.
'''
if not self.api1:
raise dbus.exceptions.DBusException(
'SetupDisplayDevice() can only be used with the 1.0 API',
name=MOCK_IFACE + '.APIVersion')
display_props = mockobject.objects[self.p_display_dev]
display_props.Set(DEVICE_IFACE, 'Type',
dbus.UInt32(type))
display_props.Set(DEVICE_IFACE, 'State',
dbus.UInt32(state))
display_props.Set(DEVICE_IFACE, 'Percentage',
percentage)
display_props.Set(DEVICE_IFACE, 'Energy', energy)
display_props.Set(DEVICE_IFACE, 'EnergyFull',
energy_full)
display_props.Set(DEVICE_IFACE, 'EnergyRate',
energy_rate)
display_props.Set(DEVICE_IFACE, 'TimeToEmpty',
dbus.Int64(time_to_empty))
display_props.Set(DEVICE_IFACE, 'TimeToFull',
dbus.Int64(time_to_full))
display_props.Set(DEVICE_IFACE, 'IsPresent',
is_present)
display_props.Set(DEVICE_IFACE, 'IconName',
icon_name)
display_props.Set(DEVICE_IFACE, 'WarningLevel',
dbus.UInt32(warning_level)) |
def orient_averaged_adaptive(tm):
"""Compute the T-matrix using variable orientation scatterers.
This method uses a very slow adaptive routine and should mainly be used
for reference purposes. Uses the set particle orientation PDF, ignoring
the alpha and beta attributes.
Args:
tm: TMatrix (or descendant) instance
Returns:
The amplitude (S) and phase (Z) matrices.
"""
S = np.zeros((2,2), dtype=complex)
Z = np.zeros((4,4))
def Sfunc(beta, alpha, i, j, real):
(S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
s = S_ang[i,j].real if real else S_ang[i,j].imag
return s * tm.or_pdf(beta)
ind = range(2)
for i in ind:
for j in ind:
S.real[i,j] = dblquad(Sfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j,True))[0]/360.0
S.imag[i,j] = dblquad(Sfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j,False))[0]/360.0
def Zfunc(beta, alpha, i, j):
(S_and, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
return Z_ang[i,j] * tm.or_pdf(beta)
ind = range(4)
for i in ind:
for j in ind:
Z[i,j] = dblquad(Zfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j))[0]/360.0
return (S, Z) | Compute the T-matrix using variable orientation scatterers.
This method uses a very slow adaptive routine and should mainly be used
for reference purposes. Uses the set particle orientation PDF, ignoring
the alpha and beta attributes.
Args:
tm: TMatrix (or descendant) instance
Returns:
The amplitude (S) and phase (Z) matrices. | Below is the instruction that describes the task:
### Input:
Compute the T-matrix using variable orientation scatterers.
This method uses a very slow adaptive routine and should mainly be used
for reference purposes. Uses the set particle orientation PDF, ignoring
the alpha and beta attributes.
Args:
tm: TMatrix (or descendant) instance
Returns:
The amplitude (S) and phase (Z) matrices.
### Response:
def orient_averaged_adaptive(tm):
"""Compute the T-matrix using variable orientation scatterers.
This method uses a very slow adaptive routine and should mainly be used
for reference purposes. Uses the set particle orientation PDF, ignoring
the alpha and beta attributes.
Args:
tm: TMatrix (or descendant) instance
Returns:
The amplitude (S) and phase (Z) matrices.
"""
S = np.zeros((2,2), dtype=complex)
Z = np.zeros((4,4))
def Sfunc(beta, alpha, i, j, real):
(S_ang, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
s = S_ang[i,j].real if real else S_ang[i,j].imag
return s * tm.or_pdf(beta)
ind = range(2)
for i in ind:
for j in ind:
S.real[i,j] = dblquad(Sfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j,True))[0]/360.0
S.imag[i,j] = dblquad(Sfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j,False))[0]/360.0
def Zfunc(beta, alpha, i, j):
(S_and, Z_ang) = tm.get_SZ_single(alpha=alpha, beta=beta)
return Z_ang[i,j] * tm.or_pdf(beta)
ind = range(4)
for i in ind:
for j in ind:
Z[i,j] = dblquad(Zfunc, 0.0, 360.0,
lambda x: 0.0, lambda x: 180.0, (i,j))[0]/360.0
return (S, Z) |
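A toy dblquad version of the same orientation average, with a trivial integrand in place of the T-matrix amplitudes and an assumed uniform orientation PDF:

import numpy as np
from scipy.integrate import dblquad

def integrand(beta, alpha):
    return np.cos(np.radians(beta)) ** 2

def or_pdf(beta):
    return 1.0 / 180.0  # assumed uniform PDF over beta in [0, 180] degrees

avg = dblquad(lambda beta, alpha: integrand(beta, alpha) * or_pdf(beta),
              0.0, 360.0, lambda x: 0.0, lambda x: 180.0)[0] / 360.0
print(avg)  # ~0.5 for this integrand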
def create_knowledge_base(project_id, display_name):
"""Creates a Knowledge base.
Args:
project_id: The GCP project linked with the agent.
display_name: The display name of the Knowledge base."""
import dialogflow_v2beta1 as dialogflow
client = dialogflow.KnowledgeBasesClient()
project_path = client.project_path(project_id)
knowledge_base = dialogflow.types.KnowledgeBase(
display_name=display_name)
response = client.create_knowledge_base(project_path, knowledge_base)
print('Knowledge Base created:\n')
print('Display Name: {}\n'.format(response.display_name))
print('Knowledge ID: {}\n'.format(response.name)) | Creates a Knowledge base.
Args:
project_id: The GCP project linked with the agent.
display_name: The display name of the Knowledge base. | Below is the instruction that describes the task:
### Input:
Creates a Knowledge base.
Args:
project_id: The GCP project linked with the agent.
display_name: The display name of the Knowledge base.
### Response:
def create_knowledge_base(project_id, display_name):
"""Creates a Knowledge base.
Args:
project_id: The GCP project linked with the agent.
display_name: The display name of the Knowledge base."""
import dialogflow_v2beta1 as dialogflow
client = dialogflow.KnowledgeBasesClient()
project_path = client.project_path(project_id)
knowledge_base = dialogflow.types.KnowledgeBase(
display_name=display_name)
response = client.create_knowledge_base(project_path, knowledge_base)
print('Knowledge Base created:\n')
print('Display Name: {}\n'.format(response.display_name))
print('Knowledge ID: {}\n'.format(response.name)) |
def disable_servicegroup_passive_host_checks(self, servicegroup):
"""Disable passive host checks for a servicegroup
Format of the line that triggers function call::
DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;<servicegroup_name>
:param servicegroup: servicegroup to disable
:type servicegroup: alignak.objects.servicegroup.Servicegroup
:return: None
"""
for service_id in servicegroup.get_services():
if service_id in self.daemon.services:
host_id = self.daemon.services[service_id].host
self.disable_passive_host_checks(self.daemon.hosts[host_id]) | Disable passive host checks for a servicegroup
Format of the line that triggers function call::
DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;<servicegroup_name>
:param servicegroup: servicegroup to disable
:type servicegroup: alignak.objects.servicegroup.Servicegroup
:return: None | Below is the instruction that describes the task:
### Input:
Disable passive host checks for a servicegroup
Format of the line that triggers function call::
DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;<servicegroup_name>
:param servicegroup: servicegroup to disable
:type servicegroup: alignak.objects.servicegroup.Servicegroup
:return: None
### Response:
def disable_servicegroup_passive_host_checks(self, servicegroup):
"""Disable passive host checks for a servicegroup
Format of the line that triggers function call::
DISABLE_SERVICEGROUP_PASSIVE_HOST_CHECKS;<servicegroup_name>
:param servicegroup: servicegroup to disable
:type servicegroup: alignak.objects.servicegroup.Servicegroup
:return: None
"""
for service_id in servicegroup.get_services():
if service_id in self.daemon.services:
host_id = self.daemon.services[service_id].host
self.disable_passive_host_checks(self.daemon.hosts[host_id]) |
def prepare_environment(work_dir):
"""
Performs a few maintenance tasks before the Honeypot is run. Copies the data directory,
and the config file to the cwd. The config file copied here is overwritten if
the __init__ method is called with a configuration URL.
:param work_dir: The directory to copy files to.
"""
package_directory = os.path.dirname(os.path.abspath(beeswarm.__file__))
logger.info('Copying data files to workdir.')
shutil.copytree(os.path.join(package_directory, 'drones/honeypot/data'), os.path.join(work_dir, 'data/'),
ignore=Honeypot._ignore_copy_files) | Performs a few maintenance tasks before the Honeypot is run. Copies the data directory,
and the config file to the cwd. The config file copied here is overwritten if
the __init__ method is called with a configuration URL.
:param work_dir: The directory to copy files to. | Below is the instruction that describes the task:
### Input:
Performs a few maintenance tasks before the Honeypot is run. Copies the data directory,
and the config file to the cwd. The config file copied here is overwritten if
the __init__ method is called with a configuration URL.
:param work_dir: The directory to copy files to.
### Response:
def prepare_environment(work_dir):
"""
Performs a few maintenance tasks before the Honeypot is run. Copies the data directory,
and the config file to the cwd. The config file copied here is overwritten if
the __init__ method is called with a configuration URL.
:param work_dir: The directory to copy files to.
"""
package_directory = os.path.dirname(os.path.abspath(beeswarm.__file__))
logger.info('Copying data files to workdir.')
shutil.copytree(os.path.join(package_directory, 'drones/honeypot/data'), os.path.join(work_dir, 'data/'),
ignore=Honeypot._ignore_copy_files) |
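A minimal illustration of copytree's ignore callback, which is the role Honeypot._ignore_copy_files plays above; the patterns and paths here are assumptions:

import shutil

ignore = shutil.ignore_patterns('*.pyc', '__pycache__')
# The callable receives a directory and its entries and returns the names to skip
print(ignore('data', ['config.json', 'cache.pyc', '__pycache__']))
# shutil.copytree('drones/honeypot/data', 'workdir/data', ignore=ignore) would copy the rest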
def get_schedules_for_season(self, season, season_type="REG"):
"""
Game schedule for a specified season.
"""
try:
season = int(season)
if season_type not in ["REG", "PRE", "POST"]:
raise ValueError
except (ValueError, TypeError):
raise FantasyDataError('Error: Invalid method parameters')
season_param = "{0}{1}".format(season, season_type)
result = self._method_call("Schedules/{season}", "stats", season=season_param)
return result | Game schedule for a specified season. | Below is the instruction that describes the task:
### Input:
Game schedule for a specified season.
### Response:
def get_schedules_for_season(self, season, season_type="REG"):
"""
Game schedule for a specified season.
"""
try:
season = int(season)
if season_type not in ["REG", "PRE", "POST"]:
raise ValueError
except (ValueError, TypeError):
raise FantasyDataError('Error: Invalid method parameters')
season_param = "{0}{1}".format(season, season_type)
result = self._method_call("Schedules/{season}", "stats", season=season_param)
return result |
def _pwl1_to_poly(self, generators):
""" Converts single-block piecewise-linear costs into linear
polynomial.
"""
for g in generators:
if (g.pcost_model == PW_LINEAR) and (len(g.p_cost) == 2):
g.pwl_to_poly()
return generators | Converts single-block piecewise-linear costs into linear
polynomial. | Below is the instruction that describes the task:
### Input:
Converts single-block piecewise-linear costs into linear
polynomial.
### Response:
def _pwl1_to_poly(self, generators):
""" Converts single-block piecewise-linear costs into linear
polynomial.
"""
for g in generators:
if (g.pcost_model == PW_LINEAR) and (len(g.p_cost) == 2):
g.pwl_to_poly()
return generators |
def ensure_final_value(packageName, arsc, value):
"""Ensure incoming value is always the value, not the resid
androguard will sometimes return the Android "resId" aka
Resource ID instead of the actual value. This checks whether
the value is actually a resId, then performs the Android
Resource lookup as needed.
"""
if value:
returnValue = value
if value[0] == '@':
# TODO: @packagename:DEADBEEF is not supported here!
try: # can be a literal value or a resId
res_id = int('0x' + value[1:], 16)
res_id = arsc.get_id(packageName, res_id)[1]
returnValue = arsc.get_string(packageName, res_id)[1]
except (ValueError, TypeError):
pass
return returnValue
return '' | Ensure incoming value is always the value, not the resid
androguard will sometimes return the Android "resId" aka
Resource ID instead of the actual value. This checks whether
the value is actually a resId, then performs the Android
Resource lookup as needed. | Below is the instruction that describes the task:
### Input:
Ensure incoming value is always the value, not the resid
androguard will sometimes return the Android "resId" aka
Resource ID instead of the actual value. This checks whether
the value is actually a resId, then performs the Android
Resource lookup as needed.
### Response:
def ensure_final_value(packageName, arsc, value):
"""Ensure incoming value is always the value, not the resid
androguard will sometimes return the Android "resId" aka
Resource ID instead of the actual value. This checks whether
the value is actually a resId, then performs the Android
Resource lookup as needed.
"""
if value:
returnValue = value
if value[0] == '@':
# TODO: @packagename:DEADBEEF is not supported here!
try: # can be a literal value or a resId
res_id = int('0x' + value[1:], 16)
res_id = arsc.get_id(packageName, res_id)[1]
returnValue = arsc.get_string(packageName, res_id)[1]
except (ValueError, TypeError):
pass
return returnValue
return '' |
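The '@' resId parsing branch in isolation, without androguard; the hex value is made up:

value = '@7f0b0010'
res_id = None
if value and value[0] == '@':
    try:  # can be a literal value or a resId
        res_id = int('0x' + value[1:], 16)
    except (ValueError, TypeError):
        pass
print(hex(res_id) if res_id is not None else value)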
def skip_whitespace(self):
"""Consume input until a non-whitespace character is encountered.
The non-whitespace character is then ungotten, and the number of
whitespace characters consumed is returned.
If the tokenizer is in multiline mode, then newlines are whitespace.
@rtype: int
"""
skipped = 0
while True:
c = self._get_char()
if c != ' ' and c != '\t':
if (c != '\n') or not self.multiline:
self._unget_char(c)
return skipped
skipped += 1 | Consume input until a non-whitespace character is encountered.
The non-whitespace character is then ungotten, and the number of
whitespace characters consumed is returned.
If the tokenizer is in multiline mode, then newlines are whitespace.
@rtype: int | Below is the instruction that describes the task:
### Input:
Consume input until a non-whitespace character is encountered.
The non-whitespace character is then ungotten, and the number of
whitespace characters consumed is returned.
If the tokenizer is in multiline mode, then newlines are whitespace.
@rtype: int
### Response:
def skip_whitespace(self):
"""Consume input until a non-whitespace character is encountered.
The non-whitespace character is then ungotten, and the number of
whitespace characters consumed is returned.
If the tokenizer is in multiline mode, then newlines are whitespace.
@rtype: int
"""
skipped = 0
while True:
c = self._get_char()
if c != ' ' and c != '\t':
if (c != '\n') or not self.multiline:
self._unget_char(c)
return skipped
skipped += 1 |
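A rough standalone analogue of skip_whitespace over a plain string, with the _get_char/_unget_char machinery reduced to an index:

text = " \t  example ; rest"
pos = 0
skipped = 0
multiline = False
while pos < len(text):
    c = text[pos]
    if c != ' ' and c != '\t':
        if c != '\n' or not multiline:
            break
    pos += 1
    skipped += 1
print(skipped, repr(text[pos:]))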
def output(self, message, color=None):
"""
A helper used like print() or click's secho(), tunneling all the
outputs to sys.stdout or sys.stderr
:param message: (str)
:param color: (str) check click.secho() documentation
:return: (None) prints to sys.stdout or sys.stderr
"""
output_to = stderr if color == "red" else stdout
secho(self.indent(message), fg=color, file=output_to) | A helper used like print() or click's secho(), tunneling all the
outputs to sys.stdout or sys.stderr
:param message: (str)
:param color: (str) check click.secho() documentation
:return: (None) prints to sys.stdout or sys.stderr | Below is the instruction that describes the task:
### Input:
A helper used like print() or click's secho(), tunneling all the
outputs to sys.stdout or sys.stderr
:param message: (str)
:param color: (str) check click.secho() documentation
:return: (None) prints to sys.stdout or sys.stderr
### Response:
def output(self, message, color=None):
"""
A helper used like print() or click's secho(), tunneling all the
outputs to sys.stdout or sys.stderr
:param message: (str)
:param color: (str) check click.secho() documentation
:return: (None) prints to sys.stdout or sys.stderr
"""
output_to = stderr if color == "red" else stdout
secho(self.indent(message), fg=color, file=output_to) |
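A standalone sketch of the same red-to-stderr convention with click.secho (click must be installed); the indent() helper from the class is omitted and the messages are examples:

import sys
import click

def output(message, color=None):
    stream = sys.stderr if color == "red" else sys.stdout
    click.secho(message, fg=color, file=stream)

output("all good", color="green")
output("something failed", color="red")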
def scalarmult_B(e):
"""
Implements scalarmult(B, e) more efficiently.
"""
# scalarmult(B, l) is the identity
e %= L
P = IDENT
for i in range(253):
if e & 1:
P = edwards_add(P=P, Q=Bpow[i])
e //= 2
assert e == 0, e
return P | Implements scalarmult(B, e) more efficiently. | Below is the instruction that describes the task:
### Input:
Implements scalarmult(B, e) more efficiently.
### Response:
def scalarmult_B(e):
"""
Implements scalarmult(B, e) more efficiently.
"""
# scalarmult(B, l) is the identity
e %= L
P = IDENT
for i in range(253):
if e & 1:
P = edwards_add(P=P, Q=Bpow[i])
e //= 2
assert e == 0, e
return P |
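A toy integer version of the same bit-by-bit multiply with precomputed doublings, where ordinary addition stands in for edwards_add and Bpow[i] is simply B * 2**i; B here is an arbitrary stand-in, and L is the standard Ed25519 group order, which is presumably what the module's L refers to:

B = 9
L = 2 ** 252 + 27742317777372353535851937790883648493
Bpow = [B * (2 ** i) for i in range(253)]   # precomputed "doublings" of the base

def scalarmult_B_toy(e):
    e %= L
    acc = 0                     # additive identity, analogous to IDENT
    for i in range(253):
        if e & 1:
            acc += Bpow[i]      # analogous to edwards_add(P=P, Q=Bpow[i])
        e //= 2
    assert e == 0, e
    return acc

print(scalarmult_B_toy(5) == 5 * B)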
def setup_dotcloud_account(cli):
"""Gets user/pass for dotcloud, performs auth, and stores keys"""
client = RESTClient(endpoint=cli.client.endpoint)
client.authenticator = NullAuth()
urlmap = client.get('/auth/discovery').item
username = cli.prompt('dotCloud email')
password = cli.prompt('Password', noecho=True)
credential = {'token_url': urlmap.get('token'),
'key': CLIENT_KEY, 'secret': CLIENT_SECRET}
try:
token = cli.authorize_client(urlmap.get('token'), credential, username, password)
except Exception as e:
cli.die('Username and password do not match. Try again.')
token['url'] = credential['token_url']
config = GlobalConfig()
config.data = {'token': token}
config.save()
cli.global_config = GlobalConfig() # reload
cli.setup_auth()
cli.get_keys() | Gets user/pass for dotcloud, performs auth, and stores keys | Below is the instruction that describes the task:
### Input:
Gets user/pass for dotcloud, performs auth, and stores keys
### Response:
def setup_dotcloud_account(cli):
"""Gets user/pass for dotcloud, performs auth, and stores keys"""
client = RESTClient(endpoint=cli.client.endpoint)
client.authenticator = NullAuth()
urlmap = client.get('/auth/discovery').item
username = cli.prompt('dotCloud email')
password = cli.prompt('Password', noecho=True)
credential = {'token_url': urlmap.get('token'),
'key': CLIENT_KEY, 'secret': CLIENT_SECRET}
try:
token = cli.authorize_client(urlmap.get('token'), credential, username, password)
except Exception as e:
cli.die('Username and password do not match. Try again.')
token['url'] = credential['token_url']
config = GlobalConfig()
config.data = {'token': token}
config.save()
cli.global_config = GlobalConfig() # reload
cli.setup_auth()
cli.get_keys() |
def rst_to_obj(cls, file_path=None, text='', columns=None,
remove_empty_rows=True, key_on=None,
deliminator=' ', eval_cells=True):
"""
This will convert a rst file or text to a seaborn table
:param file_path: str of the path to the file
:param text: str of the csv text
:param columns: list of str of columns to use
:param remove_empty_rows: bool if True will remove empty rows
:param key_on: list of str of columns to key on
:param deliminator: str to use as a deliminator
:param eval_cells: bool if True will try to evaluate numbers
:return: SeabornTable
"""
text = cls._get_lines(file_path, text)
if len(text) == 1:
text = text[0].split('\r')
for i in [-1, 2, 0]:
if not text[i].replace('=', '').strip():
text.pop(i) # get rid of bar
lines = [row.split() for row in text]
list_of_list = cls._merge_quoted_cells(lines, deliminator,
remove_empty_rows, eval_cells,
excel_boolean=False)
return cls.list_to_obj(list_of_list, key_on=key_on, columns=columns) | This will convert a rst file or text to a seaborn table
:param file_path: str of the path to the file
:param text: str of the rst text
:param columns: list of str of columns to use
:param remove_empty_rows: bool if True will remove empty rows
:param key_on: list of str of columns to key on
:param deliminator: str to use as a deliminator
:param eval_cells: bool if True will try to evaluate numbers
:return: SeabornTable | Below is the the instruction that describes the task:
### Input:
This will convert a rst file or text to a seaborn table
:param file_path: str of the path to the file
:param text: str of the rst text
:param columns: list of str of columns to use
:param remove_empty_rows: bool if True will remove empty rows
:param key_on: list of str of columns to key on
:param deliminator: str to use as a deliminator
:param eval_cells: bool if True will try to evaluate numbers
:return: SeabornTable
### Response:
def rst_to_obj(cls, file_path=None, text='', columns=None,
remove_empty_rows=True, key_on=None,
deliminator=' ', eval_cells=True):
"""
This will convert a rst file or text to a seaborn table
:param file_path: str of the path to the file
    :param text: str of the rst text
:param columns: list of str of columns to use
:param remove_empty_rows: bool if True will remove empty rows
:param key_on: list of str of columns to key on
:param deliminator: str to use as a deliminator
:param eval_cells: bool if True will try to evaluate numbers
:return: SeabornTable
"""
text = cls._get_lines(file_path, text)
if len(text) == 1:
text = text[0].split('\r')
for i in [-1, 2, 0]:
if not text[i].replace('=', '').strip():
text.pop(i) # get rid of bar
lines = [row.split() for row in text]
list_of_list = cls._merge_quoted_cells(lines, deliminator,
remove_empty_rows, eval_cells,
excel_boolean=False)
return cls.list_to_obj(list_of_list, key_on=key_on, columns=columns) |
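
A minimal usage sketch, assuming the package that defines SeabornTable is importable under the name shown (adjust the import to wherever the class actually lives); the expected columns are indicative only:

from seaborn_table import SeabornTable  # assumed import path

rst_text = "\n".join([
    "====  =====",
    "name  score",
    "====  =====",
    "ada   91",
    "bob   78",
    "====  =====",
])
table = SeabornTable.rst_to_obj(text=rst_text)
# Expected to yield columns ['name', 'score'] with two data rows.
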
def end_task(self):
'''
Remove the current task from the stack.
'''
self.progress(self.task_stack[-1].size)
self.task_stack.pop() | Remove the current task from the stack. | Below is the the instruction that describes the task:
### Input:
Remove the current task from the stack.
### Response:
def end_task(self):
'''
Remove the current task from the stack.
'''
self.progress(self.task_stack[-1].size)
self.task_stack.pop() |
def prepare_adiabatic_limit(slh, k=None):
"""Prepare the adiabatic elimination on an SLH object
Args:
slh: The SLH object to take the limit for
k: The scaling parameter $k \rightarrow \infty$. The default is a
positive symbol 'k'
Returns:
tuple: The objects ``Y, A, B, F, G, N``
necessary to compute the limiting system.
"""
if k is None:
k = symbols('k', positive=True)
Ld = slh.L.dag()
LdL = (Ld * slh.L)[0, 0]
K = (-LdL / 2 + I * slh.H).expand().simplify_scalar()
N = slh.S.dag()
B, A, Y = K.series_expand(k, 0, 2)
G, F = Ld.series_expand(k, 0, 1)
return Y, A, B, F, G, N | Prepare the adiabatic elimination on an SLH object
Args:
slh: The SLH object to take the limit for
k: The scaling parameter $k \rightarrow \infty$. The default is a
positive symbol 'k'
Returns:
tuple: The objects ``Y, A, B, F, G, N``
necessary to compute the limiting system. | Below is the the instruction that describes the task:
### Input:
Prepare the adiabatic elimination on an SLH object
Args:
slh: The SLH object to take the limit for
k: The scaling parameter $k \rightarrow \infty$. The default is a
positive symbol 'k'
Returns:
tuple: The objects ``Y, A, B, F, G, N``
necessary to compute the limiting system.
### Response:
def prepare_adiabatic_limit(slh, k=None):
"""Prepare the adiabatic elimination on an SLH object
Args:
slh: The SLH object to take the limit for
k: The scaling parameter $k \rightarrow \infty$. The default is a
positive symbol 'k'
Returns:
tuple: The objects ``Y, A, B, F, G, N``
necessary to compute the limiting system.
"""
if k is None:
k = symbols('k', positive=True)
Ld = slh.L.dag()
LdL = (Ld * slh.L)[0, 0]
K = (-LdL / 2 + I * slh.H).expand().simplify_scalar()
N = slh.S.dag()
B, A, Y = K.series_expand(k, 0, 2)
G, F = Ld.series_expand(k, 0, 1)
return Y, A, B, F, G, N |
def eigenvectors_right_samples(self):
r""" Samples of the right eigenvectors of the hidden transition matrix """
res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype)
for i in range(self.nsamples):
res[i, :, :] = self._sampled_hmms[i].eigenvectors_right
return res | r""" Samples of the right eigenvectors of the hidden transition matrix | Below is the the instruction that describes the task:
### Input:
r""" Samples of the right eigenvectors of the hidden transition matrix
### Response:
def eigenvectors_right_samples(self):
r""" Samples of the right eigenvectors of the hidden transition matrix """
res = np.empty((self.nsamples, self.nstates, self.nstates), dtype=config.dtype)
for i in range(self.nsamples):
res[i, :, :] = self._sampled_hmms[i].eigenvectors_right
return res |
def read_simulation_temps(pathname,NumTemps):
"""Reads in the various temperatures from each TEMP#/simul.output file by knowing
beforehand the total number of temperatures (parameter at top)
"""
print("--Reading temperatures from %s/..." % pathname)
# Initialize return variable
temps_from_file = numpy.zeros(NumTemps, numpy.float64)
for k in range(NumTemps):
infile = open(os.path.join(pathname,'TEMP'+ str(k), 'simul'+str(k)+'.output'), 'r')
lines = infile.readlines()
infile.close()
for line in lines:
if (line[0:11] == 'Temperature'):
vals = line.split(':')
break
temps_from_file[k] = float(vals[1])
return temps_from_file | Reads in the various temperatures from each TEMP#/simul.output file by knowing
beforehand the total number of temperatures (parameter at top) | Below is the the instruction that describes the task:
### Input:
Reads in the various temperatures from each TEMP#/simul.output file by knowing
beforehand the total number of temperatures (parameter at top)
### Response:
def read_simulation_temps(pathname,NumTemps):
"""Reads in the various temperatures from each TEMP#/simul.output file by knowing
beforehand the total number of temperatures (parameter at top)
"""
print("--Reading temperatures from %s/..." % pathname)
# Initialize return variable
temps_from_file = numpy.zeros(NumTemps, numpy.float64)
for k in range(NumTemps):
infile = open(os.path.join(pathname,'TEMP'+ str(k), 'simul'+str(k)+'.output'), 'r')
lines = infile.readlines()
infile.close()
for line in lines:
if (line[0:11] == 'Temperature'):
vals = line.split(':')
break
temps_from_file[k] = float(vals[1])
return temps_from_file |
def _example_from_allof(self, prop_spec):
"""Get the examples from an allOf section.
Args:
prop_spec: property specification you want an example of.
Returns:
An example dict
"""
example_dict = {}
for definition in prop_spec['allOf']:
update = self.get_example_from_prop_spec(definition, True)
example_dict.update(update)
return example_dict | Get the examples from an allOf section.
Args:
prop_spec: property specification you want an example of.
Returns:
An example dict | Below is the the instruction that describes the task:
### Input:
Get the examples from an allOf section.
Args:
prop_spec: property specification you want an example of.
Returns:
An example dict
### Response:
def _example_from_allof(self, prop_spec):
"""Get the examples from an allOf section.
Args:
prop_spec: property specification you want an example of.
Returns:
An example dict
"""
example_dict = {}
for definition in prop_spec['allOf']:
update = self.get_example_from_prop_spec(definition, True)
example_dict.update(update)
return example_dict |
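
A self-contained sketch of the allOf merge this method performs; example_for stands in for the class's get_example_from_prop_spec, so later sub-schemas override earlier ones via dict.update():

def example_from_allof(prop_spec, example_for):
    example = {}
    for definition in prop_spec["allOf"]:
        example.update(example_for(definition))
    return example

spec = {"allOf": [
    {"properties": {"id": {"type": "integer"}}},
    {"properties": {"name": {"type": "string"}}},
]}
fake_example = lambda d: {k: (42 if v["type"] == "integer" else "text")
                          for k, v in d["properties"].items()}
print(example_from_allof(spec, fake_example))   # {'id': 42, 'name': 'text'}
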
def draw_polygon(
self,
*pts,
close_path=True,
stroke=None,
stroke_width=1,
stroke_dash=None,
fill=None
) -> None:
"""Draws the given polygon."""
c = self.c
c.saveState()
if stroke is not None:
c.setStrokeColorRGB(*stroke)
c.setLineWidth(stroke_width)
c.setDash(stroke_dash)
if fill is not None:
c.setFillColorRGB(*fill)
p = c.beginPath()
fn = p.moveTo
for x,y in zip(*[iter(pts)]*2):
fn(x, y)
fn = p.lineTo
if close_path:
p.close()
c.drawPath(p, stroke=(stroke is not None), fill=(fill is not None))
c.restoreState() | Draws the given polygon. | Below is the the instruction that describes the task:
### Input:
Draws the given polygon.
### Response:
def draw_polygon(
self,
*pts,
close_path=True,
stroke=None,
stroke_width=1,
stroke_dash=None,
fill=None
) -> None:
"""Draws the given polygon."""
c = self.c
c.saveState()
if stroke is not None:
c.setStrokeColorRGB(*stroke)
c.setLineWidth(stroke_width)
c.setDash(stroke_dash)
if fill is not None:
c.setFillColorRGB(*fill)
p = c.beginPath()
fn = p.moveTo
for x,y in zip(*[iter(pts)]*2):
fn(x, y)
fn = p.lineTo
if close_path:
p.close()
c.drawPath(p, stroke=(stroke is not None), fill=(fill is not None))
c.restoreState() |
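
The method is a thin wrapper over ReportLab's path API (self.c is a reportlab canvas), with the flat *pts argument consumed pairwise as (x, y). For reference, a call like draw_polygon(100, 100, 200, 100, 150, 180, fill=(0.9, 0.2, 0.2)) boils down to roughly these raw ReportLab calls:

from reportlab.pdfgen import canvas

c = canvas.Canvas("triangle.pdf")
c.saveState()
c.setFillColorRGB(0.9, 0.2, 0.2)
p = c.beginPath()
p.moveTo(100, 100)
p.lineTo(200, 100)
p.lineTo(150, 180)
p.close()
c.drawPath(p, stroke=0, fill=1)   # stroke omitted, fill enabled
c.restoreState()
c.save()
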
def server_shutdown(server_state):
"""
Shut down server subsystems.
Remove PID file.
"""
set_running( False )
# stop API servers
rpc_stop(server_state)
api_stop(server_state)
# stop atlas node
server_atlas_shutdown(server_state)
# stopping GC
gc_stop()
# clear PID file
try:
if os.path.exists(server_state['pid_file']):
os.unlink(server_state['pid_file'])
except:
pass
return True | Shut down server subsystems.
Remove PID file. | Below is the the instruction that describes the task:
### Input:
Shut down server subsystems.
Remove PID file.
### Response:
def server_shutdown(server_state):
"""
Shut down server subsystems.
Remove PID file.
"""
set_running( False )
# stop API servers
rpc_stop(server_state)
api_stop(server_state)
# stop atlas node
server_atlas_shutdown(server_state)
# stopping GC
gc_stop()
# clear PID file
try:
if os.path.exists(server_state['pid_file']):
os.unlink(server_state['pid_file'])
except:
pass
return True |
def start(self):
"""Start ZAP authentication"""
super().start()
self.__poller = zmq.asyncio.Poller()
self.__poller.register(self.zap_socket, zmq.POLLIN)
self.__task = asyncio.ensure_future(self.__handle_zap()) | Start ZAP authentication | Below is the the instruction that describes the task:
### Input:
Start ZAP authentication
### Response:
def start(self):
"""Start ZAP authentication"""
super().start()
self.__poller = zmq.asyncio.Poller()
self.__poller.register(self.zap_socket, zmq.POLLIN)
self.__task = asyncio.ensure_future(self.__handle_zap()) |
def OnTool(self, event):
"""Toolbar event handler"""
msgtype = self.ids_msgs[event.GetId()]
post_command_event(self, msgtype) | Toolbar event handler | Below is the the instruction that describes the task:
### Input:
Toolbar event handler
### Response:
def OnTool(self, event):
"""Toolbar event handler"""
msgtype = self.ids_msgs[event.GetId()]
post_command_event(self, msgtype) |
def _replace_scalar(self, scalar):
""" Replace scalar name with scalar value """
if not is_arg_scalar(scalar):
return scalar
name = scalar[1:]
return self.get_scalar_value(name) | Replace scalar name with scalar value | Below is the the instruction that describes the task:
### Input:
Replace scalar name with scalar value
### Response:
def _replace_scalar(self, scalar):
""" Replace scalar name with scalar value """
if not is_arg_scalar(scalar):
return scalar
name = scalar[1:]
return self.get_scalar_value(name) |
def prepare_refresh_body(self, body='', refresh_token=None, scope=None, **kwargs):
"""Prepare an access token request, using a refresh token.
If the authorization server issued a refresh token to the client, the
client makes a refresh request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format in the HTTP request entity-body:
grant_type
REQUIRED. Value MUST be set to "refresh_token".
refresh_token
REQUIRED. The refresh token issued to the client.
scope
OPTIONAL. The scope of the access request as described by
Section 3.3. The requested scope MUST NOT include any scope
not originally granted by the resource owner, and if omitted is
treated as equal to the scope originally granted by the
resource owner.
"""
refresh_token = refresh_token or self.refresh_token
return prepare_token_request(self.refresh_token_key, body=body, scope=scope,
refresh_token=refresh_token, **kwargs) | Prepare an access token request, using a refresh token.
If the authorization server issued a refresh token to the client, the
client makes a refresh request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format in the HTTP request entity-body:
grant_type
REQUIRED. Value MUST be set to "refresh_token".
refresh_token
REQUIRED. The refresh token issued to the client.
scope
OPTIONAL. The scope of the access request as described by
Section 3.3. The requested scope MUST NOT include any scope
not originally granted by the resource owner, and if omitted is
treated as equal to the scope originally granted by the
resource owner. | Below is the the instruction that describes the task:
### Input:
Prepare an access token request, using a refresh token.
If the authorization server issued a refresh token to the client, the
client makes a refresh request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format in the HTTP request entity-body:
grant_type
REQUIRED. Value MUST be set to "refresh_token".
refresh_token
REQUIRED. The refresh token issued to the client.
scope
OPTIONAL. The scope of the access request as described by
Section 3.3. The requested scope MUST NOT include any scope
not originally granted by the resource owner, and if omitted is
treated as equal to the scope originally granted by the
resource owner.
### Response:
def prepare_refresh_body(self, body='', refresh_token=None, scope=None, **kwargs):
"""Prepare an access token request, using a refresh token.
If the authorization server issued a refresh token to the client, the
client makes a refresh request to the token endpoint by adding the
following parameters using the "application/x-www-form-urlencoded"
format in the HTTP request entity-body:
grant_type
REQUIRED. Value MUST be set to "refresh_token".
refresh_token
REQUIRED. The refresh token issued to the client.
scope
OPTIONAL. The scope of the access request as described by
Section 3.3. The requested scope MUST NOT include any scope
not originally granted by the resource owner, and if omitted is
treated as equal to the scope originally granted by the
resource owner.
"""
refresh_token = refresh_token or self.refresh_token
return prepare_token_request(self.refresh_token_key, body=body, scope=scope,
refresh_token=refresh_token, **kwargs) |
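
The signature and docstring match oauthlib's base OAuth2 Client helper; assuming that is the source, a usage sketch via the WebApplicationClient subclass could be (the exact parameter ordering in the encoded body may differ):

from oauthlib.oauth2 import WebApplicationClient

client = WebApplicationClient("my-client-id", refresh_token="old-refresh-token")
body = client.prepare_refresh_body(scope="read write")
# body is form-encoded, e.g. "grant_type=refresh_token&scope=read+write&refresh_token=old-refresh-token"
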
def _md5_file(fn, block_size=1048576):
"""Builds the MD5 of a file block by block
Args:
fn: File path
block_size: Size of the blocks to consider (default 1048576)
Returns:
File MD5
"""
h = hashlib.md5()
    with open(fn, 'rb') as fp:
d = 1
while d:
d = fp.read(block_size)
h.update(d)
return h.hexdigest() | Builds the MD5 of a file block by block
Args:
fn: File path
block_size: Size of the blocks to consider (default 1048576)
Returns:
File MD5 | Below is the the instruction that describes the task:
### Input:
Builds the MD5 of a file block by block
Args:
fn: File path
block_size: Size of the blocks to consider (default 1048576)
Returns:
File MD5
### Response:
def _md5_file(fn, block_size=1048576):
"""Builds the MD5 of a file block by block
Args:
fn: File path
block_size: Size of the blocks to consider (default 1048576)
Returns:
File MD5
"""
h = hashlib.md5()
    with open(fn, 'rb') as fp:
d = 1
while d:
d = fp.read(block_size)
h.update(d)
return h.hexdigest() |
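
A small usage check against hashlib's one-shot API; the chunked loop should produce the same digest as hashing the whole payload at once:

import hashlib, tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp:
    tmp.write(b"hello world")
    path = tmp.name

assert _md5_file(path) == hashlib.md5(b"hello world").hexdigest()
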
def is_filterbank(filename):
""" Open file and confirm if it is a filterbank file or not. """
with open(filename, 'rb') as fh:
is_fil = True
# Check this is a blimpy file
try:
keyword, value, idx = read_next_header_keyword(fh)
try:
assert keyword == b'HEADER_START'
except AssertionError:
is_fil = False
except KeyError:
is_fil = False
return is_fil | Open file and confirm if it is a filterbank file or not. | Below is the the instruction that describes the task:
### Input:
Open file and confirm if it is a filterbank file or not.
### Response:
def is_filterbank(filename):
""" Open file and confirm if it is a filterbank file or not. """
with open(filename, 'rb') as fh:
is_fil = True
# Check this is a blimpy file
try:
keyword, value, idx = read_next_header_keyword(fh)
try:
assert keyword == b'HEADER_START'
except AssertionError:
is_fil = False
except KeyError:
is_fil = False
return is_fil |
def item(self, key):
"""Retrieves an Item object for the specified key in this bucket.
The item need not exist.
Args:
key: the key of the item within the bucket.
Returns:
An Item instance representing the specified key.
"""
return _item.Item(self._name, key, context=self._context) | Retrieves an Item object for the specified key in this bucket.
The item need not exist.
Args:
key: the key of the item within the bucket.
Returns:
An Item instance representing the specified key. | Below is the the instruction that describes the task:
### Input:
Retrieves an Item object for the specified key in this bucket.
The item need not exist.
Args:
key: the key of the item within the bucket.
Returns:
An Item instance representing the specified key.
### Response:
def item(self, key):
"""Retrieves an Item object for the specified key in this bucket.
The item need not exist.
Args:
key: the key of the item within the bucket.
Returns:
An Item instance representing the specified key.
"""
return _item.Item(self._name, key, context=self._context) |
def get_all():
'''
Return all installed services
Returns:
list: Returns a list of all services on the system.
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
services = _get_services()
ret = set()
for service in services:
ret.add(service['ServiceName'])
return sorted(ret) | Return all installed services
Returns:
list: Returns a list of all services on the system.
CLI Example:
.. code-block:: bash
salt '*' service.get_all | Below is the the instruction that describes the task:
### Input:
Return all installed services
Returns:
list: Returns a list of all services on the system.
CLI Example:
.. code-block:: bash
salt '*' service.get_all
### Response:
def get_all():
'''
Return all installed services
Returns:
list: Returns a list of all services on the system.
CLI Example:
.. code-block:: bash
salt '*' service.get_all
'''
services = _get_services()
ret = set()
for service in services:
ret.add(service['ServiceName'])
return sorted(ret) |
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
"""
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxcharno, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, kw in self.drawables:
draw.text(pos, value, font=font, **kw)
im.save(outfile, self.image_format.upper()) | Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items. | Below is the the instruction that describes the task:
### Input:
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
### Response:
def format(self, tokensource, outfile):
"""
Format ``tokensource``, an iterable of ``(tokentype, tokenstring)``
tuples and write it into ``outfile``.
This implementation calculates where it should draw each token on the
pixmap, then calculates the required pixmap size and draws the items.
"""
self._create_drawables(tokensource)
self._draw_line_numbers()
im = Image.new(
'RGB',
self._get_image_size(self.maxcharno, self.maxlineno),
self.background_color
)
self._paint_line_number_bg(im)
draw = ImageDraw.Draw(im)
# Highlight
if self.hl_lines:
x = self.image_pad + self.line_number_width - self.line_number_pad + 1
recth = self._get_line_height()
rectw = im.size[0] - x
for linenumber in self.hl_lines:
y = self._get_line_y(linenumber - 1)
draw.rectangle([(x, y), (x + rectw, y + recth)],
fill=self.hl_color)
for pos, value, font, kw in self.drawables:
draw.text(pos, value, font=font, **kw)
im.save(outfile, self.image_format.upper()) |
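
This looks like Pygments' ImageFormatter, driven through highlight(); assuming Pillow is installed, rendering a snippet to PNG with line numbers and one highlighted line looks like:

from pygments import highlight
from pygments.lexers import PythonLexer
from pygments.formatters import ImageFormatter

code = "def add(a, b):\n    return a + b\n"
with open("snippet.png", "wb") as out:
    highlight(code, PythonLexer(), ImageFormatter(line_numbers=True, hl_lines=[2]), out)
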
def Trebble_Bishnoi(self, T, full=True, quick=True):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Trebble and Bishnoi (1987) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. One coefficient needed.
.. math::
\alpha = e^{c_{1} \left(- \frac{T}{Tc} + 1\right)}
References
----------
.. [1] Trebble, M. A., and P. R. Bishnoi. "Development of a New Four-
Parameter Cubic Equation of State." Fluid Phase Equilibria 35, no. 1
(September 1, 1987): 1-18. doi:10.1016/0378-3812(87)80001-8.
'''
c1 = self.alpha_function_coeffs
T, Tc, a = self.T, self.Tc, self.a
a_alpha = a*exp(c1*(-T/Tc + 1))
if not full:
return a_alpha
else:
da_alpha_dT = a*-c1*exp(c1*(-T/Tc + 1))/Tc
d2a_alpha_dT2 = a*c1**2*exp(-c1*(T/Tc - 1))/Tc**2
return a_alpha, da_alpha_dT, d2a_alpha_dT2 | r'''Method to calculate `a_alpha` and its first and second
derivatives according to Trebble and Bishnoi (1987) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. One coefficient needed.
.. math::
\alpha = e^{c_{1} \left(- \frac{T}{Tc} + 1\right)}
References
----------
.. [1] Trebble, M. A., and P. R. Bishnoi. "Development of a New Four-
Parameter Cubic Equation of State." Fluid Phase Equilibria 35, no. 1
(September 1, 1987): 1-18. doi:10.1016/0378-3812(87)80001-8. | Below is the the instruction that describes the task:
### Input:
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Trebble and Bishnoi (1987) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. One coefficient needed.
.. math::
\alpha = e^{c_{1} \left(- \frac{T}{Tc} + 1\right)}
References
----------
.. [1] Trebble, M. A., and P. R. Bishnoi. "Development of a New Four-
Parameter Cubic Equation of State." Fluid Phase Equilibria 35, no. 1
(September 1, 1987): 1-18. doi:10.1016/0378-3812(87)80001-8.
### Response:
def Trebble_Bishnoi(self, T, full=True, quick=True):
r'''Method to calculate `a_alpha` and its first and second
derivatives according to Trebble and Bishnoi (1987) [1]_. Returns `a_alpha`,
`da_alpha_dT`, and `d2a_alpha_dT2`. See `GCEOS.a_alpha_and_derivatives`
for more documentation. One coefficient needed.
.. math::
\alpha = e^{c_{1} \left(- \frac{T}{Tc} + 1\right)}
References
----------
.. [1] Trebble, M. A., and P. R. Bishnoi. "Development of a New Four-
Parameter Cubic Equation of State." Fluid Phase Equilibria 35, no. 1
(September 1, 1987): 1-18. doi:10.1016/0378-3812(87)80001-8.
'''
c1 = self.alpha_function_coeffs
T, Tc, a = self.T, self.Tc, self.a
a_alpha = a*exp(c1*(-T/Tc + 1))
if not full:
return a_alpha
else:
da_alpha_dT = a*-c1*exp(c1*(-T/Tc + 1))/Tc
d2a_alpha_dT2 = a*c1**2*exp(-c1*(T/Tc - 1))/Tc**2
return a_alpha, da_alpha_dT, d2a_alpha_dT2 |
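
The hand-written derivatives can be double-checked symbolically; a short SymPy sketch with generic positive symbols:

import sympy as sp

T, Tc, a, c1 = sp.symbols("T Tc a c1", positive=True)
a_alpha = a * sp.exp(c1 * (1 - T / Tc))

d1 = sp.diff(a_alpha, T)      # matches  -a*c1*exp(c1*(1 - T/Tc))/Tc
d2 = sp.diff(a_alpha, T, 2)   # matches   a*c1**2*exp(-c1*(T/Tc - 1))/Tc**2
assert sp.simplify(d1 + a * c1 * sp.exp(c1 * (1 - T / Tc)) / Tc) == 0
assert sp.simplify(d2 - a * c1**2 * sp.exp(-c1 * (T / Tc - 1)) / Tc**2) == 0
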
def easy_train_and_evaluate(hyper_params, Model=None, create_loss=None,
training_data=None, validation_data=None,
inline_plotting=False, session_config=None, log_suffix=None,
continue_training=False, continue_with_specific_checkpointpath=None):
"""
Train and evaluate your model without any boilerplate code.
1) Write your data using the starttf.tfrecords.autorecords.write_data method.
2) Create your hyper parameter file containing all required fields and then load it using
starttf.utils.hyper_params.load_params method.
Minimal Sample Hyperparams File:
{"train": {
"learning_rate": {
"type": "const",
"start_value": 0.001
},
"optimizer": {
"type": "adam"
},
"batch_size": 1024,
"iters": 10000,
"summary_iters": 100,
"checkpoint_path": "checkpoints/mnist",
"tf_records_path": "data/.records/mnist"
}
}
3) Pass everything required to this method and that's it.
    :param hyper_params: The hyper parameters object loaded via starttf.utils.hyper_params.load_params
:param Model: A keras model.
:param create_loss: A create_loss function like that in starttf.examples.mnist.loss.
:param inline_plotting: When you are using jupyter notebooks you can tell it to plot the loss directly inside the notebook.
:param continue_training: Bool, continue last training in the checkpoint path specified in the hyper parameters.
:param session_config: A configuration for the session.
:param log_suffix: A suffix for the log folder, so you can remember what was special about the run.
:return:
"""
time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H.%M.%S')
chkpt_path = hyper_params.train.checkpoint_path + "/" + time_stamp
if log_suffix is not None:
chkpt_path = chkpt_path + "_" + log_suffix
if session_config is None:
session_config = get_default_config()
if continue_with_specific_checkpointpath:
chkpt_path = hyper_params.train.checkpoint_path + "/" + continue_with_specific_checkpointpath
print("Continue with checkpoint: {}".format(chkpt_path))
elif continue_training:
chkpts = sorted([name for name in os.listdir(hyper_params.train.checkpoint_path)])
chkpt_path = hyper_params.train.checkpoint_path + "/" + chkpts[-1]
print("Latest found checkpoint: {}".format(chkpt_path))
if not os.path.exists(chkpt_path):
os.makedirs(chkpt_path)
# If hyperparam config is used, load and save code
if Model is None:
model_backup = os.path.join(chkpt_path, "model.py")
        copyfile(hyper_params["arch"]["model"].replace(".", os.sep), model_backup)
        arch_model = __import__(hyper_params["arch"]["model"], fromlist=["Model"])
Model = arch_model.Model
if create_loss is None:
loss_backup = os.path.join(chkpt_path, "loss.py")
        copyfile(hyper_params["arch"]["loss"].replace(".", os.sep), loss_backup)
        arch_loss = __import__(hyper_params["arch"]["loss"], fromlist=["create_loss"])
create_loss = arch_loss.create_loss
# Load training data
print("Load data")
if training_data is None:
training_data = create_input_fn(os.path.join(hyper_params.train.tf_records_path, PHASE_TRAIN),
hyper_params.train.batch_size)
if validation_data is None:
validation_data = create_input_fn(os.path.join(hyper_params.train.tf_records_path, PHASE_VALIDATION),
hyper_params.train.batch_size)
# Write hyper parameters to be able to track what config you had.
with open(chkpt_path + "/hyperparameters.json", "w") as json_file:
json_file.write(json.dumps(hyper_params.to_dict(), indent=4, sort_keys=True))
estimator_spec = create_tf_estimator_spec(chkpt_path, Model, create_loss, inline_plotting)
# Create a run configuration
config = None
if hyper_params.train.get("distributed", False):
distribution = tf.contrib.distribute.MirroredStrategy()
config = tf.estimator.RunConfig(model_dir=chkpt_path,
save_summary_steps=hyper_params.train.summary_steps,
train_distribute=distribution,
save_checkpoints_steps=hyper_params.train.save_checkpoint_steps,
keep_checkpoint_max=hyper_params.train.keep_checkpoint_max,
keep_checkpoint_every_n_hours=1)
else:
config = tf.estimator.RunConfig(session_config=session_config,
model_dir=chkpt_path,
save_summary_steps=hyper_params.train.summary_steps,
save_checkpoints_steps=hyper_params.train.save_checkpoint_steps,
keep_checkpoint_max=hyper_params.train.keep_checkpoint_max,
keep_checkpoint_every_n_hours=1)
# Create the estimator.
estimator = None
if hyper_params.train.get("warm_start_checkpoint", None) is not None:
warm_start_dir = hyper_params.train.warm_start_checkpoint
estimator = tf.estimator.Estimator(estimator_spec,
config=config,
warm_start_from=warm_start_dir,
params=hyper_params)
else:
estimator = tf.estimator.Estimator(estimator_spec,
config=config,
params=hyper_params)
# Specify training and actually train.
throttle_secs = hyper_params.train.get("throttle_secs", 120)
train_spec = tf.estimator.TrainSpec(input_fn=training_data,
max_steps=hyper_params.train.steps)
eval_spec = tf.estimator.EvalSpec(input_fn=validation_data,
throttle_secs=throttle_secs)
print("Start training")
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
return estimator | Train and evaluate your model without any boilerplate code.
1) Write your data using the starttf.tfrecords.autorecords.write_data method.
2) Create your hyper parameter file containing all required fields and then load it using
starttf.utils.hyper_params.load_params method.
Minimal Sample Hyperparams File:
{"train": {
"learning_rate": {
"type": "const",
"start_value": 0.001
},
"optimizer": {
"type": "adam"
},
"batch_size": 1024,
"iters": 10000,
"summary_iters": 100,
"checkpoint_path": "checkpoints/mnist",
"tf_records_path": "data/.records/mnist"
}
}
3) Pass everything required to this method and that's it.
:param hyper_params: The hyper parameters object loaded via starttf.utils.hyper_params.load_params
:param Model: A keras model.
:param create_loss: A create_loss function like that in starttf.examples.mnist.loss.
:param inline_plotting: When you are using jupyter notebooks you can tell it to plot the loss directly inside the notebook.
:param continue_training: Bool, continue last training in the checkpoint path specified in the hyper parameters.
:param session_config: A configuration for the session.
:param log_suffix: A suffix for the log folder, so you can remember what was special about the run.
:return: | Below is the the instruction that describes the task:
### Input:
Train and evaluate your model without any boilerplate code.
1) Write your data using the starttf.tfrecords.autorecords.write_data method.
2) Create your hyper parameter file containing all required fields and then load it using
starttf.utils.hyper_params.load_params method.
Minimal Sample Hyperparams File:
{"train": {
"learning_rate": {
"type": "const",
"start_value": 0.001
},
"optimizer": {
"type": "adam"
},
"batch_size": 1024,
"iters": 10000,
"summary_iters": 100,
"checkpoint_path": "checkpoints/mnist",
"tf_records_path": "data/.records/mnist"
}
}
3) Pass everything required to this method and that's it.
:param hyper_params: The hyper parameters object loaded via starttf.utils.hyper_params.load_params
:param Model: A keras model.
:param create_loss: A create_loss function like that in starttf.examples.mnist.loss.
:param inline_plotting: When you are using jupyter notebooks you can tell it to plot the loss directly inside the notebook.
:param continue_training: Bool, continue last training in the checkpoint path specified in the hyper parameters.
:param session_config: A configuration for the session.
:param log_suffix: A suffix for the log folder, so you can remember what was special about the run.
:return:
### Response:
def easy_train_and_evaluate(hyper_params, Model=None, create_loss=None,
training_data=None, validation_data=None,
inline_plotting=False, session_config=None, log_suffix=None,
continue_training=False, continue_with_specific_checkpointpath=None):
"""
Train and evaluate your model without any boilerplate code.
1) Write your data using the starttf.tfrecords.autorecords.write_data method.
2) Create your hyper parameter file containing all required fields and then load it using
starttf.utils.hyper_params.load_params method.
Minimal Sample Hyperparams File:
{"train": {
"learning_rate": {
"type": "const",
"start_value": 0.001
},
"optimizer": {
"type": "adam"
},
"batch_size": 1024,
"iters": 10000,
"summary_iters": 100,
"checkpoint_path": "checkpoints/mnist",
"tf_records_path": "data/.records/mnist"
}
}
3) Pass everything required to this method and that's it.
    :param hyper_params: The hyper parameters object loaded via starttf.utils.hyper_params.load_params
:param Model: A keras model.
:param create_loss: A create_loss function like that in starttf.examples.mnist.loss.
:param inline_plotting: When you are using jupyter notebooks you can tell it to plot the loss directly inside the notebook.
:param continue_training: Bool, continue last training in the checkpoint path specified in the hyper parameters.
:param session_config: A configuration for the session.
:param log_suffix: A suffix for the log folder, so you can remember what was special about the run.
:return:
"""
time_stamp = datetime.datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d_%H.%M.%S')
chkpt_path = hyper_params.train.checkpoint_path + "/" + time_stamp
if log_suffix is not None:
chkpt_path = chkpt_path + "_" + log_suffix
if session_config is None:
session_config = get_default_config()
if continue_with_specific_checkpointpath:
chkpt_path = hyper_params.train.checkpoint_path + "/" + continue_with_specific_checkpointpath
print("Continue with checkpoint: {}".format(chkpt_path))
elif continue_training:
chkpts = sorted([name for name in os.listdir(hyper_params.train.checkpoint_path)])
chkpt_path = hyper_params.train.checkpoint_path + "/" + chkpts[-1]
print("Latest found checkpoint: {}".format(chkpt_path))
if not os.path.exists(chkpt_path):
os.makedirs(chkpt_path)
# If hyperparam config is used, load and save code
if Model is None:
model_backup = os.path.join(chkpt_path, "model.py")
        copyfile(hyper_params["arch"]["model"].replace(".", os.sep), model_backup)
        arch_model = __import__(hyper_params["arch"]["model"], fromlist=["Model"])
Model = arch_model.Model
if create_loss is None:
loss_backup = os.path.join(chkpt_path, "loss.py")
        copyfile(hyper_params["arch"]["loss"].replace(".", os.sep), loss_backup)
        arch_loss = __import__(hyper_params["arch"]["loss"], fromlist=["create_loss"])
create_loss = arch_loss.create_loss
# Load training data
print("Load data")
if training_data is None:
training_data = create_input_fn(os.path.join(hyper_params.train.tf_records_path, PHASE_TRAIN),
hyper_params.train.batch_size)
if validation_data is None:
validation_data = create_input_fn(os.path.join(hyper_params.train.tf_records_path, PHASE_VALIDATION),
hyper_params.train.batch_size)
# Write hyper parameters to be able to track what config you had.
with open(chkpt_path + "/hyperparameters.json", "w") as json_file:
json_file.write(json.dumps(hyper_params.to_dict(), indent=4, sort_keys=True))
estimator_spec = create_tf_estimator_spec(chkpt_path, Model, create_loss, inline_plotting)
# Create a run configuration
config = None
if hyper_params.train.get("distributed", False):
distribution = tf.contrib.distribute.MirroredStrategy()
config = tf.estimator.RunConfig(model_dir=chkpt_path,
save_summary_steps=hyper_params.train.summary_steps,
train_distribute=distribution,
save_checkpoints_steps=hyper_params.train.save_checkpoint_steps,
keep_checkpoint_max=hyper_params.train.keep_checkpoint_max,
keep_checkpoint_every_n_hours=1)
else:
config = tf.estimator.RunConfig(session_config=session_config,
model_dir=chkpt_path,
save_summary_steps=hyper_params.train.summary_steps,
save_checkpoints_steps=hyper_params.train.save_checkpoint_steps,
keep_checkpoint_max=hyper_params.train.keep_checkpoint_max,
keep_checkpoint_every_n_hours=1)
# Create the estimator.
estimator = None
if hyper_params.train.get("warm_start_checkpoint", None) is not None:
warm_start_dir = hyper_params.train.warm_start_checkpoint
estimator = tf.estimator.Estimator(estimator_spec,
config=config,
warm_start_from=warm_start_dir,
params=hyper_params)
else:
estimator = tf.estimator.Estimator(estimator_spec,
config=config,
params=hyper_params)
# Specify training and actually train.
throttle_secs = hyper_params.train.get("throttle_secs", 120)
train_spec = tf.estimator.TrainSpec(input_fn=training_data,
max_steps=hyper_params.train.steps)
eval_spec = tf.estimator.EvalSpec(input_fn=validation_data,
throttle_secs=throttle_secs)
print("Start training")
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)
return estimator |
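
A hypothetical call, assuming the hyper parameters were written in the JSON layout from the docstring and loaded with starttf's loader; MyKerasModel and my_create_loss are placeholders for your own model and loss factory:

from starttf.utils.hyper_params import load_params

hyper_params = load_params("hyperparams/mnist.json")
estimator = easy_train_and_evaluate(
    hyper_params,
    Model=MyKerasModel,          # placeholder keras model class
    create_loss=my_create_loss,  # placeholder create_loss function
    log_suffix="baseline",
)
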
def parse(cls, buff, offset):
"""
Given a buffer and offset, returns the parsed value and new offset.
Calls `parse()` on the given buffer for each sub-part in order and
creates a new instance with the results.
"""
values = {}
for name, part in cls.parts:
value, new_offset = part.parse(buff, offset)
values[name] = value
offset = new_offset
return cls(**values), offset | Given a buffer and offset, returns the parsed value and new offset.
Calls `parse()` on the given buffer for each sub-part in order and
creates a new instance with the results. | Below is the the instruction that describes the task:
### Input:
Given a buffer and offset, returns the parsed value and new offset.
Calls `parse()` on the given buffer for each sub-part in order and
creates a new instance with the results.
### Response:
def parse(cls, buff, offset):
"""
Given a buffer and offset, returns the parsed value and new offset.
Calls `parse()` on the given buffer for each sub-part in order and
creates a new instance with the results.
"""
values = {}
for name, part in cls.parts:
value, new_offset = part.parse(buff, offset)
values[name] = value
offset = new_offset
return cls(**values), offset |
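
A self-contained sketch of the parts protocol this classmethod expects: each part exposes parse(buff, offset) -> (value, new_offset), and the composite simply chains them. UInt8 and Point are made-up illustrations:

import struct

class UInt8:
    @staticmethod
    def parse(buff, offset):
        return struct.unpack_from("B", buff, offset)[0], offset + 1

class Point:
    parts = [("x", UInt8), ("y", UInt8)]

    def __init__(self, x, y):
        self.x, self.y = x, y

    @classmethod
    def parse(cls, buff, offset):
        # Same loop as above: delegate to each sub-part in declared order.
        values = {}
        for name, part in cls.parts:
            value, offset = part.parse(buff, offset)
            values[name] = value
        return cls(**values), offset

pt, end = Point.parse(b"\x03\x07rest", 0)
print(pt.x, pt.y, end)   # 3 7 2
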
def speakerDiarizationEvaluateScript(folder_name, ldas):
'''
This function prints the cluster purity and speaker purity for
each WAV file stored in a provided directory (.SEGMENT files
are needed as ground-truth)
ARGUMENTS:
- folder_name: the full path of the folder where the WAV and
SEGMENT (ground-truth) files are stored
- ldas: a list of LDA dimensions (0 for no LDA)
'''
types = ('*.wav', )
wavFilesList = []
for files in types:
wavFilesList.extend(glob.glob(os.path.join(folder_name, files)))
wavFilesList = sorted(wavFilesList)
# get number of unique speakers per file (from ground-truth)
N = []
for wav_file in wavFilesList:
gt_file = wav_file.replace('.wav', '.segments');
if os.path.isfile(gt_file):
[seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)
N.append(len(list(set(seg_labs))))
else:
N.append(-1)
for l in ldas:
print("LDA = {0:d}".format(l))
for i, wav_file in enumerate(wavFilesList):
speakerDiarization(wav_file, N[i], 2.0, 0.2, 0.05, l, plot_res=False)
print | This function prints the cluster purity and speaker purity for
each WAV file stored in a provided directory (.SEGMENT files
are needed as ground-truth)
ARGUMENTS:
- folder_name: the full path of the folder where the WAV and
SEGMENT (ground-truth) files are stored
- ldas: a list of LDA dimensions (0 for no LDA) | Below is the the instruction that describes the task:
### Input:
This function prints the cluster purity and speaker purity for
each WAV file stored in a provided directory (.SEGMENT files
are needed as ground-truth)
ARGUMENTS:
- folder_name: the full path of the folder where the WAV and
SEGMENT (ground-truth) files are stored
- ldas: a list of LDA dimensions (0 for no LDA)
### Response:
def speakerDiarizationEvaluateScript(folder_name, ldas):
'''
This function prints the cluster purity and speaker purity for
each WAV file stored in a provided directory (.SEGMENT files
are needed as ground-truth)
ARGUMENTS:
- folder_name: the full path of the folder where the WAV and
SEGMENT (ground-truth) files are stored
- ldas: a list of LDA dimensions (0 for no LDA)
'''
types = ('*.wav', )
wavFilesList = []
for files in types:
wavFilesList.extend(glob.glob(os.path.join(folder_name, files)))
wavFilesList = sorted(wavFilesList)
# get number of unique speakers per file (from ground-truth)
N = []
for wav_file in wavFilesList:
gt_file = wav_file.replace('.wav', '.segments');
if os.path.isfile(gt_file):
[seg_start, seg_end, seg_labs] = readSegmentGT(gt_file)
N.append(len(list(set(seg_labs))))
else:
N.append(-1)
for l in ldas:
print("LDA = {0:d}".format(l))
for i, wav_file in enumerate(wavFilesList):
speakerDiarization(wav_file, N[i], 2.0, 0.2, 0.05, l, plot_res=False)
print |
def _is_collinear(self, x, y):
"""
Checks if first three points are collinear
"""
pts = np.column_stack([x[:3], y[:3], np.ones(3)])
return np.linalg.det(pts) == 0.0 | Checks if first three points are collinear | Below is the the instruction that describes the task:
### Input:
Checks if first three points are collinear
### Response:
def _is_collinear(self, x, y):
"""
Checks if first three points are collinear
"""
pts = np.column_stack([x[:3], y[:3], np.ones(3)])
return np.linalg.det(pts) == 0.0 |
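
The same determinant test as a standalone function, with two quick checks; note the exact == 0.0 comparison is fragile for general floats, so a tolerance (e.g. np.isclose) is often safer in practice:

import numpy as np

def is_collinear(x, y):
    pts = np.column_stack([x[:3], y[:3], np.ones(3)])
    return np.linalg.det(pts) == 0.0

print(is_collinear(np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0, 2.0])))  # True
print(is_collinear(np.array([0.0, 1.0, 2.0]), np.array([0.0, 1.0, 3.0])))  # False
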
def _unzip(self, src, dst, scene, force_unzip=False):
""" Unzip tar files """
self.output("Unzipping %s - It might take some time" % scene, normal=True, arrow=True)
try:
# check if file is already unzipped, skip
if isdir(dst) and not force_unzip:
self.output('%s is already unzipped.' % scene, normal=True, color='green', indent=1)
return
else:
tar = tarfile.open(src, 'r')
tar.extractall(path=dst)
tar.close()
except tarfile.ReadError:
check_create_folder(dst)
subprocess.check_call(['tar', '-xf', src, '-C', dst]) | Unzip tar files | Below is the the instruction that describes the task:
### Input:
Unzip tar files
### Response:
def _unzip(self, src, dst, scene, force_unzip=False):
""" Unzip tar files """
self.output("Unzipping %s - It might take some time" % scene, normal=True, arrow=True)
try:
# check if file is already unzipped, skip
if isdir(dst) and not force_unzip:
self.output('%s is already unzipped.' % scene, normal=True, color='green', indent=1)
return
else:
tar = tarfile.open(src, 'r')
tar.extractall(path=dst)
tar.close()
except tarfile.ReadError:
check_create_folder(dst)
subprocess.check_call(['tar', '-xf', src, '-C', dst]) |
def process_transform(self, tag_value, resource_set):
"""
Transform tag value
- Collect value from tag
- Transform Tag value
- Assign new value for key
"""
self.log.info("Transforming tag value on %s instances" % (
len(resource_set)))
key = self.data.get('key')
c = utils.local_session(self.manager.session_factory).client('ec2')
self.create_tag(
c,
[r[self.id_key] for r in resource_set if len(
r.get('Tags', [])) < 50],
key, tag_value) | Transform tag value
- Collect value from tag
- Transform Tag value
- Assign new value for key | Below is the the instruction that describes the task:
### Input:
Transform tag value
- Collect value from tag
- Transform Tag value
- Assign new value for key
### Response:
def process_transform(self, tag_value, resource_set):
"""
Transform tag value
- Collect value from tag
- Transform Tag value
- Assign new value for key
"""
self.log.info("Transforming tag value on %s instances" % (
len(resource_set)))
key = self.data.get('key')
c = utils.local_session(self.manager.session_factory).client('ec2')
self.create_tag(
c,
[r[self.id_key] for r in resource_set if len(
r.get('Tags', [])) < 50],
key, tag_value) |
def read_from_buffer(cls, buf, identifier_str=None):
"""Load the context from a buffer."""
try:
return cls._read_from_buffer(buf, identifier_str)
except Exception as e:
cls._load_error(e, identifier_str) | Load the context from a buffer. | Below is the the instruction that describes the task:
### Input:
Load the context from a buffer.
### Response:
def read_from_buffer(cls, buf, identifier_str=None):
"""Load the context from a buffer."""
try:
return cls._read_from_buffer(buf, identifier_str)
except Exception as e:
cls._load_error(e, identifier_str) |
def get_unused_node_id(graph, initial_guess='unknown', _format='{}<%d>'):
"""
Finds an unused node id in `graph`.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param initial_guess:
Initial node id guess.
:type initial_guess: str, optional
:param _format:
Format to generate the new node id if the given is already used.
:type _format: str, optional
:return:
An unused node id.
:rtype: str
"""
has_node = graph.has_node # Namespace shortcut for speed.
n = counter() # Counter.
node_id_format = _format.format(initial_guess) # Node id format.
node_id = initial_guess # Initial guess.
while has_node(node_id): # Check if node id is used.
node_id = node_id_format % n() # Guess.
return node_id | Finds an unused node id in `graph`.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param initial_guess:
Initial node id guess.
:type initial_guess: str, optional
:param _format:
Format to generate the new node id if the given is already used.
:type _format: str, optional
:return:
An unused node id.
:rtype: str | Below is the the instruction that describes the task:
### Input:
Finds an unused node id in `graph`.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param initial_guess:
Initial node id guess.
:type initial_guess: str, optional
:param _format:
Format to generate the new node id if the given is already used.
:type _format: str, optional
:return:
An unused node id.
:rtype: str
### Response:
def get_unused_node_id(graph, initial_guess='unknown', _format='{}<%d>'):
"""
Finds an unused node id in `graph`.
:param graph:
A directed graph.
:type graph: networkx.classes.digraph.DiGraph
:param initial_guess:
Initial node id guess.
:type initial_guess: str, optional
:param _format:
Format to generate the new node id if the given is already used.
:type _format: str, optional
:return:
An unused node id.
:rtype: str
"""
has_node = graph.has_node # Namespace shortcut for speed.
n = counter() # Counter.
node_id_format = _format.format(initial_guess) # Node id format.
node_id = initial_guess # Initial guess.
while has_node(node_id): # Check if node id is used.
node_id = node_id_format % n() # Guess.
return node_id |
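
Usage with a small networkx graph; the existing names force the helper onto its formatted guesses (the exact numeric suffix depends on where the counter helper starts):

import networkx as nx

graph = nx.DiGraph()
graph.add_nodes_from(["unknown", "unknown<0>"])
print(get_unused_node_id(graph, initial_guess="unknown"))   # e.g. 'unknown<1>'
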
def updateWPText(self):
'''Updates the current waypoint and distance to it.'''
self.wpText.set_position((self.leftPos+(1.5*self.vertSize/10.0),0.97-(1.5*self.vertSize)+(0.5*self.vertSize/10.0)))
self.wpText.set_size(self.fontSize)
if type(self.nextWPTime) is str:
self.wpText.set_text('%.f/%.f\n(%.f m, ~ s)' % (self.currentWP,self.finalWP,self.wpDist))
else:
self.wpText.set_text('%.f/%.f\n(%.f m, %.f s)' % (self.currentWP,self.finalWP,self.wpDist,self.nextWPTime)) | Updates the current waypoint and distance to it. | Below is the the instruction that describes the task:
### Input:
Updates the current waypoint and distance to it.
### Response:
def updateWPText(self):
'''Updates the current waypoint and distance to it.'''
self.wpText.set_position((self.leftPos+(1.5*self.vertSize/10.0),0.97-(1.5*self.vertSize)+(0.5*self.vertSize/10.0)))
self.wpText.set_size(self.fontSize)
if type(self.nextWPTime) is str:
self.wpText.set_text('%.f/%.f\n(%.f m, ~ s)' % (self.currentWP,self.finalWP,self.wpDist))
else:
self.wpText.set_text('%.f/%.f\n(%.f m, %.f s)' % (self.currentWP,self.finalWP,self.wpDist,self.nextWPTime)) |
def queue_files(dirpath, queue):
"""Add files in a directory to a queue"""
for root, _, files in os.walk(os.path.abspath(dirpath)):
if not files:
continue
for filename in files:
queue.put(os.path.join(root, filename)) | Add files in a directory to a queue | Below is the the instruction that describes the task:
### Input:
Add files in a directory to a queue
### Response:
def queue_files(dirpath, queue):
"""Add files in a directory to a queue"""
for root, _, files in os.walk(os.path.abspath(dirpath)):
if not files:
continue
for filename in files:
queue.put(os.path.join(root, filename)) |
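
A small producer/consumer sketch: fill the queue from one directory walk, then drain it from a few worker threads:

import os
import queue
import threading

q = queue.Queue()
queue_files(".", q)

def worker():
    while True:
        try:
            path = q.get_nowait()
        except queue.Empty:
            return
        print(os.path.getsize(path), path)

threads = [threading.Thread(target=worker) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
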
def _remove_vlan_from_all_sp_templates(self, handle, vlan_id, ucsm_ip):
"""Deletes VLAN config from all SP Templates that have it."""
sp_template_info_list = (
CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].sp_template_list.values())
vlan_name = self.make_vlan_name(vlan_id)
virtio_port_list = (
CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
try:
# sp_template_info_list is a list of tuples.
# Each tuple is of the form :
# (ucsm_ip, sp_template_path, sp_template)
for sp_template_info in sp_template_info_list:
sp_template_path = sp_template_info.path
sp_template = sp_template_info.name
sp_template_full_path = (sp_template_path +
const.SP_TEMPLATE_PREFIX + sp_template)
obj = handle.query_dn(sp_template_full_path)
if not obj:
LOG.error('UCS Manager network driver could not '
'find Service Profile template %s',
sp_template_full_path)
continue
eth_port_paths = ["%s%s" % (sp_template_full_path, ep)
for ep in virtio_port_list]
for eth_port_path in eth_port_paths:
eth = handle.query_dn(eth_port_path)
if eth:
vlan_path = (eth_port_path +
const.VLAN_PATH_PREFIX + vlan_name)
vlan = handle.query_dn(vlan_path)
if vlan:
# Found vlan config. Now remove it.
handle.remove_mo(vlan)
else:
LOG.debug('UCS Manager network driver did not '
'find VLAN %s at %s', vlan_name, eth_port_path)
else:
LOG.debug('UCS Manager network driver did not '
'find ethernet port at %s', eth_port_path)
handle.commit()
return True
except Exception as e:
# Raise a Neutron exception. Include a description of
# the original exception.
raise cexc.UcsmConfigDeleteFailed(config=vlan_id,
ucsm_ip=ucsm_ip,
exc=e) | Deletes VLAN config from all SP Templates that have it. | Below is the the instruction that describes the task:
### Input:
Deletes VLAN config from all SP Templates that have it.
### Response:
def _remove_vlan_from_all_sp_templates(self, handle, vlan_id, ucsm_ip):
"""Deletes VLAN config from all SP Templates that have it."""
sp_template_info_list = (
CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].sp_template_list.values())
vlan_name = self.make_vlan_name(vlan_id)
virtio_port_list = (
CONF.ml2_cisco_ucsm.ucsms[ucsm_ip].ucsm_virtio_eth_ports)
try:
# sp_template_info_list is a list of tuples.
# Each tuple is of the form :
# (ucsm_ip, sp_template_path, sp_template)
for sp_template_info in sp_template_info_list:
sp_template_path = sp_template_info.path
sp_template = sp_template_info.name
sp_template_full_path = (sp_template_path +
const.SP_TEMPLATE_PREFIX + sp_template)
obj = handle.query_dn(sp_template_full_path)
if not obj:
LOG.error('UCS Manager network driver could not '
'find Service Profile template %s',
sp_template_full_path)
continue
eth_port_paths = ["%s%s" % (sp_template_full_path, ep)
for ep in virtio_port_list]
for eth_port_path in eth_port_paths:
eth = handle.query_dn(eth_port_path)
if eth:
vlan_path = (eth_port_path +
const.VLAN_PATH_PREFIX + vlan_name)
vlan = handle.query_dn(vlan_path)
if vlan:
# Found vlan config. Now remove it.
handle.remove_mo(vlan)
else:
LOG.debug('UCS Manager network driver did not '
'find VLAN %s at %s', vlan_name, eth_port_path)
else:
LOG.debug('UCS Manager network driver did not '
'find ethernet port at %s', eth_port_path)
handle.commit()
return True
except Exception as e:
# Raise a Neutron exception. Include a description of
# the original exception.
raise cexc.UcsmConfigDeleteFailed(config=vlan_id,
ucsm_ip=ucsm_ip,
exc=e) |
def get_notices(self):
"""
        [deprecated] Use the `get_notice()` and `get_notice_content()` methods instead
"""
result = []
        # Fetch the bulletin board notice list
for date, title in self.get_notice().items():
content = self.get_notice_content(date)
result.append([date, title, content])
        # Return the result
        return result | [deprecated] Use the `get_notice()` and `get_notice_content()` methods instead | Below is the the instruction that describes the task:
### Input:
[deprecated] Use the `get_notice()` and `get_notice_content()` methods instead
### Response:
def get_notices(self):
"""
        [deprecated] Use the `get_notice()` and `get_notice_content()` methods instead
"""
result = []
        # Fetch the bulletin board notice list
for date, title in self.get_notice().items():
content = self.get_notice_content(date)
result.append([date, title, content])
        # Return the result
return result |
def listRoleIds(self, *args, **kwargs):
"""
List Role IDs
If no limit is given, the roleIds of all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-role-ids-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listRoleIds"], *args, **kwargs) | List Role IDs
If no limit is given, the roleIds of all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-role-ids-response.json#``
This method is ``stable`` | Below is the the instruction that describes the task:
### Input:
List Role IDs
If no limit is given, the roleIds of all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-role-ids-response.json#``
This method is ``stable``
### Response:
def listRoleIds(self, *args, **kwargs):
"""
List Role IDs
If no limit is given, the roleIds of all roles are returned. Since this
list may become long, callers can use the `limit` and `continuationToken`
query arguments to page through the responses.
This method gives output: ``v1/list-role-ids-response.json#``
This method is ``stable``
"""
return self._makeApiCall(self.funcinfo["listRoleIds"], *args, **kwargs) |
def push_blob(self,
filename=None,
progress=None,
data=None, digest=None,
check_exists=True):
# pylint: disable=too-many-arguments
"""
Upload a file to the registry and return its (SHA-256) hash.
The registry is content-addressable so the file's content (aka blob)
can be retrieved later by passing the hash to :meth:`pull_blob`.
:param filename: File to upload.
:type filename: str
:param data: Data to upload if ``filename`` isn't given. The data is uploaded in chunks and you must also pass ``digest``.
:type data: Generator or iterator
:param digest: Hash of the data to be uploaded in ``data``, if specified.
:type digest: str (hex-encoded SHA-256, prefixed by ``sha256:``)
:param progress: Optional function to call as the upload progresses. The function will be called with the hash of the file's content (or ``digest``), the blob just read from the file (or chunk from ``data``) and if ``filename`` is specified the total size of the file.
:type progress: function(dgst, chunk, size)
:param check_exists: Whether to check if a blob with the same hash already exists in the registry. If so, it won't be uploaded again.
:type check_exists: bool
:rtype: str
:returns: Hash of file's content.
"""
if filename is None:
dgst = digest
else:
dgst = hash_file(filename)
if check_exists:
try:
self._request('head', 'blobs/' + dgst)
return dgst
except requests.exceptions.HTTPError as ex:
# pylint: disable=no-member
if ex.response.status_code != requests.codes.not_found:
raise
r = self._request('post', 'blobs/uploads/')
upload_url = r.headers['Location']
url_parts = list(urlparse.urlparse(upload_url))
query = urlparse.parse_qs(url_parts[4])
query.update({'digest': dgst})
url_parts[4] = urlencode(query, True)
url_parts[0] = 'http' if self._insecure else 'https'
upload_url = urlparse.urlunparse(url_parts)
if filename is None:
data = _ReportingChunks(dgst, data, progress) if progress else data
self._base_request('put', upload_url, data=data)
else:
with open(filename, 'rb') as f:
data = _ReportingFile(dgst, f, progress) if progress else f
self._base_request('put', upload_url, data=data)
return dgst | Upload a file to the registry and return its (SHA-256) hash.
The registry is content-addressable so the file's content (aka blob)
can be retrieved later by passing the hash to :meth:`pull_blob`.
:param filename: File to upload.
:type filename: str
:param data: Data to upload if ``filename`` isn't given. The data is uploaded in chunks and you must also pass ``digest``.
:type data: Generator or iterator
:param digest: Hash of the data to be uploaded in ``data``, if specified.
:type digest: str (hex-encoded SHA-256, prefixed by ``sha256:``)
:param progress: Optional function to call as the upload progresses. The function will be called with the hash of the file's content (or ``digest``), the blob just read from the file (or chunk from ``data``) and if ``filename`` is specified the total size of the file.
:type progress: function(dgst, chunk, size)
:param check_exists: Whether to check if a blob with the same hash already exists in the registry. If so, it won't be uploaded again.
:type check_exists: bool
:rtype: str
:returns: Hash of file's content. | Below is the the instruction that describes the task:
### Input:
Upload a file to the registry and return its (SHA-256) hash.
The registry is content-addressable so the file's content (aka blob)
can be retrieved later by passing the hash to :meth:`pull_blob`.
:param filename: File to upload.
:type filename: str
:param data: Data to upload if ``filename`` isn't given. The data is uploaded in chunks and you must also pass ``digest``.
:type data: Generator or iterator
:param digest: Hash of the data to be uploaded in ``data``, if specified.
:type digest: str (hex-encoded SHA-256, prefixed by ``sha256:``)
:param progress: Optional function to call as the upload progresses. The function will be called with the hash of the file's content (or ``digest``), the blob just read from the file (or chunk from ``data``) and if ``filename`` is specified the total size of the file.
:type progress: function(dgst, chunk, size)
:param check_exists: Whether to check if a blob with the same hash already exists in the registry. If so, it won't be uploaded again.
:type check_exists: bool
:rtype: str
:returns: Hash of file's content.
### Response:
def push_blob(self,
filename=None,
progress=None,
data=None, digest=None,
check_exists=True):
# pylint: disable=too-many-arguments
"""
Upload a file to the registry and return its (SHA-256) hash.
The registry is content-addressable so the file's content (aka blob)
can be retrieved later by passing the hash to :meth:`pull_blob`.
:param filename: File to upload.
:type filename: str
:param data: Data to upload if ``filename`` isn't given. The data is uploaded in chunks and you must also pass ``digest``.
:type data: Generator or iterator
:param digest: Hash of the data to be uploaded in ``data``, if specified.
:type digest: str (hex-encoded SHA-256, prefixed by ``sha256:``)
:param progress: Optional function to call as the upload progresses. The function will be called with the hash of the file's content (or ``digest``), the blob just read from the file (or chunk from ``data``) and if ``filename`` is specified the total size of the file.
:type progress: function(dgst, chunk, size)
:param check_exists: Whether to check if a blob with the same hash already exists in the registry. If so, it won't be uploaded again.
:type check_exists: bool
:rtype: str
:returns: Hash of file's content.
"""
if filename is None:
dgst = digest
else:
dgst = hash_file(filename)
if check_exists:
try:
self._request('head', 'blobs/' + dgst)
return dgst
except requests.exceptions.HTTPError as ex:
# pylint: disable=no-member
if ex.response.status_code != requests.codes.not_found:
raise
r = self._request('post', 'blobs/uploads/')
upload_url = r.headers['Location']
url_parts = list(urlparse.urlparse(upload_url))
query = urlparse.parse_qs(url_parts[4])
query.update({'digest': dgst})
url_parts[4] = urlencode(query, True)
url_parts[0] = 'http' if self._insecure else 'https'
upload_url = urlparse.urlunparse(url_parts)
if filename is None:
data = _ReportingChunks(dgst, data, progress) if progress else data
self._base_request('put', upload_url, data=data)
else:
with open(filename, 'rb') as f:
data = _ReportingFile(dgst, f, progress) if progress else f
self._base_request('put', upload_url, data=data)
return dgst |
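A minimal usage sketch for ``push_blob``, assuming the client class is the ``DXF`` class from the ``dxf`` package; the registry host and repository name are placeholders, and only the ``push_blob`` signature and its return contract come from the code above:

from dxf import DXF  # assumed package exposing the client class that defines push_blob

client = DXF('registry.example.com', 'my/repo')  # placeholder host and repository

# Upload a file; the return value is the sha256 digest of its content,
# which can later be passed to pull_blob().
digest = client.push_blob('layer.tar.gz')

# Or stream already-hashed data in chunks; digest must then be supplied explicitly.
chunks = iter([b'part-1', b'part-2'])
digest = client.push_blob(data=chunks, digest='sha256:<hex digest of the chunks>')
print(digest)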
def get_notification_commands(self, notifways, n_type, command_name=False):
"""Get notification commands for object type
:param notifways: list of alignak.objects.NotificationWay objects
:type notifways: NotificationWays
:param n_type: object type (host or service)
:type n_type: string
:param command_name: True to update the inner property with the name of the command,
False to update with the Command objects list
:type command_name: bool
:return: command list
:rtype: list[alignak.objects.command.Command]
"""
res = []
for notifway_id in self.notificationways:
notifway = notifways[notifway_id]
res.extend(notifway.get_notification_commands(n_type))
# Update inner notification commands property with command name or command
if command_name:
setattr(self, n_type + '_notification_commands', [c.get_name() for c in res])
else:
setattr(self, n_type + '_notification_commands', res)
return res | Get notification commands for object type
:param notifways: list of alignak.objects.NotificationWay objects
:type notifways: NotificationWays
:param n_type: object type (host or service)
:type n_type: string
:param command_name: True to update the inner property with the name of the command,
False to update with the Command objects list
:type command_name: bool
:return: command list
:rtype: list[alignak.objects.command.Command] | Below is the instruction that describes the task:
### Input:
Get notification commands for object type
:param notifways: list of alignak.objects.NotificationWay objects
:type notifways: NotificationWays
:param n_type: object type (host or service)
:type n_type: string
:param command_name: True to update the inner property with the name of the command,
False to update with the Command objects list
:type command_name: bool
:return: command list
:rtype: list[alignak.objects.command.Command]
### Response:
def get_notification_commands(self, notifways, n_type, command_name=False):
"""Get notification commands for object type
:param notifways: list of alignak.objects.NotificationWay objects
:type notifways: NotificationWays
:param n_type: object type (host or service)
:type n_type: string
:param command_name: True to update the inner property with the name of the command,
False to update with the Command objects list
:type command_name: bool
:return: command list
:rtype: list[alignak.objects.command.Command]
"""
res = []
for notifway_id in self.notificationways:
notifway = notifways[notifway_id]
res.extend(notifway.get_notification_commands(n_type))
# Update inner notification commands property with command name or command
if command_name:
setattr(self, n_type + '_notification_commands', [c.get_name() for c in res])
else:
setattr(self, n_type + '_notification_commands', res)
return res |
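A hedged sketch of how ``get_notification_commands`` might be called once an Alignak configuration is loaded; the ``contact`` and ``notifways`` variables are assumed to exist and are not defined by the snippet above:

# 'contact' is an assumed item whose 'notificationways' attribute lists ids into the
# 'notifways' collection (alignak.objects.NotificationWays); neither is defined here.
host_commands = contact.get_notification_commands(notifways, 'host')

# With command_name=True only the command names are cached on the contact:
service_command_names = contact.get_notification_commands(
    notifways, 'service', command_name=True)

# After the calls, contact.host_notification_commands and
# contact.service_notification_commands hold the values set by setattr() above.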
def check(self):
""" Check if there are records that are ready to start and return them if there are any
:return: tuple of WScheduleRecord or None (if there are no tasks to start)
"""
if self.__next_start is not None:
utc_now = utc_datetime()
if utc_now >= self.__next_start:
result = []
for task_source in self.__next_sources:
records = task_source.has_records()
if records is not None:
result.extend(records)
self.__update_all()
if len(result) > 0:
return tuple(result) | Check if there are records that are ready to start and return them if there are any
:return: tuple of WScheduleRecord or None (if there are no tasks to start) | Below is the instruction that describes the task:
### Input:
Check if there are records that are ready to start and return them if there are any
:return: tuple of WScheduleRecord or None (if there are no tasks to start)
### Response:
def check(self):
""" Check if there are records that are ready to start and return them if there are any
:return: tuple of WScheduleRecord or None (if there are no tasks to start)
"""
if self.__next_start is not None:
utc_now = utc_datetime()
if utc_now >= self.__next_start:
result = []
for task_source in self.__next_sources:
records = task_source.has_records()
if records is not None:
result.extend(records)
self.__update_all()
if len(result) > 0:
return tuple(result) |
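A hedged sketch of a polling loop around ``check()``, assuming ``scheduler`` is an already-configured instance of the class defining it; only the return contract (a tuple of records or ``None``) comes from the docstring above:

import time

while True:
    ready = scheduler.check()  # 'scheduler' is an assumed, pre-populated instance
    if ready is not None:
        for record in ready:    # each entry behaves like a WScheduleRecord
            start_task(record)  # hypothetical handler, not defined in the source
    time.sleep(1)  # arbitrary poll interval for the sketch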