code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k) |
---|---|---|
def create(cls, parent=None, **kwargs):
"""Create an object and return it"""
if parent is None:
raise Exception("Parent class is required")
route = copy(parent.route)
if cls.ID_NAME is not None:
route[cls.ID_NAME] = ""
obj = cls(key=parent.key, route=route, config=parent.config)
start = datetime.now()
response = requests.post(obj._url(), auth=(obj.key, ""), data=kwargs)
cls._delay_for_ratelimits(start)
if response.status_code not in cls.TRUTHY_CODES:
return cls._handle_request_exception(response)
# No envelope on post requests
data = response.json()
obj.route[obj.ID_NAME] = data.get("id", data.get(obj.ID_NAME))
obj.data = data
return obj | Create an object and return it | Below is the instruction that describes the task:
### Input:
Create an object and return it
### Response:
def create(cls, parent=None, **kwargs):
"""Create an object and return it"""
if parent is None:
raise Exception("Parent class is required")
route = copy(parent.route)
if cls.ID_NAME is not None:
route[cls.ID_NAME] = ""
obj = cls(key=parent.key, route=route, config=parent.config)
start = datetime.now()
response = requests.post(obj._url(), auth=(obj.key, ""), data=kwargs)
cls._delay_for_ratelimits(start)
if response.status_code not in cls.TRUTHY_CODES:
return cls._handle_request_exception(response)
# No envelope on post requests
data = response.json()
obj.route[obj.ID_NAME] = data.get("id", data.get(obj.ID_NAME))
obj.data = data
return obj |
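A self-contained sketch of the same POST-then-rate-limit pattern used by create(), with httpbin.org as a stand-in endpoint; the accepted status codes and the minimum request interval are assumptions for illustration, not values from the source.

import time
from datetime import datetime
import requests

TRUTHY_CODES = {200, 201}        # assumed set of acceptable status codes
MIN_INTERVAL = 0.5               # assumed rate-limit spacing, in seconds

def delay_for_ratelimits(start):
    # Sleep for whatever is left of the minimum interval between requests.
    elapsed = (datetime.now() - start).total_seconds()
    if elapsed < MIN_INTERVAL:
        time.sleep(MIN_INTERVAL - elapsed)

start = datetime.now()
response = requests.post("https://httpbin.org/post", auth=("api-key", ""), data={"name": "demo"})
delay_for_ratelimits(start)
if response.status_code in TRUTHY_CODES:
    data = response.json()       # no envelope on POST responses
    print(data["form"])          # httpbin echoes back the submitted form fields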
def from_csv(cls, path):
"""
Get box vectors from comma-separated values in file `path`.
The csv file must contain only one line, which in turn can contain
three values (orthogonal vectors) or nine values (triclinic box).
The values should be in nanometers.
Parameters
----------
path : str
Path to CSV file
Returns
-------
vectors : simtk.unit.Quantity([3, 3], unit=nanometers)
"""
with open(path) as f:
fields = list(map(float, next(f).split(',')))
if len(fields) == 3:
return u.Quantity([[fields[0], 0, 0],
[0, fields[1], 0],
[0, 0, fields[2]]], unit=u.nanometers)
elif len(fields) == 9:
return u.Quantity([fields[0:3],
fields[3:6],
fields[6:9]], unit=u.nanometers)
else:
raise ValueError('This type of CSV is not supported. Please '
'provide a comma-separated list of three or nine '
'floats in a single-line file.') | Get box vectors from comma-separated values in file `path`.
The csv file must contain only one line, which in turn can contain
three values (orthogonal vectors) or nine values (triclinic box).
The values should be in nanometers.
Parameters
----------
path : str
Path to CSV file
Returns
-------
vectors : simtk.unit.Quantity([3, 3], unit=nanometers) | Below is the instruction that describes the task:
### Input:
Get box vectors from comma-separated values in file `path`.
The csv file must contain only one line, which in turn can contain
three values (orthogonal vectors) or nine values (triclinic box).
The values should be in nanometers.
Parameters
----------
path : str
Path to CSV file
Returns
-------
vectors : simtk.unit.Quantity([3, 3], unit=nanometers)
### Response:
def from_csv(cls, path):
"""
Get box vectors from comma-separated values in file `path`.
The csv file must contain only one line, which in turn can contain
three values (orthogonal vectors) or nine values (triclinic box).
The values should be in nanometers.
Parameters
----------
path : str
Path to CSV file
Returns
-------
vectors : simtk.unit.Quantity([3, 3], unit=nanometers)
"""
with open(path) as f:
fields = list(map(float, next(f).split(',')))
if len(fields) == 3:
return u.Quantity([[fields[0], 0, 0],
[0, fields[1], 0],
[0, 0, fields[2]]], unit=u.nanometers)
elif len(fields) == 9:
return u.Quantity([fields[0:3],
fields[3:6],
fields[6:9]], unit=u.nanometers)
else:
raise ValueError('This type of CSV is not supported. Please '
'provide a comma-separated list of three or nine '
'floats in a single-line file.') |
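A minimal round-trip sketch of the CSV layout this method expects: three comma-separated lengths in nanometers on a single line. It assumes OpenMM's simtk.unit package is installed; the file name is arbitrary.

from simtk import unit as u

# Write a single-line CSV describing an orthogonal 4 x 5 x 6 nm box.
with open("box.csv", "w") as f:
    f.write("4.0,5.0,6.0\n")

# Parse it the same way from_csv() does.
with open("box.csv") as f:
    fields = list(map(float, next(f).split(',')))
vectors = u.Quantity([[fields[0], 0, 0],
                      [0, fields[1], 0],
                      [0, 0, fields[2]]], unit=u.nanometers)
print(vectors)   # a (3, 3) Quantity in nanometers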
def _bucket_events(self, event_iterable):
"""
Convert an iterable of events into an iterable of lists of events
per bucket.
"""
current_bucket_time = None
current_bucket_events = None
for event in event_iterable:
event_bucket_time = self._bucket_time(event[TIMESTAMP_FIELD])
if current_bucket_time is None or current_bucket_time < event_bucket_time:
if current_bucket_events is not None:
yield current_bucket_events
current_bucket_time = event_bucket_time
current_bucket_events = []
current_bucket_events.append(event)
if current_bucket_events is not None and current_bucket_events != []:
yield current_bucket_events | Convert an iterable of events into an iterable of lists of events
per bucket. | Below is the instruction that describes the task:
### Input:
Convert an iterable of events into an iterable of lists of events
per bucket.
### Response:
def _bucket_events(self, event_iterable):
"""
Convert an iterable of events into an iterable of lists of events
per bucket.
"""
current_bucket_time = None
current_bucket_events = None
for event in event_iterable:
event_bucket_time = self._bucket_time(event[TIMESTAMP_FIELD])
if current_bucket_time is None or current_bucket_time < event_bucket_time:
if current_bucket_events is not None:
yield current_bucket_events
current_bucket_time = event_bucket_time
current_bucket_events = []
current_bucket_events.append(event)
if current_bucket_events is not None and current_bucket_events != []:
yield current_bucket_events |
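A standalone sketch of the same bucketing idea. The timestamp field name and the hour-wide bucket are illustrative assumptions; the real class derives both from its own configuration.

TIMESTAMP_FIELD = 'time'          # assumed field name

def bucket_time(timestamp, width=3600):
    # Map a timestamp to the start of its (hour-wide) bucket.
    return timestamp - (timestamp % width)

def bucket_events(events, width=3600):
    current_time, current_events = None, None
    for event in events:
        event_time = bucket_time(event[TIMESTAMP_FIELD], width)
        if current_time is None or current_time < event_time:
            if current_events is not None:
                yield current_events
            current_time, current_events = event_time, []
        current_events.append(event)
    if current_events:
        yield current_events

events = [{'time': 10, 'v': 'a'}, {'time': 20, 'v': 'b'}, {'time': 4000, 'v': 'c'}]
print([len(bucket) for bucket in bucket_events(events)])   # [2, 1]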
def deliver(self, message, to):
"""
Deliver our message
Arguments:
- `message`: MIMEMultipart
Return: None
Exceptions: None
"""
# Send the message via local SMTP server.
s = smtplib.SMTP(self.host, self.port)
# sendmail function takes 3 arguments: sender's address, recipient's address
# and message to send - here it is sent as one string.
s.sendmail(message['From'], to, message.as_string())
s.quit()
return | Deliver our message
Arguments:
- `message`: MIMEMultipart
Return: None
Exceptions: None | Below is the instruction that describes the task:
### Input:
Deliver our message
Arguments:
- `message`: MIMEMultipart
Return: None
Exceptions: None
### Response:
def deliver(self, message, to):
"""
Deliver our message
Arguments:
- `message`: MIMEMultipart
Return: None
Exceptions: None
"""
# Send the message via local SMTP server.
s = smtplib.SMTP(self.host, self.port)
# sendmail function takes 3 arguments: sender's address, recipient's address
# and message to send - here it is sent as one string.
s.sendmail(message['From'], to, message.as_string())
s.quit()
return |
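A usage sketch for deliver(): build a MIMEMultipart message and hand it to a local SMTP server. The addresses are placeholders, and the host/port assume a local debugging server (for example one started with python -m smtpd -n -c DebuggingServer localhost:1025).

import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

message = MIMEMultipart()
message['From'] = 'sender@example.com'
message['To'] = 'recipient@example.com'
message['Subject'] = 'Hello'
message.attach(MIMEText('Plain-text body', 'plain'))

# Same delivery steps as deliver() above, with host and port spelled out.
s = smtplib.SMTP('localhost', 1025)
s.sendmail(message['From'], ['recipient@example.com'], message.as_string())
s.quit()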
def cfloat64_array_to_numpy(cptr, length):
"""Convert a ctypes double pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
return np.fromiter(cptr, dtype=np.float64, count=length)
else:
raise RuntimeError('Expected double pointer') | Convert a ctypes double pointer array to a numpy array. | Below is the instruction that describes the task:
### Input:
Convert a ctypes double pointer array to a numpy array.
### Response:
def cfloat64_array_to_numpy(cptr, length):
"""Convert a ctypes double pointer array to a numpy array."""
if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
return np.fromiter(cptr, dtype=np.float64, count=length)
else:
raise RuntimeError('Expected double pointer') |
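A quick self-contained check of the converter: build a small ctypes array, cast it to a double pointer, and convert it. The function is repeated verbatim so the sketch runs on its own.

import ctypes
import numpy as np

def cfloat64_array_to_numpy(cptr, length):
    """Convert a ctypes double pointer array to a numpy array."""
    if isinstance(cptr, ctypes.POINTER(ctypes.c_double)):
        return np.fromiter(cptr, dtype=np.float64, count=length)
    else:
        raise RuntimeError('Expected double pointer')

values = (ctypes.c_double * 3)(1.5, 2.5, 3.5)
ptr = ctypes.cast(values, ctypes.POINTER(ctypes.c_double))
print(cfloat64_array_to_numpy(ptr, 3))   # [1.5 2.5 3.5]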
def _fix_slicing_order(self, outer_fields, inner_select, order, inner_table_name):
"""
Apply any necessary fixes to the outer_fields, inner_select, and order
strings due to slicing.
"""
# Using ROW_NUMBER requires an ordering
if order is None:
meta = self.query.get_meta()
column = meta.pk.db_column or meta.pk.get_attname()
order = '{0}.{1} ASC'.format(
inner_table_name,
self.connection.ops.quote_name(column),
)
else:
alias_id = 0
# remap order for injected subselect
new_order = []
for x in order.split(','):
# find the ordering direction
m = _re_find_order_direction.search(x)
if m:
direction = m.groups()[0]
else:
direction = 'ASC'
# remove the ordering direction
x = _re_find_order_direction.sub('', x)
# remove any namespacing or table name from the column name
col = x.rsplit('.', 1)[-1]
# Is the ordering column missing from the inner select?
# 'inner_select' contains the full query without the leading 'SELECT '.
# It's possible that this can get a false hit if the ordering
# column is used in the WHERE while not being in the SELECT. It's
# not worth the complexity to properly handle that edge case.
if x not in inner_select:
# Ordering requires the column to be selected by the inner select
alias_id += 1
# alias column name
col = '{left_sql_quote}{0}___o{1}{right_sql_quote}'.format(
col.strip(self.connection.ops.left_sql_quote+self.connection.ops.right_sql_quote),
alias_id,
left_sql_quote=self.connection.ops.left_sql_quote,
right_sql_quote=self.connection.ops.right_sql_quote,
)
# add alias to inner_select
inner_select = '({0}) AS {1}, {2}'.format(x, col, inner_select)
new_order.append('{0}.{1} {2}'.format(inner_table_name, col, direction))
order = ', '.join(new_order)
return outer_fields, inner_select, order | Apply any necessary fixes to the outer_fields, inner_select, and order
strings due to slicing. | Below is the instruction that describes the task:
### Input:
Apply any necessary fixes to the outer_fields, inner_select, and order
strings due to slicing.
### Response:
def _fix_slicing_order(self, outer_fields, inner_select, order, inner_table_name):
"""
Apply any necessary fixes to the outer_fields, inner_select, and order
strings due to slicing.
"""
# Using ROW_NUMBER requires an ordering
if order is None:
meta = self.query.get_meta()
column = meta.pk.db_column or meta.pk.get_attname()
order = '{0}.{1} ASC'.format(
inner_table_name,
self.connection.ops.quote_name(column),
)
else:
alias_id = 0
# remap order for injected subselect
new_order = []
for x in order.split(','):
# find the ordering direction
m = _re_find_order_direction.search(x)
if m:
direction = m.groups()[0]
else:
direction = 'ASC'
# remove the ordering direction
x = _re_find_order_direction.sub('', x)
# remove any namespacing or table name from the column name
col = x.rsplit('.', 1)[-1]
# Is the ordering column missing from the inner select?
# 'inner_select' contains the full query without the leading 'SELECT '.
# It's possible that this can get a false hit if the ordering
# column is used in the WHERE while not being in the SELECT. It's
# not worth the complexity to properly handle that edge case.
if x not in inner_select:
# Ordering requires the column to be selected by the inner select
alias_id += 1
# alias column name
col = '{left_sql_quote}{0}___o{1}{right_sql_quote}'.format(
col.strip(self.connection.ops.left_sql_quote+self.connection.ops.right_sql_quote),
alias_id,
left_sql_quote=self.connection.ops.left_sql_quote,
right_sql_quote=self.connection.ops.right_sql_quote,
)
# add alias to inner_select
inner_select = '({0}) AS {1}, {2}'.format(x, col, inner_select)
new_order.append('{0}.{1} {2}'.format(inner_table_name, col, direction))
order = ', '.join(new_order)
return outer_fields, inner_select, order |
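The snippet relies on a module-level regex, _re_find_order_direction, that is not shown. Below is a plausible definition and a small demonstration of the direction/column split it enables; both the pattern and the sample ORDER BY fragment are assumptions for illustration only.

import re

# Assumed pattern: capture a trailing ASC/DESC keyword on an ORDER BY term.
_re_find_order_direction = re.compile(r'\s+(ASC|DESC)\s*$', re.IGNORECASE)

term = '[myapp_book].[title] DESC'
m = _re_find_order_direction.search(term)
direction = m.groups()[0] if m else 'ASC'
column = _re_find_order_direction.sub('', term).rsplit('.', 1)[-1]
print(direction, column)    # DESC [title]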
def convert_to_adjacency_matrix(matrix):
"""
Converts transition matrix into adjacency matrix
:param matrix: The matrix to be converted
:returns: adjacency matrix
"""
for i in range(matrix.shape[0]):
if isspmatrix(matrix):
col = find(matrix[:,i])[2]
else:
col = matrix[:,i].T.tolist()[0]
coeff = max( Fraction(c).limit_denominator().denominator for c in col )
matrix[:,i] *= coeff
return matrix | Converts transition matrix into adjacency matrix
:param matrix: The matrix to be converted
:returns: adjacency matrix | Below is the instruction that describes the task:
### Input:
Converts transition matrix into adjacency matrix
:param matrix: The matrix to be converted
:returns: adjacency matrix
### Response:
def convert_to_adjacency_matrix(matrix):
"""
Converts transition matrix into adjacency matrix
:param matrix: The matrix to be converted
:returns: adjacency matrix
"""
for i in range(matrix.shape[0]):
if isspmatrix(matrix):
col = find(matrix[:,i])[2]
else:
col = matrix[:,i].T.tolist()[0]
coeff = max( Fraction(c).limit_denominator().denominator for c in col )
matrix[:,i] *= coeff
return matrix |
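A small demonstration with a dense 2 x 2 transition matrix. numpy.matrix is used so the column indexing in the else branch behaves exactly as written; the scipy and fractions imports are assumptions about the surrounding module.

import numpy as np
from fractions import Fraction
from scipy.sparse import isspmatrix, find

def convert_to_adjacency_matrix(matrix):
    # Scale each column by the largest denominator of its entries.
    for i in range(matrix.shape[0]):
        if isspmatrix(matrix):
            col = find(matrix[:, i])[2]
        else:
            col = matrix[:, i].T.tolist()[0]
        coeff = max(Fraction(c).limit_denominator().denominator for c in col)
        matrix[:, i] *= coeff
    return matrix

transition = np.matrix([[0.5, 1.0],
                        [0.5, 0.0]])
print(convert_to_adjacency_matrix(transition))
# [[1. 1.]
#  [1. 0.]]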
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, keep_attr_order=True):
"""
Kind of like urlparse.parse_qs, except returns an ordered dict.
Also avoids replicating that function's bad habit of overriding the
built-in 'dict' type.
Taken from below with modification:
<https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
"""
od = DefaultOrderedDict(list) if keep_attr_order else defaultdict(list)
for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
od[name].append(value)
return od | Kind of like urlparse.parse_qs, except returns an ordered dict.
Also avoids replicating that function's bad habit of overriding the
built-in 'dict' type.
Taken from below with modification:
<https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py> | Below is the instruction that describes the task:
### Input:
Kind of like urlparse.parse_qs, except returns an ordered dict.
Also avoids replicating that function's bad habit of overriding the
built-in 'dict' type.
Taken from below with modification:
<https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
### Response:
def parse_qs(qs, keep_blank_values=0, strict_parsing=0, keep_attr_order=True):
"""
Kind of like urlparse.parse_qs, except returns an ordered dict.
Also avoids replicating that function's bad habit of overriding the
built-in 'dict' type.
Taken from below with modification:
<https://bitbucket.org/btubbs/thumpy/raw/8cdece404f15/thumpy.py>
"""
od = DefaultOrderedDict(list) if keep_attr_order else defaultdict(list)
for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
od[name].append(value)
return od |
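A quick illustration of the intended behavior using only the standard library. DefaultOrderedDict is project-specific, so this sketch takes the keep_attr_order=False path with an ordinary defaultdict (which, on Python 3.7+, preserves insertion order anyway).

from collections import defaultdict
from urllib.parse import parse_qsl

def parse_qs_ordered(qs, keep_blank_values=0, strict_parsing=0):
    od = defaultdict(list)
    for name, value in parse_qsl(qs, keep_blank_values, strict_parsing):
        od[name].append(value)
    return od

print(parse_qs_ordered('a=1&b=2&a=3'))
# defaultdict(<class 'list'>, {'a': ['1', '3'], 'b': ['2']})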
def get_processing_block_ids(self):
"""Get list of processing block ids using the processing block id"""
# Initialise empty list
_processing_block_ids = []
# Pattern used to search processing block ids
pattern = '*:processing_block:*'
block_ids = self._db.get_ids(pattern)
for block_id in block_ids:
id_split = block_id.split(':')[-1]
_processing_block_ids.append(id_split)
return sorted(_processing_block_ids) | Get list of processing block ids using the processing block id | Below is the instruction that describes the task:
### Input:
Get list of processing block ids using the processing block id
### Response:
def get_processing_block_ids(self):
"""Get list of processing block ids using the processing block id"""
# Initialise empty list
_processing_block_ids = []
# Pattern used to search processing block ids
pattern = '*:processing_block:*'
block_ids = self._db.get_ids(pattern)
for block_id in block_ids:
id_split = block_id.split(':')[-1]
_processing_block_ids.append(id_split)
return sorted(_processing_block_ids) |
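The id extraction is just a split on colon-delimited keys; a tiny standalone sketch with invented key names:

# Keys as they might come back from the '*:processing_block:*' pattern search.
block_ids = ['sbi-001:processing_block:pb-002', 'sbi-001:processing_block:pb-001']
processing_block_ids = sorted(key.split(':')[-1] for key in block_ids)
print(processing_block_ids)   # ['pb-001', 'pb-002']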
def exec_container(self,
asset_url,
algorithm_url,
resource_group_name,
account_name,
account_key,
location,
share_name_input='compute',
share_name_output='output',
docker_image='python:3.6-alpine',
memory=1.5,
cpu=1):
"""Prepare a docker image that will run in the cloud, mounting the asset and executing the algorithm.
:param asset_url
:param algorithm_url
:param resource_group_name:
:param account_name:
:param account_key:
:param share_name_input:
:param share_name_output:
:param location:
"""
try:
container_group_name = 'compute' + str(int(time.time()))
result_file = self._create_container_group(resource_group_name=resource_group_name,
name=container_group_name,
image=docker_image,
location=location,
memory=memory,
cpu=cpu,
algorithm=algorithm_url,
asset=asset_url,
input_mount_point='/input',
output_moint_point='/output',
account_name=account_name,
account_key=account_key,
share_name_input=share_name_input,
share_name_output=share_name_output
)
while self.client.container_groups.get(resource_group_name,
container_group_name).provisioning_state != 'Succeeded':
logging.info("Waiting to resources ")
while self.client.container_groups.get(resource_group_name, container_group_name). \
containers[0].instance_view.current_state.state != 'Terminated':
logging.info("Waiting to terminate")
self.delete_vm(container_group_name, resource_group_name)
return result_file
except Exception:
logging.error("There was a problem executing your container")
raise Exception | Prepare a docker image that will run in the cloud, mounting the asset and executing the algorithm.
:param asset_url
:param algorithm_url
:param resource_group_name:
:param account_name:
:param account_key:
:param share_name_input:
:param share_name_output:
:param location: | Below is the instruction that describes the task:
### Input:
Prepare a docker image that will run in the cloud, mounting the asset and executing the algorithm.
:param asset_url
:param algorithm_url
:param resource_group_name:
:param account_name:
:param account_key:
:param share_name_input:
:param share_name_output:
:param location:
### Response:
def exec_container(self,
asset_url,
algorithm_url,
resource_group_name,
account_name,
account_key,
location,
share_name_input='compute',
share_name_output='output',
docker_image='python:3.6-alpine',
memory=1.5,
cpu=1):
"""Prepare a docker image that will run in the cloud, mounting the asset and executing the algorithm.
:param asset_url
:param algorithm_url
:param resource_group_name:
:param account_name:
:param account_key:
:param share_name_input:
:param share_name_output:
:param location:
"""
try:
container_group_name = 'compute' + str(int(time.time()))
result_file = self._create_container_group(resource_group_name=resource_group_name,
name=container_group_name,
image=docker_image,
location=location,
memory=memory,
cpu=cpu,
algorithm=algorithm_url,
asset=asset_url,
input_mount_point='/input',
output_moint_point='/output',
account_name=account_name,
account_key=account_key,
share_name_input=share_name_input,
share_name_output=share_name_output
)
while self.client.container_groups.get(resource_group_name,
container_group_name).provisioning_state != 'Succeeded':
logging.info("Waiting to resources ")
while self.client.container_groups.get(resource_group_name, container_group_name). \
containers[0].instance_view.current_state.state != 'Terminated':
logging.info("Waiting to terminate")
self.delete_vm(container_group_name, resource_group_name)
return result_file
except Exception:
logging.error("There was a problem executing your container")
raise Exception |
def _custom_dtype_getter(self, getter, name, shape=None, dtype=DEFAULT_DTYPE,
*args, **kwargs):
"""Creates variables in fp32, then casts to fp16 if necessary.
This function is a custom getter. A custom getter is a function with the
same signature as tf.get_variable, except it has an additional getter
parameter. Custom getters can be passed as the `custom_getter` parameter of
tf.variable_scope. Then, tf.get_variable will call the custom getter,
instead of directly getting a variable itself. This can be used to change
the types of variables that are retrieved with tf.get_variable.
The `getter` parameter is the underlying variable getter, that would have
been called if no custom getter was used. Custom getters typically get a
variable with `getter`, then modify it in some way.
This custom getter will create an fp32 variable. If a low precision
(e.g. float16) variable was requested it will then cast the variable to the
requested dtype. The reason we do not directly create variables in low
precision dtypes is that applying small gradients to such variables may
cause the variable not to change.
Args:
getter: The underlying variable getter, that has the same signature as
tf.get_variable and returns a variable.
name: The name of the variable to get.
shape: The shape of the variable to get.
dtype: The dtype of the variable to get. Note that if this is a low
precision dtype, the variable will be created as a tf.float32 variable,
then cast to the appropriate dtype
*args: Additional arguments to pass unmodified to getter.
**kwargs: Additional keyword arguments to pass unmodified to getter.
Returns:
A variable which is cast to fp16 if necessary.
"""
if dtype in CASTABLE_TYPES:
var = getter(name, shape, tf.float32, *args, **kwargs)
return tf.cast(var, dtype=dtype, name=name + '_cast')
else:
return getter(name, shape, dtype, *args, **kwargs) | Creates variables in fp32, then casts to fp16 if necessary.
This function is a custom getter. A custom getter is a function with the
same signature as tf.get_variable, except it has an additional getter
parameter. Custom getters can be passed as the `custom_getter` parameter of
tf.variable_scope. Then, tf.get_variable will call the custom getter,
instead of directly getting a variable itself. This can be used to change
the types of variables that are retrieved with tf.get_variable.
The `getter` parameter is the underlying variable getter, that would have
been called if no custom getter was used. Custom getters typically get a
variable with `getter`, then modify it in some way.
This custom getter will create an fp32 variable. If a low precision
(e.g. float16) variable was requested it will then cast the variable to the
requested dtype. The reason we do not directly create variables in low
precision dtypes is that applying small gradients to such variables may
cause the variable not to change.
Args:
getter: The underlying variable getter, that has the same signature as
tf.get_variable and returns a variable.
name: The name of the variable to get.
shape: The shape of the variable to get.
dtype: The dtype of the variable to get. Note that if this is a low
precision dtype, the variable will be created as a tf.float32 variable,
then cast to the appropriate dtype
*args: Additional arguments to pass unmodified to getter.
**kwargs: Additional keyword arguments to pass unmodified to getter.
Returns:
A variable which is cast to fp16 if necessary. | Below is the instruction that describes the task:
### Input:
Creates variables in fp32, then casts to fp16 if necessary.
This function is a custom getter. A custom getter is a function with the
same signature as tf.get_variable, except it has an additional getter
parameter. Custom getters can be passed as the `custom_getter` parameter of
tf.variable_scope. Then, tf.get_variable will call the custom getter,
instead of directly getting a variable itself. This can be used to change
the types of variables that are retrieved with tf.get_variable.
The `getter` parameter is the underlying variable getter, that would have
been called if no custom getter was used. Custom getters typically get a
variable with `getter`, then modify it in some way.
This custom getter will create an fp32 variable. If a low precision
(e.g. float16) variable was requested it will then cast the variable to the
requested dtype. The reason we do not directly create variables in low
precision dtypes is that applying small gradients to such variables may
cause the variable not to change.
Args:
getter: The underlying variable getter, that has the same signature as
tf.get_variable and returns a variable.
name: The name of the variable to get.
shape: The shape of the variable to get.
dtype: The dtype of the variable to get. Note that if this is a low
precision dtype, the variable will be created as a tf.float32 variable,
then cast to the appropriate dtype
*args: Additional arguments to pass unmodified to getter.
**kwargs: Additional keyword arguments to pass unmodified to getter.
Returns:
A variable which is cast to fp16 if necessary.
### Response:
def _custom_dtype_getter(self, getter, name, shape=None, dtype=DEFAULT_DTYPE,
*args, **kwargs):
"""Creates variables in fp32, then casts to fp16 if necessary.
This function is a custom getter. A custom getter is a function with the
same signature as tf.get_variable, except it has an additional getter
parameter. Custom getters can be passed as the `custom_getter` parameter of
tf.variable_scope. Then, tf.get_variable will call the custom getter,
instead of directly getting a variable itself. This can be used to change
the types of variables that are retrieved with tf.get_variable.
The `getter` parameter is the underlying variable getter, that would have
been called if no custom getter was used. Custom getters typically get a
variable with `getter`, then modify it in some way.
This custom getter will create an fp32 variable. If a low precision
(e.g. float16) variable was requested it will then cast the variable to the
requested dtype. The reason we do not directly create variables in low
precision dtypes is that applying small gradients to such variables may
cause the variable not to change.
Args:
getter: The underlying variable getter, that has the same signature as
tf.get_variable and returns a variable.
name: The name of the variable to get.
shape: The shape of the variable to get.
dtype: The dtype of the variable to get. Note that if this is a low
precision dtype, the variable will be created as a tf.float32 variable,
then cast to the appropriate dtype
*args: Additional arguments to pass unmodified to getter.
**kwargs: Additional keyword arguments to pass unmodified to getter.
Returns:
A variable which is cast to fp16 if necessary.
"""
if dtype in CASTABLE_TYPES:
var = getter(name, shape, tf.float32, *args, **kwargs)
return tf.cast(var, dtype=dtype, name=name + '_cast')
else:
return getter(name, shape, dtype, *args, **kwargs) |
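A minimal sketch of wiring such a getter into a variable scope, using the tf.compat.v1 API so it runs under TensorFlow 2. CASTABLE_TYPES is assumed to contain tf.float16, and the scope and variable names are illustrative.

import tensorflow as tf

tf.compat.v1.disable_eager_execution()
CASTABLE_TYPES = (tf.float16,)            # assumed contents

def custom_dtype_getter(getter, name, shape=None, dtype=tf.float32, *args, **kwargs):
    # Create the variable in fp32, then cast to the requested low-precision dtype.
    if dtype in CASTABLE_TYPES:
        var = getter(name, shape, tf.float32, *args, **kwargs)
        return tf.cast(var, dtype=dtype, name=name + '_cast')
    return getter(name, shape, dtype, *args, **kwargs)

with tf.compat.v1.variable_scope('model', custom_getter=custom_dtype_getter):
    w = tf.compat.v1.get_variable('w', shape=[3, 3], dtype=tf.float16)
print(w.dtype)   # float16 tensor backed by an fp32 variable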
def do_NOTIFY(self): # pylint: disable=invalid-name
"""Serve a ``NOTIFY`` request.
A ``NOTIFY`` request will be sent by a Sonos device when a state
variable changes. See the `UPnP Spec §4.3 [pdf]
<http://upnp.org/specs/arch/UPnP-arch
-DeviceArchitecture-v1.1.pdf>`_ for details.
"""
timestamp = time.time()
headers = requests.structures.CaseInsensitiveDict(self.headers)
seq = headers['seq'] # Event sequence number
sid = headers['sid'] # Event Subscription Identifier
content_length = int(headers['content-length'])
content = self.rfile.read(content_length)
# Find the relevant service and queue from the sid
with _subscriptions_lock:
subscription = _subscriptions.get(sid)
# It might have been removed by another thread
if subscription:
service = subscription.service
log.info(
"Event %s received for %s service on thread %s at %s", seq,
service.service_id, threading.current_thread(), timestamp)
log.debug("Event content: %s", content)
variables = parse_event_xml(content)
# Build the Event object
event = Event(sid, seq, service, timestamp, variables)
# pass the event details on to the service so it can update its
# cache.
# pylint: disable=protected-access
service._update_cache_on_event(event)
# Put the event on the queue
subscription.events.put(event)
else:
log.info("No service registered for %s", sid)
self.send_response(200)
self.end_headers() | Serve a ``NOTIFY`` request.
A ``NOTIFY`` request will be sent by a Sonos device when a state
variable changes. See the `UPnP Spec §4.3 [pdf]
<http://upnp.org/specs/arch/UPnP-arch
-DeviceArchitecture-v1.1.pdf>`_ for details. | Below is the instruction that describes the task:
### Input:
Serve a ``NOTIFY`` request.
A ``NOTIFY`` request will be sent by a Sonos device when a state
variable changes. See the `UPnP Spec §4.3 [pdf]
<http://upnp.org/specs/arch/UPnP-arch
-DeviceArchitecture-v1.1.pdf>`_ for details.
### Response:
def do_NOTIFY(self): # pylint: disable=invalid-name
"""Serve a ``NOTIFY`` request.
A ``NOTIFY`` request will be sent by a Sonos device when a state
variable changes. See the `UPnP Spec §4.3 [pdf]
<http://upnp.org/specs/arch/UPnP-arch
-DeviceArchitecture-v1.1.pdf>`_ for details.
"""
timestamp = time.time()
headers = requests.structures.CaseInsensitiveDict(self.headers)
seq = headers['seq'] # Event sequence number
sid = headers['sid'] # Event Subscription Identifier
content_length = int(headers['content-length'])
content = self.rfile.read(content_length)
# Find the relevant service and queue from the sid
with _subscriptions_lock:
subscription = _subscriptions.get(sid)
# It might have been removed by another thread
if subscription:
service = subscription.service
log.info(
"Event %s received for %s service on thread %s at %s", seq,
service.service_id, threading.current_thread(), timestamp)
log.debug("Event content: %s", content)
variables = parse_event_xml(content)
# Build the Event object
event = Event(sid, seq, service, timestamp, variables)
# pass the event details on to the service so it can update its
# cache.
# pylint: disable=protected-access
service._update_cache_on_event(event)
# Put the event on the queue
subscription.events.put(event)
else:
log.info("No service registered for %s", sid)
self.send_response(200)
self.end_headers() |
def _check_pillar(kwargs, pillar=None):
'''
Check the pillar for errors, refuse to run the state if there are errors
in the pillar and return the pillar errors
'''
if kwargs.get('force'):
return True
pillar_dict = pillar if pillar is not None else __pillar__
if '_errors' in pillar_dict:
return False
return True | Check the pillar for errors, refuse to run the state if there are errors
in the pillar and return the pillar errors | Below is the instruction that describes the task:
### Input:
Check the pillar for errors, refuse to run the state if there are errors
in the pillar and return the pillar errors
### Response:
def _check_pillar(kwargs, pillar=None):
'''
Check the pillar for errors, refuse to run the state if there are errors
in the pillar and return the pillar errors
'''
if kwargs.get('force'):
return True
pillar_dict = pillar if pillar is not None else __pillar__
if '_errors' in pillar_dict:
return False
return True |
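A standalone check mirroring the same logic; the pillar dicts are made-up examples.

def check_pillar(kwargs, pillar):
    # Refuse to run when the rendered pillar carries errors, unless forced.
    if kwargs.get('force'):
        return True
    return '_errors' not in pillar

print(check_pillar({}, {'_errors': ['rendering failed']}))    # False
print(check_pillar({'force': True}, {'_errors': ['boom']}))   # True
print(check_pillar({}, {'role': 'web'}))                      # True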
def tree_line_generator(el, max_lines=None):
"""
Internal generator that iterates through an LXML tree and yields a tuple
per line. In this context, lines are blocks of text separated by <br> tags
or by block elements. The tuples contain the following elements:
- A tuple with the element reference (element, position) for the start
of the line. The tuple consists of:
- The LXML HTML element which references the line
- Whether the text starts at the beginning of the referenced element,
or after the closing tag
- A similar tuple indicating the ending of the line.
- The email indentation level, if detected.
- The plain (non-HTML) text of the line
If max_lines is specified, the generator stops after yielding the given
amount of lines.
For example, the HTML tree "<div>foo <span>bar</span><br>baz</div>" yields:
- ((<Element div>, 'begin'), (<Element br>, 'begin'), 0, 'foo bar')
- ((<Element br>, 'end'), (<Element div>, 'end'), 0, 'baz').
To illustrate the indentation level, the HTML tree
'<div><blockquote>hi</blockquote>world</div>' yields:
- ((<Element blockquote>, 'begin'), (<Element blockquote>, 'end'), 1, 'hi')
- ((<Element blockquote>, 'end'), (<Element div>, 'end'), 0, 'world')
"""
def _trim_spaces(text):
return MULTIPLE_WHITESPACE_RE.sub(' ', text).strip()
counter = 1
if max_lines != None and counter > max_lines:
return
# Buffer for the current line.
line = ''
# The reference tuple (element, position) for the start of the line.
start_ref = None
# The indentation level at the start of the line.
start_indentation_level = None
for token in tree_token_generator(el):
if token is None:
continue
elif isinstance(token, tuple):
el, state, indentation_level = token
tag_name = el.tag.lower()
line_break = (tag_name == 'br' and state == BEGIN)
is_block = (tag_name not in INLINE_TAGS)
is_forward = (is_block and state == BEGIN and
el.attrib.get('style') in FORWARD_STYLES)
if is_block or line_break:
line = _trim_spaces(line)
if line or line_break or is_forward:
end_ref = (el, state)
yield start_ref, end_ref, start_indentation_level, line
counter += 1
if max_lines != None and counter > max_lines:
return
line = ''
if is_forward:
# Simulate forward
yield (end_ref, end_ref, start_indentation_level,
FORWARD_LINE)
counter += 1
if max_lines != None and counter > max_lines:
return
if not line:
start_ref = (el, state)
start_indentation_level = indentation_level
elif isinstance(token, string_class):
line += token
else:
raise RuntimeError('invalid token: {}'.format(token))
line = _trim_spaces(line)
if line:
yield line | Internal generator that iterates through an LXML tree and yields a tuple
per line. In this context, lines are blocks of text separated by <br> tags
or by block elements. The tuples contain the following elements:
- A tuple with the element reference (element, position) for the start
of the line. The tuple consists of:
- The LXML HTML element which references the line
- Whether the text starts at the beginning of the referenced element,
or after the closing tag
- A similar tuple indicating the ending of the line.
- The email indentation level, if detected.
- The plain (non-HTML) text of the line
If max_lines is specified, the generator stops after yielding the given
amount of lines.
For example, the HTML tree "<div>foo <span>bar</span><br>baz</div>" yields:
- ((<Element div>, 'begin'), (<Element br>, 'begin'), 0, 'foo bar')
- ((<Element br>, 'end'), (<Element div>, 'end'), 0, 'baz').
To illustrate the indentation level, the HTML tree
'<div><blockquote>hi</blockquote>world</div>' yields:
- ((<Element blockquote>, 'begin'), (<Element blockquote>, 'end'), 1, 'hi')
- ((<Element blockquote>, 'end'), (<Element div>, 'end'), 0, 'world') | Below is the instruction that describes the task:
### Input:
Internal generator that iterates through an LXML tree and yields a tuple
per line. In this context, lines are blocks of text separated by <br> tags
or by block elements. The tuples contain the following elements:
- A tuple with the element reference (element, position) for the start
of the line. The tuple consists of:
- The LXML HTML element which references the line
- Whether the text starts at the beginning of the referenced element,
or after the closing tag
- A similar tuple indicating the ending of the line.
- The email indentation level, if detected.
- The plain (non-HTML) text of the line
If max_lines is specified, the generator stops after yielding the given
amount of lines.
For example, the HTML tree "<div>foo <span>bar</span><br>baz</div>" yields:
- ((<Element div>, 'begin'), (<Element br>, 'begin'), 0, 'foo bar')
- ((<Element br>, 'end'), (<Element div>, 'end'), 0, 'baz').
To illustrate the indentation level, the HTML tree
'<div><blockquote>hi</blockquote>world</div>' yields:
- ((<Element blockquote>, 'begin'), (<Element blockquote>, 'end'), 1, 'hi')
- ((<Element blockquote>, 'end'), (<Element div>, 'end'), 0, 'world')
### Response:
def tree_line_generator(el, max_lines=None):
"""
Internal generator that iterates through an LXML tree and yields a tuple
per line. In this context, lines are blocks of text separated by <br> tags
or by block elements. The tuples contain the following elements:
- A tuple with the element reference (element, position) for the start
of the line. The tuple consists of:
- The LXML HTML element which references the line
- Whether the text starts at the beginning of the referenced element,
or after the closing tag
- A similar tuple indicating the ending of the line.
- The email indentation level, if detected.
- The plain (non-HTML) text of the line
If max_lines is specified, the generator stops after yielding the given
amount of lines.
For example, the HTML tree "<div>foo <span>bar</span><br>baz</div>" yields:
- ((<Element div>, 'begin'), (<Element br>, 'begin'), 0, 'foo bar')
- ((<Element br>, 'end'), (<Element div>, 'end'), 0, 'baz').
To illustrate the indentation level, the HTML tree
'<div><blockquote>hi</blockquote>world</div>' yields:
- ((<Element blockquote>, 'begin'), (<Element blockquote>, 'end'), 1, 'hi')
- ((<Element blockquote>, 'end'), (<Element div>, 'end'), 0, 'world')
"""
def _trim_spaces(text):
return MULTIPLE_WHITESPACE_RE.sub(' ', text).strip()
counter = 1
if max_lines != None and counter > max_lines:
return
# Buffer for the current line.
line = ''
# The reference tuple (element, position) for the start of the line.
start_ref = None
# The indentation level at the start of the line.
start_indentation_level = None
for token in tree_token_generator(el):
if token is None:
continue
elif isinstance(token, tuple):
el, state, indentation_level = token
tag_name = el.tag.lower()
line_break = (tag_name == 'br' and state == BEGIN)
is_block = (tag_name not in INLINE_TAGS)
is_forward = (is_block and state == BEGIN and
el.attrib.get('style') in FORWARD_STYLES)
if is_block or line_break:
line = _trim_spaces(line)
if line or line_break or is_forward:
end_ref = (el, state)
yield start_ref, end_ref, start_indentation_level, line
counter += 1
if max_lines != None and counter > max_lines:
return
line = ''
if is_forward:
# Simulate forward
yield (end_ref, end_ref, start_indentation_level,
FORWARD_LINE)
counter += 1
if max_lines != None and counter > max_lines:
return
if not line:
start_ref = (el, state)
start_indentation_level = indentation_level
elif isinstance(token, string_class):
line += token
else:
raise RuntimeError('invalid token: {}'.format(token))
line = _trim_spaces(line)
if line:
yield line |
def login(self, password='', captcha='', email_code='', twofactor_code='', language='english'):
"""Attempts web login and returns on a session with cookies set
:param password: password, if it wasn't provided on instance init
:type password: :class:`str`
:param captcha: text response for captcha challenge
:type captcha: :class:`str`
:param email_code: email code for steam guard
:type email_code: :class:`str`
:param twofactor_code: 2FA code for steam guard
:type twofactor_code: :class:`str`
:param language: select language for steam web pages (sets language cookie)
:type language: :class:`str`
:return: a session on success and :class:`None` otherwise
:rtype: :class:`requests.Session`, :class:`None`
:raises HTTPError: any problem with http request, timeouts, 5xx, 4xx etc
:raises LoginIncorrect: wrong username or password
:raises CaptchaRequired: when captcha is needed
:raises CaptchaRequiredLoginIncorrect: when captcha is needed and login is incorrect
:raises EmailCodeRequired: when email is needed
:raises TwoFactorCodeRequired: when 2FA is needed
"""
if self.logged_on:
return self.session
if password:
self.password = password
else:
if self.password:
password = self.password
else:
raise LoginIncorrect("password is not specified")
if not captcha and self.captcha_code:
captcha = self.captcha_code
self._load_key()
resp = self._send_login(password=password, captcha=captcha, email_code=email_code, twofactor_code=twofactor_code)
if resp['success'] and resp['login_complete']:
self.logged_on = True
self.password = self.captcha_code = ''
self.captcha_gid = -1
for cookie in list(self.session.cookies):
for domain in ['store.steampowered.com', 'help.steampowered.com', 'steamcommunity.com']:
self.session.cookies.set(cookie.name, cookie.value, domain=domain, secure=cookie.secure)
self.session_id = generate_session_id()
for domain in ['store.steampowered.com', 'help.steampowered.com', 'steamcommunity.com']:
self.session.cookies.set('Steam_Language', language, domain=domain)
self.session.cookies.set('birthtime', '-3333', domain=domain)
self.session.cookies.set('sessionid', self.session_id, domain=domain)
self._finalize_login(resp)
return self.session
else:
if resp.get('captcha_needed', False):
self.captcha_gid = resp['captcha_gid']
self.captcha_code = ''
if resp.get('clear_password_field', False):
self.password = ''
raise CaptchaRequiredLoginIncorrect(resp['message'])
else:
raise CaptchaRequired(resp['message'])
elif resp.get('emailauth_needed', False):
self.steam_id = SteamID(resp['emailsteamid'])
raise EmailCodeRequired(resp['message'])
elif resp.get('requires_twofactor', False):
raise TwoFactorCodeRequired(resp['message'])
else:
self.password = ''
raise LoginIncorrect(resp['message'])
return None | Attempts web login and returns a session with cookies set
:param password: password, if it wasn't provided on instance init
:type password: :class:`str`
:param captcha: text response for captcha challenge
:type captcha: :class:`str`
:param email_code: email code for steam guard
:type email_code: :class:`str`
:param twofactor_code: 2FA code for steam guard
:type twofactor_code: :class:`str`
:param language: select language for steam web pages (sets language cookie)
:type language: :class:`str`
:return: a session on success and :class:`None` otherwise
:rtype: :class:`requests.Session`, :class:`None`
:raises HTTPError: any problem with http request, timeouts, 5xx, 4xx etc
:raises LoginIncorrect: wrong username or password
:raises CaptchaRequired: when captcha is needed
:raises CaptchaRequiredLoginIncorrect: when captcha is needed and login is incorrect
:raises EmailCodeRequired: when email is needed
:raises TwoFactorCodeRequired: when 2FA is needed | Below is the instruction that describes the task:
### Input:
Attempts web login and returns a session with cookies set
:param password: password, if it wasn't provided on instance init
:type password: :class:`str`
:param captcha: text reponse for captcha challenge
:type captcha: :class:`str`
:param email_code: email code for steam guard
:type email_code: :class:`str`
:param twofactor_code: 2FA code for steam guard
:type twofactor_code: :class:`str`
:param language: select language for steam web pages (sets language cookie)
:type language: :class:`str`
:return: a session on success and :class:`None` otherwise
:rtype: :class:`requests.Session`, :class:`None`
:raises HTTPError: any problem with http request, timeouts, 5xx, 4xx etc
:raises LoginIncorrect: wrong username or password
:raises CaptchaRequired: when captcha is needed
:raises CaptchaRequiredLoginIncorrect: when captcha is needed and login is incorrect
:raises EmailCodeRequired: when email is needed
:raises TwoFactorCodeRequired: when 2FA is needed
### Response:
def login(self, password='', captcha='', email_code='', twofactor_code='', language='english'):
"""Attempts web login and returns on a session with cookies set
:param password: password, if it wasn't provided on instance init
:type password: :class:`str`
:param captcha: text response for captcha challenge
:type captcha: :class:`str`
:param email_code: email code for steam guard
:type email_code: :class:`str`
:param twofactor_code: 2FA code for steam guard
:type twofactor_code: :class:`str`
:param language: select language for steam web pages (sets language cookie)
:type language: :class:`str`
:return: a session on success and :class:`None` otherwise
:rtype: :class:`requests.Session`, :class:`None`
:raises HTTPError: any problem with http request, timeouts, 5xx, 4xx etc
:raises LoginIncorrect: wrong username or password
:raises CaptchaRequired: when captcha is needed
:raises CaptchaRequiredLoginIncorrect: when captcha is needed and login is incorrect
:raises EmailCodeRequired: when email is needed
:raises TwoFactorCodeRequired: when 2FA is needed
"""
if self.logged_on:
return self.session
if password:
self.password = password
else:
if self.password:
password = self.password
else:
raise LoginIncorrect("password is not specified")
if not captcha and self.captcha_code:
captcha = self.captcha_code
self._load_key()
resp = self._send_login(password=password, captcha=captcha, email_code=email_code, twofactor_code=twofactor_code)
if resp['success'] and resp['login_complete']:
self.logged_on = True
self.password = self.captcha_code = ''
self.captcha_gid = -1
for cookie in list(self.session.cookies):
for domain in ['store.steampowered.com', 'help.steampowered.com', 'steamcommunity.com']:
self.session.cookies.set(cookie.name, cookie.value, domain=domain, secure=cookie.secure)
self.session_id = generate_session_id()
for domain in ['store.steampowered.com', 'help.steampowered.com', 'steamcommunity.com']:
self.session.cookies.set('Steam_Language', language, domain=domain)
self.session.cookies.set('birthtime', '-3333', domain=domain)
self.session.cookies.set('sessionid', self.session_id, domain=domain)
self._finalize_login(resp)
return self.session
else:
if resp.get('captcha_needed', False):
self.captcha_gid = resp['captcha_gid']
self.captcha_code = ''
if resp.get('clear_password_field', False):
self.password = ''
raise CaptchaRequiredLoginIncorrect(resp['message'])
else:
raise CaptchaRequired(resp['message'])
elif resp.get('emailauth_needed', False):
self.steam_id = SteamID(resp['emailsteamid'])
raise EmailCodeRequired(resp['message'])
elif resp.get('requires_twofactor', False):
raise TwoFactorCodeRequired(resp['message'])
else:
self.password = ''
raise LoginIncorrect(resp['message'])
return None |
def pprint2columns(llist, max_length=60):
"""
llist = a list of strings
max_length = if a word is longer than that, for single col display
> prints a list in two columns, taking care of alignment too
"""
if len(llist) == 0:
return None
col_width = max(len(word) for word in llist) + 2 # padding
# llist length must be even, otherwise splitting fails
if not len(llist) % 2 == 0:
llist += [' '] # add a fake element
if col_width > max_length:
for el in llist:
print(el)
else:
column1 = llist[:int(len(llist) / 2)]
column2 = llist[int(len(llist) / 2):]
for c1, c2 in zip(column1, column2):
space = " " * (col_width - len(c1))
print("%s%s%s" % (c1, space, c2)) | llist = a list of strings
max_length = if a word is longer than that, for single col display
> prints a list in two columns, taking care of alignment too | Below is the instruction that describes the task:
### Input:
llist = a list of strings
max_length = if a word is longer than that, for single col display
> prints a list in two columns, taking care of alignment too
### Response:
def pprint2columns(llist, max_length=60):
"""
llist = a list of strings
max_length = if a word is longer than that, for single col display
> prints a list in two columns, taking care of alignment too
"""
if len(llist) == 0:
return None
col_width = max(len(word) for word in llist) + 2 # padding
# llist length must be even, otherwise splitting fails
if not len(llist) % 2 == 0:
llist += [' '] # add a fake element
if col_width > max_length:
for el in llist:
print(el)
else:
column1 = llist[:int(len(llist) / 2)]
column2 = llist[int(len(llist) / 2):]
for c1, c2 in zip(column1, column2):
space = " " * (col_width - len(c1))
print("%s%s%s" % (c1, space, c2)) |
def _get_field(self, extras, field, default=None):
"""
Fetches a field from extras, and returns it. This is some Airflow
magic. The google_cloud_platform hook type adds custom UI elements
to the hook page, which allow admins to specify service_account,
key_path, etc. They get formatted as shown below.
"""
long_f = 'extra__google_cloud_platform__{}'.format(field)
if long_f in extras:
return extras[long_f]
else:
self.log.info('Field %s not found in extras.', field)
return default | Fetches a field from extras, and returns it. This is some Airflow
magic. The google_cloud_platform hook type adds custom UI elements
to the hook page, which allow admins to specify service_account,
key_path, etc. They get formatted as shown below. | Below is the instruction that describes the task:
### Input:
Fetches a field from extras, and returns it. This is some Airflow
magic. The google_cloud_platform hook type adds custom UI elements
to the hook page, which allow admins to specify service_account,
key_path, etc. They get formatted as shown below.
### Response:
def _get_field(self, extras, field, default=None):
"""
Fetches a field from extras, and returns it. This is some Airflow
magic. The google_cloud_platform hook type adds custom UI elements
to the hook page, which allow admins to specify service_account,
key_path, etc. They get formatted as shown below.
"""
long_f = 'extra__google_cloud_platform__{}'.format(field)
if long_f in extras:
return extras[long_f]
else:
self.log.info('Field %s not found in extras.', field)
return default |
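The lookup reduces to fetching a prefixed key from the connection's extras; a small standalone sketch with a made-up extras dict (logging omitted).

def get_field(extras, field, default=None):
    # Airflow stores these UI fields under a long, prefixed key.
    long_f = 'extra__google_cloud_platform__{}'.format(field)
    return extras.get(long_f, default)

extras = {'extra__google_cloud_platform__project': 'my-project'}
print(get_field(extras, 'project'))            # my-project
print(get_field(extras, 'key_path', None))     # None (falls back to the default)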
def update_case_compounds(self, case_obj, build='37'):
"""Update the compounds for a case
Loop over all coding intervals to get coordinates for all potential compound positions.
Update all variants within a gene with a bulk operation.
"""
case_id = case_obj['_id']
# Possible categories 'snv', 'sv', 'str', 'cancer':
categories = set()
# Possible variant types 'clinical', 'research':
variant_types = set()
for file_type in FILE_TYPE_MAP:
if case_obj.get('vcf_files',{}).get(file_type):
categories.add(FILE_TYPE_MAP[file_type]['category'])
variant_types.add(FILE_TYPE_MAP[file_type]['variant_type'])
coding_intervals = self.get_coding_intervals(build=build)
# Loop over all intervals
for chrom in CHROMOSOMES:
intervals = coding_intervals.get(chrom, IntervalTree())
for var_type in variant_types:
for category in categories:
LOG.info("Updating compounds on chromosome:{0}, type:{1}, category:{2} for case:{3}".format(
chrom, var_type, category, case_id))
# Fetch all variants from a chromosome
query = {
'variant_type': var_type,
'chrom': chrom,
}
# Get all variants from the database of the specific type
variant_objs = self.variants(
case_id=case_id,
query=query,
category=category,
nr_of_variants=-1,
sort_key='position'
)
# Initiate a bulk
bulk = {}
current_region = None
special = False
# Loop over the variants and check if they are in a coding region
for var_obj in variant_objs:
var_id = var_obj['_id']
var_chrom = var_obj['chromosome']
var_start = var_obj['position']
var_end = var_obj['end'] + 1
update_bulk = True
new_region = None
# Check if the variant is in a coding region
genomic_regions = coding_intervals.get(var_chrom, IntervalTree()).search(var_start, var_end)
# If the variant is in a coding region
if genomic_regions:
# We know there is data here so get the interval id
new_region = genomic_regions.pop().data
if new_region and (new_region == current_region):
# If the variant is in the same region as previous
# we add it to the same bulk
update_bulk = False
current_region = new_region
# If the variant is not in a current region we update the compounds
# from the previous region, if any. Otherwise continue
if update_bulk and bulk:
self.update_compounds(bulk)
self.update_mongo_compound_variants(bulk)
bulk = {}
if new_region:
bulk[var_id] = var_obj
if not bulk:
continue
self.update_compounds(bulk)
self.update_mongo_compound_variants(bulk)
LOG.info("All compounds updated")
return | Update the compounds for a case
Loop over all coding intervals to get coordinates for all potential compound positions.
Update all variants within a gene with a bulk operation. | Below is the instruction that describes the task:
### Input:
Update the compounds for a case
Loop over all coding intervals to get coordinates for all potential compound positions.
Update all variants within a gene with a bulk operation.
### Response:
def update_case_compounds(self, case_obj, build='37'):
"""Update the compounds for a case
Loop over all coding intervals to get coordinates for all potential compound positions.
Update all variants within a gene with a bulk operation.
"""
case_id = case_obj['_id']
# Possible categories 'snv', 'sv', 'str', 'cancer':
categories = set()
# Possible variant types 'clinical', 'research':
variant_types = set()
for file_type in FILE_TYPE_MAP:
if case_obj.get('vcf_files',{}).get(file_type):
categories.add(FILE_TYPE_MAP[file_type]['category'])
variant_types.add(FILE_TYPE_MAP[file_type]['variant_type'])
coding_intervals = self.get_coding_intervals(build=build)
# Loop over all intervals
for chrom in CHROMOSOMES:
intervals = coding_intervals.get(chrom, IntervalTree())
for var_type in variant_types:
for category in categories:
LOG.info("Updating compounds on chromosome:{0}, type:{1}, category:{2} for case:{3}".format(
chrom, var_type, category, case_id))
# Fetch all variants from a chromosome
query = {
'variant_type': var_type,
'chrom': chrom,
}
# Get all variants from the database of the specific type
variant_objs = self.variants(
case_id=case_id,
query=query,
category=category,
nr_of_variants=-1,
sort_key='position'
)
# Initiate a bulk
bulk = {}
current_region = None
special = False
# Loop over the variants and check if they are in a coding region
for var_obj in variant_objs:
var_id = var_obj['_id']
var_chrom = var_obj['chromosome']
var_start = var_obj['position']
var_end = var_obj['end'] + 1
update_bulk = True
new_region = None
# Check if the variant is in a coding region
genomic_regions = coding_intervals.get(var_chrom, IntervalTree()).search(var_start, var_end)
# If the variant is in a coding region
if genomic_regions:
# We know there is data here so get the interval id
new_region = genomic_regions.pop().data
if new_region and (new_region == current_region):
# If the variant is in the same region as previous
# we add it to the same bulk
update_bulk = False
current_region = new_region
# If the variant is not in a current region we update the compounds
# from the previous region, if any. Otherwise continue
if update_bulk and bulk:
self.update_compounds(bulk)
self.update_mongo_compound_variants(bulk)
bulk = {}
if new_region:
bulk[var_id] = var_obj
if not bulk:
continue
self.update_compounds(bulk)
self.update_mongo_compound_variants(bulk)
LOG.info("All compounds updated")
return |
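The per-variant work hinges on looking up coding regions in an interval tree keyed by chromosome. A tiny sketch of that lookup step follows; it assumes the intervaltree package at a version below 3.0 (where search() still exists, as used above), and the gene names are invented.

from intervaltree import Interval, IntervalTree

coding_intervals = {'1': IntervalTree([Interval(100, 200, 'gene_A'),
                                       Interval(300, 400, 'gene_B')])}

var_chrom, var_start, var_end = '1', 150, 160
genomic_regions = coding_intervals.get(var_chrom, IntervalTree()).search(var_start, var_end)
if genomic_regions:
    print(genomic_regions.pop().data)   # gene_A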
def get_comment_group_for_path(self, pathname, default_content_type=None):
"""
Obtains the comment group for a specified pathname.
:param pathname:
The path for which the comment group will be obtained.
:return:
Returns the comment group for the specified pathname
or raises a ``ValueError`` if a content type is not found
or raises a ``KeyError`` if a comment group is not found.
Usage:
>>> db = ContentTypesDatabase()
>>> db.add_config(db._test_config, 'test_config.yaml')
>>> g = db.get_comment_group_for_path
>>> g("foobar.py")
[['#', '']]
>>> g("foobar.js")
[['/*', '*/'], ['//', '']]
>>> g('foobar.rst')
Traceback (most recent call last):
...
KeyError: 'No comment groups for content type `structured-text` for file `foobar.rst` found'
# If the content type cannot be determined, we assume the content
# type to be ``python`` in this case.
>>> g('foobar.f37993ajdha73', default_content_type='python')
[['#', '']]
>>> g("foobar.f37993ajdha73")
Traceback (most recent call last):
...
ValueError: No content type defined for file path: foobar.f37993ajdha73
>>> g("foobar.f37993ajdha73", default_content_type=None)
Traceback (most recent call last):
...
ValueError: No content type defined for file path: foobar.f37993ajdha73
"""
content_type = self.guess_content_type(pathname)
if not content_type:
# Content type is not found.
if default_content_type:
content_type = default_content_type
return self.get_comment_group(content_type)
else:
raise ValueError(
"No content type defined for file path: %s" % pathname)
else:
try:
return self.get_comment_group(content_type)
except KeyError:
raise KeyError(
"No comment groups for content type `%s` for file `%s` found" % (
content_type, pathname)) | Obtains the comment group for a specified pathname.
:param pathname:
The path for which the comment group will be obtained.
:return:
Returns the comment group for the specified pathname
or raises a ``ValueError`` if a content type is not found
or raises a ``KeyError`` if a comment group is not found.
Usage:
>>> db = ContentTypesDatabase()
>>> db.add_config(db._test_config, 'test_config.yaml')
>>> g = db.get_comment_group_for_path
>>> g("foobar.py")
[['#', '']]
>>> g("foobar.js")
[['/*', '*/'], ['//', '']]
>>> g('foobar.rst')
Traceback (most recent call last):
...
KeyError: 'No comment groups for content type `structured-text` for file `foobar.rst` found'
# If the content type cannot be determined, we assume the content
# type to be ``python`` in this case.
>>> g('foobar.f37993ajdha73', default_content_type='python')
[['#', '']]
>>> g("foobar.f37993ajdha73")
Traceback (most recent call last):
...
ValueError: No content type defined for file path: foobar.f37993ajdha73
>>> g("foobar.f37993ajdha73", default_content_type=None)
Traceback (most recent call last):
...
ValueError: No content type defined for file path: foobar.f37993ajdha73 | Below is the instruction that describes the task:
### Input:
Obtains the comment group for a specified pathname.
:param pathname:
The path for which the comment group will be obtained.
:return:
Returns the comment group for the specified pathname
or raises a ``ValueError`` if a content type is not found
or raises a ``KeyError`` if a comment group is not found.
Usage:
>>> db = ContentTypesDatabase()
>>> db.add_config(db._test_config, 'test_config.yaml')
>>> g = db.get_comment_group_for_path
>>> g("foobar.py")
[['#', '']]
>>> g("foobar.js")
[['/*', '*/'], ['//', '']]
>>> g('foobar.rst')
Traceback (most recent call last):
...
KeyError: 'No comment groups for content type `structured-text` for file `foobar.rst` found'
# If the content type cannot be determined, we assume the content
# type to be ``python`` in this case.
>>> g('foobar.f37993ajdha73', default_content_type='python')
[['#', '']]
>>> g("foobar.f37993ajdha73")
Traceback (most recent call last):
...
ValueError: No content type defined for file path: foobar.f37993ajdha73
>>> g("foobar.f37993ajdha73", default_content_type=None)
Traceback (most recent call last):
...
ValueError: No content type defined for file path: foobar.f37993ajdha73
### Response:
def get_comment_group_for_path(self, pathname, default_content_type=None):
"""
Obtains the comment group for a specified pathname.
:param pathname:
The path for which the comment group will be obtained.
:return:
Returns the comment group for the specified pathname
or raises a ``ValueError`` if a content type is not found
or raises a ``KeyError`` if a comment group is not found.
Usage:
>>> db = ContentTypesDatabase()
>>> db.add_config(db._test_config, 'test_config.yaml')
>>> g = db.get_comment_group_for_path
>>> g("foobar.py")
[['#', '']]
>>> g("foobar.js")
[['/*', '*/'], ['//', '']]
>>> g('foobar.rst')
Traceback (most recent call last):
...
KeyError: 'No comment groups for content type `structured-text` for file `foobar.rst` found'
# If the content type cannot be determined, we assume the content
# type to be ``python`` in this case.
>>> g('foobar.f37993ajdha73', default_content_type='python')
[['#', '']]
>>> g("foobar.f37993ajdha73")
Traceback (most recent call last):
...
ValueError: No content type defined for file path: foobar.f37993ajdha73
>>> g("foobar.f37993ajdha73", default_content_type=None)
Traceback (most recent call last):
...
ValueError: No content type defined for file path: foobar.f37993ajdha73
"""
content_type = self.guess_content_type(pathname)
if not content_type:
# Content type is not found.
if default_content_type:
content_type = default_content_type
return self.get_comment_group(content_type)
else:
raise ValueError(
"No content type defined for file path: %s" % pathname)
else:
try:
return self.get_comment_group(content_type)
except KeyError:
raise KeyError(
"No comment groups for content type `%s` for file `%s` found" % (
content_type, pathname)) |
def as_single_element(self):
"""
Processes the response as a single-element response,
like config_get or system_counters_get.
        If there is more than one element in the response or no
elements this raises a ResponseError
"""
if self.as_return_etree is None:
return None
if len(self.as_return_etree.getchildren()) == 1:
return _populate_bunch_with_element(self.as_return_etree.
getchildren()[0])
return _populate_bunch_with_element(self.as_return_etree) | Processes the response as a single-element response,
like config_get or system_counters_get.
    If there is more than one element in the response or no
    elements this raises a ResponseError | Below is the instruction that describes the task:
### Input:
Processes the response as a single-element response,
like config_get or system_counters_get.
If there is more than one element in the response or no
elements this raises a ResponseError
### Response:
def as_single_element(self):
"""
Processes the response as a single-element response,
like config_get or system_counters_get.
        If there is more than one element in the response or no
elements this raises a ResponseError
"""
if self.as_return_etree is None:
return None
if len(self.as_return_etree.getchildren()) == 1:
return _populate_bunch_with_element(self.as_return_etree.
getchildren()[0])
return _populate_bunch_with_element(self.as_return_etree) |
def _complete_values(self, symbol = ""):
"""Compiles a list of possible symbols that can hold a value in
place. These consist of local vars, global vars, and functions."""
result = {}
#Also add the subroutines from the module and its dependencies.
moddict = self._generic_filter_execs(self.context.module)
self._cond_update(result, moddict, symbol)
self._cond_update(result, self.context.module.interfaces, symbol)
for depend in self.context.module.dependencies:
if depend in self.context.module.parent.modules:
#We don't want to display executables that are part of an interface, or that are embedded in
#a derived type, since those will be called through the type or interface
filtdict = self._generic_filter_execs(self.context.module.parent.modules[depend])
self._cond_update(result, filtdict, symbol)
self._cond_update(result, self.context.module.parent.modules[depend].interfaces, symbol)
#Add all the local vars if we are in an executable
if (isinstance(self.context.element, Function) or
isinstance(self.context.element, Subroutine)):
self._cond_update(result, self.element.members, symbol)
#Next add the global variables from the module
if self.context.module is not None:
self._cond_update(result, self.context.module.members, symbol)
#Next add user defined functions to the mix
for execkey in self.context.module.executables:
iexec = self.context.module.executables[execkey]
if isinstance(iexec, Function) and self._symbol_in(symbol, iexec.name):
result[iexec.name] = iexec
#Finally add the builtin functions to the mix. We need to add support
#for these in a separate file so we have their call signatures.
if symbol == "":
#Use the abbreviated list of most common fortran builtins
self._cond_update(result, cache.common_builtin, symbol)
else:
#we can use the full list as there will probably not be that
#many left over.
self._cond_update(result, cache.builtin, symbol)
return result | Compiles a list of possible symbols that can hold a value in
    place. These consist of local vars, global vars, and functions. | Below is the instruction that describes the task:
### Input:
Compiles a list of possible symbols that can hold a value in
place. These consist of local vars, global vars, and functions.
### Response:
def _complete_values(self, symbol = ""):
"""Compiles a list of possible symbols that can hold a value in
place. These consist of local vars, global vars, and functions."""
result = {}
#Also add the subroutines from the module and its dependencies.
moddict = self._generic_filter_execs(self.context.module)
self._cond_update(result, moddict, symbol)
self._cond_update(result, self.context.module.interfaces, symbol)
for depend in self.context.module.dependencies:
if depend in self.context.module.parent.modules:
#We don't want to display executables that are part of an interface, or that are embedded in
#a derived type, since those will be called through the type or interface
filtdict = self._generic_filter_execs(self.context.module.parent.modules[depend])
self._cond_update(result, filtdict, symbol)
self._cond_update(result, self.context.module.parent.modules[depend].interfaces, symbol)
#Add all the local vars if we are in an executable
if (isinstance(self.context.element, Function) or
isinstance(self.context.element, Subroutine)):
self._cond_update(result, self.element.members, symbol)
#Next add the global variables from the module
if self.context.module is not None:
self._cond_update(result, self.context.module.members, symbol)
#Next add user defined functions to the mix
for execkey in self.context.module.executables:
iexec = self.context.module.executables[execkey]
if isinstance(iexec, Function) and self._symbol_in(symbol, iexec.name):
result[iexec.name] = iexec
#Finally add the builtin functions to the mix. We need to add support
#for these in a separate file so we have their call signatures.
if symbol == "":
#Use the abbreviated list of most common fortran builtins
self._cond_update(result, cache.common_builtin, symbol)
else:
#we can use the full list as there will probably not be that
#many left over.
self._cond_update(result, cache.builtin, symbol)
return result |
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
        return d | Return an axes dictionary for myself. | Below is the instruction that describes the task:
### Input:
Return an axes dictionary for myself.
### Response:
def _construct_axes_dict(self, axes=None, **kwargs):
"""Return an axes dictionary for myself."""
d = {a: self._get_axis(a) for a in (axes or self._AXIS_ORDERS)}
d.update(kwargs)
return d |
def shapes(self, simplify=None, predicate=None):
"""
Return geodata as a list of Shapely shapes
:param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
:param predicate: A single-argument function to select which records to include in the output.
:return: A list of Shapely objects
"""
from shapely.wkt import loads
if not predicate:
predicate = lambda row: True
if simplify:
return [loads(row.geometry).simplify(simplify) for row in self if predicate(row)]
else:
return [loads(row.geometry) for row in self if predicate(row)] | Return geodata as a list of Shapely shapes
:param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
:param predicate: A single-argument function to select which records to include in the output.
    :return: A list of Shapely objects | Below is the instruction that describes the task:
### Input:
Return geodata as a list of Shapely shapes
:param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
:param predicate: A single-argument function to select which records to include in the output.
:return: A list of Shapely objects
### Response:
def shapes(self, simplify=None, predicate=None):
"""
Return geodata as a list of Shapely shapes
:param simplify: Integer or None. Simplify the geometry to a tolerance, in the units of the geometry.
:param predicate: A single-argument function to select which records to include in the output.
:return: A list of Shapely objects
"""
from shapely.wkt import loads
if not predicate:
predicate = lambda row: True
if simplify:
return [loads(row.geometry).simplify(simplify) for row in self if predicate(row)]
else:
return [loads(row.geometry) for row in self if predicate(row)] |
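A hedged sketch of the Shapely calls involved (assumes the `shapely` package is installed; the WKT strings are invented):

```python
from shapely.wkt import loads

wkt_rows = ["POLYGON ((0 0, 4 0, 4 4, 0 4, 0 0))", "POINT (1 2)"]  # made-up geometry column
geoms = [loads(w).simplify(0.5) for w in wkt_rows]                  # parse, then simplify to tolerance 0.5
print([g.geom_type for g in geoms])  # ['Polygon', 'Point']
```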
def CSWAP(control, target_1, target_2):
"""Produces a controlled-SWAP gate. This gate conditionally swaps the state of two qubits::
CSWAP = [[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1]]
:param control: The control qubit.
:param target-1: The first target qubit.
:param target-2: The second target qubit. The two target states are swapped if the control is
in the ``|1>`` state.
"""
qubits = [unpack_qubit(q) for q in (control, target_1, target_2)]
return Gate(name="CSWAP", params=[], qubits=qubits) | Produces a controlled-SWAP gate. This gate conditionally swaps the state of two qubits::
CSWAP = [[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1]]
:param control: The control qubit.
:param target-1: The first target qubit.
:param target-2: The second target qubit. The two target states are swapped if the control is
    in the ``|1>`` state. | Below is the instruction that describes the task:
### Input:
Produces a controlled-SWAP gate. This gate conditionally swaps the state of two qubits::
CSWAP = [[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1]]
:param control: The control qubit.
:param target-1: The first target qubit.
:param target-2: The second target qubit. The two target states are swapped if the control is
in the ``|1>`` state.
### Response:
def CSWAP(control, target_1, target_2):
"""Produces a controlled-SWAP gate. This gate conditionally swaps the state of two qubits::
CSWAP = [[1, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1]]
:param control: The control qubit.
:param target-1: The first target qubit.
:param target-2: The second target qubit. The two target states are swapped if the control is
in the ``|1>`` state.
"""
qubits = [unpack_qubit(q) for q in (control, target_1, target_2)]
return Gate(name="CSWAP", params=[], qubits=qubits) |
def eeg_complexity(eeg, sampling_rate, times=None, index=None, include="all", exclude=None, hemisphere="both", central=True, verbose=True, shannon=True, sampen=True, multiscale=True, spectral=True, svd=True, correlation=True, higushi=True, petrosian=True, fisher=True, hurst=True, dfa=True, lyap_r=False, lyap_e=False, names="Complexity"):
"""
Compute complexity indices of epochs or raw object.
DOCS INCOMPLETE :(
"""
data = eeg_to_df(eeg, index=index, include=include, exclude=exclude, hemisphere=hemisphere, central=central)
# if data was Raw, make as if it was an Epoch so the following routine is only written once
if isinstance(data, dict) is False:
data = {0: data}
# Create time windows
if isinstance(times, tuple):
times = list(times)
if isinstance(times, list):
if isinstance(times[0], list) is False:
times = [times]
else:
times = [[0, None]]
# Deal with names
if isinstance(names, str):
prefix = [names] * len(times)
if len(times) > 1:
for time_index, time_window in enumerate(times):
prefix[time_index] = prefix[time_index] + "_%.2f_%.2f" %(time_window[0], time_window[1])
else:
prefix = names
# Iterate
complexity_all = pd.DataFrame()
for time_index, time_window in enumerate(times):
if len(times) > 1 and verbose is True:
print("Computing complexity features... window " + str(time_window) + "/" + str(len(times)))
complexity_features = {}
# Compute complexity for each channel for each epoch
index = 0
for epoch_index, epoch in data.items():
if len(times) == 1 and verbose is True:
print("Computing complexity features... " + str(round(index/len(data.items())*100, 2)) + "%")
index +=1
df = epoch[time_window[0]:time_window[1]].copy()
complexity_features[epoch_index] = {}
for channel in df:
signal = df[channel].values
features = complexity(signal, sampling_rate=sampling_rate, shannon=shannon, sampen=sampen, multiscale=multiscale, spectral=spectral, svd=svd, correlation=correlation, higushi=higushi, petrosian=petrosian, fisher=fisher, hurst=hurst, dfa=dfa, lyap_r=lyap_r, lyap_e=lyap_e)
for key, feature in features.items():
if key in complexity_features[epoch_index].keys():
complexity_features[epoch_index][key].append(feature)
else:
complexity_features[epoch_index][key] = [feature]
for epoch_index, epoch in complexity_features.items():
for feature in epoch:
complexity_features[epoch_index][feature] = pd.Series(complexity_features[epoch_index][feature]).mean()
# Convert to dataframe
complexity_features = pd.DataFrame.from_dict(complexity_features, orient="index")
complexity_features.columns = [prefix[time_index] + "_" + s for s in complexity_features.columns]
complexity_all = pd.concat([complexity_all, complexity_features], axis=1)
return(complexity_all) | Compute complexity indices of epochs or raw object.
    DOCS INCOMPLETE :( | Below is the instruction that describes the task:
### Input:
Compute complexity indices of epochs or raw object.
DOCS INCOMPLETE :(
### Response:
def eeg_complexity(eeg, sampling_rate, times=None, index=None, include="all", exclude=None, hemisphere="both", central=True, verbose=True, shannon=True, sampen=True, multiscale=True, spectral=True, svd=True, correlation=True, higushi=True, petrosian=True, fisher=True, hurst=True, dfa=True, lyap_r=False, lyap_e=False, names="Complexity"):
"""
Compute complexity indices of epochs or raw object.
DOCS INCOMPLETE :(
"""
data = eeg_to_df(eeg, index=index, include=include, exclude=exclude, hemisphere=hemisphere, central=central)
# if data was Raw, make as if it was an Epoch so the following routine is only written once
if isinstance(data, dict) is False:
data = {0: data}
# Create time windows
if isinstance(times, tuple):
times = list(times)
if isinstance(times, list):
if isinstance(times[0], list) is False:
times = [times]
else:
times = [[0, None]]
# Deal with names
if isinstance(names, str):
prefix = [names] * len(times)
if len(times) > 1:
for time_index, time_window in enumerate(times):
prefix[time_index] = prefix[time_index] + "_%.2f_%.2f" %(time_window[0], time_window[1])
else:
prefix = names
# Iterate
complexity_all = pd.DataFrame()
for time_index, time_window in enumerate(times):
if len(times) > 1 and verbose is True:
print("Computing complexity features... window " + str(time_window) + "/" + str(len(times)))
complexity_features = {}
# Compute complexity for each channel for each epoch
index = 0
for epoch_index, epoch in data.items():
if len(times) == 1 and verbose is True:
print("Computing complexity features... " + str(round(index/len(data.items())*100, 2)) + "%")
index +=1
df = epoch[time_window[0]:time_window[1]].copy()
complexity_features[epoch_index] = {}
for channel in df:
signal = df[channel].values
features = complexity(signal, sampling_rate=sampling_rate, shannon=shannon, sampen=sampen, multiscale=multiscale, spectral=spectral, svd=svd, correlation=correlation, higushi=higushi, petrosian=petrosian, fisher=fisher, hurst=hurst, dfa=dfa, lyap_r=lyap_r, lyap_e=lyap_e)
for key, feature in features.items():
if key in complexity_features[epoch_index].keys():
complexity_features[epoch_index][key].append(feature)
else:
complexity_features[epoch_index][key] = [feature]
for epoch_index, epoch in complexity_features.items():
for feature in epoch:
complexity_features[epoch_index][feature] = pd.Series(complexity_features[epoch_index][feature]).mean()
# Convert to dataframe
complexity_features = pd.DataFrame.from_dict(complexity_features, orient="index")
complexity_features.columns = [prefix[time_index] + "_" + s for s in complexity_features.columns]
complexity_all = pd.concat([complexity_all, complexity_features], axis=1)
return(complexity_all) |
def respond_redirect(self, location='/'):
"""
Respond to the client with a 301 message and redirect them with
a Location header.
:param str location: The new location to redirect the client to.
"""
self.send_response(301)
self.send_header('Content-Length', 0)
self.send_header('Location', location)
self.end_headers()
return | Respond to the client with a 301 message and redirect them with
a Location header.
    :param str location: The new location to redirect the client to. | Below is the instruction that describes the task:
### Input:
Respond to the client with a 301 message and redirect them with
a Location header.
:param str location: The new location to redirect the client to.
### Response:
def respond_redirect(self, location='/'):
"""
Respond to the client with a 301 message and redirect them with
a Location header.
:param str location: The new location to redirect the client to.
"""
self.send_response(301)
self.send_header('Content-Length', 0)
self.send_header('Location', location)
self.end_headers()
return |
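For orientation, a hedged standard-library sketch of the same redirect pattern; the host, port and target URL below are placeholders, not from the original project:

```python
from http.server import BaseHTTPRequestHandler, HTTPServer

class RedirectHandler(BaseHTTPRequestHandler):
    def do_GET(self):
        # send a 301 with an empty body and a Location header, as above
        self.send_response(301)
        self.send_header('Content-Length', '0')
        self.send_header('Location', '/new-home')
        self.end_headers()

# HTTPServer(('127.0.0.1', 8080), RedirectHandler).serve_forever()  # uncomment to try it
```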
def create_project(self, name, description):
"""
Create a new project with the specified name and description
:param name: str: name of the project to create
:param description: str: description of the project to create
:return: Project
"""
return self._create_item_response(
self.data_service.create_project(name, description),
Project) | Create a new project with the specified name and description
:param name: str: name of the project to create
:param description: str: description of the project to create
    :return: Project | Below is the instruction that describes the task:
### Input:
Create a new project with the specified name and description
:param name: str: name of the project to create
:param description: str: description of the project to create
:return: Project
### Response:
def create_project(self, name, description):
"""
Create a new project with the specified name and description
:param name: str: name of the project to create
:param description: str: description of the project to create
:return: Project
"""
return self._create_item_response(
self.data_service.create_project(name, description),
Project) |
def get_random_string():
""" make a random string, which we can use for bsub job IDs, so that
different jobs do not have the same job IDs.
"""
# set up a random string to associate with the run
hash_string = "%8x" % random.getrandbits(32)
hash_string = hash_string.strip()
    # don't allow the random strings to be equivalent to a number, since
# the LSF cluster interprets those differently from letter-containing
# strings
while is_number(hash_string):
hash_string = "%8x" % random.getrandbits(32)
hash_string = hash_string.strip()
return hash_string | make a random string, which we can use for bsub job IDs, so that
    different jobs do not have the same job IDs. | Below is the instruction that describes the task:
### Input:
make a random string, which we can use for bsub job IDs, so that
different jobs do not have the same job IDs.
### Response:
def get_random_string():
""" make a random string, which we can use for bsub job IDs, so that
different jobs do not have the same job IDs.
"""
# set up a random string to associate with the run
hash_string = "%8x" % random.getrandbits(32)
hash_string = hash_string.strip()
# done't allow the random strings to be equivalent to a number, since
# the LSF cluster interprets those differently from letter-containing
# strings
while is_number(hash_string):
hash_string = "%8x" % random.getrandbits(32)
hash_string = hash_string.strip()
return hash_string |
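A hedged sketch of the numeric-collision check the loop guards against; `is_number` is not shown in the snippet above, so a plausible stand-in is used here:

```python
import random

def is_number(s):  # hypothetical helper: does the string parse as a number?
    try:
        float(s)
        return True
    except ValueError:
        return False

hash_string = ("%8x" % random.getrandbits(32)).strip()
print(hash_string, is_number(hash_string))  # e.g. '3f9a21c7 False'; an all-digit draw like '12345678' would be True
```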
def extract_upgrade_scripts(self):
"""
Extract the OpenQuake upgrade scripts from the links in the GitHub page
"""
link_pattern = '>\s*{0}\s*<'.format(self.pattern[1:-1])
page = urllib.request.urlopen(self.upgrades_url).read()
for mo in re.finditer(link_pattern, page):
scriptname = mo.group(0)[1:-1].strip()
        yield self.parse_script_name(scriptname) | Extract the OpenQuake upgrade scripts from the links in the GitHub page | Below is the instruction that describes the task:
### Input:
Extract the OpenQuake upgrade scripts from the links in the GitHub page
### Response:
def extract_upgrade_scripts(self):
"""
Extract the OpenQuake upgrade scripts from the links in the GitHub page
"""
link_pattern = '>\s*{0}\s*<'.format(self.pattern[1:-1])
page = urllib.request.urlopen(self.upgrades_url).read()
for mo in re.finditer(link_pattern, page):
scriptname = mo.group(0)[1:-1].strip()
yield self.parse_script_name(scriptname) |
def timedelta_seconds(value: datetime.timedelta) -> int:
"""Return full number of seconds from timedelta.
    By default, Python returns only the seconds within the last day, not the total seconds of the timedelta.
:param value: Timedelta instance.
"""
return SECONDS_PER_DAY * value.days + value.seconds | Return full number of seconds from timedelta.
    By default, Python returns only the seconds within the last day, not the total seconds of the timedelta.
    :param value: Timedelta instance. | Below is the instruction that describes the task:
### Input:
Return full number of seconds from timedelta.
By default, Python returns only the seconds within the last day, not the total seconds of the timedelta.
:param value: Timedelta instance.
### Response:
def timedelta_seconds(value: datetime.timedelta) -> int:
"""Return full number of seconds from timedelta.
    By default, Python returns only the seconds within the last day, not the total seconds of the timedelta.
:param value: Timedelta instance.
"""
return SECONDS_PER_DAY * value.days + value.seconds |
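A hedged illustration of why the day component has to be folded back in (assumes the same `SECONDS_PER_DAY = 86400` constant as the module above):

```python
import datetime

SECONDS_PER_DAY = 86400  # assumed constant; the original module defines its own

delta = datetime.timedelta(days=2, seconds=30)
print(delta.seconds)                                 # 30  -- only the seconds within the last day
print(SECONDS_PER_DAY * delta.days + delta.seconds)  # 172830 -- the full number of seconds
```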
def get_type_data(name):
"""Return dictionary representation of type.
Can be used to initialize primordium.type.primitives.Type
"""
name = name.upper()
try:
return {
'authority': 'okapia.net',
'namespace': 'heading',
'identifier': name,
'domain': 'Headings',
'display_name': HEADING_TYPES[name] + ' Heading Type',
'display_label': HEADING_TYPES[name],
'description': ('The heading type for the ' +
HEADING_TYPES[name] + ' heading.')
}
except KeyError:
raise NotFound('Heading Type:' + name) | Return dictionary representation of type.
    Can be used to initialize primordium.type.primitives.Type | Below is the instruction that describes the task:
### Input:
Return dictionary representation of type.
Can be used to initialize primordium.type.primitives.Type
### Response:
def get_type_data(name):
"""Return dictionary representation of type.
Can be used to initialize primordium.type.primitives.Type
"""
name = name.upper()
try:
return {
'authority': 'okapia.net',
'namespace': 'heading',
'identifier': name,
'domain': 'Headings',
'display_name': HEADING_TYPES[name] + ' Heading Type',
'display_label': HEADING_TYPES[name],
'description': ('The heading type for the ' +
HEADING_TYPES[name] + ' heading.')
}
except KeyError:
raise NotFound('Heading Type:' + name) |
def anno_parser(func):
"Look at params (annotated with `Param`) in func and return an `ArgumentParser`"
p = ArgumentParser(description=func.__doc__)
for k,v in inspect.signature(func).parameters.items():
param = func.__annotations__.get(k, Param())
kwargs = param.kwargs
if v.default != inspect.Parameter.empty: kwargs['default'] = v.default
p.add_argument(f"{param.pre}{k}", **kwargs)
    return p | Look at params (annotated with `Param`) in func and return an `ArgumentParser` | Below is the instruction that describes the task:
### Input:
Look at params (annotated with `Param`) in func and return an `ArgumentParser`
### Response:
def anno_parser(func):
"Look at params (annotated with `Param`) in func and return an `ArgumentParser`"
p = ArgumentParser(description=func.__doc__)
for k,v in inspect.signature(func).parameters.items():
param = func.__annotations__.get(k, Param())
kwargs = param.kwargs
if v.default != inspect.Parameter.empty: kwargs['default'] = v.default
p.add_argument(f"{param.pre}{k}", **kwargs)
return p |
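A hedged sketch of how such a parser builder might be exercised; the `Param` class below is a simplified stand-in (the real library class has more options), and `anno_parser` is assumed to be the function defined above:

```python
from argparse import ArgumentParser
import inspect

class Param:  # minimal stand-in; only the attributes anno_parser reads
    def __init__(self, help=None, type=None, pre='--'):
        self.pre = pre
        self.kwargs = {k: v for k, v in dict(help=help, type=type).items() if v is not None}

def train(lr: Param(help="learning rate", type=float) = 0.1,
          epochs: Param(type=int) = 3):
    "Train a model."

print(anno_parser(train).parse_args(["--lr", "0.01"]))
# Namespace(lr=0.01, epochs=3)
```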
def set_environment_variable(self, key, val):
""" Sets a variable if that variable is not already set """
if self.get_environment_variable(key) in [None, val]:
self.__dict__['environment_variables'][key] = val
else:
            raise Contradiction("Could not set environment variable %s" % (key)) | Sets a variable if that variable is not already set | Below is the instruction that describes the task:
### Input:
Sets a variable if that variable is not already set
### Response:
def set_environment_variable(self, key, val):
""" Sets a variable if that variable is not already set """
if self.get_environment_variable(key) in [None, val]:
self.__dict__['environment_variables'][key] = val
else:
raise Contradiction("Could not set environment variable %s" % (key)) |
def is50or60(msg, spd_ref, trk_ref, alt_ref):
"""Use reference ground speed and trk to determine BDS50 and DBS60.
Args:
msg (String): 28 bytes hexadecimal message string
spd_ref (float): reference speed (ADS-B ground speed), kts
trk_ref (float): reference track (ADS-B track angle), deg
alt_ref (float): reference altitude (ADS-B altitude), ft
Returns:
String or None: BDS version, or possible versions, or None if nothing matches.
"""
def vxy(v, angle):
vx = v * np.sin(np.radians(angle))
vy = v * np.cos(np.radians(angle))
return vx, vy
if not (bds50.is50(msg) and bds60.is60(msg)):
return None
h50 = bds50.trk50(msg)
v50 = bds50.gs50(msg)
if h50 is None or v50 is None:
return 'BDS50,BDS60'
h60 = bds60.hdg60(msg)
m60 = bds60.mach60(msg)
i60 = bds60.ias60(msg)
if h60 is None or (m60 is None and i60 is None):
return 'BDS50,BDS60'
m60 = np.nan if m60 is None else m60
i60 = np.nan if i60 is None else i60
XY5 = vxy(v50*aero.kts, h50)
XY6m = vxy(aero.mach2tas(m60, alt_ref*aero.ft), h60)
XY6i = vxy(aero.cas2tas(i60*aero.kts, alt_ref*aero.ft), h60)
allbds = ['BDS50', 'BDS60', 'BDS60']
X = np.array([XY5, XY6m, XY6i])
Mu = np.array(vxy(spd_ref*aero.kts, trk_ref))
# compute Mahalanobis distance matrix
# Cov = [[20**2, 0], [0, 20**2]]
# mmatrix = np.sqrt(np.dot(np.dot(X-Mu, np.linalg.inv(Cov)), (X-Mu).T))
# dist = np.diag(mmatrix)
    # since the covariance matrix is the identity matrix,
    # M-dist is the same as the Euclidean distance
try:
dist = np.linalg.norm(X-Mu, axis=1)
BDS = allbds[np.nanargmin(dist)]
except ValueError:
return 'BDS50,BDS60'
    return BDS | Use reference ground speed and trk to determine BDS50 and BDS60.
Args:
msg (String): 28 bytes hexadecimal message string
spd_ref (float): reference speed (ADS-B ground speed), kts
trk_ref (float): reference track (ADS-B track angle), deg
alt_ref (float): reference altitude (ADS-B altitude), ft
Returns:
        String or None: BDS version, or possible versions, or None if nothing matches. | Below is the instruction that describes the task:
### Input:
Use reference ground speed and trk to determine BDS50 and BDS60.
Args:
msg (String): 28 bytes hexadecimal message string
spd_ref (float): reference speed (ADS-B ground speed), kts
trk_ref (float): reference track (ADS-B track angle), deg
alt_ref (float): reference altitude (ADS-B altitude), ft
Returns:
String or None: BDS version, or possible versions, or None if nothing matches.
### Response:
def is50or60(msg, spd_ref, trk_ref, alt_ref):
"""Use reference ground speed and trk to determine BDS50 and DBS60.
Args:
msg (String): 28 bytes hexadecimal message string
spd_ref (float): reference speed (ADS-B ground speed), kts
trk_ref (float): reference track (ADS-B track angle), deg
alt_ref (float): reference altitude (ADS-B altitude), ft
Returns:
String or None: BDS version, or possible versions, or None if nothing matches.
"""
def vxy(v, angle):
vx = v * np.sin(np.radians(angle))
vy = v * np.cos(np.radians(angle))
return vx, vy
if not (bds50.is50(msg) and bds60.is60(msg)):
return None
h50 = bds50.trk50(msg)
v50 = bds50.gs50(msg)
if h50 is None or v50 is None:
return 'BDS50,BDS60'
h60 = bds60.hdg60(msg)
m60 = bds60.mach60(msg)
i60 = bds60.ias60(msg)
if h60 is None or (m60 is None and i60 is None):
return 'BDS50,BDS60'
m60 = np.nan if m60 is None else m60
i60 = np.nan if i60 is None else i60
XY5 = vxy(v50*aero.kts, h50)
XY6m = vxy(aero.mach2tas(m60, alt_ref*aero.ft), h60)
XY6i = vxy(aero.cas2tas(i60*aero.kts, alt_ref*aero.ft), h60)
allbds = ['BDS50', 'BDS60', 'BDS60']
X = np.array([XY5, XY6m, XY6i])
Mu = np.array(vxy(spd_ref*aero.kts, trk_ref))
# compute Mahalanobis distance matrix
# Cov = [[20**2, 0], [0, 20**2]]
# mmatrix = np.sqrt(np.dot(np.dot(X-Mu, np.linalg.inv(Cov)), (X-Mu).T))
# dist = np.diag(mmatrix)
    # since the covariance matrix is the identity matrix,
    # M-dist is the same as the Euclidean distance
try:
dist = np.linalg.norm(X-Mu, axis=1)
BDS = allbds[np.nanargmin(dist)]
except ValueError:
return 'BDS50,BDS60'
return BDS |
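A hedged numpy sketch of the underlying selection idea — pick the candidate velocity vector closest to the ADS-B reference; all speeds and headings below are invented:

```python
import numpy as np

def vxy(speed, angle_deg):
    a = np.radians(angle_deg)
    return speed * np.sin(a), speed * np.cos(a)

candidates = np.array([vxy(450, 90), vxy(430, 92), vxy(300, 45)])  # hypothetical decodings
reference = np.array(vxy(445, 91))                                  # ADS-B ground speed / track
dist = np.linalg.norm(candidates - reference, axis=1)
print(np.argmin(dist))  # index of the decoding that best matches the reference
```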
def get(self, id):
"""
        Get an object by unique identifier
:id string id: the bson id of an object
:rtype: JSON
"""
try:
if self.request.headers.get("Id"):
object_ = yield self.client.find_one({self.request.headers.get("Id"): id})
else:
object_ = yield self.client.find_one_by_id(id)
if object_:
self.write(object_)
return
self.raise_error(404, "%s/%s not found" % (self.object_name, id))
except InvalidId as ex:
self.raise_error(400, message="Your ID is malformed: %s" % id)
except Exception as ex:
self.logger.error(ex)
            self.raise_error() | Get an object by unique identifier
:id string id: the bson id of an object
    :rtype: JSON | Below is the instruction that describes the task:
### Input:
Get an object by unique identifier
:id string id: the bson id of an object
:rtype: JSON
### Response:
def get(self, id):
"""
    Get an object by unique identifier
:id string id: the bson id of an object
:rtype: JSON
"""
try:
if self.request.headers.get("Id"):
object_ = yield self.client.find_one({self.request.headers.get("Id"): id})
else:
object_ = yield self.client.find_one_by_id(id)
if object_:
self.write(object_)
return
self.raise_error(404, "%s/%s not found" % (self.object_name, id))
except InvalidId as ex:
self.raise_error(400, message="Your ID is malformed: %s" % id)
except Exception as ex:
self.logger.error(ex)
self.raise_error() |
def organization_field_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/organization_fields#show-organization-field"
api_path = "/api/v2/organization_fields/{id}.json"
api_path = api_path.format(id=id)
        return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/organization_fields#show-organization-field | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/organization_fields#show-organization-field
### Response:
def organization_field_show(self, id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/organization_fields#show-organization-field"
api_path = "/api/v2/organization_fields/{id}.json"
api_path = api_path.format(id=id)
return self.call(api_path, **kwargs) |
def shift_right(self, times=1):
"""
Finds Location shifted right by 1
:rtype: Location
"""
try:
return Location(self._rank, self._file + times)
except IndexError as e:
raise IndexError(e) | Finds Location shifted right by 1
    :rtype: Location | Below is the instruction that describes the task:
### Input:
Finds Location shifted right by 1
:rtype: Location
### Response:
def shift_right(self, times=1):
"""
Finds Location shifted right by 1
:rtype: Location
"""
try:
return Location(self._rank, self._file + times)
except IndexError as e:
raise IndexError(e) |
def find_le_index(self, k):
'Return last item with a key <= k. Raise ValueError if not found.'
i = bisect_right(self._keys, k)
if i:
return i - 1
    raise ValueError('No item found with key at or below: %r' % (k,)) | Return last item with a key <= k. Raise ValueError if not found. | Below is the instruction that describes the task:
### Input:
Return last item with a key <= k. Raise ValueError if not found.
### Response:
def find_le_index(self, k):
'Return last item with a key <= k. Raise ValueError if not found.'
i = bisect_right(self._keys, k)
if i:
return i - 1
raise ValueError('No item found with key at or below: %r' % (k,)) |
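A hedged, self-contained rendering of the same `bisect_right` lookup on a plain sorted list:

```python
from bisect import bisect_right

def find_le_index(keys, k):
    i = bisect_right(keys, k)
    if i:
        return i - 1
    raise ValueError('No item found with key at or below: %r' % (k,))

keys = [2, 4, 4, 9]
print(find_le_index(keys, 5))   # 2 -> keys[2] == 4 is the last key <= 5
# find_le_index(keys, 1) would raise ValueError: no key at or below 1
```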
def get_metadata():
"""Get pandas.DataFrame with metadata about the PWM's. Columns:
- PWM_id (id of the PWM - pass to get_pwm_list() for getting the pwm
- info1 - additional information about the motifs
- info2
- consensus: PWM consensus sequence
"""
motifs = _load_motifs()
motif_names = sorted(list(motifs.keys()))
df = pd.Series(motif_names).str.split(expand=True)
df.rename(columns={0: "PWM_id", 1: "info1", 2: "info2"}, inplace=True)
# compute the consensus
consensus = pd.Series([PWM(motifs[m]).get_consensus() for m in motif_names])
df["consensus"] = consensus
return df | Get pandas.DataFrame with metadata about the PWM's. Columns:
- PWM_id (id of the PWM - pass to get_pwm_list() for getting the pwm
- info1 - additional information about the motifs
- info2
    - consensus: PWM consensus sequence | Below is the instruction that describes the task:
### Input:
Get pandas.DataFrame with metadata about the PWM's. Columns:
- PWM_id (id of the PWM - pass to get_pwm_list() for getting the pwm
- info1 - additional information about the motifs
- info2
- consensus: PWM consensus sequence
### Response:
def get_metadata():
"""Get pandas.DataFrame with metadata about the PWM's. Columns:
- PWM_id (id of the PWM - pass to get_pwm_list() for getting the pwm
- info1 - additional information about the motifs
- info2
- consensus: PWM consensus sequence
"""
motifs = _load_motifs()
motif_names = sorted(list(motifs.keys()))
df = pd.Series(motif_names).str.split(expand=True)
df.rename(columns={0: "PWM_id", 1: "info1", 2: "info2"}, inplace=True)
# compute the consensus
consensus = pd.Series([PWM(motifs[m]).get_consensus() for m in motif_names])
df["consensus"] = consensus
return df |
def clearContents(cls):
"""Clear contents of general pasteboard.
Future enhancement can include specifying which clipboard to clear
Returns: True on success; caller should expect to catch exceptions,
probably from AppKit (ValueError)
"""
log_msg = 'Request to clear contents of pasteboard: general'
logging.debug(log_msg)
pb = AppKit.NSPasteboard.generalPasteboard()
pb.clearContents()
return True | Clear contents of general pasteboard.
Future enhancement can include specifying which clipboard to clear
Returns: True on success; caller should expect to catch exceptions,
    probably from AppKit (ValueError) | Below is the instruction that describes the task:
### Input:
Clear contents of general pasteboard.
Future enhancement can include specifying which clipboard to clear
Returns: True on success; caller should expect to catch exceptions,
probably from AppKit (ValueError)
### Response:
def clearContents(cls):
"""Clear contents of general pasteboard.
Future enhancement can include specifying which clipboard to clear
Returns: True on success; caller should expect to catch exceptions,
probably from AppKit (ValueError)
"""
log_msg = 'Request to clear contents of pasteboard: general'
logging.debug(log_msg)
pb = AppKit.NSPasteboard.generalPasteboard()
pb.clearContents()
return True |
def unpack(self, source: IO):
"""
Read the ConstantPool from the file-like object `source`.
.. note::
Advanced usage only. You will typically never need to call this
method as it will be called for you when loading a ClassFile.
:param source: Any file-like object providing `read()`
"""
count = unpack('>H', source.read(2))[0]
for _ in repeat(None, count):
name_index, length = unpack('>HI', source.read(6))
info_blob = source.read(length)
self._table.append((name_index, info_blob)) | Read the ConstantPool from the file-like object `source`.
.. note::
Advanced usage only. You will typically never need to call this
method as it will be called for you when loading a ClassFile.
    :param source: Any file-like object providing `read()` | Below is the instruction that describes the task:
### Input:
Read the ConstantPool from the file-like object `source`.
.. note::
Advanced usage only. You will typically never need to call this
method as it will be called for you when loading a ClassFile.
:param source: Any file-like object providing `read()`
### Response:
def unpack(self, source: IO):
"""
Read the ConstantPool from the file-like object `source`.
.. note::
Advanced usage only. You will typically never need to call this
method as it will be called for you when loading a ClassFile.
:param source: Any file-like object providing `read()`
"""
count = unpack('>H', source.read(2))[0]
for _ in repeat(None, count):
name_index, length = unpack('>HI', source.read(6))
info_blob = source.read(length)
self._table.append((name_index, info_blob)) |
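A hedged, standard-library-only sketch of the same length-prefixed read pattern; the entry layout below is invented for illustration:

```python
import io
import struct

entries = [(1, b"abc"), (7, b"\x00\x01")]          # made-up (name_index, payload) pairs
raw = struct.pack('>H', len(entries))              # big-endian count, as in the snippet above
for name_index, blob in entries:
    raw += struct.pack('>HI', name_index, len(blob)) + blob

source = io.BytesIO(raw)
count = struct.unpack('>H', source.read(2))[0]
table = []
for _ in range(count):
    name_index, length = struct.unpack('>HI', source.read(6))
    table.append((name_index, source.read(length)))
print(table)  # [(1, b'abc'), (7, b'\x00\x01')]
```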
def compile_theme(theme_id=None):
"""Compiles a theme."""
from engineer.processors import convert_less
from engineer.themes import ThemeManager
if theme_id is None:
themes = ThemeManager.themes().values()
else:
themes = [ThemeManager.theme(theme_id)]
with(indent(2)):
puts(colored.yellow("Compiling %s themes." % len(themes)))
for theme in themes:
theme_output_path = (theme.static_root / ('stylesheets/%s_precompiled.css' % theme.id)).normpath()
puts(colored.cyan("Compiling theme %s to %s" % (theme.id, theme_output_path)))
with indent(4):
puts("Compiling...")
convert_less(theme.static_root / ('stylesheets/%s.less' % theme.id),
theme_output_path,
minify=True)
puts(colored.green("Done.", bold=True)) | Compiles a theme. | Below is the the instruction that describes the task:
### Input:
Compiles a theme.
### Response:
def compile_theme(theme_id=None):
"""Compiles a theme."""
from engineer.processors import convert_less
from engineer.themes import ThemeManager
if theme_id is None:
themes = ThemeManager.themes().values()
else:
themes = [ThemeManager.theme(theme_id)]
with(indent(2)):
puts(colored.yellow("Compiling %s themes." % len(themes)))
for theme in themes:
theme_output_path = (theme.static_root / ('stylesheets/%s_precompiled.css' % theme.id)).normpath()
puts(colored.cyan("Compiling theme %s to %s" % (theme.id, theme_output_path)))
with indent(4):
puts("Compiling...")
convert_less(theme.static_root / ('stylesheets/%s.less' % theme.id),
theme_output_path,
minify=True)
puts(colored.green("Done.", bold=True)) |
def visit_call(self, node, parent):
"""visit a CallFunc node by returning a fresh instance of it"""
newnode = nodes.Call(node.lineno, node.col_offset, parent)
starargs = _visit_or_none(node, "starargs", self, newnode)
kwargs = _visit_or_none(node, "kwargs", self, newnode)
args = [self.visit(child, newnode) for child in node.args]
if node.keywords:
keywords = [self.visit(child, newnode) for child in node.keywords]
else:
keywords = None
if starargs:
new_starargs = nodes.Starred(
col_offset=starargs.col_offset,
lineno=starargs.lineno,
parent=starargs.parent,
)
new_starargs.postinit(value=starargs)
args.append(new_starargs)
if kwargs:
new_kwargs = nodes.Keyword(
arg=None,
col_offset=kwargs.col_offset,
lineno=kwargs.lineno,
parent=kwargs.parent,
)
new_kwargs.postinit(value=kwargs)
if keywords:
keywords.append(new_kwargs)
else:
keywords = [new_kwargs]
newnode.postinit(self.visit(node.func, newnode), args, keywords)
    return newnode | visit a CallFunc node by returning a fresh instance of it | Below is the instruction that describes the task:
### Input:
visit a CallFunc node by returning a fresh instance of it
### Response:
def visit_call(self, node, parent):
"""visit a CallFunc node by returning a fresh instance of it"""
newnode = nodes.Call(node.lineno, node.col_offset, parent)
starargs = _visit_or_none(node, "starargs", self, newnode)
kwargs = _visit_or_none(node, "kwargs", self, newnode)
args = [self.visit(child, newnode) for child in node.args]
if node.keywords:
keywords = [self.visit(child, newnode) for child in node.keywords]
else:
keywords = None
if starargs:
new_starargs = nodes.Starred(
col_offset=starargs.col_offset,
lineno=starargs.lineno,
parent=starargs.parent,
)
new_starargs.postinit(value=starargs)
args.append(new_starargs)
if kwargs:
new_kwargs = nodes.Keyword(
arg=None,
col_offset=kwargs.col_offset,
lineno=kwargs.lineno,
parent=kwargs.parent,
)
new_kwargs.postinit(value=kwargs)
if keywords:
keywords.append(new_kwargs)
else:
keywords = [new_kwargs]
newnode.postinit(self.visit(node.func, newnode), args, keywords)
return newnode |
def get_beam(header):
"""
Create a :class:`AegeanTools.fits_image.Beam` object from a fits header.
BPA may be missing but will be assumed to be zero.
if BMAJ or BMIN are missing then return None instead of a beam object.
Parameters
----------
header : HDUHeader
The fits header.
Returns
-------
beam : :class:`AegeanTools.fits_image.Beam`
Beam object, with a, b, and pa in degrees.
"""
if "BPA" not in header:
log.warning("BPA not present in fits header, using 0")
bpa = 0
else:
bpa = header["BPA"]
if "BMAJ" not in header:
log.warning("BMAJ not present in fits header.")
bmaj = None
else:
bmaj = header["BMAJ"]
if "BMIN" not in header:
log.warning("BMIN not present in fits header.")
bmin = None
else:
bmin = header["BMIN"]
if None in [bmaj, bmin, bpa]:
return None
beam = Beam(bmaj, bmin, bpa)
return beam | Create a :class:`AegeanTools.fits_image.Beam` object from a fits header.
BPA may be missing but will be assumed to be zero.
if BMAJ or BMIN are missing then return None instead of a beam object.
Parameters
----------
header : HDUHeader
The fits header.
Returns
-------
beam : :class:`AegeanTools.fits_image.Beam`
        Beam object, with a, b, and pa in degrees. | Below is the instruction that describes the task:
### Input:
Create a :class:`AegeanTools.fits_image.Beam` object from a fits header.
BPA may be missing but will be assumed to be zero.
if BMAJ or BMIN are missing then return None instead of a beam object.
Parameters
----------
header : HDUHeader
The fits header.
Returns
-------
beam : :class:`AegeanTools.fits_image.Beam`
Beam object, with a, b, and pa in degrees.
### Response:
def get_beam(header):
"""
Create a :class:`AegeanTools.fits_image.Beam` object from a fits header.
BPA may be missing but will be assumed to be zero.
if BMAJ or BMIN are missing then return None instead of a beam object.
Parameters
----------
header : HDUHeader
The fits header.
Returns
-------
beam : :class:`AegeanTools.fits_image.Beam`
Beam object, with a, b, and pa in degrees.
"""
if "BPA" not in header:
log.warning("BPA not present in fits header, using 0")
bpa = 0
else:
bpa = header["BPA"]
if "BMAJ" not in header:
log.warning("BMAJ not present in fits header.")
bmaj = None
else:
bmaj = header["BMAJ"]
if "BMIN" not in header:
log.warning("BMIN not present in fits header.")
bmin = None
else:
bmin = header["BMIN"]
if None in [bmaj, bmin, bpa]:
return None
beam = Beam(bmaj, bmin, bpa)
return beam |
def safe_join(base, *paths):
"""
Joins one or more path components to the base path component intelligently.
Returns a normalized, absolute version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
"""
base = base
paths = [p for p in paths]
final_path = abspath(os.path.join(base, *paths))
base_path = abspath(base)
base_path_len = len(base_path)
# Ensure final_path starts with base_path (using normcase to ensure we
# don't false-negative on case insensitive operating systems like Windows)
# and that the next character after the final path is os.sep (or nothing,
# in which case final_path must be equal to base_path).
if not os.path.normcase(final_path).startswith(os.path.normcase(base_path)) \
or final_path[base_path_len:base_path_len + 1] not in ("", os.path.sep):
raise ValueError("The joined path (%s) is located outside of the base "
"path component (%s)" % (final_path, base_path))
return final_path | Joins one or more path components to the base path component intelligently.
Returns a normalized, absolute version of the final path.
The final path must be located inside of the base path component (otherwise
    a ValueError is raised). | Below is the instruction that describes the task:
### Input:
Joins one or more path components to the base path component intelligently.
Returns a normalized, absolute version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
### Response:
def safe_join(base, *paths):
"""
Joins one or more path components to the base path component intelligently.
Returns a normalized, absolute version of the final path.
The final path must be located inside of the base path component (otherwise
a ValueError is raised).
"""
base = base
paths = [p for p in paths]
final_path = abspath(os.path.join(base, *paths))
base_path = abspath(base)
base_path_len = len(base_path)
# Ensure final_path starts with base_path (using normcase to ensure we
# don't false-negative on case insensitive operating systems like Windows)
# and that the next character after the final path is os.sep (or nothing,
# in which case final_path must be equal to base_path).
if not os.path.normcase(final_path).startswith(os.path.normcase(base_path)) \
or final_path[base_path_len:base_path_len + 1] not in ("", os.path.sep):
raise ValueError("The joined path (%s) is located outside of the base "
"path component (%s)" % (final_path, base_path))
return final_path |
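A hedged usage sketch, assuming the `safe_join` defined above (with its `os`/`abspath` imports) is in scope and POSIX-style paths:

```python
print(safe_join("/srv/app", "static", "css/site.css"))
# /srv/app/static/css/site.css

try:
    safe_join("/srv/app", "..", "etc", "passwd")   # escapes the base directory
except ValueError as exc:
    print(exc)  # the joined path is located outside of the base path component
```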
def device_characteristics_str(self, indent):
"""Convenience to string method.
"""
s = "{}\n".format(self.label)
s += indent + "MAC Address: {}\n".format(self.mac_addr)
s += indent + "IP Address: {}\n".format(self.ip_addr)
s += indent + "Port: {}\n".format(self.port)
s += indent + "Power: {}\n".format(str_map(self.power_level))
s += indent + "Location: {}\n".format(self.location)
s += indent + "Group: {}\n".format(self.group)
        return s | Convenience to string method. | Below is the instruction that describes the task:
### Input:
Convenience to string method.
### Response:
def device_characteristics_str(self, indent):
"""Convenience to string method.
"""
s = "{}\n".format(self.label)
s += indent + "MAC Address: {}\n".format(self.mac_addr)
s += indent + "IP Address: {}\n".format(self.ip_addr)
s += indent + "Port: {}\n".format(self.port)
s += indent + "Power: {}\n".format(str_map(self.power_level))
s += indent + "Location: {}\n".format(self.location)
s += indent + "Group: {}\n".format(self.group)
return s |
def set(self, name, value, index=-1):
"""
Assign the ``value`` to the child having the given ``name`` at the ``index`` position
:type name: ``str``
:param name: the child name (e.g. PID)
:type value: an instance of :class:`Element <hl7apy.core.Element>`, a `str` or an instance of
:class:`ElementProxy <hl7apy.core.ElementProxy>`
:param value: the child value
:type index: ``int``
:param index: the child position (e.g. 1)
"""
# just copy the first element of the ElementProxy (e.g. message.pid = message2.pid)
if isinstance(value, ElementProxy):
value = value[0].to_er7()
name = name.upper()
reference = None if name is None else self.element.find_child_reference(name)
child_ref, child_name = (None, None) if reference is None else (reference['ref'], reference['name'])
if isinstance(value, basestring): # if the value is a basestring, parse it
child = self.element.parse_child(value, child_name=child_name, reference=child_ref)
elif isinstance(value, Element): # it is already an instance of Element
child = value
elif isinstance(value, BaseDataType):
child = self.create_element(name, False, reference)
child.value = value
else:
raise ChildNotValid(value, child_name)
if child.name != child_name: # e.g. message.pid = Segment('SPM') is forbidden
raise ChildNotValid(value, child_name)
child_to_remove = self.child_at_index(child_name, index)
if child_to_remove is None:
self.append(child)
else:
self.replace_child(child_to_remove, child)
# a set has been called, change the temporary parent to be the actual one
self.element.set_parent_to_traversal() | Assign the ``value`` to the child having the given ``name`` at the ``index`` position
:type name: ``str``
:param name: the child name (e.g. PID)
:type value: an instance of :class:`Element <hl7apy.core.Element>`, a `str` or an instance of
:class:`ElementProxy <hl7apy.core.ElementProxy>`
:param value: the child value
:type index: ``int``
    :param index: the child position (e.g. 1) | Below is the instruction that describes the task:
### Input:
Assign the ``value`` to the child having the given ``name`` at the ``index`` position
:type name: ``str``
:param name: the child name (e.g. PID)
:type value: an instance of :class:`Element <hl7apy.core.Element>`, a `str` or an instance of
:class:`ElementProxy <hl7apy.core.ElementProxy>`
:param value: the child value
:type index: ``int``
:param index: the child position (e.g. 1)
### Response:
def set(self, name, value, index=-1):
"""
Assign the ``value`` to the child having the given ``name`` at the ``index`` position
:type name: ``str``
:param name: the child name (e.g. PID)
:type value: an instance of :class:`Element <hl7apy.core.Element>`, a `str` or an instance of
:class:`ElementProxy <hl7apy.core.ElementProxy>`
:param value: the child value
:type index: ``int``
:param index: the child position (e.g. 1)
"""
# just copy the first element of the ElementProxy (e.g. message.pid = message2.pid)
if isinstance(value, ElementProxy):
value = value[0].to_er7()
name = name.upper()
reference = None if name is None else self.element.find_child_reference(name)
child_ref, child_name = (None, None) if reference is None else (reference['ref'], reference['name'])
if isinstance(value, basestring): # if the value is a basestring, parse it
child = self.element.parse_child(value, child_name=child_name, reference=child_ref)
elif isinstance(value, Element): # it is already an instance of Element
child = value
elif isinstance(value, BaseDataType):
child = self.create_element(name, False, reference)
child.value = value
else:
raise ChildNotValid(value, child_name)
if child.name != child_name: # e.g. message.pid = Segment('SPM') is forbidden
raise ChildNotValid(value, child_name)
child_to_remove = self.child_at_index(child_name, index)
if child_to_remove is None:
self.append(child)
else:
self.replace_child(child_to_remove, child)
# a set has been called, change the temporary parent to be the actual one
self.element.set_parent_to_traversal() |
def create_perturb_params(countsmat, transmat=None):
'''
Computes transition probabilities and standard errors of the transition probabilities due to
finite sampling using the MSM counts matrix. First, the transition probabilities are computed
by dividing the each element c_ij by the row-sumemd counts of row i. THe standard errors are then
computed by first computing the standard deviation of the transition probability, treating each count
as a Bernoulli process with p = t_ij (std = (t_ij - t_ij ^2)^0.5). This is then divided by the
square root of the row-summed counts of row i to obtain the standard error.
Parameters:
----------
countsmat: np.ndarray
The msm counts matrix
transmat: np.ndarray
If you have a transition matrix you want to use (e.g. MLE symmetrized), you can supply that here. This
function will use the transition probabilities from this matrix to calculate the Bernoulli standard deviations,
which will be divided by the row-summed counts in the original supplied counts matrix.
Returns:
-----------
transmat, np.ndarray:
The MSM transition matrix
scale, np.ndarray:
The matrix of standard errors for each transition probability
'''
norm = np.sum(countsmat, axis=1)
    if transmat is None:
transmat = (countsmat.transpose() / norm).transpose()
counts = (np.ones((len(transmat), len(transmat))) * norm).transpose()
scale = ((transmat - transmat ** 2) ** 0.5 / counts ** 0.5) + 10 ** -15
return transmat, scale | Computes transition probabilities and standard errors of the transition probabilities due to
finite sampling using the MSM counts matrix. First, the transition probabilities are computed
    by dividing each element c_ij by the row-summed counts of row i. The standard errors are then
computed by first computing the standard deviation of the transition probability, treating each count
as a Bernoulli process with p = t_ij (std = (t_ij - t_ij ^2)^0.5). This is then divided by the
square root of the row-summed counts of row i to obtain the standard error.
Parameters:
----------
countsmat: np.ndarray
The msm counts matrix
transmat: np.ndarray
If you have a transition matrix you want to use (e.g. MLE symmetrized), you can supply that here. This
function will use the transition probabilities from this matrix to calculate the Bernoulli standard deviations,
which will be divided by the row-summed counts in the original supplied counts matrix.
Returns:
-----------
transmat, np.ndarray:
The MSM transition matrix
scale, np.ndarray:
        The matrix of standard errors for each transition probability | Below is the instruction that describes the task:
### Input:
Computes transition probabilities and standard errors of the transition probabilities due to
finite sampling using the MSM counts matrix. First, the transition probabilities are computed
by dividing each element c_ij by the row-summed counts of row i. The standard errors are then
computed by first computing the standard deviation of the transition probability, treating each count
as a Bernoulli process with p = t_ij (std = (t_ij - t_ij ^2)^0.5). This is then divided by the
square root of the row-summed counts of row i to obtain the standard error.
Parameters:
----------
countsmat: np.ndarray
The msm counts matrix
transmat: np.ndarray
If you have a transition matrix you want to use (e.g. MLE symmetrized), you can supply that here. This
function will use the transition probabilities from this matrix to calculate the Bernoulli standard deviations,
which will be divided by the row-summed counts in the original supplied counts matrix.
Returns:
-----------
transmat, np.ndarray:
The MSM transition matrix
scale, np.ndarray:
The matrix of standard errors for each transition probability
### Response:
def create_perturb_params(countsmat, transmat=None):
'''
Computes transition probabilities and standard errors of the transition probabilities due to
finite sampling using the MSM counts matrix. First, the transition probabilities are computed
    by dividing each element c_ij by the row-summed counts of row i. The standard errors are then
computed by first computing the standard deviation of the transition probability, treating each count
as a Bernoulli process with p = t_ij (std = (t_ij - t_ij ^2)^0.5). This is then divided by the
square root of the row-summed counts of row i to obtain the standard error.
Parameters:
----------
countsmat: np.ndarray
The msm counts matrix
transmat: np.ndarray
If you have a transition matrix you want to use (e.g. MLE symmetrized), you can supply that here. This
function will use the transition probabilities from this matrix to calculate the Bernoulli standard deviations,
which will be divided by the row-summed counts in the original supplied counts matrix.
Returns:
-----------
transmat, np.ndarray:
The MSM transition matrix
scale, np.ndarray:
The matrix of standard errors for each transition probability
'''
norm = np.sum(countsmat, axis=1)
    if transmat is None:
transmat = (countsmat.transpose() / norm).transpose()
counts = (np.ones((len(transmat), len(transmat))) * norm).transpose()
scale = ((transmat - transmat ** 2) ** 0.5 / counts ** 0.5) + 10 ** -15
return transmat, scale |
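A hedged numpy-only sketch of the row normalisation and Bernoulli standard error described above, run on a made-up counts matrix:

```python
import numpy as np

counts = np.array([[90., 10.],
                   [ 5., 45.]])
row_sums = counts.sum(axis=1)                       # total counts leaving each state
transmat = counts / row_sums[:, None]               # t_ij = c_ij / sum_j c_ij
stderr = np.sqrt(transmat - transmat**2) / np.sqrt(row_sums[:, None])
print(transmat)   # [[0.9 0.1], [0.1 0.9]]
print(stderr)     # elementwise (t - t^2)^0.5 divided by sqrt(row counts)
```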
def _resolve_graph(self, distribution_names=None, leaf_name='x'):
"""Creates a `tuple` of `tuple`s of dependencies.
This function is **experimental**. That said, we encourage its use
and ask that you report problems to `[email protected]`.
Args:
distribution_names: `list` of `str` or `None` names corresponding to each
      of `model` elements. (`None`s are expanded into the
appropriate `str`.)
leaf_name: `str` used when no maker depends on a particular
`model` element.
Returns:
graph: `tuple` of `(str tuple)` pairs representing the name of each
distribution (maker) and the names of its dependencies.
#### Example
```python
d = tfd.JointDistributionSequential([
tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
tfd.Normal(loc=0, scale=2.),
lambda n, g: tfd.Normal(loc=n, scale=g),
])
d._resolve_graph()
# ==> (
# ('e', ()),
# ('g', ('e',)),
# ('n', ()),
# ('x', ('n', 'g')),
# )
```
"""
# This function additionally depends on:
# self._dist_fn_args
# self._dist_fn_wrapped
# TODO(b/129008220): Robustify this procedure. Eg, handle collisions better,
# ignore args prefixed with `_`.
if distribution_names is None or any(self._dist_fn_args):
distribution_names = _resolve_distribution_names(
self._dist_fn_args, distribution_names, leaf_name)
if len(set(distribution_names)) != len(distribution_names):
raise ValueError('Distribution names must be unique: {}'.format(
distribution_names))
if len(distribution_names) != len(self._dist_fn_wrapped):
raise ValueError('Distribution names must be 1:1 with `rvs`.')
return tuple(zip(distribution_names,
tuple(() if a is None else a for a in self._dist_fn_args))) | Creates a `tuple` of `tuple`s of dependencies.
This function is **experimental**. That said, we encourage its use
and ask that you report problems to `[email protected]`.
Args:
distribution_names: `list` of `str` or `None` names corresponding to each
of `model` elements. (`None`s are expanded into the
appropriate `str`.)
leaf_name: `str` used when no maker depends on a particular
`model` element.
Returns:
graph: `tuple` of `(str tuple)` pairs representing the name of each
distribution (maker) and the names of its dependencies.
#### Example
```python
d = tfd.JointDistributionSequential([
tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
tfd.Normal(loc=0, scale=2.),
lambda n, g: tfd.Normal(loc=n, scale=g),
])
d._resolve_graph()
# ==> (
# ('e', ()),
# ('g', ('e',)),
# ('n', ()),
# ('x', ('n', 'g')),
# )
``` | Below is the the instruction that describes the task:
### Input:
Creates a `tuple` of `tuple`s of dependencies.
This function is **experimental**. That said, we encourage its use
and ask that you report problems to `[email protected]`.
Args:
distribution_names: `list` of `str` or `None` names corresponding to each
of `model` elements. (`None`s are expanded into the
appropriate `str`.)
leaf_name: `str` used when no maker depends on a particular
`model` element.
Returns:
graph: `tuple` of `(str tuple)` pairs representing the name of each
distribution (maker) and the names of its dependencies.
#### Example
```python
d = tfd.JointDistributionSequential([
tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
tfd.Normal(loc=0, scale=2.),
lambda n, g: tfd.Normal(loc=n, scale=g),
])
d._resolve_graph()
# ==> (
# ('e', ()),
# ('g', ('e',)),
# ('n', ()),
# ('x', ('n', 'g')),
# )
```
### Response:
def _resolve_graph(self, distribution_names=None, leaf_name='x'):
"""Creates a `tuple` of `tuple`s of dependencies.
This function is **experimental**. That said, we encourage its use
and ask that you report problems to `[email protected]`.
Args:
distribution_names: `list` of `str` or `None` names corresponding to each
      of `model` elements. (`None`s are expanded into the
appropriate `str`.)
leaf_name: `str` used when no maker depends on a particular
`model` element.
Returns:
graph: `tuple` of `(str tuple)` pairs representing the name of each
distribution (maker) and the names of its dependencies.
#### Example
```python
d = tfd.JointDistributionSequential([
tfd.Independent(tfd.Exponential(rate=[100, 120]), 1),
lambda e: tfd.Gamma(concentration=e[..., 0], rate=e[..., 1]),
tfd.Normal(loc=0, scale=2.),
lambda n, g: tfd.Normal(loc=n, scale=g),
])
d._resolve_graph()
# ==> (
# ('e', ()),
# ('g', ('e',)),
# ('n', ()),
# ('x', ('n', 'g')),
# )
```
"""
# This function additionally depends on:
# self._dist_fn_args
# self._dist_fn_wrapped
# TODO(b/129008220): Robustify this procedure. Eg, handle collisions better,
# ignore args prefixed with `_`.
if distribution_names is None or any(self._dist_fn_args):
distribution_names = _resolve_distribution_names(
self._dist_fn_args, distribution_names, leaf_name)
if len(set(distribution_names)) != len(distribution_names):
raise ValueError('Distribution names must be unique: {}'.format(
distribution_names))
if len(distribution_names) != len(self._dist_fn_wrapped):
raise ValueError('Distribution names must be 1:1 with `rvs`.')
return tuple(zip(distribution_names,
tuple(() if a is None else a for a in self._dist_fn_args))) |
def process_actions(self, actions):
"""Process the actions we want to take
Args:
actions (`list`): List of actions we want to take
Returns:
`list` of notifications
"""
notices = {}
notification_contacts = {}
for action in actions:
resource = action['resource']
action_status = ActionStatus.SUCCEED
try:
if action['action'] == AuditActions.REMOVE:
action_status = self.process_action(
resource,
AuditActions.REMOVE
)
if action_status == ActionStatus.SUCCEED:
db.session.delete(action['issue'].issue)
elif action['action'] == AuditActions.STOP:
action_status = self.process_action(
resource,
AuditActions.STOP
)
if action_status == ActionStatus.SUCCEED:
action['issue'].update({
'missing_tags': action['missing_tags'],
'notes': action['notes'],
'last_alert': action['last_alert'],
'state': action['action']
})
elif action['action'] == AuditActions.FIXED:
db.session.delete(action['issue'].issue)
elif action['action'] == AuditActions.ALERT:
action['issue'].update({
'missing_tags': action['missing_tags'],
'notes': action['notes'],
'last_alert': action['last_alert'],
'state': action['action']
})
db.session.commit()
if action_status == ActionStatus.SUCCEED:
for owner in [
dict(t) for t in {tuple(d.items()) for d in (action['owners'] + self.permanent_emails)}
]:
if owner['value'] not in notification_contacts:
contact = NotificationContact(type=owner['type'], value=owner['value'])
notification_contacts[owner['value']] = contact
notices[contact] = {
'fixed': [],
'not_fixed': []
}
else:
contact = notification_contacts[owner['value']]
if action['action'] == AuditActions.FIXED:
notices[contact]['fixed'].append(action)
else:
notices[contact]['not_fixed'].append(action)
except Exception as ex:
self.log.exception('Unexpected error while processing resource {}/{}/{}/{}'.format(
action['resource'].account.account_name,
action['resource'].id,
action['resource'],
ex
))
return notices | Process the actions we want to take
Args:
actions (`list`): List of actions we want to take
Returns:
`list` of notifications | Below is the the instruction that describes the task:
### Input:
Process the actions we want to take
Args:
actions (`list`): List of actions we want to take
Returns:
`list` of notifications
### Response:
def process_actions(self, actions):
"""Process the actions we want to take
Args:
actions (`list`): List of actions we want to take
Returns:
`list` of notifications
"""
notices = {}
notification_contacts = {}
for action in actions:
resource = action['resource']
action_status = ActionStatus.SUCCEED
try:
if action['action'] == AuditActions.REMOVE:
action_status = self.process_action(
resource,
AuditActions.REMOVE
)
if action_status == ActionStatus.SUCCEED:
db.session.delete(action['issue'].issue)
elif action['action'] == AuditActions.STOP:
action_status = self.process_action(
resource,
AuditActions.STOP
)
if action_status == ActionStatus.SUCCEED:
action['issue'].update({
'missing_tags': action['missing_tags'],
'notes': action['notes'],
'last_alert': action['last_alert'],
'state': action['action']
})
elif action['action'] == AuditActions.FIXED:
db.session.delete(action['issue'].issue)
elif action['action'] == AuditActions.ALERT:
action['issue'].update({
'missing_tags': action['missing_tags'],
'notes': action['notes'],
'last_alert': action['last_alert'],
'state': action['action']
})
db.session.commit()
if action_status == ActionStatus.SUCCEED:
for owner in [
dict(t) for t in {tuple(d.items()) for d in (action['owners'] + self.permanent_emails)}
]:
if owner['value'] not in notification_contacts:
contact = NotificationContact(type=owner['type'], value=owner['value'])
notification_contacts[owner['value']] = contact
notices[contact] = {
'fixed': [],
'not_fixed': []
}
else:
contact = notification_contacts[owner['value']]
if action['action'] == AuditActions.FIXED:
notices[contact]['fixed'].append(action)
else:
notices[contact]['not_fixed'].append(action)
except Exception as ex:
self.log.exception('Unexpected error while processing resource {}/{}/{}/{}'.format(
action['resource'].account.account_name,
action['resource'].id,
action['resource'],
ex
))
return notices |
def predict(self, timeseriesX, n, m):
"""
Calculates the dependent timeseries Y for the given parameters
and independent timeseries. (y=m*x + n)
:param TimeSeries timeseriesX: the independent Timeseries.
:param float n: The interception with the x access
that has been calculated during regression
:param float m: The slope of the function
that has been calculated during regression
:return TimeSeries timeseries_y: the predicted values for the
dependent TimeSeries. Its length and first dimension will
equal to timeseriesX.
"""
new_entries = []
for entry in timeseriesX:
predicted_value = m * entry[1] + n
new_entries.append([entry[0], predicted_value])
return TimeSeries.from_twodim_list(new_entries) | Calculates the dependent timeseries Y for the given parameters
and independent timeseries. (y=m*x + n)
:param TimeSeries timeseriesX: the independent Timeseries.
:param float n: The interception with the x access
that has been calculated during regression
:param float m: The slope of the function
that has been calculated during regression
:return TimeSeries timeseries_y: the predicted values for the
dependent TimeSeries. Its length and first dimension will
equal to timeseriesX. | Below is the the instruction that describes the task:
### Input:
Calculates the dependent timeseries Y for the given parameters
and independent timeseries. (y=m*x + n)
:param TimeSeries timeseriesX: the independent Timeseries.
:param float n: The interception with the x access
that has been calculated during regression
:param float m: The slope of the function
that has been calculated during regression
:return TimeSeries timeseries_y: the predicted values for the
dependent TimeSeries. Its length and first dimension will
equal to timeseriesX.
### Response:
def predict(self, timeseriesX, n, m):
"""
Calculates the dependent timeseries Y for the given parameters
and independent timeseries. (y=m*x + n)
:param TimeSeries timeseriesX: the independent Timeseries.
:param float n: The interception with the x access
that has been calculated during regression
:param float m: The slope of the function
that has been calculated during regression
:return TimeSeries timeseries_y: the predicted values for the
dependent TimeSeries. Its length and first dimension will
equal to timeseriesX.
"""
new_entries = []
for entry in timeseriesX:
predicted_value = m * entry[1] + n
new_entries.append([entry[0], predicted_value])
return TimeSeries.from_twodim_list(new_entries) |
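A brief usage sketch; the Regression class hosting this method is assumed here, and TimeSeries comes from the same package:

x = TimeSeries.from_twodim_list([[0, 1.0], [1, 2.0], [2, 3.0]])
y = Regression().predict(x, n=1.0, m=2.0)
# y holds [[0, 3.0], [1, 5.0], [2, 7.0]] - the same timestamps with y = 2*x + 1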
def _is_pid_running_on_windows(pid):
"""
Check if PID is running for Windows systems
"""
import ctypes.wintypes
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
if handle == 0:
return False
exit_code = ctypes.wintypes.DWORD()
ret = kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code))
is_alive = (ret == 0 or exit_code.value == _STILL_ALIVE) # pylint: disable=undefined-variable
kernel32.CloseHandle(handle)
return is_alive | Check if PID is running for Windows systems | Below is the the instruction that describes the task:
### Input:
Check if PID is running for Windows systems
### Response:
def _is_pid_running_on_windows(pid):
"""
Check if PID is running for Windows systems
"""
import ctypes.wintypes
kernel32 = ctypes.windll.kernel32
handle = kernel32.OpenProcess(1, 0, pid)
if handle == 0:
return False
exit_code = ctypes.wintypes.DWORD()
ret = kernel32.GetExitCodeProcess(handle, ctypes.byref(exit_code))
is_alive = (ret == 0 or exit_code.value == _STILL_ALIVE) # pylint: disable=undefined-variable
kernel32.CloseHandle(handle)
return is_alive |
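A minimal usage sketch (Windows only; _STILL_ALIVE is assumed to be defined elsewhere in the module as the Win32 STILL_ACTIVE exit code, 259):

import os

print(_is_pid_running_on_windows(os.getpid()))   # True for the current process
print(_is_pid_running_on_windows(99999999))      # normally False for a bogus PID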
def router_fabric_virtual_gateway_address_family_ipv6_gateway_mac_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
router = ET.SubElement(config, "router", xmlns="urn:brocade.com:mgmt:brocade-common-def")
fabric_virtual_gateway = ET.SubElement(router, "fabric-virtual-gateway", xmlns="urn:brocade.com:mgmt:brocade-anycast-gateway")
address_family = ET.SubElement(fabric_virtual_gateway, "address-family")
ipv6 = ET.SubElement(address_family, "ipv6")
gateway_mac_address = ET.SubElement(ipv6, "gateway-mac-address")
gateway_mac_address.text = kwargs.pop('gateway_mac_address')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def router_fabric_virtual_gateway_address_family_ipv6_gateway_mac_address(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
router = ET.SubElement(config, "router", xmlns="urn:brocade.com:mgmt:brocade-common-def")
fabric_virtual_gateway = ET.SubElement(router, "fabric-virtual-gateway", xmlns="urn:brocade.com:mgmt:brocade-anycast-gateway")
address_family = ET.SubElement(fabric_virtual_gateway, "address-family")
ipv6 = ET.SubElement(address_family, "ipv6")
gateway_mac_address = ET.SubElement(ipv6, "gateway-mac-address")
gateway_mac_address.text = kwargs.pop('gateway_mac_address')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_topology(self):
"""
Get the converted topology ready for JSON encoding
:return: converted topology assembled into a single dict
:rtype: dict
"""
topology = {'name': self._name,
'resources_type': 'local',
'topology': {},
'type': 'topology',
'version': '1.0'}
if self._links:
topology['topology']['links'] = self._links
if self._nodes:
topology['topology']['nodes'] = self._nodes
if self._servers:
topology['topology']['servers'] = self._servers
if self._notes:
topology['topology']['notes'] = self._notes
if self._shapes['ellipse']:
topology['topology']['ellipses'] = self._shapes['ellipse']
if self._shapes['rectangle']:
topology['topology']['rectangles'] = \
self._shapes['rectangle']
if self._images:
topology['topology']['images'] = self._images
return topology | Get the converted topology ready for JSON encoding
:return: converted topology assembled into a single dict
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Get the converted topology ready for JSON encoding
:return: converted topology assembled into a single dict
:rtype: dict
### Response:
def get_topology(self):
"""
Get the converted topology ready for JSON encoding
:return: converted topology assembled into a single dict
:rtype: dict
"""
topology = {'name': self._name,
'resources_type': 'local',
'topology': {},
'type': 'topology',
'version': '1.0'}
if self._links:
topology['topology']['links'] = self._links
if self._nodes:
topology['topology']['nodes'] = self._nodes
if self._servers:
topology['topology']['servers'] = self._servers
if self._notes:
topology['topology']['notes'] = self._notes
if self._shapes['ellipse']:
topology['topology']['ellipses'] = self._shapes['ellipse']
if self._shapes['rectangle']:
topology['topology']['rectangles'] = \
self._shapes['rectangle']
if self._images:
topology['topology']['images'] = self._images
return topology |
def datastore(self, domain, data_type, mapping=None):
"""Get instance of the DataStore module.
Args:
domain (str): The domain can be either "system", "organization", or "local". When using
"organization" the data store can be accessed by any Application in the entire org,
while "local" access is restricted to the App writing the data. The "system" option
should not be used in almost all cases.
data_type (str): The data type descriptor (e.g., tc:whois:cache).
Returns:
object: An instance of the DataStore Class.
"""
from .tcex_datastore import TcExDataStore
return TcExDataStore(self, domain, data_type, mapping) | Get instance of the DataStore module.
Args:
domain (str): The domain can be either "system", "organization", or "local". When using
"organization" the data store can be accessed by any Application in the entire org,
while "local" access is restricted to the App writing the data. The "system" option
should not be used in almost all cases.
data_type (str): The data type descriptor (e.g., tc:whois:cache).
Returns:
object: An instance of the DataStore Class. | Below is the the instruction that describes the task:
### Input:
Get instance of the DataStore module.
Args:
domain (str): The domain can be either "system", "organization", or "local". When using
"organization" the data store can be accessed by any Application in the entire org,
while "local" access is restricted to the App writing the data. The "system" option
should not be used in almost all cases.
data_type (str): The data type descriptor (e.g., tc:whois:cache).
Returns:
object: An instance of the DataStore Class.
### Response:
def datastore(self, domain, data_type, mapping=None):
"""Get instance of the DataStore module.
Args:
domain (str): The domain can be either "system", "organization", or "local". When using
"organization" the data store can be accessed by any Application in the entire org,
while "local" access is restricted to the App writing the data. The "system" option
should not be used in almost all cases.
data_type (str): The data type descriptor (e.g., tc:whois:cache).
Returns:
object: An instance of the DataStore Class.
"""
from .tcex_datastore import TcExDataStore
return TcExDataStore(self, domain, data_type, mapping) |
def update_avatar(self):
    '''Update the user's avatar'''
def do_update_avatar(info, error=None):
if error or not info:
logger.error('Failed to get user avatar: %s, %s' %
(info, error))
else:
uk, uname, img_path = info
self.img_avatar.set_from_file(img_path)
self.img_avatar.props.tooltip_text = '\n'.join([
self.profile['username'],
uname,
])
if not self.profile['display-avatar']:
return
self.img_avatar.props.tooltip_text = ''
cache_path = Config.get_cache_path(self.profile['username'])
gutil.async_call(gutil.update_avatar, self.cookie, self.tokens,
                         cache_path, callback=do_update_avatar) | Update the user's avatar | Below is the the instruction that describes the task:
### Input:
Update the user's avatar
### Response:
def update_avatar(self):
    '''Update the user's avatar'''
def do_update_avatar(info, error=None):
if error or not info:
logger.error('Failed to get user avatar: %s, %s' %
(info, error))
else:
uk, uname, img_path = info
self.img_avatar.set_from_file(img_path)
self.img_avatar.props.tooltip_text = '\n'.join([
self.profile['username'],
uname,
])
if not self.profile['display-avatar']:
return
self.img_avatar.props.tooltip_text = ''
cache_path = Config.get_cache_path(self.profile['username'])
gutil.async_call(gutil.update_avatar, self.cookie, self.tokens,
cache_path, callback=do_update_avatar) |
def clusterdown_wrapper(func):
"""
Wrapper for CLUSTERDOWN error handling.
If the cluster reports it is down it is assumed that:
- connection_pool was disconnected
    - connection_pool was reset
    - refresh_table_asap set to True
    It will try 3 times to rerun the command and raises ClusterDownError if it continues to fail.
"""
@wraps(func)
async def inner(*args, **kwargs):
for _ in range(0, 3):
try:
return await func(*args, **kwargs)
except ClusterDownError:
# Try again with the new cluster setup. All other errors
# should be raised.
pass
# If it fails 3 times then raise exception back to caller
raise ClusterDownError("CLUSTERDOWN error. Unable to rebuild the cluster")
return inner | Wrapper for CLUSTERDOWN error handling.
If the cluster reports it is down it is assumed that:
- connection_pool was disconnected
    - connection_pool was reset
    - refresh_table_asap set to True
    It will try 3 times to rerun the command and raises ClusterDownError if it continues to fail. | Below is the the instruction that describes the task:
### Input:
Wrapper for CLUSTERDOWN error handling.
If the cluster reports it is down it is assumed that:
- connection_pool was disconnected
- connection_pool was reset
- refresh_table_asap set to True
It will try 3 times to rerun the command and raises ClusterDownError if it continues to fail.
### Response:
def clusterdown_wrapper(func):
"""
Wrapper for CLUSTERDOWN error handling.
If the cluster reports it is down it is assumed that:
- connection_pool was disconnected
    - connection_pool was reset
    - refresh_table_asap set to True
    It will try 3 times to rerun the command and raises ClusterDownError if it continues to fail.
"""
@wraps(func)
async def inner(*args, **kwargs):
for _ in range(0, 3):
try:
return await func(*args, **kwargs)
except ClusterDownError:
# Try again with the new cluster setup. All other errors
# should be raised.
pass
# If it fails 3 times then raise exception back to caller
raise ClusterDownError("CLUSTERDOWN error. Unable to rebuild the cluster")
return inner |
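An illustrative sketch of applying the wrapper; the client class and command method below are made up, not part of the surrounding library:

class ExampleClusterClient:
    @clusterdown_wrapper
    async def get(self, key):
        # A ClusterDownError raised in here triggers up to three retries
        # before the wrapper gives up and re-raises.
        return await self.execute_command('GET', key)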
def get_journal_abstracts(self, refresh=True):
"""Return a list of ScopusAbstract objects using ScopusSearch,
but only if belonging to a Journal."""
return [abstract for abstract in self.get_abstracts(refresh=refresh) if
abstract.aggregationType == 'Journal'] | Return a list of ScopusAbstract objects using ScopusSearch,
but only if belonging to a Journal. | Below is the the instruction that describes the task:
### Input:
Return a list of ScopusAbstract objects using ScopusSearch,
but only if belonging to a Journal.
### Response:
def get_journal_abstracts(self, refresh=True):
"""Return a list of ScopusAbstract objects using ScopusSearch,
but only if belonging to a Journal."""
return [abstract for abstract in self.get_abstracts(refresh=refresh) if
abstract.aggregationType == 'Journal'] |
def _is_in_max_difference(value_1, value_2, max_difference):
''' Helper function to determine the difference of two values that can be np.uints. Works in python and numba mode.
Circumvents numba bug #1653
'''
if value_1 <= value_2:
return value_2 - value_1 <= max_difference
return value_1 - value_2 <= max_difference | Helper function to determine the difference of two values that can be np.uints. Works in python and numba mode.
Circumvents numba bug #1653 | Below is the the instruction that describes the task:
### Input:
Helper function to determine the difference of two values that can be np.uints. Works in python and numba mode.
Circumvents numba bug #1653
### Response:
def _is_in_max_difference(value_1, value_2, max_difference):
''' Helper function to determine the difference of two values that can be np.uints. Works in python and numba mode.
Circumvents numba bug #1653
'''
if value_1 <= value_2:
return value_2 - value_1 <= max_difference
return value_1 - value_2 <= max_difference |
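A small illustration of why the branch matters for unsigned integers, where plain subtraction wraps around instead of going negative:

import numpy as np

a, b = np.uint32(3), np.uint32(10)
print(int(a - b))                            # 4294967289 - wrapped around, not -7
print(_is_in_max_difference(a, b, 5))        # False: the real difference is 7
print(_is_in_max_difference(a, b, 7))        # True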
def start_connect(self):
"""Tries to connect to the Heron Server
``loop()`` method needs to be called after this.
"""
Log.debug("In start_connect() of %s" % self._get_classname())
# TODO: specify buffer size, exception handling
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
# when ready, handle_connect is called
self._connecting = True
self.connect(self.endpoint) | Tries to connect to the Heron Server
``loop()`` method needs to be called after this. | Below is the the instruction that describes the task:
### Input:
Tries to connect to the Heron Server
``loop()`` method needs to be called after this.
### Response:
def start_connect(self):
"""Tries to connect to the Heron Server
``loop()`` method needs to be called after this.
"""
Log.debug("In start_connect() of %s" % self._get_classname())
# TODO: specify buffer size, exception handling
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
# when ready, handle_connect is called
self._connecting = True
self.connect(self.endpoint) |
def pdu_to_function_code_or_raise_error(resp_pdu):
""" Parse response PDU and return of :class:`ModbusFunction` or
raise error.
:param resp_pdu: PDU of response.
:return: Subclass of :class:`ModbusFunction` matching the response.
:raises ModbusError: When response contains error code.
"""
function_code = struct.unpack('>B', resp_pdu[0:1])[0]
if function_code not in function_code_to_function_map.keys():
error_code = struct.unpack('>B', resp_pdu[1:2])[0]
raise error_code_to_exception_map[error_code]
return function_code | Parse response PDU and return of :class:`ModbusFunction` or
raise error.
:param resp_pdu: PDU of response.
:return: Subclass of :class:`ModbusFunction` matching the response.
:raises ModbusError: When response contains error code. | Below is the the instruction that describes the task:
### Input:
Parse response PDU and return of :class:`ModbusFunction` or
raise error.
:param resp_pdu: PDU of response.
:return: Subclass of :class:`ModbusFunction` matching the response.
:raises ModbusError: When response contains error code.
### Response:
def pdu_to_function_code_or_raise_error(resp_pdu):
""" Parse response PDU and return of :class:`ModbusFunction` or
raise error.
:param resp_pdu: PDU of response.
:return: Subclass of :class:`ModbusFunction` matching the response.
:raises ModbusError: When response contains error code.
"""
function_code = struct.unpack('>B', resp_pdu[0:1])[0]
if function_code not in function_code_to_function_map.keys():
error_code = struct.unpack('>B', resp_pdu[1:2])[0]
raise error_code_to_exception_map[error_code]
return function_code |
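A sketch of both paths with fabricated PDUs (0x03 is the Modbus Read Holding Registers function, 0x83 its error counterpart; the concrete exception class raised comes from the surrounding library's error_code_to_exception_map):

import struct

ok_pdu = struct.pack('>BBH', 0x03, 2, 1234)          # normal response PDU
print(pdu_to_function_code_or_raise_error(ok_pdu))   # 3

err_pdu = struct.pack('>BB', 0x83, 0x02)             # error response, exception code 2
pdu_to_function_code_or_raise_error(err_pdu)         # raises the mapped ModbusError subclass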
def fit(self, X, y, cv=None, class_weight='auto'):
""" Fits X to outcomes y, using clf """
# Incorporate error checking such as :
# if isinstance(self.classifier, ScikitClassifier):
        #     do one thing
        # otherwise do something else.
self.X = X
self.y = y
self.set_class_weight(class_weight=class_weight, y=y)
self.clf = self.clf.fit(X, y)
return self.clf | Fits X to outcomes y, using clf | Below is the the instruction that describes the task:
### Input:
Fits X to outcomes y, using clf
### Response:
def fit(self, X, y, cv=None, class_weight='auto'):
""" Fits X to outcomes y, using clf """
# Incorporate error checking such as :
# if isinstance(self.classifier, ScikitClassifier):
        #     do one thing
        # otherwise do something else.
self.X = X
self.y = y
self.set_class_weight(class_weight=class_weight, y=y)
self.clf = self.clf.fit(X, y)
return self.clf |
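A hedged usage sketch; the wrapper class name Classifier and its constructor are hypothetical, and self.clf is assumed to wrap a scikit-learn estimator:

from sklearn.svm import SVC
import numpy as np

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
wrapper = Classifier(SVC(kernel='linear'))   # hypothetical wrapper holding clf
fitted = wrapper.fit(X, y)
print(fitted.predict([[2.5]]))               # [1]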
def new(cls, settings, *args, **kwargs):
"""
Create a new Cloud instance based on the Settings
"""
logger.debug('Initializing new "%s" Instance object' % settings['CLOUD'])
cloud = settings['CLOUD']
if cloud == 'bare':
self = BareInstance(settings=settings, *args, **kwargs)
elif cloud == 'aws':
self = AWSInstance(settings=settings, *args, **kwargs)
elif cloud == 'gcp':
self = GCPInstance(settings=settings, *args, **kwargs)
else:
raise DSBException('Cloud "%s" not supported' % cloud)
return self | Create a new Cloud instance based on the Settings | Below is the the instruction that describes the task:
### Input:
Create a new Cloud instance based on the Settings
### Response:
def new(cls, settings, *args, **kwargs):
"""
Create a new Cloud instance based on the Settings
"""
logger.debug('Initializing new "%s" Instance object' % settings['CLOUD'])
cloud = settings['CLOUD']
if cloud == 'bare':
self = BareInstance(settings=settings, *args, **kwargs)
elif cloud == 'aws':
self = AWSInstance(settings=settings, *args, **kwargs)
elif cloud == 'gcp':
self = GCPInstance(settings=settings, *args, **kwargs)
else:
raise DSBException('Cloud "%s" not supported' % cloud)
return self |
def _build_command(self, cmd_1, cmd_2=None,
select=False, select_command=None):
"""
Constructs the complete command.
:param cmd_1: Light command 1.
:param cmd_2: Light command 2.
:param select: If command requires selection.
:param select_command: Selection command bytes.
:return: The complete command.
"""
return CommandLegacy(cmd_1, cmd_2,
self._group_number, select, select_command) | Constructs the complete command.
:param cmd_1: Light command 1.
:param cmd_2: Light command 2.
:param select: If command requires selection.
:param select_command: Selection command bytes.
:return: The complete command. | Below is the the instruction that describes the task:
### Input:
Constructs the complete command.
:param cmd_1: Light command 1.
:param cmd_2: Light command 2.
:param select: If command requires selection.
:param select_command: Selection command bytes.
:return: The complete command.
### Response:
def _build_command(self, cmd_1, cmd_2=None,
select=False, select_command=None):
"""
Constructs the complete command.
:param cmd_1: Light command 1.
:param cmd_2: Light command 2.
:param select: If command requires selection.
:param select_command: Selection command bytes.
:return: The complete command.
"""
return CommandLegacy(cmd_1, cmd_2,
self._group_number, select, select_command) |
def get_tshark_interfaces(tshark_path=None):
"""
Returns a list of interface numbers from the output tshark -D. Used
internally to capture on multiple interfaces.
"""
parameters = [get_process_path(tshark_path), '-D']
with open(os.devnull, 'w') as null:
tshark_interfaces = subprocess.check_output(parameters, stderr=null).decode("utf-8")
return [line.split('.')[0] for line in tshark_interfaces.splitlines()] | Returns a list of interface numbers from the output tshark -D. Used
internally to capture on multiple interfaces. | Below is the the instruction that describes the task:
### Input:
Returns a list of interface numbers from the output tshark -D. Used
internally to capture on multiple interfaces.
### Response:
def get_tshark_interfaces(tshark_path=None):
"""
Returns a list of interface numbers from the output tshark -D. Used
internally to capture on multiple interfaces.
"""
parameters = [get_process_path(tshark_path), '-D']
with open(os.devnull, 'w') as null:
tshark_interfaces = subprocess.check_output(parameters, stderr=null).decode("utf-8")
return [line.split('.')[0] for line in tshark_interfaces.splitlines()] |
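A quick usage note, this requires a local tshark binary (or an explicit path) and simply shells out to `tshark -D`:

interfaces = get_tshark_interfaces()
print(interfaces)   # e.g. ['1', '2', '3'], one entry per capture interface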
def remove_threadlocal(self, name):
"""
Args:
name (str | unicode): Remove entry with `name` from current thread's context
"""
with self._lock:
if self._tpayload is not None:
if name in self._tpayload.context:
del self._tpayload.context[name]
if not self._tpayload.context:
self._tpayload = None | Args:
name (str | unicode): Remove entry with `name` from current thread's context | Below is the the instruction that describes the task:
### Input:
Args:
name (str | unicode): Remove entry with `name` from current thread's context
### Response:
def remove_threadlocal(self, name):
"""
Args:
name (str | unicode): Remove entry with `name` from current thread's context
"""
with self._lock:
if self._tpayload is not None:
if name in self._tpayload.context:
del self._tpayload.context[name]
if not self._tpayload.context:
self._tpayload = None |
def _update_parent_attachments(self):
""" Tries to update the parent property 'has_attachments' """
try:
self._parent.has_attachments = bool(len(self.__attachments))
except AttributeError:
pass | Tries to update the parent property 'has_attachments' | Below is the the instruction that describes the task:
### Input:
Tries to update the parent property 'has_attachments'
### Response:
def _update_parent_attachments(self):
""" Tries to update the parent property 'has_attachments' """
try:
self._parent.has_attachments = bool(len(self.__attachments))
except AttributeError:
pass |
def update_flagfile(flags_path, new_threshold):
"""Updates the flagfile at `flags_path`, changing the value for
`resign_threshold` to `new_threshold`
"""
if abs(new_threshold) > 1:
raise ValueError("Invalid new percentile for resign threshold")
with tf.gfile.GFile(flags_path) as f:
lines = f.read()
if new_threshold > 0:
new_threshold *= -1
if not RESIGN_FLAG_REGEX.search(lines):
print("Resign threshold flag not found in flagfile {}! Aborting.".format(flags_path))
sys.exit(1)
old_threshold = RESIGN_FLAG_REGEX.search(lines).groups(1)
lines = re.sub(RESIGN_FLAG_REGEX, "--resign_threshold={:.3f}".format(new_threshold), lines)
if abs(float(old_threshold[0]) - new_threshold) < 0.001:
print("Not updating percentiles; {} ~= {:.3f}".format(
old_threshold[0], new_threshold), flush=True)
else:
print("Updated percentile from {} to {:.3f}".format(
old_threshold[0], new_threshold), flush=True)
with tf.gfile.GFile(flags_path, 'w') as f:
f.write(lines) | Updates the flagfile at `flags_path`, changing the value for
`resign_threshold` to `new_threshold` | Below is the the instruction that describes the task:
### Input:
Updates the flagfile at `flags_path`, changing the value for
`resign_threshold` to `new_threshold`
### Response:
def update_flagfile(flags_path, new_threshold):
"""Updates the flagfile at `flags_path`, changing the value for
`resign_threshold` to `new_threshold`
"""
if abs(new_threshold) > 1:
raise ValueError("Invalid new percentile for resign threshold")
with tf.gfile.GFile(flags_path) as f:
lines = f.read()
if new_threshold > 0:
new_threshold *= -1
if not RESIGN_FLAG_REGEX.search(lines):
print("Resign threshold flag not found in flagfile {}! Aborting.".format(flags_path))
sys.exit(1)
old_threshold = RESIGN_FLAG_REGEX.search(lines).groups(1)
lines = re.sub(RESIGN_FLAG_REGEX, "--resign_threshold={:.3f}".format(new_threshold), lines)
if abs(float(old_threshold[0]) - new_threshold) < 0.001:
print("Not updating percentiles; {} ~= {:.3f}".format(
old_threshold[0], new_threshold), flush=True)
else:
print("Updated percentile from {} to {:.3f}".format(
old_threshold[0], new_threshold), flush=True)
with tf.gfile.GFile(flags_path, 'w') as f:
f.write(lines) |
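For context, RESIGN_FLAG_REGEX is defined elsewhere in the module; a pattern compatible with how it is used here (search, one capturing group, substitutable) might look roughly like:

import re

RESIGN_FLAG_REGEX = re.compile(r'--resign_threshold=([-\d.]+)')
# so a flagfile line such as "--resign_threshold=-0.900" is found and rewritten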
def _from_binary_reparse(cls, binary_stream):
"""See base class."""
''' Reparse type flags - 4
Reparse tag - 4 bits
Reserved - 12 bits
Reparse type - 2 bits
Reparse data length - 2
Padding - 2
'''
#content = cls._REPR.unpack(binary_view[:cls._REPR.size])
reparse_tag, data_len = cls._REPR.unpack(binary_stream[:cls._REPR.size])
#reparse_tag (type, flags) data_len, guid, data
reparse_type = ReparseType(reparse_tag & 0x0000FFFF)
reparse_flags = ReparseFlags((reparse_tag & 0xF0000000) >> 28)
guid = None #guid exists only in third party reparse points
if reparse_flags & ReparseFlags.IS_MICROSOFT:#a microsoft tag
if reparse_type is ReparseType.SYMLINK:
data = SymbolicLink.create_from_binary(binary_stream[cls._REPR.size:])
elif reparse_type is ReparseType.MOUNT_POINT:
data = JunctionOrMount.create_from_binary(binary_stream[cls._REPR.size:])
else:
data = binary_stream[cls._REPR.size:].tobytes()
else:
guid = UUID(bytes_le=binary_stream[cls._REPR.size:cls._REPR.size+16].tobytes())
data = binary_stream[cls._REPR.size+16:].tobytes()
nw_obj = cls((reparse_type, reparse_flags, data_len, guid, data))
_MOD_LOGGER.debug("Attempted to unpack REPARSE_POINT from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj | See base class. | Below is the the instruction that describes the task:
### Input:
See base class.
### Response:
def _from_binary_reparse(cls, binary_stream):
"""See base class."""
''' Reparse type flags - 4
Reparse tag - 4 bits
Reserved - 12 bits
Reparse type - 2 bits
Reparse data length - 2
Padding - 2
'''
#content = cls._REPR.unpack(binary_view[:cls._REPR.size])
reparse_tag, data_len = cls._REPR.unpack(binary_stream[:cls._REPR.size])
#reparse_tag (type, flags) data_len, guid, data
reparse_type = ReparseType(reparse_tag & 0x0000FFFF)
reparse_flags = ReparseFlags((reparse_tag & 0xF0000000) >> 28)
guid = None #guid exists only in third party reparse points
if reparse_flags & ReparseFlags.IS_MICROSOFT:#a microsoft tag
if reparse_type is ReparseType.SYMLINK:
data = SymbolicLink.create_from_binary(binary_stream[cls._REPR.size:])
elif reparse_type is ReparseType.MOUNT_POINT:
data = JunctionOrMount.create_from_binary(binary_stream[cls._REPR.size:])
else:
data = binary_stream[cls._REPR.size:].tobytes()
else:
guid = UUID(bytes_le=binary_stream[cls._REPR.size:cls._REPR.size+16].tobytes())
data = binary_stream[cls._REPR.size+16:].tobytes()
nw_obj = cls((reparse_type, reparse_flags, data_len, guid, data))
_MOD_LOGGER.debug("Attempted to unpack REPARSE_POINT from \"%s\"\nResult: %s", binary_stream.tobytes(), nw_obj)
return nw_obj |
def stop(self):
'''
Shuts the server down and waits for server thread to join
'''
self._server.shutdown()
self._server.server_close()
self._thread.join()
self.running = False | Shuts the server down and waits for server thread to join | Below is the the instruction that describes the task:
### Input:
Shuts the server down and waits for server thread to join
### Response:
def stop(self):
'''
Shuts the server down and waits for server thread to join
'''
self._server.shutdown()
self._server.server_close()
self._thread.join()
self.running = False |
def _set_internal_compiler_error(self):
"""
Adds the false positive to description and changes severity to low
"""
self.severity = "Low"
self.description_tail += (
" This issue is reported for internal compiler generated code."
)
self.description = "%s\n%s" % (self.description_head, self.description_tail)
self.code = "" | Adds the false positive to description and changes severity to low | Below is the the instruction that describes the task:
### Input:
Adds the false positive to description and changes severity to low
### Response:
def _set_internal_compiler_error(self):
"""
Adds the false positive to description and changes severity to low
"""
self.severity = "Low"
self.description_tail += (
" This issue is reported for internal compiler generated code."
)
self.description = "%s\n%s" % (self.description_head, self.description_tail)
self.code = "" |
def unauthorized(cls, errors=None):
"""Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '401 Unauthorized'
return cls(401, errors=errors).to_json | Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance. | Below is the the instruction that describes the task:
### Input:
Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
### Response:
def unauthorized(cls, errors=None):
"""Shortcut API for HTTP 401 `Unauthorized` response.
Args:
errors (list): Response key/value data.
Returns:
WSResponse Instance.
"""
if cls.expose_status: # pragma: no cover
cls.response.content_type = 'application/json'
cls.response._status_line = '401 Unauthorized'
return cls(401, errors=errors).to_json |
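A short usage sketch inside a request handler (WSResponse stands for the class this method hangs off; the error text is invented):

body = WSResponse.unauthorized(errors=['token expired'])
# body is the JSON payload; cls.response has already been given the
# '401 Unauthorized' status line when expose_status is enabled.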
def _duplicate_example(self, request):
"""Duplicates the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response.
"""
index = int(request.args.get('index'))
if index >= len(self.examples):
return http_util.Respond(request, {'error': 'invalid index provided'},
'application/json', code=400)
new_example = self.example_class()
new_example.CopyFrom(self.examples[index])
self.examples.append(new_example)
self.updated_example_indices.add(len(self.examples) - 1)
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json') | Duplicates the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response. | Below is the the instruction that describes the task:
### Input:
Duplicates the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response.
### Response:
def _duplicate_example(self, request):
"""Duplicates the specified example.
Args:
request: A request that should contain 'index'.
Returns:
An empty response.
"""
index = int(request.args.get('index'))
if index >= len(self.examples):
return http_util.Respond(request, {'error': 'invalid index provided'},
'application/json', code=400)
new_example = self.example_class()
new_example.CopyFrom(self.examples[index])
self.examples.append(new_example)
self.updated_example_indices.add(len(self.examples) - 1)
self.generate_sprite([ex.SerializeToString() for ex in self.examples])
return http_util.Respond(request, {}, 'application/json') |
def write_table_file(self, table_file=None):
"""Write the table to self._table_file"""
if self._table is None:
raise RuntimeError("No table to write")
if table_file is not None:
self._table_file = table_file
if self._table_file is None:
raise RuntimeError("No output file specified for table")
write_tables_to_fits(self._table_file, [self._table], clobber=True,
namelist=['FILE_ARCHIVE']) | Write the table to self._table_file | Below is the the instruction that describes the task:
### Input:
Write the table to self._table_file
### Response:
def write_table_file(self, table_file=None):
"""Write the table to self._table_file"""
if self._table is None:
raise RuntimeError("No table to write")
if table_file is not None:
self._table_file = table_file
if self._table_file is None:
raise RuntimeError("No output file specified for table")
write_tables_to_fits(self._table_file, [self._table], clobber=True,
namelist=['FILE_ARCHIVE']) |
def getConnectionInfo(self, wanInterfaceId=1, timeout=1):
"""Execute GetInfo action to get WAN connection information's.
:param int wanInterfaceId: the id of the WAN device
:param float timeout: the timeout to wait for the action to be executed
:return: WAN connection information's.
:rtype: ConnectionInfo
"""
namespace = Wan.getServiceType("getConnectionInfo") + str(wanInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "GetInfo", timeout=timeout)
return ConnectionInfo(results) | Execute GetInfo action to get WAN connection information's.
:param int wanInterfaceId: the id of the WAN device
:param float timeout: the timeout to wait for the action to be executed
:return: WAN connection information's.
:rtype: ConnectionInfo | Below is the the instruction that describes the task:
### Input:
Execute GetInfo action to get WAN connection information's.
:param int wanInterfaceId: the id of the WAN device
:param float timeout: the timeout to wait for the action to be executed
:return: WAN connection information's.
:rtype: ConnectionInfo
### Response:
def getConnectionInfo(self, wanInterfaceId=1, timeout=1):
"""Execute GetInfo action to get WAN connection information's.
:param int wanInterfaceId: the id of the WAN device
:param float timeout: the timeout to wait for the action to be executed
:return: WAN connection information's.
:rtype: ConnectionInfo
"""
namespace = Wan.getServiceType("getConnectionInfo") + str(wanInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "GetInfo", timeout=timeout)
return ConnectionInfo(results) |
def validate(self):
"""Dictionary with validation methods as values"""
depr = self._all_deprecated
return dict((key, val[1]) for key, val in
six.iteritems(self.defaultParams)
if key not in depr) | Dictionary with validation methods as values | Below is the the instruction that describes the task:
### Input:
Dictionary with validation methods as values
### Response:
def validate(self):
"""Dictionary with validation methods as values"""
depr = self._all_deprecated
return dict((key, val[1]) for key, val in
six.iteritems(self.defaultParams)
if key not in depr) |
def _get_string_match_value(self, string, string_match_type):
"""Gets the match value"""
if string_match_type == Type(**get_type_data('EXACT')):
return string
elif string_match_type == Type(**get_type_data('IGNORECASE')):
return re.compile('^' + string, re.I)
elif string_match_type == Type(**get_type_data('WORD')):
return re.compile('.*' + string + '.*')
elif string_match_type == Type(**get_type_data('WORDIGNORECASE')):
return re.compile('.*' + string + '.*', re.I) | Gets the match value | Below is the the instruction that describes the task:
### Input:
Gets the match value
### Response:
def _get_string_match_value(self, string, string_match_type):
"""Gets the match value"""
if string_match_type == Type(**get_type_data('EXACT')):
return string
elif string_match_type == Type(**get_type_data('IGNORECASE')):
return re.compile('^' + string, re.I)
elif string_match_type == Type(**get_type_data('WORD')):
return re.compile('.*' + string + '.*')
elif string_match_type == Type(**get_type_data('WORDIGNORECASE')):
return re.compile('.*' + string + '.*', re.I) |
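A small sketch of the match types (obj stands for whatever session object owns this helper; Type and get_type_data come from the surrounding package):

value = obj._get_string_match_value('Algebra',
                                    Type(**get_type_data('WORDIGNORECASE')))
print(bool(value.match('Intro to algebra I')))   # True - substring, case-insensitive

exact = obj._get_string_match_value('Algebra',
                                    Type(**get_type_data('EXACT')))
print(exact)                                     # 'Algebra' - plain string, no regex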
def _grow_overlaps(dna, melting_temp, require_even, length_max, overlap_min,
min_exception):
'''Grows equidistant overlaps until they meet specified constraints.
:param dna: Input sequence.
:type dna: coral.DNA
:param melting_temp: Ideal Tm of the overlaps, in degrees C.
:type melting_temp: float
:param require_even: Require that the number of oligonucleotides is even.
:type require_even: bool
    :param length_max: Maximum oligo size (e.g. 60bp price point cutoff).
    :type length_max: int
:param overlap_min: Minimum overlap size.
:type overlap_min: int
:param min_exception: In order to meet melting_temp and overlap_min
settings, allow overlaps less than overlap_min to
continue growing above melting_temp.
:type min_exception: bool
:returns: Oligos, their overlapping regions, overlap Tms, and overlap
indices.
:rtype: tuple
'''
# TODO: prevent growing overlaps from bumping into each other -
# should halt when it happens, give warning, let user decide if they still
# want the current construct
# Another option would be to start over, moving the starting positions
# near the problem region a little farther from each other - this would
# put the AT-rich region in the middle of the spanning oligo
# Try bare minimum number of oligos
oligo_n = len(dna) // length_max + 1
# Adjust number of oligos if even number required
if require_even:
oligo_increment = 2
if oligo_n % 2 == 1:
oligo_n += 1
else:
oligo_increment = 1
# Increase oligo number until the minimum oligo_len is less than length_max
while float(len(dna)) / oligo_n > length_max:
oligo_n += oligo_increment
# Loop until all overlaps meet minimum Tm and length
tm_met = False
len_met = False
while(not tm_met or not len_met):
# Calculate initial number of overlaps
overlap_n = oligo_n - 1
# Place overlaps approximately equidistant over sequence length
overlap_interval = float(len(dna)) / oligo_n
starts = [int(overlap_interval * (i + 1)) for i in range(overlap_n)]
ends = [index + 1 for index in starts]
# Fencepost for while loop
# Initial overlaps (1 base) and their tms
overlaps = [dna[start:end] for start, end in zip(starts, ends)]
overlap_tms = [coral.analysis.tm(overlap) for overlap in overlaps]
index = overlap_tms.index(min(overlap_tms))
# Initial oligos - includes the 1 base overlaps.
# All the oligos are in the same direction - reverse
# complementation of every other one happens later
oligo_starts = [0] + starts
oligo_ends = ends + [len(dna)]
oligo_indices = [oligo_starts, oligo_ends]
oligos = [dna[start:end] for start, end in zip(*oligo_indices)]
# Oligo won't be maxed in first pass. tm_met and len_met will be false
maxed = False
while not (tm_met and len_met) and not maxed:
# Recalculate overlaps and their Tms
overlaps = _recalculate_overlaps(dna, overlaps, oligo_indices)
# Tm calculation is bottleneck - only recalculate changed overlap
overlap_tms[index] = coral.analysis.tm(overlaps[index])
# Find lowest-Tm overlap and its index.
index = overlap_tms.index(min(overlap_tms))
# Move overlap at that index
oligos = _expand_overlap(dna, oligo_indices, index, oligos,
length_max)
# Regenerate conditions
maxed = any([len(x) == length_max for x in oligos])
tm_met = all([x >= melting_temp for x in overlap_tms])
if min_exception:
len_met = True
else:
len_met = all([len(x) >= overlap_min for x in overlaps])
# TODO: add test for min_exception case (use rob's sequence from
# 20130624 with 65C Tm)
if min_exception:
len_met = all([len(x) >= overlap_min for x in overlaps])
# See if len_met is true - if so do nothing
if len_met:
break
else:
while not len_met and not maxed:
# Recalculate overlaps and their Tms
overlaps = _recalculate_overlaps(dna, overlaps,
oligo_indices)
# Overlap to increase is the shortest one
overlap_lens = [len(overlap) for overlap in overlaps]
index = overlap_lens.index(min(overlap_lens))
# Increase left or right oligo
oligos = _expand_overlap(dna, oligo_indices, index, oligos,
length_max)
# Recalculate conditions
maxed = any([len(x) == length_max for x in oligos])
len_met = all([len(x) >= overlap_min for x in overlaps])
# Recalculate tms to reflect any changes (some are redundant)
overlap_tms[index] = coral.analysis.tm(overlaps[index])
# Outcome could be that len_met happened *or* maxed out
# length of one of the oligos. If len_met happened, should be
# done so long as tm_met has been satisfied. If maxed happened,
# len_met will not have been met, even if tm_met is satisfied,
# and script will reattempt with more oligos
oligo_n += oligo_increment
# Calculate location of overlaps
overlap_indices = [(oligo_indices[0][x + 1], oligo_indices[1][x]) for x in
range(overlap_n)]
return oligos, overlaps, overlap_tms, overlap_indices | Grows equidistant overlaps until they meet specified constraints.
:param dna: Input sequence.
:type dna: coral.DNA
:param melting_temp: Ideal Tm of the overlaps, in degrees C.
:type melting_temp: float
:param require_even: Require that the number of oligonucleotides is even.
:type require_even: bool
    :param length_max: Maximum oligo size (e.g. 60bp price point cutoff).
    :type length_max: int
:param overlap_min: Minimum overlap size.
:type overlap_min: int
:param min_exception: In order to meet melting_temp and overlap_min
settings, allow overlaps less than overlap_min to
continue growing above melting_temp.
:type min_exception: bool
:returns: Oligos, their overlapping regions, overlap Tms, and overlap
indices.
:rtype: tuple | Below is the the instruction that describes the task:
### Input:
Grows equidistant overlaps until they meet specified constraints.
:param dna: Input sequence.
:type dna: coral.DNA
:param melting_temp: Ideal Tm of the overlaps, in degrees C.
:type melting_temp: float
:param require_even: Require that the number of oligonucleotides is even.
:type require_even: bool
    :param length_max: Maximum oligo size (e.g. 60bp price point cutoff).
    :type length_max: int
:param overlap_min: Minimum overlap size.
:type overlap_min: int
:param min_exception: In order to meet melting_temp and overlap_min
settings, allow overlaps less than overlap_min to
continue growing above melting_temp.
:type min_exception: bool
:returns: Oligos, their overlapping regions, overlap Tms, and overlap
indices.
:rtype: tuple
### Response:
def _grow_overlaps(dna, melting_temp, require_even, length_max, overlap_min,
min_exception):
'''Grows equidistant overlaps until they meet specified constraints.
:param dna: Input sequence.
:type dna: coral.DNA
:param melting_temp: Ideal Tm of the overlaps, in degrees C.
:type melting_temp: float
:param require_even: Require that the number of oligonucleotides is even.
:type require_even: bool
    :param length_max: Maximum oligo size (e.g. 60bp price point cutoff).
    :type length_max: int
:param overlap_min: Minimum overlap size.
:type overlap_min: int
:param min_exception: In order to meet melting_temp and overlap_min
settings, allow overlaps less than overlap_min to
continue growing above melting_temp.
:type min_exception: bool
:returns: Oligos, their overlapping regions, overlap Tms, and overlap
indices.
:rtype: tuple
'''
# TODO: prevent growing overlaps from bumping into each other -
# should halt when it happens, give warning, let user decide if they still
# want the current construct
# Another option would be to start over, moving the starting positions
# near the problem region a little farther from each other - this would
# put the AT-rich region in the middle of the spanning oligo
# Try bare minimum number of oligos
oligo_n = len(dna) // length_max + 1
# Adjust number of oligos if even number required
if require_even:
oligo_increment = 2
if oligo_n % 2 == 1:
oligo_n += 1
else:
oligo_increment = 1
# Increase oligo number until the minimum oligo_len is less than length_max
while float(len(dna)) / oligo_n > length_max:
oligo_n += oligo_increment
# Loop until all overlaps meet minimum Tm and length
tm_met = False
len_met = False
while(not tm_met or not len_met):
# Calculate initial number of overlaps
overlap_n = oligo_n - 1
# Place overlaps approximately equidistant over sequence length
overlap_interval = float(len(dna)) / oligo_n
starts = [int(overlap_interval * (i + 1)) for i in range(overlap_n)]
ends = [index + 1 for index in starts]
# Fencepost for while loop
# Initial overlaps (1 base) and their tms
overlaps = [dna[start:end] for start, end in zip(starts, ends)]
overlap_tms = [coral.analysis.tm(overlap) for overlap in overlaps]
index = overlap_tms.index(min(overlap_tms))
# Initial oligos - includes the 1 base overlaps.
# All the oligos are in the same direction - reverse
# complementation of every other one happens later
oligo_starts = [0] + starts
oligo_ends = ends + [len(dna)]
oligo_indices = [oligo_starts, oligo_ends]
oligos = [dna[start:end] for start, end in zip(*oligo_indices)]
# Oligo won't be maxed in first pass. tm_met and len_met will be false
maxed = False
while not (tm_met and len_met) and not maxed:
# Recalculate overlaps and their Tms
overlaps = _recalculate_overlaps(dna, overlaps, oligo_indices)
# Tm calculation is bottleneck - only recalculate changed overlap
overlap_tms[index] = coral.analysis.tm(overlaps[index])
# Find lowest-Tm overlap and its index.
index = overlap_tms.index(min(overlap_tms))
# Move overlap at that index
oligos = _expand_overlap(dna, oligo_indices, index, oligos,
length_max)
# Regenerate conditions
maxed = any([len(x) == length_max for x in oligos])
tm_met = all([x >= melting_temp for x in overlap_tms])
if min_exception:
len_met = True
else:
len_met = all([len(x) >= overlap_min for x in overlaps])
# TODO: add test for min_exception case (use rob's sequence from
# 20130624 with 65C Tm)
if min_exception:
len_met = all([len(x) >= overlap_min for x in overlaps])
# See if len_met is true - if so do nothing
if len_met:
break
else:
while not len_met and not maxed:
# Recalculate overlaps and their Tms
overlaps = _recalculate_overlaps(dna, overlaps,
oligo_indices)
# Overlap to increase is the shortest one
overlap_lens = [len(overlap) for overlap in overlaps]
index = overlap_lens.index(min(overlap_lens))
# Increase left or right oligo
oligos = _expand_overlap(dna, oligo_indices, index, oligos,
length_max)
# Recalculate conditions
maxed = any([len(x) == length_max for x in oligos])
len_met = all([len(x) >= overlap_min for x in overlaps])
# Recalculate tms to reflect any changes (some are redundant)
overlap_tms[index] = coral.analysis.tm(overlaps[index])
# Outcome could be that len_met happened *or* maxed out
# length of one of the oligos. If len_met happened, should be
# done so long as tm_met has been satisfied. If maxed happened,
# len_met will not have been met, even if tm_met is satisfied,
# and script will reattempt with more oligos
oligo_n += oligo_increment
# Calculate location of overlaps
overlap_indices = [(oligo_indices[0][x + 1], oligo_indices[1][x]) for x in
range(overlap_n)]
return oligos, overlaps, overlap_tms, overlap_indices |
def _extract_datasets_to_harvest(cls, report):
"""Extrae de un reporte los datos necesarios para reconocer qué
datasets marcar para cosecha en cualquier generador.
Args:
report (str o list): Reporte (lista de dicts) o path a uno.
Returns:
list: Lista de tuplas con los títulos de catálogo y dataset de cada
reporte extraído.
"""
assert isinstance(report, string_types + (list,))
# Si `report` es una lista de tuplas con longitud 2, asumimos que es un
# reporte procesado para extraer los datasets a harvestear. Se devuelve
# intacta.
if (isinstance(report, list) and all([isinstance(x, tuple) and
len(x) == 2 for x in report])):
return report
table = readers.read_table(report)
table_keys = table[0].keys()
expected_keys = ["catalog_metadata_url", "dataset_title",
"dataset_accrualPeriodicity"]
        # Check that the basic keys of a harvester config are present
for key in expected_keys:
if key not in table_keys:
raise KeyError("""
The report does not contain the required key {}. Try another file.
""".format(key))
if "harvest" in table_keys:
            # The file is a dataset report.
datasets_to_harvest = [
(row["catalog_metadata_url"], row["dataset_title"]) for row in
table if int(row["harvest"])]
else:
            # The file is a harvester config.
datasets_to_harvest = [
(row["catalog_metadata_url"], row["dataset_title"]) for row in
table]
return datasets_to_harvest | Extracts from a report the data needed to identify which
datasets to mark for harvesting in any generator.
Args:
report (str or list): Report (list of dicts) or path to one.
Returns:
list: List of tuples with the catalog and dataset titles of each
extracted report. | Below is the the instruction that describes the task:
### Input:
Extracts from a report the data needed to identify which
datasets to mark for harvesting in any generator.
Args:
report (str or list): Report (list of dicts) or path to one.
Returns:
list: List of tuples with the catalog and dataset titles of each
extracted report.
### Response:
def _extract_datasets_to_harvest(cls, report):
"""Extrae de un reporte los datos necesarios para reconocer qué
datasets marcar para cosecha en cualquier generador.
Args:
report (str o list): Reporte (lista de dicts) o path a uno.
Returns:
list: Lista de tuplas con los títulos de catálogo y dataset de cada
reporte extraído.
"""
assert isinstance(report, string_types + (list,))
# If `report` is a list of tuples of length 2, we assume it is a report
# already processed to extract the datasets to harvest. It is returned
# unchanged.
if (isinstance(report, list) and all([isinstance(x, tuple) and
len(x) == 2 for x in report])):
return report
table = readers.read_table(report)
table_keys = table[0].keys()
expected_keys = ["catalog_metadata_url", "dataset_title",
"dataset_accrualPeriodicity"]
# Check for the presence of the basic keys of a harvester config
for key in expected_keys:
if key not in table_keys:
raise KeyError("""
El reporte no contiene la clave obligatoria {}. Pruebe con otro archivo.
""".format(key))
if "harvest" in table_keys:
# The file is a dataset report.
datasets_to_harvest = [
(row["catalog_metadata_url"], row["dataset_title"]) for row in
table if int(row["harvest"])]
else:
# The file is a harvester config.
datasets_to_harvest = [
(row["catalog_metadata_url"], row["dataset_title"]) for row in
table]
return datasets_to_harvest |
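A minimal, runnable sketch of the selection step above. The two sample rows are invented for illustration, so readers.read_table is not needed; only the same comprehension over a list of dicts is shown.

table = [
    {"catalog_metadata_url": "http://example.org/a.json",
     "dataset_title": "Dataset A", "harvest": "1"},
    {"catalog_metadata_url": "http://example.org/b.json",
     "dataset_title": "Dataset B", "harvest": "0"},
]
# Same logic as the report branch: keep rows whose "harvest" flag is truthy
datasets_to_harvest = [(row["catalog_metadata_url"], row["dataset_title"])
                       for row in table if int(row["harvest"])]
print(datasets_to_harvest)  # [('http://example.org/a.json', 'Dataset A')]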
def _getCachedValue(obj, relicFunc, resultType):
"""
Retrieves a value from obj.cached (if not None) or calls @relicFunc and
caches the result (of @resultType) in obj.cached.
This is a common implementation for orderG1/G2/Gt and generatorG1/G2/Gt
"""
# If the value has not been previously cached, fetch
if not obj.cached:
obj.cached = resultType()
relicFunc(byref(obj.cached))
return obj.cached | Retrieves a value from obj.cached (if not None) or calls @relicFunc and
caches the result (of @resultType) in obj.cached.
This is a common implementation for orderG1/G2/Gt and generatorG1/G2/Gt | Below is the the instruction that describes the task:
### Input:
Retrieves a value from obj.cached (if not None) or calls @relicFunc and
caches the result (of @resultType) in obj.cached.
This is a common implementation for orderG1/G2/Gt and generatorG1/G2/Gt
### Response:
def _getCachedValue(obj, relicFunc, resultType):
"""
Retrieves a value from obj.cached (if not None) or calls @relicFunc and
caches the result (of @resultType) in obj.cached.
This is a common implementation for orderG1/G2/Gt and generatorG1/G2/Gt
"""
# If the value has not been previously cached, fetch
if not obj.cached:
obj.cached = resultType()
relicFunc(byref(obj.cached))
return obj.cached |
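The relic/ctypes bindings are not reproduced here, but the cache-on-first-use pattern can be sketched with a plain callable standing in for relicFunc (all names below are invented). Note that, as in the original, an empty cached value would be recomputed, because the guard tests truthiness rather than `is None`.

class Holder(object):
    def __init__(self):
        self.cached = None  # mirrors obj.cached above

def get_cached_value(obj, fill, result_type):
    # Compute and store the value only on first access
    if not obj.cached:
        obj.cached = result_type()
        fill(obj.cached)  # stands in for relicFunc(byref(obj.cached))
    return obj.cached

h = Holder()
print(get_cached_value(h, lambda lst: lst.extend([1, 2, 3]), list))  # [1, 2, 3]
print(get_cached_value(h, lambda lst: lst.append(99), list))         # still [1, 2, 3]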
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
return self.std(ddof=ddof) / np.sqrt(self.count()) | Compute standard error of the mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : integer, default 1
degrees of freedom | Below is the the instruction that describes the task:
### Input:
Compute standard error of the mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : integer, default 1
degrees of freedom
### Response:
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values.
For multiple groupings, the result index will be a MultiIndex.
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
return self.std(ddof=ddof) / np.sqrt(self.count()) |
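A small worked example of what GroupBy.sem returns, assuming a pandas/numpy environment; the DataFrame is invented:

import numpy as np
import pandas as pd

df = pd.DataFrame({"group": ["a", "a", "a", "b", "b"],
                   "value": [1.0, 2.0, 3.0, 10.0, 14.0]})
g = df.groupby("group")["value"]
# sem is the group standard deviation divided by the square root of the group size
print(g.sem())
print(g.std(ddof=1) / np.sqrt(g.count()))  # same values as above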
def IsAllSpent(self):
"""
Flag indicating whether the entire balance has been spent.
Returns:
bool:
"""
for item in self.Items:
if item == CoinState.Confirmed:
return False
return True | Flag indicating whether the entire balance has been spent.
Returns:
bool: | Below is the the instruction that describes the task:
### Input:
Flag indicating whether the entire balance has been spent.
Returns:
bool:
### Response:
def IsAllSpent(self):
"""
Flag indicating whether the entire balance has been spent.
Returns:
bool:
"""
for item in self.Items:
if item == CoinState.Confirmed:
return False
return True |
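A self-contained sketch of the same check. The real CoinState is a richer flag type, so the two-member IntEnum below is only an invented stand-in.

from enum import IntEnum

class CoinState(IntEnum):  # invented stand-in
    Confirmed = 1
    Spent = 2

class Coins(object):
    def __init__(self, items):
        self.Items = items
    def IsAllSpent(self):
        # Any coin still Confirmed means the balance is not fully spent
        return all(item != CoinState.Confirmed for item in self.Items)

print(Coins([CoinState.Spent, CoinState.Spent]).IsAllSpent())      # True
print(Coins([CoinState.Spent, CoinState.Confirmed]).IsAllSpent())  # False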
def _from_docstring_rst(doc):
"""
format from docstring to ReStructured Text
"""
def format_fn(line, status):
""" format function """
if re_from_data.match(line):
line = re_from_data.sub(r"**\1** ", line)
status["add_line"] = True
line = re_from_defaults.sub(r"*\1*", line)
if status["listing"]:
# parameters
if re_from_param.match(line):
m = re_from_param.match(line)
line = " - ``{}`` {}".format(m.group(1), m.group(3))
# status items
elif re_from_status.match(line):
m = re_from_status.match(line)
line = " - ``{}`` {}".format(m.group(1), m.group(3))
# bullets
elif re_from_item.match(line):
line = re_from_item.sub(r" -", line)
# is continuation line
else:
line = " " * 4 + line.lstrip()
# in .rst format code samples use double backticks vs single ones for
# .md. This converts them.
line = re_lone_backtick.sub("``", line)
return line
return _reformat_docstring(doc, format_fn, code_newline="\n") | format from docstring to ReStructured Text | Below is the the instruction that describes the task:
### Input:
format from docstring to ReStructured Text
### Response:
def _from_docstring_rst(doc):
"""
format from docstring to ReStructured Text
"""
def format_fn(line, status):
""" format function """
if re_from_data.match(line):
line = re_from_data.sub(r"**\1** ", line)
status["add_line"] = True
line = re_from_defaults.sub(r"*\1*", line)
if status["listing"]:
# parameters
if re_from_param.match(line):
m = re_from_param.match(line)
line = " - ``{}`` {}".format(m.group(1), m.group(3))
# status items
elif re_from_status.match(line):
m = re_from_status.match(line)
line = " - ``{}`` {}".format(m.group(1), m.group(3))
# bullets
elif re_from_item.match(line):
line = re_from_item.sub(r" -", line)
# is continuation line
else:
line = " " * 4 + line.lstrip()
# in .rst format code samples use double backticks vs single ones for
# .md. This converts them.
line = re_lone_backtick.sub("``", line)
return line
return _reformat_docstring(doc, format_fn, code_newline="\n") |
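The module-level regexes (re_from_data, re_from_param, re_lone_backtick, ...) are not shown in this record. As one plausible reading of the backtick conversion, the pattern below matches a backtick with no backtick neighbours; it is an assumption, not the module's actual definition.

import re

re_lone_backtick = re.compile(r"(?<!`)`(?!`)")  # assumed pattern

line = "use `pip install package` to install"
print(re_lone_backtick.sub("``", line))
# use ``pip install package`` to install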
def hash_data(salt, value, hash_alg=None):
"""
Hashes a value together with a salt with the given hash algorithm.
:type salt: str
:type hash_alg: str
:type value: str
:param salt: hash salt
:param hash_alg: the hash algorithm to use (default: SHA512)
:param value: value to hash together with the salt
:return: hashed value
"""
hash_alg = hash_alg or 'sha512'
hasher = hashlib.new(hash_alg)
hasher.update(value.encode('utf-8'))
hasher.update(salt.encode('utf-8'))
value_hashed = hasher.hexdigest()
return value_hashed | Hashes a value together with a salt with the given hash algorithm.
:type salt: str
:type hash_alg: str
:type value: str
:param salt: hash salt
:param hash_alg: the hash algorithm to use (default: SHA512)
:param value: value to hash together with the salt
:return: hashed value | Below is the the instruction that describes the task:
### Input:
Hashes a value together with a salt with the given hash algorithm.
:type salt: str
:type hash_alg: str
:type value: str
:param salt: hash salt
:param hash_alg: the hash algorithm to use (default: SHA512)
:param value: value to hash together with the salt
:return: hashed value
### Response:
def hash_data(salt, value, hash_alg=None):
"""
Hashes a value together with a salt with the given hash algorithm.
:type salt: str
:type hash_alg: str
:type value: str
:param salt: hash salt
:param hash_alg: the hash algorithm to use (default: SHA512)
:param value: value to hash together with the salt
:return: hashed value
"""
hash_alg = hash_alg or 'sha512'
hasher = hashlib.new(hash_alg)
hasher.update(value.encode('utf-8'))
hasher.update(salt.encode('utf-8'))
value_hashed = hasher.hexdigest()
return value_hashed |
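Example calls against the function above (salt strings and values are invented; the module's hashlib import is assumed). Digests are compared by equality and length to keep the output short:

digest_a = hash_data("salt-123", "user@example.com")
digest_b = hash_data("salt-456", "user@example.com")
digest_c = hash_data("salt-123", "user@example.com", hash_alg="sha256")
print(digest_a == hash_data("salt-123", "user@example.com"))  # True: same salt and value
print(digest_a == digest_b)                                   # False: different salt
print(len(digest_a), len(digest_c))                           # 128 and 64 hex characters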
def listTheExtras(self, deleteAlso):
""" Use ConfigObj's get_extra_values() call to find any extra/unknown
parameters we may have loaded. Return a string similar to findTheLost.
If deleteAlso is True, this will also delete any extra/unknown items.
"""
# get list of extras
extras = configobj.get_extra_values(self)
# extras is in format: [(sections, key), (sections, key), ]
# but we need: [(sections, key, result), ...] - set all results to
# a bool just to make it the right shape. BUT, since we are in
# here anyway, make that bool mean something - hide info in it about
# whether that extra item is a section (1) or just a single par (0)
#
# simplified, this is: expanded = [ (x+(abool,)) for x in extras]
expanded = [ (x+ \
( bool(len(x[0])<1 and hasattr(self[x[1]], 'keys')), ) \
) for x in extras]
retval = ''
if expanded:
retval = flattened2str(expanded, extra=1)
# but before we return, delete them (from ourself!) if requested to
if deleteAlso:
for tup_to_del in extras:
target = self
# descend the tree to the dict where this item is located.
# (this works because target is not a copy (because the dict
# type is mutable))
location = tup_to_del[0]
for subdict in location: target = target[subdict]
# delete it
target.pop(tup_to_del[1])
return retval | Use ConfigObj's get_extra_values() call to find any extra/unknown
parameters we may have loaded. Return a string similar to findTheLost.
If deleteAlso is True, this will also delete any extra/unknown items. | Below is the the instruction that describes the task:
### Input:
Use ConfigObj's get_extra_values() call to find any extra/unknown
parameters we may have loaded. Return a string similar to findTheLost.
If deleteAlso is True, this will also delete any extra/unknown items.
### Response:
def listTheExtras(self, deleteAlso):
""" Use ConfigObj's get_extra_values() call to find any extra/unknown
parameters we may have loaded. Return a string similar to findTheLost.
If deleteAlso is True, this will also delete any extra/unknown items.
"""
# get list of extras
extras = configobj.get_extra_values(self)
# extras is in format: [(sections, key), (sections, key), ]
# but we need: [(sections, key, result), ...] - set all results to
# a bool just to make it the right shape. BUT, since we are in
# here anyway, make that bool mean something - hide info in it about
# whether that extra item is a section (1) or just a single par (0)
#
# simplified, this is: expanded = [ (x+(abool,)) for x in extras]
expanded = [ (x+ \
( bool(len(x[0])<1 and hasattr(self[x[1]], 'keys')), ) \
) for x in extras]
retval = ''
if expanded:
retval = flattened2str(expanded, extra=1)
# but before we return, delete them (from ourself!) if requested to
if deleteAlso:
for tup_to_del in extras:
target = self
# descend the tree to the dict where this item is located.
# (this works because target is not a copy (because the dict
# type is mutable))
location = tup_to_del[0]
for subdict in location: target = target[subdict]
# delete it
target.pop(tup_to_del[1])
return retval |
def get_resampled_top_edge(self, angle_var=0.1):
"""
This method computes a simplified representation of a fault top edge
by removing the points that are not describing a change of direction,
provided a certain tolerance angle.
:param float angle_var:
Number representing the maximum deviation (in degrees) admitted
without the creation of a new segment
:returns:
A :class:`~openquake.hazardlib.geo.line.Line` representing the
rupture surface's top edge.
"""
mesh = self.mesh
top_edge = [Point(mesh.lons[0][0], mesh.lats[0][0], mesh.depths[0][0])]
for i in range(len(mesh.triangulate()[1][0]) - 1):
v1 = numpy.asarray(mesh.triangulate()[1][0][i])
v2 = numpy.asarray(mesh.triangulate()[1][0][i + 1])
cosang = numpy.dot(v1, v2)
sinang = numpy.linalg.norm(numpy.cross(v1, v2))
angle = math.degrees(numpy.arctan2(sinang, cosang))
if abs(angle) > angle_var:
top_edge.append(Point(mesh.lons[0][i + 1],
mesh.lats[0][i + 1],
mesh.depths[0][i + 1]))
top_edge.append(Point(mesh.lons[0][-1],
mesh.lats[0][-1], mesh.depths[0][-1]))
line_top_edge = Line(top_edge)
return line_top_edge | This method computes a simplified representation of a fault top edge
by removing the points that are not describing a change of direction,
provided a certain tolerance angle.
:param float angle_var:
Number representing the maximum deviation (in degrees) admitted
without the creation of a new segment
:returns:
A :class:`~openquake.hazardlib.geo.line.Line` representing the
rupture surface's top edge. | Below is the the instruction that describes the task:
### Input:
This method computes a simplified representation of a fault top edge
by removing the points that are not describing a change of direction,
provided a certain tolerance angle.
:param float angle_var:
Number representing the maximum deviation (in degrees) admitted
without the creation of a new segment
:returns:
A :class:`~openquake.hazardlib.geo.line.Line` representing the
rupture surface's top edge.
### Response:
def get_resampled_top_edge(self, angle_var=0.1):
"""
This method computes a simplified representation of a fault top edge
by removing the points that are not describing a change of direction,
provided a certain tolerance angle.
:param float angle_var:
Number representing the maximum deviation (in degrees) admitted
without the creation of a new segment
:returns:
A :class:`~openquake.hazardlib.geo.line.Line` representing the
rupture surface's top edge.
"""
mesh = self.mesh
top_edge = [Point(mesh.lons[0][0], mesh.lats[0][0], mesh.depths[0][0])]
for i in range(len(mesh.triangulate()[1][0]) - 1):
v1 = numpy.asarray(mesh.triangulate()[1][0][i])
v2 = numpy.asarray(mesh.triangulate()[1][0][i + 1])
cosang = numpy.dot(v1, v2)
sinang = numpy.linalg.norm(numpy.cross(v1, v2))
angle = math.degrees(numpy.arctan2(sinang, cosang))
if abs(angle) > angle_var:
top_edge.append(Point(mesh.lons[0][i + 1],
mesh.lats[0][i + 1],
mesh.depths[0][i + 1]))
top_edge.append(Point(mesh.lons[0][-1],
mesh.lats[0][-1], mesh.depths[0][-1]))
line_top_edge = Line(top_edge)
return line_top_edge |
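The deviation test in the loop above reduces to the angle between two direction vectors. A standalone check of that formula (vectors invented):

import math
import numpy

def angle_between(v1, v2):
    # atan2 of the cross-product norm and the dot product, as in the loop above
    v1, v2 = numpy.asarray(v1, float), numpy.asarray(v2, float)
    cosang = numpy.dot(v1, v2)
    sinang = numpy.linalg.norm(numpy.cross(v1, v2))
    return math.degrees(numpy.arctan2(sinang, cosang))

print(angle_between([1, 0, 0], [1, 0, 0]))  # ~0.0: below angle_var, point dropped
print(angle_between([1, 0, 0], [1, 1, 0]))  # 45.0: above angle_var, point kept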
def collect_num(self):
"""获取答案收藏数
:return: 答案收藏数量
:rtype: int
"""
element = self.soup.find("a", {
"data-za-a": "click_answer_collected_count"
})
if element is None:
return 0
else:
return int(element.get_text()) | Get the number of times the answer has been favorited (collected)
:return: the answer's favorite (collection) count
:rtype: int | Below is the the instruction that describes the task:
### Input:
Get the number of times the answer has been favorited (collected)
:return: the answer's favorite (collection) count
:rtype: int
### Response:
def collect_num(self):
"""获取答案收藏数
:return: 答案收藏数量
:rtype: int
"""
element = self.soup.find("a", {
"data-za-a": "click_answer_collected_count"
})
if element is None:
return 0
else:
return int(element.get_text()) |
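A runnable illustration of the lookup, using an invented HTML fragment rather than a real Zhihu page:

from bs4 import BeautifulSoup

html = '<a data-za-a="click_answer_collected_count">42</a>'
soup = BeautifulSoup(html, "html.parser")
element = soup.find("a", {"data-za-a": "click_answer_collected_count"})
print(0 if element is None else int(element.get_text()))  # 42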
def db_optimize(name,
table=None,
**connection_args):
'''
Optimizes the full database or just a given table
CLI Example:
.. code-block:: bash
salt '*' mysql.db_optimize dbname
'''
ret = []
if table is None:
# we need to optimize all tables
tables = db_tables(name, **connection_args)
for table in tables:
log.info('Optimizing table \'%s\' in db \'%s\'..', table, name)
ret.append(__optimize_table(name, table, **connection_args))
else:
log.info('Optimizing table \'%s\' in db \'%s\'..', table, name)
ret = __optimize_table(name, table, **connection_args)
return ret | Optimizes the full database or just a given table
CLI Example:
.. code-block:: bash
salt '*' mysql.db_optimize dbname | Below is the the instruction that describes the task:
### Input:
Optimizes the full database or just a given table
CLI Example:
.. code-block:: bash
salt '*' mysql.db_optimize dbname
### Response:
def db_optimize(name,
table=None,
**connection_args):
'''
Optimizes the full database or just a given table
CLI Example:
.. code-block:: bash
salt '*' mysql.db_optimize dbname
'''
ret = []
if table is None:
# we need to optimize all tables
tables = db_tables(name, **connection_args)
for table in tables:
log.info('Optimizing table \'%s\' in db \'%s\'..', table, name)
ret.append(__optimize_table(name, table, **connection_args))
else:
log.info('Optimizing table \'%s\' in db \'%s\'..', table, name)
ret = __optimize_table(name, table, **connection_args)
return ret |
def __dict_invert(self, data):
"""Helper function for merge.
Takes a dictionary whose values are lists and returns a dict with
the elements of each list as keys and the original keys as values.
"""
outdict = {}
for k,lst in data.items():
if isinstance(lst, str):
lst = lst.split()
for entry in lst:
outdict[entry] = k
return outdict | Helper function for merge.
Takes a dictionary whose values are lists and returns a dict with
the elements of each list as keys and the original keys as values. | Below is the the instruction that describes the task:
### Input:
Helper function for merge.
Takes a dictionary whose values are lists and returns a dict with
the elements of each list as keys and the original keys as values.
### Response:
def __dict_invert(self, data):
"""Helper function for merge.
Takes a dictionary whose values are lists and returns a dict with
the elements of each list as keys and the original keys as values.
"""
outdict = {}
for k,lst in data.items():
if isinstance(lst, str):
lst = lst.split()
for entry in lst:
outdict[entry] = k
return outdict |
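A quick demonstration of the inversion (the input dict is invented):

data = {"pine": "apple banana",           # string values are split on whitespace
        "citrus": ["lemon", "lime"]}
outdict = {}
for k, lst in data.items():
    if isinstance(lst, str):
        lst = lst.split()
    for entry in lst:
        outdict[entry] = k
print(outdict)
# {'apple': 'pine', 'banana': 'pine', 'lemon': 'citrus', 'lime': 'citrus'}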
def _find_own_cgroups():
"""
For all subsystems, return the information in which (sub-)cgroup this process is in.
(Each process is in exactly one cgroup in each hierarchy.)
@return a generator of tuples (subsystem, cgroup)
"""
try:
with open('/proc/self/cgroup', 'rt') as ownCgroupsFile:
for cgroup in _parse_proc_pid_cgroup(ownCgroupsFile):
yield cgroup
except IOError:
logging.exception('Cannot read /proc/self/cgroup') | For all subsystems, return the information in which (sub-)cgroup this process is in.
(Each process is in exactly one cgroup in each hierarchy.)
@return a generator of tuples (subsystem, cgroup) | Below is the the instruction that describes the task:
### Input:
For all subsystems, return the information in which (sub-)cgroup this process is in.
(Each process is in exactly one cgroup in each hierarchy.)
@return a generator of tuples (subsystem, cgroup)
### Response:
def _find_own_cgroups():
"""
For all subsystems, return the information in which (sub-)cgroup this process is in.
(Each process is in exactly one cgroup in each hierarchy.)
@return a generator of tuples (subsystem, cgroup)
"""
try:
with open('/proc/self/cgroup', 'rt') as ownCgroupsFile:
for cgroup in _parse_proc_pid_cgroup(ownCgroupsFile):
yield cgroup
except IOError:
logging.exception('Cannot read /proc/self/cgroup') |
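_parse_proc_pid_cgroup is not shown in this record. One plausible sketch of it, run on an invented /proc/self/cgroup payload ('hierarchy-id:subsystems:path' per line); the real helper may differ:

import io

def parse_proc_pid_cgroup(lines):
    # Yield one (subsystem, cgroup) tuple per subsystem listed on each line
    for line in lines:
        fields = line.strip().split(':', 2)
        for subsystem in fields[1].split(','):
            yield (subsystem, fields[2])

sample = io.StringIO(u"7:cpu,cpuacct:/user.slice\n4:memory:/user.slice/user-1000.slice\n")
print(list(parse_proc_pid_cgroup(sample)))
# [('cpu', '/user.slice'), ('cpuacct', '/user.slice'),
#  ('memory', '/user.slice/user-1000.slice')]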
def missing(self, field, last=True):
'''
Numeric fields support specific handling for missing fields in a doc.
The missing value can be _last, _first, or a custom value
(that will be used for missing docs as the sort value).
missing('price')
> {"price" : {"missing": "_last" } }
missing('price',False)
> {"price" : {"missing": "_first"} }
'''
if last:
self.append({field: {'missing': '_last'}})
else:
self.append({field: {'missing': '_first'}})
return self | Numeric fields support specific handling for missing fields in a doc.
The missing value can be _last, _first, or a custom value
(that will be used for missing docs as the sort value).
missing('price')
> {"price" : {"missing": "_last" } }
missing('price',False)
> {"price" : {"missing": "_first"} } | Below is the the instruction that describes the task:
### Input:
Numeric fields support specific handling for missing fields in a doc.
The missing value can be _last, _first, or a custom value
(that will be used for missing docs as the sort value).
missing('price')
> {"price" : {"missing": "_last" } }
missing('price',False)
> {"price" : {"missing": "_first"} }
### Response:
def missing(self, field, last=True):
'''
Numeric fields support specific handling for missing fields in a doc.
The missing value can be _last, _first, or a custom value
(that will be used for missing docs as the sort value).
missing('price')
> {"price" : {"missing": "_last" } }
missing('price',False)
> {"price" : {"missing": "_first"} }
'''
if last:
self.append({field: {'missing': '_last'}})
else:
self.append({field: {'missing': '_first'}})
return self |
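A compact stand-in for the sort builder this method belongs to, just to show the clauses it emits (the class name below is invented):

class Sort(list):
    def missing(self, field, last=True):
        self.append({field: {'missing': '_last' if last else '_first'}})
        return self

print(Sort().missing('price'))         # [{'price': {'missing': '_last'}}]
print(Sort().missing('price', False))  # [{'price': {'missing': '_first'}}]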
def Read(self, file_object):
"""Reads dependency definitions.
Args:
file_object (file): file-like object to read from.
Yields:
DependencyDefinition: dependency definition.
"""
config_parser = configparser.RawConfigParser()
# pylint: disable=deprecated-method
# TODO: replace readfp by read_file, check if Python 2 compatible
config_parser.readfp(file_object)
for section_name in config_parser.sections():
dependency_definition = DependencyDefinition(section_name)
for value_name in self._VALUE_NAMES:
value = self._GetConfigValue(config_parser, section_name, value_name)
setattr(dependency_definition, value_name, value)
yield dependency_definition | Reads dependency definitions.
Args:
file_object (file): file-like object to read from.
Yields:
DependencyDefinition: dependency definition. | Below is the the instruction that describes the task:
### Input:
Reads dependency definitions.
Args:
file_object (file): file-like object to read from.
Yields:
DependencyDefinition: dependency definition.
### Response:
def Read(self, file_object):
"""Reads dependency definitions.
Args:
file_object (file): file-like object to read from.
Yields:
DependencyDefinition: dependency definition.
"""
config_parser = configparser.RawConfigParser()
# pylint: disable=deprecated-method
# TODO: replace readfp by read_file, check if Python 2 compatible
config_parser.readfp(file_object)
for section_name in config_parser.sections():
dependency_definition = DependencyDefinition(section_name)
for value_name in self._VALUE_NAMES:
value = self._GetConfigValue(config_parser, section_name, value_name)
setattr(dependency_definition, value_name, value)
yield dependency_definition |
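Regarding the TODO above: on Python 3, read_file is the drop-in replacement for readfp. A minimal check with an invented .ini payload and value name:

import io
import configparser

ini = u"[dfvfs]\ndpkg_name = python-dfvfs\n"
config_parser = configparser.RawConfigParser()
config_parser.read_file(io.StringIO(ini))  # read_file instead of the deprecated readfp
for section_name in config_parser.sections():
    print(section_name, dict(config_parser.items(section_name)))
# dfvfs {'dpkg_name': 'python-dfvfs'}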
def create_environment(self, env_name, version_label=None,
solution_stack_name=None, cname_prefix=None, description=None,
option_settings=None, tier_name='WebServer', tier_type='Standard', tier_version='1.1'):
"""
Creates a new environment
"""
out("Creating environment: " + str(env_name) + ", tier_name:" + str(tier_name) + ", tier_type:" + str(tier_type))
self.ebs.create_environment(self.app_name, env_name,
version_label=version_label,
solution_stack_name=solution_stack_name,
cname_prefix=cname_prefix,
description=description,
option_settings=option_settings,
tier_type=tier_type,
tier_name=tier_name,
tier_version=tier_version) | Creates a new environment | Below is the the instruction that describes the task:
### Input:
Creates a new environment
### Response:
def create_environment(self, env_name, version_label=None,
solution_stack_name=None, cname_prefix=None, description=None,
option_settings=None, tier_name='WebServer', tier_type='Standard', tier_version='1.1'):
"""
Creates a new environment
"""
out("Creating environment: " + str(env_name) + ", tier_name:" + str(tier_name) + ", tier_type:" + str(tier_type))
self.ebs.create_environment(self.app_name, env_name,
version_label=version_label,
solution_stack_name=solution_stack_name,
cname_prefix=cname_prefix,
description=description,
option_settings=option_settings,
tier_type=tier_type,
tier_name=tier_name,
tier_version=tier_version) |
def _autoinsert_quotes(self, key):
"""Control how to automatically insert quotes in various situations."""
char = {Qt.Key_QuoteDbl: '"', Qt.Key_Apostrophe: '\''}[key]
line_text = self.editor.get_text('sol', 'eol')
line_to_cursor = self.editor.get_text('sol', 'cursor')
cursor = self.editor.textCursor()
last_three = self.editor.get_text('sol', 'cursor')[-3:]
last_two = self.editor.get_text('sol', 'cursor')[-2:]
trailing_text = self.editor.get_text('cursor', 'eol').strip()
if self.editor.has_selected_text():
text = self.editor.get_selected_text()
self.editor.insert_text("{0}{1}{0}".format(char, text))
# keep text selected, for inserting multiple quotes
cursor.movePosition(QTextCursor.Left, QTextCursor.MoveAnchor, 1)
cursor.movePosition(QTextCursor.Left, QTextCursor.KeepAnchor,
len(text))
self.editor.setTextCursor(cursor)
elif self.editor.in_comment():
self.editor.insert_text(char)
elif (len(trailing_text) > 0 and
not unmatched_quotes_in_line(line_to_cursor) == char and
not trailing_text[0] in (',', ':', ';', ')', ']', '}')):
self.editor.insert_text(char)
elif (unmatched_quotes_in_line(line_text) and
(not last_three == 3*char)):
self.editor.insert_text(char)
# Move to the right if we are before a quote
elif self.editor.next_char() == char:
cursor.movePosition(QTextCursor.NextCharacter,
QTextCursor.KeepAnchor, 1)
cursor.clearSelection()
self.editor.setTextCursor(cursor)
# Automatic insertion of triple double quotes (for docstrings)
elif last_three == 3*char:
self.editor.insert_text(3*char)
cursor = self.editor.textCursor()
cursor.movePosition(QTextCursor.PreviousCharacter,
QTextCursor.KeepAnchor, 3)
cursor.clearSelection()
self.editor.setTextCursor(cursor)
# If last two chars are quotes, just insert one more because most
# probably the user wants to write a docstring
elif last_two == 2*char:
self.editor.insert_text(char)
self.editor.delayed_popup_docstring()
# Automatic insertion of quotes
else:
self.editor.insert_text(2*char)
cursor = self.editor.textCursor()
cursor.movePosition(QTextCursor.PreviousCharacter)
self.editor.setTextCursor(cursor) | Control how to automatically insert quotes in various situations. | Below is the the instruction that describes the task:
### Input:
Control how to automatically insert quotes in various situations.
### Response:
def _autoinsert_quotes(self, key):
"""Control how to automatically insert quotes in various situations."""
char = {Qt.Key_QuoteDbl: '"', Qt.Key_Apostrophe: '\''}[key]
line_text = self.editor.get_text('sol', 'eol')
line_to_cursor = self.editor.get_text('sol', 'cursor')
cursor = self.editor.textCursor()
last_three = self.editor.get_text('sol', 'cursor')[-3:]
last_two = self.editor.get_text('sol', 'cursor')[-2:]
trailing_text = self.editor.get_text('cursor', 'eol').strip()
if self.editor.has_selected_text():
text = self.editor.get_selected_text()
self.editor.insert_text("{0}{1}{0}".format(char, text))
# keep text selected, for inserting multiple quotes
cursor.movePosition(QTextCursor.Left, QTextCursor.MoveAnchor, 1)
cursor.movePosition(QTextCursor.Left, QTextCursor.KeepAnchor,
len(text))
self.editor.setTextCursor(cursor)
elif self.editor.in_comment():
self.editor.insert_text(char)
elif (len(trailing_text) > 0 and
not unmatched_quotes_in_line(line_to_cursor) == char and
not trailing_text[0] in (',', ':', ';', ')', ']', '}')):
self.editor.insert_text(char)
elif (unmatched_quotes_in_line(line_text) and
(not last_three == 3*char)):
self.editor.insert_text(char)
# Move to the right if we are before a quote
elif self.editor.next_char() == char:
cursor.movePosition(QTextCursor.NextCharacter,
QTextCursor.KeepAnchor, 1)
cursor.clearSelection()
self.editor.setTextCursor(cursor)
# Automatic insertion of triple double quotes (for docstrings)
elif last_three == 3*char:
self.editor.insert_text(3*char)
cursor = self.editor.textCursor()
cursor.movePosition(QTextCursor.PreviousCharacter,
QTextCursor.KeepAnchor, 3)
cursor.clearSelection()
self.editor.setTextCursor(cursor)
# If last two chars are quotes, just insert one more because most
# probably the user wants to write a docstring
elif last_two == 2*char:
self.editor.insert_text(char)
self.editor.delayed_popup_docstring()
# Automatic insertion of quotes
else:
self.editor.insert_text(2*char)
cursor = self.editor.textCursor()
cursor.movePosition(QTextCursor.PreviousCharacter)
self.editor.setTextCursor(cursor) |
def key_exists(key_id, region=None, key=None, keyid=None, profile=None):
'''
Check for the existence of a key.
CLI example::
salt myminion boto_kms.key_exists 'alias/mykey'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
key = conn.describe_key(key_id)
# TODO: add to context cache
r['result'] = True
except boto.exception.BotoServerError as e:
if isinstance(e, boto.kms.exceptions.NotFoundException):
r['result'] = False
return r
r['error'] = __utils__['boto.get_error'](e)
return r | Check for the existence of a key.
CLI example::
salt myminion boto_kms.key_exists 'alias/mykey' | Below is the the instruction that describes the task:
### Input:
Check for the existence of a key.
CLI example::
salt myminion boto_kms.key_exists 'alias/mykey'
### Response:
def key_exists(key_id, region=None, key=None, keyid=None, profile=None):
'''
Check for the existence of a key.
CLI example::
salt myminion boto_kms.key_exists 'alias/mykey'
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
r = {}
try:
key = conn.describe_key(key_id)
# TODO: add to context cache
r['result'] = True
except boto.exception.BotoServerError as e:
if isinstance(e, boto.kms.exceptions.NotFoundException):
r['result'] = False
return r
r['error'] = __utils__['boto.get_error'](e)
return r |
def _collapse_outgroup(tree, taxdicts):
""" collapse outgroup in ete Tree for easier viewing """
## check that all tests have the same outgroup
outg = taxdicts[0]["p4"]
if not all([i["p4"] == outg for i in taxdicts]):
raise Exception("no good")
## prune tree, keep only one sample from outgroup
tre = ete.Tree(tree.write(format=1)) #tree.copy(method="deepcopy")
alltax = [i for i in tre.get_leaf_names() if i not in outg]
alltax += [outg[0]]
tre.prune(alltax)
tre.search_nodes(name=outg[0])[0].name = "outgroup"
tre.ladderize()
## remove other outgroups from taxdicts
taxd = copy.deepcopy(taxdicts)
newtaxdicts = []
for test in taxd:
#test["p4"] = [outg[0]]
test["p4"] = ["outgroup"]
newtaxdicts.append(test)
return tre, newtaxdicts | collapse outgroup in ete Tree for easier viewing | Below is the the instruction that describes the task:
### Input:
collapse outgroup in ete Tree for easier viewing
### Response:
def _collapse_outgroup(tree, taxdicts):
""" collapse outgroup in ete Tree for easier viewing """
## check that all tests have the same outgroup
outg = taxdicts[0]["p4"]
if not all([i["p4"] == outg for i in taxdicts]):
raise Exception("no good")
## prune tree, keep only one sample from outgroup
tre = ete.Tree(tree.write(format=1)) #tree.copy(method="deepcopy")
alltax = [i for i in tre.get_leaf_names() if i not in outg]
alltax += [outg[0]]
tre.prune(alltax)
tre.search_nodes(name=outg[0])[0].name = "outgroup"
tre.ladderize()
## remove other outgroups from taxdicts
taxd = copy.deepcopy(taxdicts)
newtaxdicts = []
for test in taxd:
#test["p4"] = [outg[0]]
test["p4"] = ["outgroup"]
newtaxdicts.append(test)
return tre, newtaxdicts |
def _shuffled(seq):
"""Deterministically shuffle identically under both py2 + py3."""
fixed_random = random.Random()
if six.PY2: # pragma: no cover (py2)
fixed_random.seed(FIXED_RANDOM_SEED)
else: # pragma: no cover (py3)
fixed_random.seed(FIXED_RANDOM_SEED, version=1)
seq = list(seq)
random.shuffle(seq, random=fixed_random.random)
return seq | Deterministically shuffle identically under both py2 + py3. | Below is the the instruction that describes the task:
### Input:
Deterministically shuffle identically under both py2 + py3.
### Response:
def _shuffled(seq):
"""Deterministically shuffle identically under both py2 + py3."""
fixed_random = random.Random()
if six.PY2: # pragma: no cover (py2)
fixed_random.seed(FIXED_RANDOM_SEED)
else: # pragma: no cover (py3)
fixed_random.seed(FIXED_RANDOM_SEED, version=1)
seq = list(seq)
random.shuffle(seq, random=fixed_random.random)
return seq |
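The random= argument to random.shuffle used above is deprecated in newer CPython releases (and removed in 3.11). The sketch below is deterministic within a single interpreter by using a private generator; it does not attempt the py2/py3 parity handling above, and the seed constant is invented:

import random

FIXED_RANDOM_SEED = 4  # any fixed value gives a repeatable order

def shuffled(seq, seed=FIXED_RANDOM_SEED):
    rng = random.Random(seed)  # private generator, global random state untouched
    seq = list(seq)
    rng.shuffle(seq)
    return seq

print(shuffled(range(5)) == shuffled(range(5)))  # True: same seed, same order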
def bind(self, fn: Callable[[Any], 'Cont']) -> 'Cont':
r"""Chain continuation passing functions.
Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c
"""
return Cont(lambda c: self.run(lambda a: fn(a).run(c))) | r"""Chain continuation passing functions.
Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c | Below is the the instruction that describes the task:
### Input:
r"""Chain continuation passing functions.
Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c
### Response:
def bind(self, fn: Callable[[Any], 'Cont']) -> 'Cont':
r"""Chain continuation passing functions.
Haskell: m >>= k = Cont $ \c -> runCont m $ \a -> runCont (k a) c
"""
return Cont(lambda c: self.run(lambda a: fn(a).run(c))) |
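The rest of the Cont class is not shown here. A minimal self-contained version with an assumed unit constructor, just to illustrate how bind chains continuations:

class Cont(object):
    def __init__(self, run):
        self.run = run
    @staticmethod
    def unit(value):           # assumed constructor, not shown in the record above
        return Cont(lambda c: c(value))
    def bind(self, fn):
        return Cont(lambda c: self.run(lambda a: fn(a).run(c)))

result = (Cont.unit(3)
          .bind(lambda x: Cont.unit(x + 1))
          .bind(lambda x: Cont.unit(x * 2))
          .run(lambda x: x))   # the final continuation just returns the value
print(result)  # 8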
def brpoplpush(self, source, destination, timeout=0):
"""Emulate brpoplpush"""
transfer_item = self.brpop(source, timeout)
if transfer_item is None:
return None
key, val = transfer_item
self.lpush(destination, val)
return val | Emulate brpoplpush | Below is the the instruction that describes the task:
### Input:
Emulate brpoplpush
### Response:
def brpoplpush(self, source, destination, timeout=0):
"""Emulate brpoplpush"""
transfer_item = self.brpop(source, timeout)
if transfer_item is None:
return None
key, val = transfer_item
self.lpush(destination, val)
return val |
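The emulation above delegates to brpop and lpush on the mock. The core right-pop/left-push transfer can be shown with plain deques (queue names invented; the blocking timeout is not modelled):

from collections import deque

queues = {"src": deque(["a", "b", "c"]), "dst": deque()}

def rpoplpush(source, destination):
    # Pop from the right of source, push onto the left of destination
    if not queues[source]:
        return None
    val = queues[source].pop()
    queues[destination].appendleft(val)
    return val

print(rpoplpush("src", "dst"))                   # 'c'
print(list(queues["src"]), list(queues["dst"]))  # ['a', 'b'] ['c']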
def get_agile_board(self, board_id):
"""
Get agile board info by id
:param board_id:
:return:
"""
url = 'rest/agile/1.0/board/{}'.format(str(board_id))
return self.get(url) | Get agile board info by id
:param board_id:
:return: | Below is the the instruction that describes the task:
### Input:
Get agile board info by id
:param board_id:
:return:
### Response:
def get_agile_board(self, board_id):
"""
Get agile board info by id
:param board_id:
:return:
"""
url = 'rest/agile/1.0/board/{}'.format(str(board_id))
return self.get(url) |