code (string, lengths 75 to 104k) | docstring (string, lengths 1 to 46.9k) | text (string, lengths 164 to 112k)
---|---|---|
def get_slide_number(self):
"""
Return the slide-number for this trigger
"""
a, slide_number, b = self.get_id_parts()
if slide_number > 5000:
slide_number = 5000 - slide_number
    return slide_number | Return the slide-number for this trigger | Below is the instruction that describes the task:
### Input:
Return the slide-number for this trigger
### Response:
def get_slide_number(self):
"""
Return the slide-number for this trigger
"""
a, slide_number, b = self.get_id_parts()
if slide_number > 5000:
slide_number = 5000 - slide_number
return slide_number |
def _set_persistent_module(mod):
'''
Add a module to loader.conf to make it persistent.
'''
if not mod or mod in mod_list(True) or mod not in \
available():
return set()
__salt__['file.append'](_LOADER_CONF, _LOAD_MODULE.format(mod))
    return set([mod]) | Add a module to loader.conf to make it persistent. | Below is the instruction that describes the task:
### Input:
Add a module to loader.conf to make it persistent.
### Response:
def _set_persistent_module(mod):
'''
Add a module to loader.conf to make it persistent.
'''
if not mod or mod in mod_list(True) or mod not in \
available():
return set()
__salt__['file.append'](_LOADER_CONF, _LOAD_MODULE.format(mod))
return set([mod]) |
def log_to_stream(stream=sys.stderr, level=logging.NOTSET,
fmt=logging.BASIC_FORMAT):
""" Add :class:`logging.StreamHandler` to logger which logs to a stream.
    :param stream: Stream to log to, default STDERR.
:param level: Log level, default NOTSET.
:param fmt: String with log format, default is BASIC_FORMAT.
"""
fmt = Formatter(fmt)
    handler = StreamHandler(stream)
handler.setFormatter(fmt)
handler.setLevel(level)
log.addHandler(handler) | Add :class:`logging.StreamHandler` to logger which logs to a stream.
:param stream: Stream to log to, default STDERR.
:param level: Log level, default NOTSET.
:param fmt: String with log format, default is BASIC_FORMAT. | Below is the instruction that describes the task:
### Input:
Add :class:`logging.StreamHandler` to logger which logs to a stream.
:param stream: Stream to log to, default STDERR.
:param level: Log level, default NOTSET.
:param fmt: String with log format, default is BASIC_FORMAT.
### Response:
def log_to_stream(stream=sys.stderr, level=logging.NOTSET,
fmt=logging.BASIC_FORMAT):
""" Add :class:`logging.StreamHandler` to logger which logs to a stream.
    :param stream: Stream to log to, default STDERR.
:param level: Log level, default NOTSET.
:param fmt: String with log format, default is BASIC_FORMAT.
"""
fmt = Formatter(fmt)
    handler = StreamHandler(stream)
handler.setFormatter(fmt)
handler.setLevel(level)
log.addHandler(handler) |
def remove_checksum(path):
"""
Remove the checksum of an image from cache if exists
"""
path = '{}.md5sum'.format(path)
if os.path.exists(path):
        os.remove(path) | Remove the checksum of an image from cache if exists | Below is the instruction that describes the task:
### Input:
Remove the checksum of an image from cache if exists
### Response:
def remove_checksum(path):
"""
Remove the checksum of an image from cache if exists
"""
path = '{}.md5sum'.format(path)
if os.path.exists(path):
os.remove(path) |
def addSourceGroup(self, sourceGroupUri, weight):
"""
add a list of relevant sources by specifying a whole source group to the topic page
@param sourceGroupUri: uri of the source group to add
@param weight: importance of the provided list of sources (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["sourceGroups"].append({"uri": sourceGroupUri, "wgt": weight}) | add a list of relevant sources by specifying a whole source group to the topic page
@param sourceGroupUri: uri of the source group to add
@param weight: importance of the provided list of sources (typically in range 1 - 50) | Below is the instruction that describes the task:
### Input:
add a list of relevant sources by specifying a whole source group to the topic page
@param sourceGroupUri: uri of the source group to add
@param weight: importance of the provided list of sources (typically in range 1 - 50)
### Response:
def addSourceGroup(self, sourceGroupUri, weight):
"""
add a list of relevant sources by specifying a whole source group to the topic page
@param sourceGroupUri: uri of the source group to add
@param weight: importance of the provided list of sources (typically in range 1 - 50)
"""
assert isinstance(weight, (float, int)), "weight value has to be a positive or negative integer"
self.topicPage["sourceGroups"].append({"uri": sourceGroupUri, "wgt": weight}) |
def build_command(self, parameter_values, command=None):
"""
Build the command for this step using the given parameter values.
Even if the original configuration only declared a single `command`,
this function will return a list of shell commands. It is the caller's
responsibility to concatenate them, likely using the semicolon or
double ampersands.
It is also possible to override the `command`.
:param parameter_values: Parameter values to augment any parameter defaults.
:type parameter_values: dict[str, object]
:param command: Overriding command; leave falsy to not override.
:type command: str|list[str]|None
:return: list of commands
:rtype: list[str]
"""
command = (command or self.command)
# merge defaults with passed values
# ignore flag default values as they are special
# undefined flag will remain undefined regardless of default value
values = dict(self.get_parameter_defaults(include_flags=False), **parameter_values)
parameter_map = ParameterMap(parameters=self.parameters, values=values)
return build_command(command, parameter_map) | Build the command for this step using the given parameter values.
Even if the original configuration only declared a single `command`,
this function will return a list of shell commands. It is the caller's
responsibility to concatenate them, likely using the semicolon or
double ampersands.
It is also possible to override the `command`.
:param parameter_values: Parameter values to augment any parameter defaults.
:type parameter_values: dict[str, object]
:param command: Overriding command; leave falsy to not override.
:type command: str|list[str]|None
:return: list of commands
:rtype: list[str] | Below is the instruction that describes the task:
### Input:
Build the command for this step using the given parameter values.
Even if the original configuration only declared a single `command`,
this function will return a list of shell commands. It is the caller's
responsibility to concatenate them, likely using the semicolon or
double ampersands.
It is also possible to override the `command`.
:param parameter_values: Parameter values to augment any parameter defaults.
:type parameter_values: dict[str, object]
:param command: Overriding command; leave falsy to not override.
:type command: str|list[str]|None
:return: list of commands
:rtype: list[str]
### Response:
def build_command(self, parameter_values, command=None):
"""
Build the command for this step using the given parameter values.
Even if the original configuration only declared a single `command`,
this function will return a list of shell commands. It is the caller's
responsibility to concatenate them, likely using the semicolon or
double ampersands.
It is also possible to override the `command`.
:param parameter_values: Parameter values to augment any parameter defaults.
:type parameter_values: dict[str, object]
:param command: Overriding command; leave falsy to not override.
:type command: str|list[str]|None
:return: list of commands
:rtype: list[str]
"""
command = (command or self.command)
# merge defaults with passed values
# ignore flag default values as they are special
# undefined flag will remain undefined regardless of default value
values = dict(self.get_parameter_defaults(include_flags=False), **parameter_values)
parameter_map = ParameterMap(parameters=self.parameters, values=values)
return build_command(command, parameter_map) |
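A short usage sketch of the caller-side concatenation the docstring above mentions; `step` and the parameter names here are hypothetical, only the joining idiom is the point:

```python
# `step` is assumed to be an object exposing build_command() as defined above;
# the parameter names are invented for illustration.
commands = step.build_command({"input": "data.csv", "epochs": 5})

# build_command() always returns a list of shell commands, so the caller joins
# them itself, e.g. stopping at the first failure with '&&':
script = " && ".join(commands)
print(script)
```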
def authenticate(self, verify=True):
""" Creates an authenticated and internal oauth2 handler needed for \
queries to Twitter and verifies credentials if needed. If ``verify`` \
is true, it also checks if the user credentials are valid. \
The **default** value is *True*
:param verify: boolean variable to \
directly check. Default value is ``True``
"""
self.__oauth = OAuth1(self.__consumer_key,
client_secret=self.__consumer_secret,
resource_owner_key=self.__access_token,
resource_owner_secret=self.__access_token_secret)
if verify:
r = requests.get(self._base_url + self._verify_url,
auth=self.__oauth,
proxies={"https": self.__proxy})
self.check_http_status(r.status_code) | Creates an authenticated and internal oauth2 handler needed for \
queries to Twitter and verifies credentials if needed. If ``verify`` \
is true, it also checks if the user credentials are valid. \
The **default** value is *True*
:param verify: boolean variable to \
directly check. Default value is ``True`` | Below is the instruction that describes the task:
### Input:
Creates an authenticated and internal oauth2 handler needed for \
queries to Twitter and verifies credentials if needed. If ``verify`` \
is true, it also checks if the user credentials are valid. \
The **default** value is *True*
:param verify: boolean variable to \
directly check. Default value is ``True``
### Response:
def authenticate(self, verify=True):
""" Creates an authenticated and internal oauth2 handler needed for \
queries to Twitter and verifies credentials if needed. If ``verify`` \
is true, it also checks if the user credentials are valid. \
The **default** value is *True*
:param verify: boolean variable to \
directly check. Default value is ``True``
"""
self.__oauth = OAuth1(self.__consumer_key,
client_secret=self.__consumer_secret,
resource_owner_key=self.__access_token,
resource_owner_secret=self.__access_token_secret)
if verify:
r = requests.get(self._base_url + self._verify_url,
auth=self.__oauth,
proxies={"https": self.__proxy})
self.check_http_status(r.status_code) |
def sru(x,
num_layers=2,
activation=None,
initial_state=None,
name=None,
reuse=None):
"""SRU cell as in https://arxiv.org/abs/1709.02755.
As defined in the paper:
(1) x'_t = W x_t
(2) f_t = sigmoid(Wf x_t + bf)
(3) r_t = sigmoid(Wr x_t + br)
(4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t
(5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t
This version uses functional ops to be faster on GPUs with TF-1.9+.
Args:
x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
initial_state: Optional initial c-state, set to zeros if None.
name: Optional name, "sru" by default.
reuse: Optional reuse.
Returns:
A tensor of the same shape as x.
Raises:
ValueError: if num_layers is not positive.
"""
if num_layers < 1:
raise ValueError("Number of layers must be positive: %d" % num_layers)
if is_xla_compiled(): # On TPU the XLA does a good job with while.
return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
try:
from tensorflow.contrib.recurrent.python.ops import functional_rnn # pylint: disable=g-import-not-at-top
except ImportError:
tf.logging.info("functional_rnn not found, using sru_with_scan instead")
return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
# We assume x is [batch, ..., channels] and treat all ... as time.
x_shape = shape_list(x)
x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]])
cell = CumsumprodCell(initial_state)
# Calculate SRU on each layer.
for i in range(num_layers):
# The parallel part of the SRU.
x_orig = x
x, f, r = tf.split(
layers().Dense(3 * x_shape[-1], name="kernel_%d" % i)(x), 3, axis=-1)
f, r = tf.sigmoid(f), tf.sigmoid(r)
x_times_one_minus_f = x * (1.0 - f) # Compute in parallel for speed.
# Calculate states.
concat = tf.concat([x_times_one_minus_f, f], axis=-1)
c_states, _ = functional_rnn.functional_rnn(
cell, concat, time_major=False)
# Final output.
if activation is not None:
c_states = activation(c_states)
h = c_states * r + (1.0 - r) * x_orig
x = h # Next layer.
return tf.reshape(x, x_shape) | SRU cell as in https://arxiv.org/abs/1709.02755.
As defined in the paper:
(1) x'_t = W x_t
(2) f_t = sigmoid(Wf x_t + bf)
(3) r_t = sigmoid(Wr x_t + br)
(4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t
(5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t
This version uses functional ops to be faster on GPUs with TF-1.9+.
Args:
x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
initial_state: Optional initial c-state, set to zeros if None.
name: Optional name, "sru" by default.
reuse: Optional reuse.
Returns:
A tensor of the same shape as x.
Raises:
ValueError: if num_layers is not positive. | Below is the instruction that describes the task:
### Input:
SRU cell as in https://arxiv.org/abs/1709.02755.
As defined in the paper:
(1) x'_t = W x_t
(2) f_t = sigmoid(Wf x_t + bf)
(3) r_t = sigmoid(Wr x_t + br)
(4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t
(5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t
This version uses functional ops to be faster on GPUs with TF-1.9+.
Args:
x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
initial_state: Optional initial c-state, set to zeros if None.
name: Optional name, "sru" by default.
reuse: Optional reuse.
Returns:
A tensor of the same shape as x.
Raises:
ValueError: if num_layers is not positive.
### Response:
def sru(x,
num_layers=2,
activation=None,
initial_state=None,
name=None,
reuse=None):
"""SRU cell as in https://arxiv.org/abs/1709.02755.
As defined in the paper:
(1) x'_t = W x_t
(2) f_t = sigmoid(Wf x_t + bf)
(3) r_t = sigmoid(Wr x_t + br)
(4) c_t = f_t * c_{t-1} + (1 - f_t) * x'_t
(5) h_t = r_t * activation(c_t) + (1 - r_t) * x_t
This version uses functional ops to be faster on GPUs with TF-1.9+.
Args:
x: A tensor of shape [batch, ..., channels] ; ... is treated as time.
num_layers: How many SRU layers; default is 2 as results for 1 disappoint.
activation: Optional activation function, try tf.nn.tanh or tf.nn.relu.
initial_state: Optional initial c-state, set to zeros if None.
name: Optional name, "sru" by default.
reuse: Optional reuse.
Returns:
A tensor of the same shape as x.
Raises:
ValueError: if num_layers is not positive.
"""
if num_layers < 1:
raise ValueError("Number of layers must be positive: %d" % num_layers)
if is_xla_compiled(): # On TPU the XLA does a good job with while.
return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
try:
from tensorflow.contrib.recurrent.python.ops import functional_rnn # pylint: disable=g-import-not-at-top
except ImportError:
tf.logging.info("functional_rnn not found, using sru_with_scan instead")
return sru_with_scan(x, num_layers, activation, initial_state, name, reuse)
with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse):
# We assume x is [batch, ..., channels] and treat all ... as time.
x_shape = shape_list(x)
x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]])
initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]])
cell = CumsumprodCell(initial_state)
# Calculate SRU on each layer.
for i in range(num_layers):
# The parallel part of the SRU.
x_orig = x
x, f, r = tf.split(
layers().Dense(3 * x_shape[-1], name="kernel_%d" % i)(x), 3, axis=-1)
f, r = tf.sigmoid(f), tf.sigmoid(r)
x_times_one_minus_f = x * (1.0 - f) # Compute in parallel for speed.
# Calculate states.
concat = tf.concat([x_times_one_minus_f, f], axis=-1)
c_states, _ = functional_rnn.functional_rnn(
cell, concat, time_major=False)
# Final output.
if activation is not None:
c_states = activation(c_states)
h = c_states * r + (1.0 - r) * x_orig
x = h # Next layer.
return tf.reshape(x, x_shape) |
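For intuition, a small NumPy sketch of the per-layer recurrence in equations (1)-(5) above, using the identity activation and arbitrary weight shapes; it is a reference for the math only, not the TensorFlow implementation:

```python
import numpy as np

def sru_layer_reference(x, W, Wf, bf, Wr, br, c0=None):
    """Reference SRU layer; x is [time, channels], returns h of the same shape."""
    T, d = x.shape
    c = np.zeros(d) if c0 is None else c0
    h = np.empty_like(x)
    for t in range(T):
        x_prime = x[t] @ W                              # (1) x'_t = W x_t
        f = 1.0 / (1.0 + np.exp(-(x[t] @ Wf + bf)))     # (2) forget gate
        r = 1.0 / (1.0 + np.exp(-(x[t] @ Wr + br)))     # (3) reset gate
        c = f * c + (1.0 - f) * x_prime                 # (4) cell state
        h[t] = r * c + (1.0 - r) * x[t]                 # (5) output, identity activation
    return h

rng = np.random.default_rng(0)
x = rng.standard_normal((10, 4))
W, Wf, Wr = (0.1 * rng.standard_normal((4, 4)) for _ in range(3))
bf = br = np.zeros(4)
assert sru_layer_reference(x, W, Wf, bf, Wr, br).shape == x.shape
```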
def add_to_deleted_models(sender, instance=None, *args, **kwargs):
"""
Whenever a model is deleted, we record its ID in a separate model for tracking purposes. During serialization, we will mark
the model as deleted in the store.
"""
if issubclass(sender, SyncableModel):
instance._update_deleted_models() | Whenever a model is deleted, we record its ID in a separate model for tracking purposes. During serialization, we will mark
the model as deleted in the store. | Below is the instruction that describes the task:
### Input:
Whenever a model is deleted, we record its ID in a separate model for tracking purposes. During serialization, we will mark
the model as deleted in the store.
### Response:
def add_to_deleted_models(sender, instance=None, *args, **kwargs):
"""
Whenever a model is deleted, we record its ID in a separate model for tracking purposes. During serialization, we will mark
the model as deleted in the store.
"""
if issubclass(sender, SyncableModel):
instance._update_deleted_models() |
def delete_action_cache(self, action_key):
"""Delete action needs and excludes from cache.
.. note:: It returns the action if a cache system is defined.
:param action_key: The unique action name.
"""
if self.cache:
self.cache.delete(
self.app.config['ACCESS_ACTION_CACHE_PREFIX'] +
action_key
) | Delete action needs and excludes from cache.
.. note:: It returns the action if a cache system is defined.
:param action_key: The unique action name. | Below is the instruction that describes the task:
### Input:
Delete action needs and excludes from cache.
.. note:: It returns the action if a cache system is defined.
:param action_key: The unique action name.
### Response:
def delete_action_cache(self, action_key):
"""Delete action needs and excludes from cache.
.. note:: It returns the action if a cache system is defined.
:param action_key: The unique action name.
"""
if self.cache:
self.cache.delete(
self.app.config['ACCESS_ACTION_CACHE_PREFIX'] +
action_key
) |
def set_acls(path, acls, version=-1, profile=None, hosts=None, scheme=None,
username=None, password=None, default_acl=None):
'''
Set acls on a znode
path
path to znode
acls
list of acl dictionaries to set on the znode
version
only set acls if version matches (Default: -1 (always matches))
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
        Lists of Zookeeper Hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.set_acls /test/name acls='[{"username": "gtmanfred", "password": "test", "all": True}]' profile=prod
'''
conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
username=username, password=password, default_acl=default_acl)
if acls is None:
acls = []
acls = [make_digest_acl(**acl) for acl in acls]
conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
username=username, password=password, default_acl=default_acl)
return conn.set_acls(path, acls, version) | Set acls on a znode
path
path to znode
acls
list of acl dictionaries to set on the znode
version
only set acls if version matches (Default: -1 (always matches))
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.set_acls /test/name acls='[{"username": "gtmanfred", "password": "test", "all": True}]' profile=prod | Below is the instruction that describes the task:
### Input:
Set acls on a znode
path
path to znode
acls
list of acl dictionaries to set on the znode
version
only set acls if version matches (Default: -1 (always matches))
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.set_acls /test/name acls='[{"username": "gtmanfred", "password": "test", "all": True}]' profile=prod
### Response:
def set_acls(path, acls, version=-1, profile=None, hosts=None, scheme=None,
username=None, password=None, default_acl=None):
'''
Set acls on a znode
path
path to znode
acls
list of acl dictionaries to set on the znode
version
only set acls if version matches (Default: -1 (always matches))
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
        Lists of Zookeeper Hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.set_acls /test/name acls='[{"username": "gtmanfred", "password": "test", "all": True}]' profile=prod
'''
conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
username=username, password=password, default_acl=default_acl)
if acls is None:
acls = []
acls = [make_digest_acl(**acl) for acl in acls]
conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
username=username, password=password, default_acl=default_acl)
return conn.set_acls(path, acls, version) |
def remove_existing_pidfile(pidfile_path):
""" Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist.
"""
try:
os.remove(pidfile_path)
except OSError as exc:
if exc.errno == errno.ENOENT:
pass
else:
raise | Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist. | Below is the instruction that describes the task:
### Input:
Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist.
### Response:
def remove_existing_pidfile(pidfile_path):
""" Remove the named PID file if it exists.
Removing a PID file that doesn't already exist puts us in the
desired state, so we ignore the condition if the file does not
exist.
"""
try:
os.remove(pidfile_path)
except OSError as exc:
if exc.errno == errno.ENOENT:
pass
else:
raise |
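For comparison, on Python 3 the same ignore-if-missing behaviour can be written with `contextlib.suppress`; this is an equivalent sketch, not part of the original (which also supports Python 2):

```python
import contextlib
import os

def remove_existing_pidfile_py3(pidfile_path):
    """Remove the named PID file, treating a missing file as already done."""
    with contextlib.suppress(FileNotFoundError):
        os.remove(pidfile_path)
```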
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(ProcessCollector, self).get_default_config()
config.update({
'use_sudo': False,
'sudo_cmd': self.find_binary('/usr/bin/sudo'),
})
    return config | Returns the default collector settings | Below is the instruction that describes the task:
### Input:
Returns the default collector settings
### Response:
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(ProcessCollector, self).get_default_config()
config.update({
'use_sudo': False,
'sudo_cmd': self.find_binary('/usr/bin/sudo'),
})
return config |
def backprop(self, input_data, df_output, cache=None):
""" Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
df_output : ``GPUArray``
Gradients with respect to the activations of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : tuple of ``GPUArray``
Gradients with respect to the weights and biases in the
form ``(df_weights, df_biases)``.
df_input : ``GPUArray``
Gradients with respect to the input.
"""
# Get cache if it wasn't provided
if cache is None:
cache = self.feed_forward(input_data,
prediction=False)
if len(cache) == 2:
activations, dropout_mask = cache
else:
activations = cache[0]
# Multiply the binary mask with the incoming gradients
if self.dropout > 0 and dropout_mask is not None:
apply_dropout_mask(df_output, dropout_mask)
# Get gradient wrt activation function
df_activations = self.df(activations)
delta = mult_matrix(df_activations, df_output)
# Gradient wrt weights
df_W = linalg.dot(input_data, delta, transa='T')
# Gradient wrt bias
df_b = matrix_sum_out_axis(delta, 0)
# Gradient wrt inputs
df_input = linalg.dot(delta, self.W, transb='T')
# L1 weight decay
if self.l1_penalty_weight:
df_W += self.l1_penalty_weight * sign(self.W)
# L2 weight decay
if self.l2_penalty_weight:
df_W += self.l2_penalty_weight * self.W
return (df_W, df_b), df_input | Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
df_output : ``GPUArray``
Gradients with respect to the activations of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : tuple of ``GPUArray``
Gradients with respect to the weights and biases in the
form ``(df_weights, df_biases)``.
df_input : ``GPUArray``
Gradients with respect to the input. | Below is the instruction that describes the task:
### Input:
Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
df_output : ``GPUArray``
Gradients with respect to the activations of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : tuple of ``GPUArray``
Gradients with respect to the weights and biases in the
form ``(df_weights, df_biases)``.
df_input : ``GPUArray``
Gradients with respect to the input.
### Response:
def backprop(self, input_data, df_output, cache=None):
""" Backpropagate through the hidden layer
**Parameters:**
input_data : ``GPUArray``
Input data to compute activations for.
df_output : ``GPUArray``
Gradients with respect to the activations of this layer
(received from the layer above).
cache : list of ``GPUArray``
Cache obtained from forward pass. If the cache is
provided, then the activations are not recalculated.
**Returns:**
gradients : tuple of ``GPUArray``
Gradients with respect to the weights and biases in the
form ``(df_weights, df_biases)``.
df_input : ``GPUArray``
Gradients with respect to the input.
"""
# Get cache if it wasn't provided
if cache is None:
cache = self.feed_forward(input_data,
prediction=False)
if len(cache) == 2:
activations, dropout_mask = cache
else:
activations = cache[0]
# Multiply the binary mask with the incoming gradients
if self.dropout > 0 and dropout_mask is not None:
apply_dropout_mask(df_output, dropout_mask)
# Get gradient wrt activation function
df_activations = self.df(activations)
delta = mult_matrix(df_activations, df_output)
# Gradient wrt weights
df_W = linalg.dot(input_data, delta, transa='T')
# Gradient wrt bias
df_b = matrix_sum_out_axis(delta, 0)
# Gradient wrt inputs
df_input = linalg.dot(delta, self.W, transb='T')
# L1 weight decay
if self.l1_penalty_weight:
df_W += self.l1_penalty_weight * sign(self.W)
# L2 weight decay
if self.l2_penalty_weight:
df_W += self.l2_penalty_weight * self.W
return (df_W, df_b), df_input |
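The three gradient products in backprop follow the standard dense-layer backward pass; a compact NumPy restatement of just that arithmetic (dropout and the L1/L2 terms omitted, names and shapes illustrative rather than the GPUArray API used above):

```python
import numpy as np

def dense_backward_reference(input_data, W, activations, df_output, df):
    """input_data: (batch, n_in); activations, df_output: (batch, n_out); W: (n_in, n_out)."""
    delta = df(activations) * df_output   # elementwise, like mult_matrix above
    df_W = input_data.T @ delta           # gradient w.r.t. the weights
    df_b = delta.sum(axis=0)              # gradient w.r.t. the biases
    df_input = delta @ W.T                # gradient w.r.t. the layer input
    return (df_W, df_b), df_input
```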
def wider_next_conv(layer, start_dim, total_dim, n_add, weighted=True):
'''wider next conv layer.
'''
n_dim = get_n_dim(layer)
if not weighted:
return get_conv_class(n_dim)(layer.input_channel + n_add,
layer.filters,
kernel_size=layer.kernel_size,
stride=layer.stride)
n_filters = layer.filters
teacher_w, teacher_b = layer.get_weights()
new_weight_shape = list(teacher_w.shape)
new_weight_shape[1] = n_add
new_weight = np.zeros(tuple(new_weight_shape))
student_w = np.concatenate((teacher_w[:, :start_dim, ...].copy(),
add_noise(new_weight, teacher_w),
teacher_w[:, start_dim:total_dim, ...].copy()), axis=1)
new_layer = get_conv_class(n_dim)(layer.input_channel + n_add,
n_filters,
kernel_size=layer.kernel_size,
stride=layer.stride)
new_layer.set_weights((student_w, teacher_b))
    return new_layer | wider next conv layer. | Below is the instruction that describes the task:
### Input:
wider next conv layer.
### Response:
def wider_next_conv(layer, start_dim, total_dim, n_add, weighted=True):
'''wider next conv layer.
'''
n_dim = get_n_dim(layer)
if not weighted:
return get_conv_class(n_dim)(layer.input_channel + n_add,
layer.filters,
kernel_size=layer.kernel_size,
stride=layer.stride)
n_filters = layer.filters
teacher_w, teacher_b = layer.get_weights()
new_weight_shape = list(teacher_w.shape)
new_weight_shape[1] = n_add
new_weight = np.zeros(tuple(new_weight_shape))
student_w = np.concatenate((teacher_w[:, :start_dim, ...].copy(),
add_noise(new_weight, teacher_w),
teacher_w[:, start_dim:total_dim, ...].copy()), axis=1)
new_layer = get_conv_class(n_dim)(layer.input_channel + n_add,
n_filters,
kernel_size=layer.kernel_size,
stride=layer.stride)
new_layer.set_weights((student_w, teacher_b))
return new_layer |
def set_name(self,name=None):
"""
Set the name of the object. If no name is given, the
name is extracted via the extract_name method.
"""
if name:
self.name = name[:254]
else:
self.name = self.extract_name()[:254]
self.save()
return self.name | Set the name of the object. If no name is given, the
name is extracted via the extract_name method. | Below is the instruction that describes the task:
### Input:
Set the name of the object. If no name is given, the
name is extracted via the extract_name method.
### Response:
def set_name(self,name=None):
"""
Set the name of the object. If no name is given, the
name is extracted via the extract_name method.
"""
if name:
self.name = name[:254]
else:
self.name = self.extract_name()[:254]
self.save()
return self.name |
def _pad(expr, width, side='left', fillchar=' '):
"""
Pad strings in the sequence or scalar with an additional character to specified side.
:param expr:
:param width: Minimum width of resulting string; additional characters will be filled with spaces
:param side: {‘left’, ‘right’, ‘both’}, default ‘left’
:param fillchar: Additional character for filling, default is whitespace
:return: sequence or scalar
"""
if not isinstance(fillchar, six.string_types):
msg = 'fillchar must be a character, not {0}'
raise TypeError(msg.format(type(fillchar).__name__))
if len(fillchar) != 1:
raise TypeError('fillchar must be a character, not str')
if side not in ('left', 'right', 'both'):
raise ValueError('Invalid side')
return _string_op(expr, Pad, _width=width, _side=side, _fillchar=fillchar) | Pad strings in the sequence or scalar with an additional character to specified side.
:param expr:
:param width: Minimum width of resulting string; additional characters will be filled with spaces
:param side: {‘left’, ‘right’, ‘both’}, default ‘left’
:param fillchar: Additional character for filling, default is whitespace
:return: sequence or scalar | Below is the instruction that describes the task:
### Input:
Pad strings in the sequence or scalar with an additional character to specified side.
:param expr:
:param width: Minimum width of resulting string; additional characters will be filled with spaces
:param side: {‘left’, ‘right’, ‘both’}, default ‘left’
:param fillchar: Additional character for filling, default is whitespace
:return: sequence or scalar
### Response:
def _pad(expr, width, side='left', fillchar=' '):
"""
Pad strings in the sequence or scalar with an additional character to specified side.
:param expr:
:param width: Minimum width of resulting string; additional characters will be filled with spaces
:param side: {‘left’, ‘right’, ‘both’}, default ‘left’
:param fillchar: Additional character for filling, default is whitespace
:return: sequence or scalar
"""
if not isinstance(fillchar, six.string_types):
msg = 'fillchar must be a character, not {0}'
raise TypeError(msg.format(type(fillchar).__name__))
if len(fillchar) != 1:
raise TypeError('fillchar must be a character, not str')
if side not in ('left', 'right', 'both'):
raise ValueError('Invalid side')
return _string_op(expr, Pad, _width=width, _side=side, _fillchar=fillchar) |
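The side values map onto the familiar string-justification methods; a plain-Python illustration of the padding each side is expected to produce (inputs invented, not the ODPS expression API above):

```python
def pad_reference(s, width, side='left', fillchar=' '):
    if side == 'left':
        return s.rjust(width, fillchar)    # fill on the left
    if side == 'right':
        return s.ljust(width, fillchar)    # fill on the right
    return s.center(width, fillchar)       # side == 'both'

assert pad_reference('7', 3, side='left', fillchar='0') == '007'
assert pad_reference('7', 3, side='right', fillchar='0') == '700'
assert pad_reference('ab', 4, side='both', fillchar='*') == '*ab*'
```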
def set(self, key: str, value: Any) -> None:
"""
Stores a setting to the database of its object.
The write to the database is performed immediately and the cache in the cache backend is flushed.
The cache within this object will be updated correctly.
"""
wc = self._write_cache()
if key in wc:
s = wc[key]
else:
s = self._type(object=self._obj, key=key)
s.value = self._serialize(value)
s.save()
self._cache()[key] = s.value
wc[key] = s
self._flush_external_cache() | Stores a setting to the database of its object.
The write to the database is performed immediately and the cache in the cache backend is flushed.
The cache within this object will be updated correctly. | Below is the instruction that describes the task:
### Input:
Stores a setting to the database of its object.
The write to the database is performed immediately and the cache in the cache backend is flushed.
The cache within this object will be updated correctly.
### Response:
def set(self, key: str, value: Any) -> None:
"""
Stores a setting to the database of its object.
The write to the database is performed immediately and the cache in the cache backend is flushed.
The cache within this object will be updated correctly.
"""
wc = self._write_cache()
if key in wc:
s = wc[key]
else:
s = self._type(object=self._obj, key=key)
s.value = self._serialize(value)
s.save()
self._cache()[key] = s.value
wc[key] = s
self._flush_external_cache() |
def future_request(self, msg, timeout=None, use_mid=None):
"""Send a request messsage, with future replies.
Parameters
----------
msg : Message object
The request Message to send.
timeout : float in seconds
            How long to wait for a reply. The default is
the timeout set when creating the AsyncClient.
use_mid : boolean, optional
Whether to use message IDs. Default is to use message IDs
if the server supports them.
Returns
-------
A tornado.concurrent.Future that resolves with:
reply : Message object
The reply message received.
informs : list of Message objects
A list of the inform messages received.
"""
if timeout is None:
timeout = self._request_timeout
f = tornado_Future()
informs = []
def reply_cb(msg):
f.set_result((msg, informs))
def inform_cb(msg):
informs.append(msg)
try:
self.callback_request(msg, reply_cb=reply_cb, inform_cb=inform_cb,
timeout=timeout, use_mid=use_mid)
except Exception:
f.set_exc_info(sys.exc_info())
        return f | Send a request message, with future replies.
Parameters
----------
msg : Message object
The request Message to send.
timeout : float in seconds
How long to wait for a reply. The default is
the timeout set when creating the AsyncClient.
use_mid : boolean, optional
Whether to use message IDs. Default is to use message IDs
if the server supports them.
Returns
-------
A tornado.concurrent.Future that resolves with:
reply : Message object
The reply message received.
informs : list of Message objects
A list of the inform messages received. | Below is the instruction that describes the task:
### Input:
Send a request message, with future replies.
Parameters
----------
msg : Message object
The request Message to send.
timeout : float in seconds
How long to wait for a reply. The default is
the timeout set when creating the AsyncClient.
use_mid : boolean, optional
Whether to use message IDs. Default is to use message IDs
if the server supports them.
Returns
-------
A tornado.concurrent.Future that resolves with:
reply : Message object
The reply message received.
informs : list of Message objects
A list of the inform messages received.
### Response:
def future_request(self, msg, timeout=None, use_mid=None):
"""Send a request messsage, with future replies.
Parameters
----------
msg : Message object
The request Message to send.
timeout : float in seconds
            How long to wait for a reply. The default is
the timeout set when creating the AsyncClient.
use_mid : boolean, optional
Whether to use message IDs. Default is to use message IDs
if the server supports them.
Returns
-------
A tornado.concurrent.Future that resolves with:
reply : Message object
The reply message received.
informs : list of Message objects
A list of the inform messages received.
"""
if timeout is None:
timeout = self._request_timeout
f = tornado_Future()
informs = []
def reply_cb(msg):
f.set_result((msg, informs))
def inform_cb(msg):
informs.append(msg)
try:
self.callback_request(msg, reply_cb=reply_cb, inform_cb=inform_cb,
timeout=timeout, use_mid=use_mid)
except Exception:
f.set_exc_info(sys.exc_info())
return f |
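The body is a callback-to-future bridge: the reply callback resolves the future and informs accumulate in a closure. A generic asyncio sketch of the same shape, independent of katcp (names are illustrative):

```python
import asyncio

def wrap_callback_request(loop, callback_request, msg):
    """Turn a callback-style request API into an awaitable future."""
    future = loop.create_future()
    informs = []

    def reply_cb(reply):
        future.set_result((reply, informs))   # resolve with the reply and informs

    def inform_cb(inform):
        informs.append(inform)                # collect intermediate messages

    try:
        callback_request(msg, reply_cb=reply_cb, inform_cb=inform_cb)
    except Exception as exc:
        future.set_exception(exc)
    return future
```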
def signature_validate(signature, error = None) :
"is signature a valid sequence of zero or more complete types."
error, my_error = _get_error(error)
result = dbus.dbus_signature_validate(signature.encode(), error._dbobj) != 0
my_error.raise_if_set()
return \
    result | is signature a valid sequence of zero or more complete types. | Below is the instruction that describes the task:
### Input:
is signature a valid sequence of zero or more complete types.
### Response:
def signature_validate(signature, error = None) :
"is signature a valid sequence of zero or more complete types."
error, my_error = _get_error(error)
result = dbus.dbus_signature_validate(signature.encode(), error._dbobj) != 0
my_error.raise_if_set()
return \
result |
def average(var, key, N):
'''average over N points'''
global average_data
if not key in average_data:
average_data[key] = [var]*N
return var
average_data[key].pop(0)
average_data[key].append(var)
    return sum(average_data[key])/N | average over N points | Below is the instruction that describes the task:
### Input:
average over N points
### Response:
def average(var, key, N):
'''average over N points'''
global average_data
if not key in average_data:
average_data[key] = [var]*N
return var
average_data[key].pop(0)
average_data[key].append(var)
return sum(average_data[key])/N |
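A short usage sketch showing the warm-up behaviour: the first call seeds the window with N copies of the value, so the average only moves once later samples displace them. It assumes average() and its module-level average_data dict live in the same namespace as this snippet:

```python
average_data = {}

print(average(10.0, 'altitude', 5))  # 10.0  (window seeded with five 10s)
print(average(20.0, 'altitude', 5))  # 12.0  ((10*4 + 20) / 5)
print(average(20.0, 'altitude', 5))  # 14.0  ((10*3 + 20*2) / 5)
```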
def to_binary_string(obj, encoding=None):
"""Convert `obj` to binary string (bytes in Python 3, str in Python 2)"""
if PY2:
# Python 2
if encoding is None:
return str(obj)
else:
return obj.encode(encoding)
else:
# Python 3
        return bytes(obj, 'utf-8' if encoding is None else encoding) | Convert `obj` to binary string (bytes in Python 3, str in Python 2) | Below is the instruction that describes the task:
### Input:
Convert `obj` to binary string (bytes in Python 3, str in Python 2)
### Response:
def to_binary_string(obj, encoding=None):
"""Convert `obj` to binary string (bytes in Python 3, str in Python 2)"""
if PY2:
# Python 2
if encoding is None:
return str(obj)
else:
return obj.encode(encoding)
else:
# Python 3
return bytes(obj, 'utf-8' if encoding is None else encoding) |
def _get_run_hash(self):
"""Gets the hash of the nextflow file"""
# Get name and path of the pipeline from the log file
pipeline_path = get_nextflow_filepath(self.log_file)
# Get hash from the entire pipeline file
pipeline_hash = hashlib.md5()
with open(pipeline_path, "rb") as fh:
for chunk in iter(lambda: fh.read(4096), b""):
pipeline_hash.update(chunk)
# Get hash from the current working dir and hostname
workdir = self.workdir.encode("utf8")
hostname = socket.gethostname().encode("utf8")
hardware_addr = str(uuid.getnode()).encode("utf8")
dir_hash = hashlib.md5(workdir + hostname + hardware_addr)
    return pipeline_hash.hexdigest() + dir_hash.hexdigest() | Gets the hash of the nextflow file | Below is the instruction that describes the task:
### Input:
Gets the hash of the nextflow file
### Response:
def _get_run_hash(self):
"""Gets the hash of the nextflow file"""
# Get name and path of the pipeline from the log file
pipeline_path = get_nextflow_filepath(self.log_file)
# Get hash from the entire pipeline file
pipeline_hash = hashlib.md5()
with open(pipeline_path, "rb") as fh:
for chunk in iter(lambda: fh.read(4096), b""):
pipeline_hash.update(chunk)
# Get hash from the current working dir and hostname
workdir = self.workdir.encode("utf8")
hostname = socket.gethostname().encode("utf8")
hardware_addr = str(uuid.getnode()).encode("utf8")
dir_hash = hashlib.md5(workdir + hostname + hardware_addr)
return pipeline_hash.hexdigest() + dir_hash.hexdigest() |
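The chunked hashing idiom above is reusable on its own; a standalone sketch of that part (a generic helper, not tied to the nextflow log handling):

```python
import hashlib

def md5_of_file(path, chunk_size=4096):
    """Hash a file incrementally so large files never need to fit in memory."""
    digest = hashlib.md5()
    with open(path, "rb") as fh:
        for chunk in iter(lambda: fh.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()
```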
def _brownian_eigs(n_grid, lag_time, grad_potential, xmin, xmax, reflect_bc):
"""Analytic eigenvalues/eigenvectors for 1D Brownian dynamics
"""
transmat = _brownian_transmat(n_grid, lag_time, grad_potential, xmin, xmax, reflect_bc)
u, lv, rv = _solve_msm_eigensystem(transmat, k=len(transmat) - 1)
    return u, rv | Analytic eigenvalues/eigenvectors for 1D Brownian dynamics | Below is the instruction that describes the task:
### Input:
Analytic eigenvalues/eigenvectors for 1D Brownian dynamics
### Response:
def _brownian_eigs(n_grid, lag_time, grad_potential, xmin, xmax, reflect_bc):
"""Analytic eigenvalues/eigenvectors for 1D Brownian dynamics
"""
transmat = _brownian_transmat(n_grid, lag_time, grad_potential, xmin, xmax, reflect_bc)
u, lv, rv = _solve_msm_eigensystem(transmat, k=len(transmat) - 1)
return u, rv |
def get_user(session, user_id):
"""Get user."""
try:
user_id = int(user_id)
except ValueError:
user_id = find_user(session, user_id)
resp = _make_request(session, USER_URL, user_id)
if not resp:
raise VooblyError('user id not found')
    return resp[0] | Get user. | Below is the instruction that describes the task:
### Input:
Get user.
### Response:
def get_user(session, user_id):
"""Get user."""
try:
user_id = int(user_id)
except ValueError:
user_id = find_user(session, user_id)
resp = _make_request(session, USER_URL, user_id)
if not resp:
raise VooblyError('user id not found')
return resp[0] |
def setup(__pkg: str) -> jinja2.Environment:
"""Configure a new Jinja environment with our filters.
Args:
__pkg: Package name to use as base for templates searches
Returns:
Configured Jinja environment
"""
dirs = [path.join(d, 'templates')
for d in xdg_basedir.get_data_dirs(__pkg)]
env = jinja2.Environment(
autoescape=jinja2.select_autoescape(['html', 'xml']),
loader=jinja2.ChoiceLoader([jinja2.FileSystemLoader(s) for s in dirs]))
env.loader.loaders.append(jinja2.PackageLoader(__pkg, 'templates'))
env.filters.update(FILTERS)
return env | Configure a new Jinja environment with our filters.
Args:
__pkg: Package name to use as base for templates searches
Returns:
Configured Jinja environment | Below is the instruction that describes the task:
### Input:
Configure a new Jinja environment with our filters.
Args:
__pkg: Package name to use as base for templates searches
Returns:
Configured Jinja environment
### Response:
def setup(__pkg: str) -> jinja2.Environment:
"""Configure a new Jinja environment with our filters.
Args:
__pkg: Package name to use as base for templates searches
Returns:
Configured Jinja environment
"""
dirs = [path.join(d, 'templates')
for d in xdg_basedir.get_data_dirs(__pkg)]
env = jinja2.Environment(
autoescape=jinja2.select_autoescape(['html', 'xml']),
loader=jinja2.ChoiceLoader([jinja2.FileSystemLoader(s) for s in dirs]))
env.loader.loaders.append(jinja2.PackageLoader(__pkg, 'templates'))
env.filters.update(FILTERS)
return env |
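A hedged usage sketch: after setup(), templates are resolved from the XDG data directories first and the package's bundled templates last. The package and template names below are invented for illustration:

```python
env = setup('mypackage')                    # hypothetical package name
template = env.get_template('report.html')  # hypothetical template file
print(template.render(title='Weekly report', rows=[1, 2, 3]))
```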
def get_path_from_to(self,from_tid, to_tid):
"""
This function returns the path (in terms of phrase types) from one term to another
@type from_tid: string
@param from_tid: one term id
@type to_tid: string
@param to_tid: another term id
@rtype: list
@return: the path, list of phrase types
"""
shortest_subsumer = self.get_least_common_subsumer(from_tid, to_tid)
#print 'From:',self.naf.get_term(from_tid).get_lemma()
#print 'To:',self.naf.get_term(to_tid).get_lemma()
termid_from = self.terminal_for_term.get(from_tid)
termid_to = self.terminal_for_term.get(to_tid)
path_from = self.paths_for_terminal[termid_from][0]
path_to = self.paths_for_terminal[termid_to][0]
if shortest_subsumer is None:
return None
complete_path = []
for node in path_from:
complete_path.append(node)
if node == shortest_subsumer: break
begin=False
for node in path_to[-1::-1]:
if begin:
complete_path.append(node)
if node==shortest_subsumer:
begin=True
labels = [self.label_for_nonter[nonter] for nonter in complete_path]
return labels | This function returns the path (in terms of phrase types) from one term to another
@type from_tid: string
@param from_tid: one term id
@type to_tid: string
@param to_tid: another term id
@rtype: list
@return: the path, list of phrase types | Below is the instruction that describes the task:
### Input:
This function returns the path (in terms of phrase types) from one term to another
@type from_tid: string
@param from_tid: one term id
@type to_tid: string
@param to_tid: another term id
@rtype: list
@return: the path, list of phrase types
### Response:
def get_path_from_to(self,from_tid, to_tid):
"""
This function returns the path (in terms of phrase types) from one term to another
@type from_tid: string
@param from_tid: one term id
@type to_tid: string
@param to_tid: another term id
@rtype: list
@return: the path, list of phrase types
"""
shortest_subsumer = self.get_least_common_subsumer(from_tid, to_tid)
#print 'From:',self.naf.get_term(from_tid).get_lemma()
#print 'To:',self.naf.get_term(to_tid).get_lemma()
termid_from = self.terminal_for_term.get(from_tid)
termid_to = self.terminal_for_term.get(to_tid)
path_from = self.paths_for_terminal[termid_from][0]
path_to = self.paths_for_terminal[termid_to][0]
if shortest_subsumer is None:
return None
complete_path = []
for node in path_from:
complete_path.append(node)
if node == shortest_subsumer: break
begin=False
for node in path_to[-1::-1]:
if begin:
complete_path.append(node)
if node==shortest_subsumer:
begin=True
labels = [self.label_for_nonter[nonter] for nonter in complete_path]
return labels |
def polygon_to_geohashes(polygon, precision, inner=True):
"""
:param polygon: shapely polygon.
:param precision: int. Geohashes' precision that form resulting polygon.
:param inner: bool, default 'True'. If false, geohashes that are completely outside from the polygon are ignored.
:return: set. Set of geohashes that form the polygon.
"""
inner_geohashes = set()
outer_geohashes = set()
envelope = polygon.envelope
centroid = polygon.centroid
testing_geohashes = queue.Queue()
testing_geohashes.put(geohash.encode(centroid.y, centroid.x, precision))
while not testing_geohashes.empty():
current_geohash = testing_geohashes.get()
if current_geohash not in inner_geohashes and current_geohash not in outer_geohashes:
current_polygon = geohash_to_polygon(current_geohash)
condition = envelope.contains(current_polygon) if inner else envelope.intersects(current_polygon)
if condition:
if inner:
if polygon.contains(current_polygon):
inner_geohashes.add(current_geohash)
else:
outer_geohashes.add(current_geohash)
else:
if polygon.intersects(current_polygon):
inner_geohashes.add(current_geohash)
else:
outer_geohashes.add(current_geohash)
for neighbor in geohash.neighbors(current_geohash):
if neighbor not in inner_geohashes and neighbor not in outer_geohashes:
testing_geohashes.put(neighbor)
return inner_geohashes | :param polygon: shapely polygon.
:param precision: int. Geohashes' precision that form resulting polygon.
:param inner: bool, default 'True'. If false, geohashes that are completely outside from the polygon are ignored.
:return: set. Set of geohashes that form the polygon. | Below is the instruction that describes the task:
### Input:
:param polygon: shapely polygon.
:param precision: int. Geohashes' precision that form resulting polygon.
:param inner: bool, default 'True'. If false, geohashes that are completely outside from the polygon are ignored.
:return: set. Set of geohashes that form the polygon.
### Response:
def polygon_to_geohashes(polygon, precision, inner=True):
"""
:param polygon: shapely polygon.
:param precision: int. Geohashes' precision that form resulting polygon.
:param inner: bool, default 'True'. If false, geohashes that are completely outside from the polygon are ignored.
:return: set. Set of geohashes that form the polygon.
"""
inner_geohashes = set()
outer_geohashes = set()
envelope = polygon.envelope
centroid = polygon.centroid
testing_geohashes = queue.Queue()
testing_geohashes.put(geohash.encode(centroid.y, centroid.x, precision))
while not testing_geohashes.empty():
current_geohash = testing_geohashes.get()
if current_geohash not in inner_geohashes and current_geohash not in outer_geohashes:
current_polygon = geohash_to_polygon(current_geohash)
condition = envelope.contains(current_polygon) if inner else envelope.intersects(current_polygon)
if condition:
if inner:
if polygon.contains(current_polygon):
inner_geohashes.add(current_geohash)
else:
outer_geohashes.add(current_geohash)
else:
if polygon.intersects(current_polygon):
inner_geohashes.add(current_geohash)
else:
outer_geohashes.add(current_geohash)
for neighbor in geohash.neighbors(current_geohash):
if neighbor not in inner_geohashes and neighbor not in outer_geohashes:
testing_geohashes.put(neighbor)
return inner_geohashes |
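A brief usage sketch with shapely (coordinates and precision are arbitrary; geohash_to_polygon is assumed to be the companion helper from the same module):

```python
from shapely.geometry import box

# Rough bounding box around central Paris, covered with precision-6 geohashes.
paris = box(2.29, 48.84, 2.39, 48.88)
strictly_inside = polygon_to_geohashes(paris, precision=6, inner=True)
touching_too = polygon_to_geohashes(paris, precision=6, inner=False)
print(len(strictly_inside), len(touching_too))
```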
def table_convert_geometry(metadata, table_name):
"""Get table metadata from the database."""
from sqlalchemy import Table
from ..orm import Geometry
table = Table(table_name, metadata, autoload=True)
for c in table.columns:
# HACK! Sqlalchemy sees spatialte GEOMETRY types
# as NUMERIC
if c.name == 'geometry':
c.type = Geometry # What about variants?
    return table | Get table metadata from the database. | Below is the instruction that describes the task:
### Input:
Get table metadata from the database.
### Response:
def table_convert_geometry(metadata, table_name):
"""Get table metadata from the database."""
from sqlalchemy import Table
from ..orm import Geometry
table = Table(table_name, metadata, autoload=True)
for c in table.columns:
# HACK! Sqlalchemy sees spatialte GEOMETRY types
# as NUMERIC
if c.name == 'geometry':
c.type = Geometry # What about variants?
return table |
def process_Text( self, text, **kwargs ):
''' Executes the preprocessing pipeline on estnltk's Text object.
Returns a list: lines of analyses in the VISL CG3 input format;
'''
mrf_lines = convert_Text_to_mrf( text )
return self.process_mrf_lines( mrf_lines, **kwargs ) | Executes the preprocessing pipeline on estnltk's Text object.
Returns a list: lines of analyses in the VISL CG3 input format; | Below is the instruction that describes the task:
### Input:
Executes the preprocessing pipeline on estnltk's Text object.
Returns a list: lines of analyses in the VISL CG3 input format;
### Response:
def process_Text( self, text, **kwargs ):
''' Executes the preprocessing pipeline on estnltk's Text object.
Returns a list: lines of analyses in the VISL CG3 input format;
'''
mrf_lines = convert_Text_to_mrf( text )
return self.process_mrf_lines( mrf_lines, **kwargs ) |
def getScans(self, modifications=False, fdr=True):
"""
get a random scan
"""
if fdr:
sql = self.base_sql+"WHERE p.ConfidenceLevel >= {} and p.SearchEngineRank <= {} {}".format(self.clvl, self.srank, self.extra)
try:
self.cur.execute(sql)
except sqlite3.OperationalError:
sql = self.base_sql+"WHERE p.ConfidenceLevel >= {} {}".format(self.clvl, self.extra)
self.cur.execute(sql)
else:
sql = self.base_sql
self.cur.execute(sql)
while True:
# results = self.cur.fetchmany(1000)
# if not results:
# break
try:
tup = self.cur.fetchone()
except:
sys.stderr.write('Error fetching scan:\n{}\n'.format(traceback.format_exc()))
else:
while tup is not None:
if tup is None:
break
if tup[1] is not None:
scan = self.parseFullScan(tup, modifications=modifications)
scan.spectrumId = tup[3]
yield scan
try:
tup = self.cur.fetchone()
except:
sys.stderr.write('Error fetching scan:\n{}\n'.format(traceback.format_exc()))
if tup is None:
break
    yield None | get a random scan | Below is the instruction that describes the task:
### Input:
get a random scan
### Response:
def getScans(self, modifications=False, fdr=True):
"""
get a random scan
"""
if fdr:
sql = self.base_sql+"WHERE p.ConfidenceLevel >= {} and p.SearchEngineRank <= {} {}".format(self.clvl, self.srank, self.extra)
try:
self.cur.execute(sql)
except sqlite3.OperationalError:
sql = self.base_sql+"WHERE p.ConfidenceLevel >= {} {}".format(self.clvl, self.extra)
self.cur.execute(sql)
else:
sql = self.base_sql
self.cur.execute(sql)
while True:
# results = self.cur.fetchmany(1000)
# if not results:
# break
try:
tup = self.cur.fetchone()
except:
sys.stderr.write('Error fetching scan:\n{}\n'.format(traceback.format_exc()))
else:
while tup is not None:
if tup is None:
break
if tup[1] is not None:
scan = self.parseFullScan(tup, modifications=modifications)
scan.spectrumId = tup[3]
yield scan
try:
tup = self.cur.fetchone()
except:
sys.stderr.write('Error fetching scan:\n{}\n'.format(traceback.format_exc()))
if tup is None:
break
yield None |
def gen_stm(src, dst):
"""Return a STM instruction.
"""
    return ReilBuilder.build(ReilMnemonic.STM, src, ReilEmptyOperand(), dst) | Return a STM instruction. | Below is the instruction that describes the task:
### Input:
Return a STM instruction.
### Response:
def gen_stm(src, dst):
"""Return a STM instruction.
"""
return ReilBuilder.build(ReilMnemonic.STM, src, ReilEmptyOperand(), dst) |
def _process_unresolved_indirect_jumps(self):
"""
Resolve all unresolved indirect jumps found in previous scanning.
Currently we support resolving the following types of indirect jumps:
- Ijk_Call: indirect calls where the function address is passed in from a proceeding basic block
- Ijk_Boring: jump tables
- For an up-to-date list, see analyses/cfg/indirect_jump_resolvers
:return: A set of concrete indirect jump targets (ints).
:rtype: set
"""
l.info("%d indirect jumps to resolve.", len(self._indirect_jumps_to_resolve))
all_targets = set()
for idx, jump in enumerate(self._indirect_jumps_to_resolve): # type:int,IndirectJump
if self._low_priority:
self._release_gil(idx, 20, 0.0001)
all_targets |= self._process_one_indirect_jump(jump)
self._indirect_jumps_to_resolve.clear()
return all_targets | Resolve all unresolved indirect jumps found in previous scanning.
Currently we support resolving the following types of indirect jumps:
- Ijk_Call: indirect calls where the function address is passed in from a proceeding basic block
- Ijk_Boring: jump tables
- For an up-to-date list, see analyses/cfg/indirect_jump_resolvers
:return: A set of concrete indirect jump targets (ints).
:rtype: set | Below is the instruction that describes the task:
### Input:
Resolve all unresolved indirect jumps found in previous scanning.
Currently we support resolving the following types of indirect jumps:
- Ijk_Call: indirect calls where the function address is passed in from a proceeding basic block
- Ijk_Boring: jump tables
- For an up-to-date list, see analyses/cfg/indirect_jump_resolvers
:return: A set of concrete indirect jump targets (ints).
:rtype: set
### Response:
def _process_unresolved_indirect_jumps(self):
"""
Resolve all unresolved indirect jumps found in previous scanning.
Currently we support resolving the following types of indirect jumps:
- Ijk_Call: indirect calls where the function address is passed in from a proceeding basic block
- Ijk_Boring: jump tables
- For an up-to-date list, see analyses/cfg/indirect_jump_resolvers
:return: A set of concrete indirect jump targets (ints).
:rtype: set
"""
l.info("%d indirect jumps to resolve.", len(self._indirect_jumps_to_resolve))
all_targets = set()
for idx, jump in enumerate(self._indirect_jumps_to_resolve): # type:int,IndirectJump
if self._low_priority:
self._release_gil(idx, 20, 0.0001)
all_targets |= self._process_one_indirect_jump(jump)
self._indirect_jumps_to_resolve.clear()
return all_targets |
def qname(self):
"""Get the 'qualified' name of the node.
For example: module.name, module.class.name ...
:returns: The qualified name.
:rtype: str
"""
# pylint: disable=no-member; github.com/pycqa/astroid/issues/278
if self.parent is None:
return self.name
return "%s.%s" % (self.parent.frame().qname(), self.name) | Get the 'qualified' name of the node.
For example: module.name, module.class.name ...
:returns: The qualified name.
:rtype: str | Below is the instruction that describes the task:
### Input:
Get the 'qualified' name of the node.
For example: module.name, module.class.name ...
:returns: The qualified name.
:rtype: str
### Response:
def qname(self):
"""Get the 'qualified' name of the node.
For example: module.name, module.class.name ...
:returns: The qualified name.
:rtype: str
"""
# pylint: disable=no-member; github.com/pycqa/astroid/issues/278
if self.parent is None:
return self.name
return "%s.%s" % (self.parent.frame().qname(), self.name) |
def prepare_tokens(ctx, input, annotations, tout, lout):
"""Prepare tokenized and tagged corpus file from those supplied by CHEMDNER."""
click.echo('chemdataextractor.chemdner.prepare_tokens')
# Collect the annotations into a dict
anndict = defaultdict(list)
for line in annotations:
pmid, ta, start, end, text, category = line.strip().split('\t')
anndict[(pmid, ta)].append((int(start), int(end), text))
# Process the corpus
for line in input:
pmid, title, abstract = line.strip().split(u'\t')
for t, section, anns in [(Title(title), 'T', anndict.get((pmid, u'T'), [])), (Paragraph(abstract), u'A', anndict.get((pmid, u'A'), []))]:
# Write our tokens with POS and IOB tags
tagged = _prep_tags(t, anns)
for i, sentence in enumerate(tagged):
tout.write(u' '.join([u'/'.join([token, tag, label]) for token, tag, label in sentence]))
lout.write(u' '.join([u'/'.join([token, label]) for token, tag, label in sentence]))
tout.write(u'\n')
lout.write(u'\n')
tout.write(u'\n')
lout.write(u'\n') | Prepare tokenized and tagged corpus file from those supplied by CHEMDNER. | Below is the instruction that describes the task:
### Input:
Prepare tokenized and tagged corpus file from those supplied by CHEMDNER.
### Response:
def prepare_tokens(ctx, input, annotations, tout, lout):
"""Prepare tokenized and tagged corpus file from those supplied by CHEMDNER."""
click.echo('chemdataextractor.chemdner.prepare_tokens')
# Collect the annotations into a dict
anndict = defaultdict(list)
for line in annotations:
pmid, ta, start, end, text, category = line.strip().split('\t')
anndict[(pmid, ta)].append((int(start), int(end), text))
# Process the corpus
for line in input:
pmid, title, abstract = line.strip().split(u'\t')
for t, section, anns in [(Title(title), 'T', anndict.get((pmid, u'T'), [])), (Paragraph(abstract), u'A', anndict.get((pmid, u'A'), []))]:
# Write our tokens with POS and IOB tags
tagged = _prep_tags(t, anns)
for i, sentence in enumerate(tagged):
tout.write(u' '.join([u'/'.join([token, tag, label]) for token, tag, label in sentence]))
lout.write(u' '.join([u'/'.join([token, label]) for token, tag, label in sentence]))
tout.write(u'\n')
lout.write(u'\n')
tout.write(u'\n')
lout.write(u'\n') |
def predict_moments(self, X, nsamples=200, likelihood_args=()):
r"""
Predictive moments, in particular mean and variance, of a Bayesian GLM.
This function uses Monte-Carlo sampling to evaluate the predictive mean
and variance of a Bayesian GLM. The exact expressions evaluated are,
.. math ::
\mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y] &=
\int \mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]
p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w},
\mathbb{V}[y^* | \mathbf{x^*}, \mathbf{X}, y] &=
\int \left(\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]
- \mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y]\right)^2
p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w},
where :math:`\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]` is the
expected value of :math:`y^*` from the likelihood, and
:math:`p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi)` is the posterior
distribution over weights (from ``learn``). Here are a few concrete
examples of how we can use these values,
- Gaussian likelihood: these are just the predicted mean and variance,
see ``revrand.regression.predict``
- Bernoulli likelihood: The expected value is the probability,
:math:`p(y^* = 1)`, i.e. the probability of class one. The variance
may not be so useful.
- Poisson likelihood: The expected value is similar conceptually to the
Gaussian case, and is also a *continuous* value. The median (50%
quantile) from ``predict_interval`` is a discrete value. Again,
the variance in this instance may not be so useful.
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, d
dimensions).
nsamples : int, optional
Number of samples for sampling the expected moments from the
predictive distribution.
likelihood_args : sequence, optional
sequence of arguments to pass to the likelihood function. These are
non-learnable parameters. They can be scalars or arrays of length
N.
Returns
-------
Ey : ndarray
The expected value of y* for the query inputs, X* of shape (N*,).
Vy : ndarray
The expected variance of y* (excluding likelihood noise terms) for
the query inputs, X* of shape (N*,).
"""
# Get latent function samples
N = X.shape[0]
ys = np.empty((N, nsamples))
fsamples = self._sample_func(X, nsamples)
# Push samples though likelihood expected value
Eyargs = tuple(chain(atleast_list(self.like_hypers_), likelihood_args))
for i, f in enumerate(fsamples):
ys[:, i] = self.likelihood.Ey(f, *Eyargs)
# Average transformed samples (MC integration)
Ey = ys.mean(axis=1)
Vy = ((ys - Ey[:, np.newaxis])**2).mean(axis=1)
return Ey, Vy | r"""
Predictive moments, in particular mean and variance, of a Bayesian GLM.
This function uses Monte-Carlo sampling to evaluate the predictive mean
and variance of a Bayesian GLM. The exact expressions evaluated are,
.. math ::
\mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y] &=
\int \mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]
p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w},
\mathbb{V}[y^* | \mathbf{x^*}, \mathbf{X}, y] &=
\int \left(\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]
- \mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y]\right)^2
p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w},
where :math:`\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]` is the
expected value of :math:`y^*` from the likelihood, and
:math:`p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi)` is the posterior
distribution over weights (from ``learn``). Here are a few concrete
examples of how we can use these values,
- Gaussian likelihood: these are just the predicted mean and variance,
see ``revrand.regression.predict``
- Bernoulli likelihood: The expected value is the probability,
:math:`p(y^* = 1)`, i.e. the probability of class one. The variance
may not be so useful.
- Poisson likelihood: The expected value is similar conceptually to the
Gaussian case, and is also a *continuous* value. The median (50%
quantile) from ``predict_interval`` is a discrete value. Again,
the variance in this instance may not be so useful.
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, d
dimensions).
nsamples : int, optional
Number of samples for sampling the expected moments from the
predictive distribution.
likelihood_args : sequence, optional
sequence of arguments to pass to the likelihood function. These are
non-learnable parameters. They can be scalars or arrays of length
N.
Returns
-------
Ey : ndarray
The expected value of y* for the query inputs, X* of shape (N*,).
Vy : ndarray
The expected variance of y* (excluding likelihood noise terms) for
the query inputs, X* of shape (N*,). | Below is the instruction that describes the task:
### Input:
r"""
Predictive moments, in particular mean and variance, of a Bayesian GLM.
This function uses Monte-Carlo sampling to evaluate the predictive mean
and variance of a Bayesian GLM. The exact expressions evaluated are,
.. math ::
\mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y] &=
\int \mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]
p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w},
\mathbb{V}[y^* | \mathbf{x^*}, \mathbf{X}, y] &=
\int \left(\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]
- \mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y]\right)^2
p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w},
where :math:`\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]` is the
expected value of :math:`y^*` from the likelihood, and
:math:`p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi)` is the posterior
distribution over weights (from ``learn``). Here are a few concrete
examples of how we can use these values,
- Gaussian likelihood: these are just the predicted mean and variance,
see ``revrand.regression.predict``
- Bernoulli likelihood: The expected value is the probability,
:math:`p(y^* = 1)`, i.e. the probability of class one. The variance
may not be so useful.
- Poisson likelihood: The expected value is similar conceptually to the
Gaussian case, and is also a *continuous* value. The median (50%
quantile) from ``predict_interval`` is a discrete value. Again,
the variance in this instance may not be so useful.
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, d
dimensions).
nsamples : int, optional
Number of samples for sampling the expected moments from the
predictive distribution.
likelihood_args : sequence, optional
sequence of arguments to pass to the likelihood function. These are
non-learnable parameters. They can be scalars or arrays of length
N.
Returns
-------
Ey : ndarray
The expected value of y* for the query inputs, X* of shape (N*,).
Vy : ndarray
The expected variance of y* (excluding likelihood noise terms) for
the query inputs, X* of shape (N*,).
### Response:
def predict_moments(self, X, nsamples=200, likelihood_args=()):
r"""
Predictive moments, in particular mean and variance, of a Bayesian GLM.
This function uses Monte-Carlo sampling to evaluate the predictive mean
and variance of a Bayesian GLM. The exact expressions evaluated are,
.. math ::
\mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y] &=
\int \mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]
p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w},
\mathbb{V}[y^* | \mathbf{x^*}, \mathbf{X}, y] &=
\int \left(\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]
- \mathbb{E}[y^* | \mathbf{x^*}, \mathbf{X}, y]\right)^2
p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi) d\mathbf{w},
where :math:`\mathbb{E}[y^* | \mathbf{w}, \phi(\mathbf{x}^*)]` is the
expected value of :math:`y^*` from the likelihood, and
:math:`p(\mathbf{w} | \mathbf{y}, \boldsymbol\Phi)` is the posterior
distribution over weights (from ``learn``). Here are a few concrete
examples of how we can use these values,
- Gaussian likelihood: these are just the predicted mean and variance,
see ``revrand.regression.predict``
- Bernoulli likelihood: The expected value is the probability,
:math:`p(y^* = 1)`, i.e. the probability of class one. The variance
may not be so useful.
- Poisson likelihood: The expected value is similar conceptually to the
Gaussian case, and is also a *continuous* value. The median (50%
quantile) from ``predict_interval`` is a discrete value. Again,
the variance in this instance may not be so useful.
Parameters
----------
X : ndarray
(N*,d) array query input dataset (N* samples, d
dimensions).
nsamples : int, optional
Number of samples for sampling the expected moments from the
predictive distribution.
likelihood_args : sequence, optional
sequence of arguments to pass to the likelihood function. These are
non-learnable parameters. They can be scalars or arrays of length
N.
Returns
-------
Ey : ndarray
The expected value of y* for the query inputs, X* of shape (N*,).
Vy : ndarray
The expected variance of y* (excluding likelihood noise terms) for
the query inputs, X* of shape (N*,).
"""
# Get latent function samples
N = X.shape[0]
ys = np.empty((N, nsamples))
fsamples = self._sample_func(X, nsamples)
# Push samples though likelihood expected value
Eyargs = tuple(chain(atleast_list(self.like_hypers_), likelihood_args))
for i, f in enumerate(fsamples):
ys[:, i] = self.likelihood.Ey(f, *Eyargs)
# Average transformed samples (MC integration)
Ey = ys.mean(axis=1)
Vy = ((ys - Ey[:, np.newaxis])**2).mean(axis=1)
return Ey, Vy |
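A minimal NumPy sketch of the Monte-Carlo moment estimate described above, assuming latent-function samples are already available; the sigmoid stands in for an arbitrary likelihood mean and is not revrand's implementation.
# Sketch of MC predictive moments: average likelihood means over posterior draws.
import numpy as np

rng = np.random.default_rng(0)
nsamples, N = 200, 5
fsamples = rng.normal(size=(nsamples, N))      # stand-in for posterior draws of f
Ey_like = lambda f: 1.0 / (1.0 + np.exp(-f))   # e.g. Bernoulli mean (sigmoid), an assumption

ys = np.stack([Ey_like(f) for f in fsamples], axis=1)  # shape (N, nsamples)
Ey = ys.mean(axis=1)                                   # predictive mean
Vy = ((ys - Ey[:, None]) ** 2).mean(axis=1)            # predictive variance
print(Ey.shape, Vy.shape)  # (5,) (5,)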
def getCorrect(self, tolerance):
"""
Returns the number of nodes within tolerance of the target.
"""
return Numeric.add.reduce(Numeric.fabs(self.target - self.activation) < tolerance) | Returns the number of nodes within tolerance of the target. | Below is the instruction that describes the task:
### Input:
Returns the number of nodes within tolerance of the target.
### Response:
def getCorrect(self, tolerance):
"""
Returns the number of nodes within tolerance of the target.
"""
return Numeric.add.reduce(Numeric.fabs(self.target - self.activation) < tolerance) |
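The same tolerance count written against modern NumPy (Numeric is its long-obsolete predecessor); the arrays below are made up for illustration.
# NumPy equivalent of getCorrect(): count nodes within tolerance of the target.
import numpy as np

target = np.array([0.0, 1.0, 1.0, 0.0])
activation = np.array([0.1, 0.8, 0.4, 0.05])
tolerance = 0.25
correct = np.sum(np.fabs(target - activation) < tolerance)
print(correct)  # 3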
def write_command(self, request_id, msg):
"""Send "insert" etc. command, returning response as a dict.
Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `request_id`: an int.
- `msg`: bytes, the command message.
"""
self.send_message(msg, 0)
response = helpers._unpack_response(self.receive_message(1, request_id))
assert response['number_returned'] == 1
result = response['data'][0]
# Raises NotMasterError or OperationFailure.
helpers._check_command_response(result)
return result | Send "insert" etc. command, returning response as a dict.
Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `request_id`: an int.
- `msg`: bytes, the command message. | Below is the instruction that describes the task:
### Input:
Send "insert" etc. command, returning response as a dict.
Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `request_id`: an int.
- `msg`: bytes, the command message.
### Response:
def write_command(self, request_id, msg):
"""Send "insert" etc. command, returning response as a dict.
Can raise ConnectionFailure or OperationFailure.
:Parameters:
- `request_id`: an int.
- `msg`: bytes, the command message.
"""
self.send_message(msg, 0)
response = helpers._unpack_response(self.receive_message(1, request_id))
assert response['number_returned'] == 1
result = response['data'][0]
# Raises NotMasterError or OperationFailure.
helpers._check_command_response(result)
return result |
async def select(query):
"""Perform SELECT query asynchronously.
"""
assert isinstance(query, peewee.SelectQuery),\
("Error, trying to run select coroutine"
"with wrong query class %s" % str(query))
cursor = await _execute_query_async(query)
result = AsyncQueryWrapper(cursor=cursor, query=query)
try:
while True:
await result.fetchone()
except GeneratorExit:
pass
finally:
await cursor.release()
return result | Perform SELECT query asynchronously. | Below is the instruction that describes the task:
### Input:
Perform SELECT query asynchronously.
### Response:
async def select(query):
"""Perform SELECT query asynchronously.
"""
assert isinstance(query, peewee.SelectQuery),\
("Error, trying to run select coroutine"
"with wrong query class %s" % str(query))
cursor = await _execute_query_async(query)
result = AsyncQueryWrapper(cursor=cursor, query=query)
try:
while True:
await result.fetchone()
except GeneratorExit:
pass
finally:
await cursor.release()
return result |
def create_datasource(jboss_config, name, datasource_properties, profile=None):
'''
Create datasource in running jboss instance
jboss_config
Configuration dictionary with properties specified above.
name
Datasource name
datasource_properties
A dictionary of datasource properties to be created:
- driver-name: mysql
- connection-url: 'jdbc:mysql://localhost:3306/sampleDatabase'
- jndi-name: 'java:jboss/datasources/sampleDS'
- user-name: sampleuser
- password: secret
- min-pool-size: 3
- use-java-context: True
profile
The profile name (JBoss domain mode only)
CLI Example:
.. code-block:: bash
salt '*' jboss7.create_datasource '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' 'my_datasource' '{"driver-name": "mysql", "connection-url": "jdbc:mysql://localhost:3306/sampleDatabase", "jndi-name": "java:jboss/datasources/sampleDS", "user-name": "sampleuser", "password": "secret", "min-pool-size": 3, "use-java-context": True}'
'''
log.debug("======================== MODULE FUNCTION: jboss7.create_datasource, name=%s, profile=%s", name, profile)
ds_resource_description = __get_datasource_resource_description(jboss_config, name, profile)
operation = '/subsystem=datasources/data-source="{name}":add({properties})'.format(
name=name,
properties=__get_properties_assignment_string(datasource_properties, ds_resource_description)
)
if profile is not None:
operation = '/profile="{profile}"'.format(profile=profile) + operation
return __salt__['jboss7_cli.run_operation'](jboss_config, operation, fail_on_error=False) | Create datasource in running jboss instance
jboss_config
Configuration dictionary with properties specified above.
name
Datasource name
datasource_properties
A dictionary of datasource properties to be created:
- driver-name: mysql
- connection-url: 'jdbc:mysql://localhost:3306/sampleDatabase'
- jndi-name: 'java:jboss/datasources/sampleDS'
- user-name: sampleuser
- password: secret
- min-pool-size: 3
- use-java-context: True
profile
The profile name (JBoss domain mode only)
CLI Example:
.. code-block:: bash
salt '*' jboss7.create_datasource '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' 'my_datasource' '{"driver-name": "mysql", "connection-url": "jdbc:mysql://localhost:3306/sampleDatabase", "jndi-name": "java:jboss/datasources/sampleDS", "user-name": "sampleuser", "password": "secret", "min-pool-size": 3, "use-java-context": True}' | Below is the instruction that describes the task:
### Input:
Create datasource in running jboss instance
jboss_config
Configuration dictionary with properties specified above.
name
Datasource name
datasource_properties
A dictionary of datasource properties to be created:
- driver-name: mysql
- connection-url: 'jdbc:mysql://localhost:3306/sampleDatabase'
- jndi-name: 'java:jboss/datasources/sampleDS'
- user-name: sampleuser
- password: secret
- min-pool-size: 3
- use-java-context: True
profile
The profile name (JBoss domain mode only)
CLI Example:
.. code-block:: bash
salt '*' jboss7.create_datasource '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' 'my_datasource' '{"driver-name": "mysql", "connection-url": "jdbc:mysql://localhost:3306/sampleDatabase", "jndi-name": "java:jboss/datasources/sampleDS", "user-name": "sampleuser", "password": "secret", "min-pool-size": 3, "use-java-context": True}'
### Response:
def create_datasource(jboss_config, name, datasource_properties, profile=None):
'''
Create datasource in running jboss instance
jboss_config
Configuration dictionary with properties specified above.
name
Datasource name
datasource_properties
A dictionary of datasource properties to be created:
- driver-name: mysql
- connection-url: 'jdbc:mysql://localhost:3306/sampleDatabase'
- jndi-name: 'java:jboss/datasources/sampleDS'
- user-name: sampleuser
- password: secret
- min-pool-size: 3
- use-java-context: True
profile
The profile name (JBoss domain mode only)
CLI Example:
.. code-block:: bash
salt '*' jboss7.create_datasource '{"cli_path": "integration.modules.sysmod.SysModuleTest.test_valid_docs", "controller": "10.11.12.13:9999", "cli_user": "jbossadm", "cli_password": "jbossadm"}' 'my_datasource' '{"driver-name": "mysql", "connection-url": "jdbc:mysql://localhost:3306/sampleDatabase", "jndi-name": "java:jboss/datasources/sampleDS", "user-name": "sampleuser", "password": "secret", "min-pool-size": 3, "use-java-context": True}'
'''
log.debug("======================== MODULE FUNCTION: jboss7.create_datasource, name=%s, profile=%s", name, profile)
ds_resource_description = __get_datasource_resource_description(jboss_config, name, profile)
operation = '/subsystem=datasources/data-source="{name}":add({properties})'.format(
name=name,
properties=__get_properties_assignment_string(datasource_properties, ds_resource_description)
)
if profile is not None:
operation = '/profile="{profile}"'.format(profile=profile) + operation
return __salt__['jboss7_cli.run_operation'](jboss_config, operation, fail_on_error=False) |
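A toy sketch of how the :add(...) operation string is assembled; join_properties below is a simplified stand-in for __get_properties_assignment_string (which in Salt also consults the resource description), so treat it as an assumption, not the module's real helper.
# Toy illustration of building the JBoss CLI ":add(...)" operation string.
# join_properties is a simplified stand-in, NOT salt's real helper.
def join_properties(props):
    return ','.join('{0}="{1}"'.format(k, v) for k, v in sorted(props.items()))

name = 'my_datasource'
props = {'driver-name': 'mysql', 'jndi-name': 'java:jboss/datasources/sampleDS'}
operation = '/subsystem=datasources/data-source="{name}":add({properties})'.format(
    name=name, properties=join_properties(props))
print(operation)
# /subsystem=datasources/data-source="my_datasource":add(driver-name="mysql",jndi-name="java:jboss/datasources/sampleDS")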
def _read_starlog(self):
""" read history.data or star.log file again"""
sldir = self.sldir
slname = self.slname
slaname = slname+'sa'
if not os.path.exists(sldir+'/'+slaname):
print('No '+self.slname+'sa file found, create new one from '+self.slname)
_cleanstarlog(sldir+'/'+slname)
else:
if self.clean_starlog:
print('Requested new '+self.slname+'sa; create new from '+self.slname)
_cleanstarlog(sldir+'/'+slname)
else:
print('Using old '+self.slname+'sa file ...')
cmd=os.popen('wc '+sldir+'/'+slaname)
cmd_out=cmd.readline()
cnum_cycles=cmd_out.split()[0]
num_cycles=int(cnum_cycles) - 6
filename=sldir+'/'+slaname
header_attr,cols,data = _read_mesafile(filename,data_rows=num_cycles)
self.cols = cols
self.header_attr = header_attr
self.data = data | read history.data or star.log file again | Below is the instruction that describes the task:
### Input:
read history.data or star.log file again
### Response:
def _read_starlog(self):
""" read history.data or star.log file again"""
sldir = self.sldir
slname = self.slname
slaname = slname+'sa'
if not os.path.exists(sldir+'/'+slaname):
print('No '+self.slname+'sa file found, create new one from '+self.slname)
_cleanstarlog(sldir+'/'+slname)
else:
if self.clean_starlog:
print('Requested new '+self.slname+'sa; create new from '+self.slname)
_cleanstarlog(sldir+'/'+slname)
else:
print('Using old '+self.slname+'sa file ...')
cmd=os.popen('wc '+sldir+'/'+slaname)
cmd_out=cmd.readline()
cnum_cycles=cmd_out.split()[0]
num_cycles=int(cnum_cycles) - 6
filename=sldir+'/'+slaname
header_attr,cols,data = _read_mesafile(filename,data_rows=num_cycles)
self.cols = cols
self.header_attr = header_attr
self.data = data |
def contrastive_loss(left, right, y, margin, extra=False, scope="constrastive_loss"):
r"""Loss for Siamese networks as described in the paper:
`Learning a Similarity Metric Discriminatively, with Application to Face
Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_ by Chopra et al.
.. math::
\frac{1}{2} [y \cdot d^2 + (1-y) \cdot \max(0, m - d)^2], d = \Vert l - r \Vert_2
Args:
left (tf.Tensor): left feature vectors of shape [Batch, N].
right (tf.Tensor): right feature vectors of shape [Batch, N].
y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar.
margin (float): horizon for negative examples (y==0).
extra (bool): also return distances for pos and neg.
Returns:
tf.Tensor: contrastive_loss (averaged over the batch), (and optionally average_pos_dist, average_neg_dist)
"""
with tf.name_scope(scope):
y = tf.cast(y, tf.float32)
delta = tf.reduce_sum(tf.square(left - right), 1)
delta_sqrt = tf.sqrt(delta + 1e-10)
match_loss = delta
missmatch_loss = tf.square(tf.nn.relu(margin - delta_sqrt))
loss = tf.reduce_mean(0.5 * (y * match_loss + (1 - y) * missmatch_loss))
if extra:
num_pos = tf.count_nonzero(y)
num_neg = tf.count_nonzero(1 - y)
pos_dist = tf.where(tf.equal(num_pos, 0), 0.,
tf.reduce_sum(y * delta_sqrt) / tf.cast(num_pos, tf.float32),
name="pos-dist")
neg_dist = tf.where(tf.equal(num_neg, 0), 0.,
tf.reduce_sum((1 - y) * delta_sqrt) / tf.cast(num_neg, tf.float32),
name="neg-dist")
return loss, pos_dist, neg_dist
else:
return loss | r"""Loss for Siamese networks as described in the paper:
`Learning a Similarity Metric Discriminatively, with Application to Face
Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_ by Chopra et al.
.. math::
\frac{1}{2} [y \cdot d^2 + (1-y) \cdot \max(0, m - d)^2], d = \Vert l - r \Vert_2
Args:
left (tf.Tensor): left feature vectors of shape [Batch, N].
right (tf.Tensor): right feature vectors of shape [Batch, N].
y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar.
margin (float): horizon for negative examples (y==0).
extra (bool): also return distances for pos and neg.
Returns:
tf.Tensor: contrastive_loss (averaged over the batch), (and optionally average_pos_dist, average_neg_dist) | Below is the instruction that describes the task:
### Input:
r"""Loss for Siamese networks as described in the paper:
`Learning a Similarity Metric Discriminatively, with Application to Face
Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_ by Chopra et al.
.. math::
\frac{1}{2} [y \cdot d^2 + (1-y) \cdot \max(0, m - d)^2], d = \Vert l - r \Vert_2
Args:
left (tf.Tensor): left feature vectors of shape [Batch, N].
right (tf.Tensor): right feature vectors of shape [Batch, N].
y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar.
margin (float): horizon for negative examples (y==0).
extra (bool): also return distances for pos and neg.
Returns:
tf.Tensor: contrastive_loss (averaged over the batch), (and optionally average_pos_dist, average_neg_dist)
### Response:
def contrastive_loss(left, right, y, margin, extra=False, scope="constrastive_loss"):
r"""Loss for Siamese networks as described in the paper:
`Learning a Similarity Metric Discriminatively, with Application to Face
Verification <http://yann.lecun.com/exdb/publis/pdf/chopra-05.pdf>`_ by Chopra et al.
.. math::
\frac{1}{2} [y \cdot d^2 + (1-y) \cdot \max(0, m - d)^2], d = \Vert l - r \Vert_2
Args:
left (tf.Tensor): left feature vectors of shape [Batch, N].
right (tf.Tensor): right feature vectors of shape [Batch, N].
y (tf.Tensor): binary labels of shape [Batch]. 1: similar, 0: not similar.
margin (float): horizon for negative examples (y==0).
extra (bool): also return distances for pos and neg.
Returns:
tf.Tensor: contrastive_loss (averaged over the batch), (and optionally average_pos_dist, average_neg_dist)
"""
with tf.name_scope(scope):
y = tf.cast(y, tf.float32)
delta = tf.reduce_sum(tf.square(left - right), 1)
delta_sqrt = tf.sqrt(delta + 1e-10)
match_loss = delta
missmatch_loss = tf.square(tf.nn.relu(margin - delta_sqrt))
loss = tf.reduce_mean(0.5 * (y * match_loss + (1 - y) * missmatch_loss))
if extra:
num_pos = tf.count_nonzero(y)
num_neg = tf.count_nonzero(1 - y)
pos_dist = tf.where(tf.equal(num_pos, 0), 0.,
tf.reduce_sum(y * delta_sqrt) / tf.cast(num_pos, tf.float32),
name="pos-dist")
neg_dist = tf.where(tf.equal(num_neg, 0), 0.,
tf.reduce_sum((1 - y) * delta_sqrt) / tf.cast(num_neg, tf.float32),
name="neg-dist")
return loss, pos_dist, neg_dist
else:
return loss |
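A NumPy restatement of the docstring's formula, useful for checking the TensorFlow code above against the math on toy pairs; the values are invented.
# Contrastive loss on toy vectors, following 0.5*[y*d^2 + (1-y)*max(0, m-d)^2].
import numpy as np

left = np.array([[0.0, 1.0], [1.0, 1.0]])
right = np.array([[0.0, 0.5], [3.0, 1.0]])
y = np.array([1.0, 0.0])          # 1: similar pair, 0: dissimilar pair
margin = 1.0

d2 = np.sum((left - right) ** 2, axis=1)   # squared Euclidean distance
d = np.sqrt(d2)
loss = np.mean(0.5 * (y * d2 + (1 - y) * np.maximum(0.0, margin - d) ** 2))
print(loss)  # 0.0625: pair 1 contributes 0.5*0.25, pair 2 is already beyond the margin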
def remove(self, key, preserve_data=False):
"""
:param key: Document unique identifier.
Remove the document from the search index.
"""
if self.members.remove(key) != 1:
raise KeyError('Document with key "%s" not found.' % key)
document_hash = self._get_hash(key)
content = decode(document_hash['content'])
if not preserve_data:
document_hash.clear()
for word in self.tokenizer.tokenize(content):
word_key = self.get_key(word)
del word_key[key]
if len(word_key) == 0:
word_key.clear() | :param key: Document unique identifier.
Remove the document from the search index. | Below is the instruction that describes the task:
### Input:
:param key: Document unique identifier.
Remove the document from the search index.
### Response:
def remove(self, key, preserve_data=False):
"""
:param key: Document unique identifier.
Remove the document from the search index.
"""
if self.members.remove(key) != 1:
raise KeyError('Document with key "%s" not found.' % key)
document_hash = self._get_hash(key)
content = decode(document_hash['content'])
if not preserve_data:
document_hash.clear()
for word in self.tokenizer.tokenize(content):
word_key = self.get_key(word)
del word_key[key]
if len(word_key) == 0:
word_key.clear() |
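The same inverted-index bookkeeping with plain dicts and sets, as a hedged sketch of the idea; it does not use walrus's Redis-backed containers.
# Plain-dict sketch of removing a document from an inverted index.
def tokenize(text):
    return text.lower().split()

index = {'hello': {'doc1', 'doc2'}, 'world': {'doc1'}}
documents = {'doc1': 'hello world', 'doc2': 'hello'}

def remove_document(key):
    content = documents.pop(key)          # KeyError if the document is absent
    for word in tokenize(content):
        postings = index.get(word, set())
        postings.discard(key)
        if not postings:                  # drop empty posting lists entirely
            index.pop(word, None)

remove_document('doc1')
print(index)  # {'hello': {'doc2'}}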
def dump_string_to_file(string, filepath):
"""Dump @string as a line to @filepath."""
create_dirs(
os.path.dirname(filepath)
)
with open(filepath, 'a') as outfile:
outfile.write(string)
outfile.write('\n') | Dump @string as a line to @filepath. | Below is the instruction that describes the task:
### Input:
Dump @string as a line to @filepath.
### Response:
def dump_string_to_file(string, filepath):
"""Dump @string as a line to @filepath."""
create_dirs(
os.path.dirname(filepath)
)
with open(filepath, 'a') as outfile:
outfile.write(string)
outfile.write('\n') |
def check_api_response(self, response):
"""Check API response and raise exceptions if needed.
:param requests.models.Response response: request response to check
"""
# check response
if response.status_code == 200:
return True
elif response.status_code >= 400:
logging.error(
"{}: {} - {} - URL: {}".format(
response.status_code,
response.reason,
response.json().get("error"),
response.request.url,
)
)
return False, response.status_code | Check API response and raise exceptions if needed.
:param requests.models.Response response: request response to check | Below is the instruction that describes the task:
### Input:
Check API response and raise exceptions if needed.
:param requests.models.Response response: request response to check
### Response:
def check_api_response(self, response):
"""Check API response and raise exceptions if needed.
:param requests.models.Response response: request response to check
"""
# check response
if response.status_code == 200:
return True
elif response.status_code >= 400:
logging.error(
"{}: {} - {} - URL: {}".format(
response.status_code,
response.reason,
response.json().get("error"),
response.request.url,
)
)
return False, response.status_code |
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
out = None
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out | Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise. | Below is the instruction that describes the task:
### Input:
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
### Response:
def to_native_string(string, encoding='ascii'):
"""
Given a string object, regardless of type, returns a representation of that
string in the native string type, encoding and decoding where necessary.
This assumes ASCII unless told otherwise.
"""
out = None
if isinstance(string, builtin_str):
out = string
else:
if is_py2:
out = string.encode(encoding)
else:
out = string.decode(encoding)
return out |
def streaming_callback(self, body_part):
"""Handles a streaming chunk of the response.
The streaming_response callback gives no indication about whether the
received chunk is the last in the stream. The "last_response" instance
variable allows us to keep track of the last received chunk of the
response. Each time this is called, the previous chunk is emitted. The
done() method is expected to be called after the response completes to
ensure that the last piece of data is sent.
Args:
body_part: A chunk of the streaming response.
"""
b64_body_string = base64.b64encode(body_part).decode('utf-8')
response = {
'message_id': self._message_id,
'data': b64_body_string,
}
if self._last_response is None:
# This represents the first chunk of data to be streamed to the caller.
# Attach status and header information to this item.
response.update(self._generate_metadata_body())
else:
self._last_response['done'] = False
self._write_message_func(self._last_response)
self._last_response = response | Handles a streaming chunk of the response.
The streaming_response callback gives no indication about whether the
received chunk is the last in the stream. The "last_response" instance
variable allows us to keep track of the last received chunk of the
response. Each time this is called, the previous chunk is emitted. The
done() method is expected to be called after the response completes to
ensure that the last piece of data is sent.
Args:
body_part: A chunk of the streaming response. | Below is the instruction that describes the task:
### Input:
Handles a streaming chunk of the response.
The streaming_response callback gives no indication about whether the
received chunk is the last in the stream. The "last_response" instance
variable allows us to keep track of the last received chunk of the
response. Each time this is called, the previous chunk is emitted. The
done() method is expected to be called after the response completes to
ensure that the last piece of data is sent.
Args:
body_part: A chunk of the streaming response.
### Response:
def streaming_callback(self, body_part):
"""Handles a streaming chunk of the response.
The streaming_response callback gives no indication about whether the
received chunk is the last in the stream. The "last_response" instance
variable allows us to keep track of the last received chunk of the
response. Each time this is called, the previous chunk is emitted. The
done() method is expected to be called after the response completes to
ensure that the last piece of data is sent.
Args:
body_part: A chunk of the streaming response.
"""
b64_body_string = base64.b64encode(body_part).decode('utf-8')
response = {
'message_id': self._message_id,
'data': b64_body_string,
}
if self._last_response is None:
# This represents the first chunk of data to be streamed to the caller.
# Attach status and header information to this item.
response.update(self._generate_metadata_body())
else:
self._last_response['done'] = False
self._write_message_func(self._last_response)
self._last_response = response |
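The hold-back-the-last-chunk pattern above, isolated into a small sketch; emitted and on_done are hypothetical stand-ins for the write callback and the done() method mentioned in the docstring.
# Stand-alone sketch of the "emit previous chunk, hold the latest" pattern.
emitted = []
last_response = None

def on_chunk(data):
    global last_response
    response = {'data': data}
    if last_response is not None:
        last_response['done'] = False
        emitted.append(last_response)
    else:
        response['status'] = 200        # metadata attaches to the first chunk only
    last_response = response

def on_done():
    global last_response
    if last_response is not None:
        last_response['done'] = True
        emitted.append(last_response)
        last_response = None

for chunk in ('a', 'b', 'c'):
    on_chunk(chunk)
on_done()
print([r.get('done') for r in emitted])  # [False, False, True]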
def quantile(arg, quantile, interpolation='linear'):
"""
Return value at the given quantile, a la numpy.percentile.
Parameters
----------
quantile : float/int or array-like
0 <= quantile <= 1, the quantile(s) to compute
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantile
if scalar input, scalar type, same as input
if array input, list of scalar type
"""
if isinstance(quantile, collections.abc.Sequence):
op = ops.MultiQuantile(arg, quantile, interpolation)
else:
op = ops.Quantile(arg, quantile, interpolation)
return op.to_expr() | Return value at the given quantile, a la numpy.percentile.
Parameters
----------
quantile : float/int or array-like
0 <= quantile <= 1, the quantile(s) to compute
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantile
if scalar input, scalar type, same as input
if array input, list of scalar type | Below is the instruction that describes the task:
### Input:
Return value at the given quantile, a la numpy.percentile.
Parameters
----------
quantile : float/int or array-like
0 <= quantile <= 1, the quantile(s) to compute
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantile
if scalar input, scalar type, same as input
if array input, list of scalar type
### Response:
def quantile(arg, quantile, interpolation='linear'):
"""
Return value at the given quantile, a la numpy.percentile.
Parameters
----------
quantile : float/int or array-like
0 <= quantile <= 1, the quantile(s) to compute
interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'}
This optional parameter specifies the interpolation method to use,
when the desired quantile lies between two data points `i` and `j`:
* linear: `i + (j - i) * fraction`, where `fraction` is the
fractional part of the index surrounded by `i` and `j`.
* lower: `i`.
* higher: `j`.
* nearest: `i` or `j` whichever is nearest.
* midpoint: (`i` + `j`) / 2.
Returns
-------
quantile
if scalar input, scalar type, same as input
if array input, list of scalar type
"""
if isinstance(quantile, collections.abc.Sequence):
op = ops.MultiQuantile(arg, quantile, interpolation)
else:
op = ops.Quantile(arg, quantile, interpolation)
return op.to_expr() |
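Since the docstring mirrors numpy.percentile, the linear interpolation rule can be checked by hand; a short illustration independent of ibis.
# Linear-interpolation quantile, a la numpy.percentile, worked by hand.
import numpy as np

data = np.sort(np.array([4.0, 1.0, 3.0, 2.0]))   # [1, 2, 3, 4]
q = 0.25
idx = q * (len(data) - 1)                        # 0.75: between positions 0 and 1
i = int(np.floor(idx))
frac = idx - i
linear = data[i] + (data[i + 1] - data[i]) * frac   # 1 + 1*0.75 = 1.75
print(linear, np.quantile(data, q))  # both 1.75 ('lower' would give 1.0, 'higher' 2.0)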
def match(ctx, features, profile, gps_precision):
"""Mapbox Map Matching API lets you use snap your GPS traces
to the OpenStreetMap road and path network.
$ mapbox mapmatching trace.geojson
An access token is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
features = list(features)
if len(features) != 1:
raise click.BadParameter(
"Mapmatching requires a single LineString feature")
service = mapbox.MapMatcher(access_token=access_token)
try:
res = service.match(
features[0],
profile=profile,
gps_precision=gps_precision)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if res.status_code == 200:
stdout = click.open_file('-', 'w')
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip()) | Mapbox Map Matching API lets you snap your GPS traces
to the OpenStreetMap road and path network.
$ mapbox mapmatching trace.geojson
An access token is required, see `mapbox --help`. | Below is the instruction that describes the task:
### Input:
Mapbox Map Matching API lets you snap your GPS traces
to the OpenStreetMap road and path network.
$ mapbox mapmatching trace.geojson
An access token is required, see `mapbox --help`.
### Response:
def match(ctx, features, profile, gps_precision):
"""Mapbox Map Matching API lets you use snap your GPS traces
to the OpenStreetMap road and path network.
$ mapbox mapmatching trace.geojson
An access token is required, see `mapbox --help`.
"""
access_token = (ctx.obj and ctx.obj.get('access_token')) or None
features = list(features)
if len(features) != 1:
raise click.BadParameter(
"Mapmatching requires a single LineString feature")
service = mapbox.MapMatcher(access_token=access_token)
try:
res = service.match(
features[0],
profile=profile,
gps_precision=gps_precision)
except mapbox.errors.ValidationError as exc:
raise click.BadParameter(str(exc))
if res.status_code == 200:
stdout = click.open_file('-', 'w')
click.echo(res.text, file=stdout)
else:
raise MapboxCLIException(res.text.strip()) |
def wait_for_at_least_one_message(self, channel):
"""
Reads until we receive at least one message we can unpack. Return all found messages.
"""
unpacker = msgpack.Unpacker(encoding='utf-8')
while True:
try:
start = time.time()
chunk = self.ssh_channel[channel].recv(1024)
end = time.time()
self.read_speeds.append( len(chunk) / (end-start) )
if len(self.read_speeds) > 20:
self.read_speeds = self.read_speeds[10:]
if chunk == b'':
# happens only when connection broke. If nothing is to be received, it hangs instead.
self.connection_error(channel, 'Connection broken w')
return False
except Exception as error:
self.connection_error(channel, error)
raise
unpacker.feed(chunk)
messages = [m for m in unpacker]
if messages:
return messages | Reads until we receive at least one message we can unpack. Return all found messages. | Below is the the instruction that describes the task:
### Input:
Reads until we receive at least one message we can unpack. Return all found messages.
### Response:
def wait_for_at_least_one_message(self, channel):
"""
Reads until we receive at least one message we can unpack. Return all found messages.
"""
unpacker = msgpack.Unpacker(encoding='utf-8')
while True:
try:
start = time.time()
chunk = self.ssh_channel[channel].recv(1024)
end = time.time()
self.read_speeds.append( len(chunk) / (end-start) )
if len(self.read_speeds) > 20:
self.read_speeds = self.read_speeds[10:]
if chunk == b'':
# happens only when connection broke. If nothing is to be received, it hangs instead.
self.connection_error(channel, 'Connection broken w')
return False
except Exception as error:
self.connection_error(channel, error)
raise
unpacker.feed(chunk)
messages = [m for m in unpacker]
if messages:
return messages |
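An offline sketch of the msgpack feed-and-drain pattern used above; note that encoding='utf-8' is deprecated in current msgpack, and raw=False is assumed here as the modern equivalent.
# Offline sketch of the msgpack Unpacker feed/drain pattern used above.
import msgpack

payload = msgpack.packb({'op': 'ping'}) + msgpack.packb([1, 2, 3])
unpacker = msgpack.Unpacker(raw=False)   # raw=False replaces encoding='utf-8'

messages = []
for i in range(0, len(payload), 4):      # pretend the bytes arrive in small chunks
    unpacker.feed(payload[i:i + 4])
    messages.extend(m for m in unpacker) # drain whatever is complete so far
print(messages)  # [{'op': 'ping'}, [1, 2, 3]]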
def locked_get(self):
"""Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
"""
credentials = None
content = keyring.get_password(self._service_name, self._user_name)
if content is not None:
try:
credentials = client.Credentials.new_from_json(content)
credentials.set_store(self)
except ValueError:
pass
return credentials | Retrieve Credential from file.
Returns:
oauth2client.client.Credentials | Below is the instruction that describes the task:
### Input:
Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
### Response:
def locked_get(self):
"""Retrieve Credential from file.
Returns:
oauth2client.client.Credentials
"""
credentials = None
content = keyring.get_password(self._service_name, self._user_name)
if content is not None:
try:
credentials = client.Credentials.new_from_json(content)
credentials.set_store(self)
except ValueError:
pass
return credentials |
def generate(env):
"""Add Builders and construction variables for m4 to an Environment."""
M4Action = SCons.Action.Action('$M4COM', '$M4COMSTR')
bld = SCons.Builder.Builder(action = M4Action, src_suffix = '.m4')
env['BUILDERS']['M4'] = bld
# .m4 files might include other files, and it would be pretty hard
# to write a scanner for it, so let's just cd to the dir of the m4
# file and run from there.
# The src_suffix setup is like so: file.c.m4 -> file.c,
# file.cpp.m4 -> file.cpp etc.
env['M4'] = 'm4'
env['M4FLAGS'] = SCons.Util.CLVar('-E')
env['M4COM'] = 'cd ${SOURCE.rsrcdir} && $M4 $M4FLAGS < ${SOURCE.file} > ${TARGET.abspath}' | Add Builders and construction variables for m4 to an Environment. | Below is the instruction that describes the task:
### Input:
Add Builders and construction variables for m4 to an Environment.
### Response:
def generate(env):
"""Add Builders and construction variables for m4 to an Environment."""
M4Action = SCons.Action.Action('$M4COM', '$M4COMSTR')
bld = SCons.Builder.Builder(action = M4Action, src_suffix = '.m4')
env['BUILDERS']['M4'] = bld
# .m4 files might include other files, and it would be pretty hard
# to write a scanner for it, so let's just cd to the dir of the m4
# file and run from there.
# The src_suffix setup is like so: file.c.m4 -> file.c,
# file.cpp.m4 -> file.cpp etc.
env['M4'] = 'm4'
env['M4FLAGS'] = SCons.Util.CLVar('-E')
env['M4COM'] = 'cd ${SOURCE.rsrcdir} && $M4 $M4FLAGS < ${SOURCE.file} > ${TARGET.abspath}' |
def getModPath(self, *paths):
'''
Construct a path relative to this module's working directory.
Args:
*paths: A list of path strings
Notes:
This creates the module specific directory if it does not exist.
Returns:
(str): The full path (or None if no cortex dir is configured).
'''
dirn = self.getModDir()
return s_common.genpath(dirn, *paths) | Construct a path relative to this module's working directory.
Args:
*paths: A list of path strings
Notes:
This creates the module specific directory if it does not exist.
Returns:
(str): The full path (or None if no cortex dir is configured). | Below is the instruction that describes the task:
### Input:
Construct a path relative to this module's working directory.
Args:
*paths: A list of path strings
Notes:
This creates the module specific directory if it does not exist.
Returns:
(str): The full path (or None if no cortex dir is configured).
### Response:
def getModPath(self, *paths):
'''
Construct a path relative to this module's working directory.
Args:
*paths: A list of path strings
Notes:
This creates the module specific directory if it does not exist.
Returns:
(str): The full path (or None if no cortex dir is configured).
'''
dirn = self.getModDir()
return s_common.genpath(dirn, *paths) |
def enrollment_identity(self, enrollment_identity):
"""
Sets the enrollment_identity of this EnrollmentId.
Enrollment identity.
:param enrollment_identity: The enrollment_identity of this EnrollmentId.
:type: str
"""
if enrollment_identity is None:
raise ValueError("Invalid value for `enrollment_identity`, must not be `None`")
if enrollment_identity is not None and not re.search('^A-[A-Za-z0-9:]{95}$', enrollment_identity):
raise ValueError("Invalid value for `enrollment_identity`, must be a follow pattern or equal to `/^A-[A-Za-z0-9:]{95}$/`")
self._enrollment_identity = enrollment_identity | Sets the enrollment_identity of this EnrollmentId.
Enrollment identity.
:param enrollment_identity: The enrollment_identity of this EnrollmentId.
:type: str | Below is the instruction that describes the task:
### Input:
Sets the enrollment_identity of this EnrollmentId.
Enrollment identity.
:param enrollment_identity: The enrollment_identity of this EnrollmentId.
:type: str
### Response:
def enrollment_identity(self, enrollment_identity):
"""
Sets the enrollment_identity of this EnrollmentId.
Enrollment identity.
:param enrollment_identity: The enrollment_identity of this EnrollmentId.
:type: str
"""
if enrollment_identity is None:
raise ValueError("Invalid value for `enrollment_identity`, must not be `None`")
if enrollment_identity is not None and not re.search('^A-[A-Za-z0-9:]{95}$', enrollment_identity):
raise ValueError("Invalid value for `enrollment_identity`, must be a follow pattern or equal to `/^A-[A-Za-z0-9:]{95}$/`")
self._enrollment_identity = enrollment_identity |
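A quick sanity check of the validation pattern with a synthetic identity; the value is hypothetical, chosen only to satisfy the 95-character rule.
# Quick check of the enrollment-identity pattern with a synthetic value.
import re

pattern = '^A-[A-Za-z0-9:]{95}$'
good = 'A-' + 'a' * 95          # hypothetical identity, 95 allowed characters
bad = 'A-' + 'a' * 94           # one character short
print(bool(re.search(pattern, good)), bool(re.search(pattern, bad)))  # True False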
def _colored_time(self, time_taken, color=None):
"""Get formatted and colored string for a given time taken."""
if self.timer_no_color:
return "{0:0.4f}s".format(time_taken)
return _colorize("{0:0.4f}s".format(time_taken), color) | Get formatted and colored string for a given time taken. | Below is the the instruction that describes the task:
### Input:
Get formatted and colored string for a given time taken.
### Response:
def _colored_time(self, time_taken, color=None):
"""Get formatted and colored string for a given time taken."""
if self.timer_no_color:
return "{0:0.4f}s".format(time_taken)
return _colorize("{0:0.4f}s".format(time_taken), color) |
def log(self, level, msg=None, *args, **kwargs):
"""Writes log out at any arbitray level."""
return self._log(level, msg, args, kwargs) | Writes log out at any arbitray level. | Below is the the instruction that describes the task:
### Input:
Writes log out at any arbitray level.
### Response:
def log(self, level, msg=None, *args, **kwargs):
"""Writes log out at any arbitray level."""
return self._log(level, msg, args, kwargs) |
def parse_estimateReadFiltering(self):
"""Find estimateReadFiltering output. Only the output from --table is supported."""
self.deeptools_estimateReadFiltering = dict()
for f in self.find_log_files('deeptools/estimateReadFiltering'):
parsed_data = self.parseEstimateReadFilteringFile(f)
for k, v in parsed_data.items():
if k in self.deeptools_estimateReadFiltering:
log.warning("Replacing duplicate sample {}.".format(k))
self.deeptools_estimateReadFiltering[k] = v
if len(parsed_data) > 0:
self.add_data_source(f, section='estimateReadFiltering')
if len(self.deeptools_estimateReadFiltering) > 0:
header = OrderedDict()
header["M Entries"] = {
'title': 'M entries',
'description': 'Number of entries in the file (millions)'}
header["pct_Aligned"] = {
'title': '% Aligned',
'description': 'Percent of aligned entries',
'scale': 'YlGn',
'min': 0,
'max': 100
}
header["pct_Filtered"] = {
'title': '% Tot. Filtered',
'description': 'Percent of alignments that would be filtered for any reason.',
'scale': 'OrRd',
'min': 0,
'max': 100
}
header["pct_Blacklisted"] = {
'title': '% Blacklisted',
'description': 'Percent of alignments falling (at least partially) inside a blacklisted region',
'scale': 'YlOrRd',
'min': 0,
'max': 100
}
header["pct_MAPQ"] = {
'title': '% MAPQ', 'description':
'Percent of alignments having MAPQ scores below the specified threshold',
'scale': 'YlOrBn',
'min': 0,
'max': 100
}
header["pct_Missing_Flags"] = {
'title': '% Missing Flags',
'description': 'Percent of alignments lacking at least one flag specified by --samFlagInclude',
'scale': 'PuRd',
'min': 0,
'max': 100
}
header["pct_Forbidden_Flags"] = {
'title': '% Forbidden Flags',
'description': 'Percent of alignments having at least one flag specified by --samFlagExclude',
'scale': 'OrRd',
'min': 0,
'max': 100
}
header["pct_deepTools_Dupes"] = {
'title': '% deepTools Dupes',
'description': 'Percent of alignments marked by deepTools as being duplicates',
'scale': 'PuRd',
'min': 0,
'max': 100
}
header["pct_Duplication"] = {
'title': '% Duplication',
'description': 'Percent of alignments originally marked as being duplicates',
'scale': 'OrRd',
'min': 0,
'max': 100
}
header["pct_Singletons"] = {
'title': '% Singletons',
'description': 'Percent of alignments that are singletons (i.e., paired-end reads where the mates don\'t align as a pair',
'scale': 'OrRd',
'min': 0,
'max': 100
}
header["pct_Strand_Filtered"] = {
'title': '% Strand Filtered',
'description': 'Percent of alignments arising from the wrong strand',
'scale': 'OrRd',
'min': 0,
'max': 100
}
tdata = dict()
for k, v in self.deeptools_estimateReadFiltering.items():
tdata[k] = {
'M Entries': v['total'] / 1000000.0,
'pct_Aligned': 100. * v['mapped'] / float(v['total']),
'pct_Filtered': 100. * v['filtered'] / float(v['total']),
'pct_Blacklisted': 100. * v['blacklisted'] / float(v['total']),
'pct_Below_MAPQ': 100. * v['mapq'] / float(v['total']),
'pct_Missing_Flags': 100. * v['required flags'] / float(v['total']),
'pct_Forbidden_Flags': 100. * v['excluded flags'] / float(v['total']),
'pct_deepTools_Dupes': 100. * v['internal dupes'] / float(v['total']),
'pct_Duplication': 100. * v['dupes'] / float(v['total']),
'pct_Singletons': 100. * v['singletons'] / float(v['total']),
'pct_Strand_Filtered': 100. * v['strand'] / float(v['total'])
}
config = {'namespace': 'deepTools bamPEFragmentSize'}
self.add_section(
name = "Filtering metrics",
anchor = "estimateReadFiltering",
description = "Estimated percentages of alignments filtered independently for each setting in `estimateReadFiltering`",
plot = table.plot(tdata, header, config)
)
return len(self.deeptools_estimateReadFiltering) | Find estimateReadFiltering output. Only the output from --table is supported. | Below is the instruction that describes the task:
### Input:
Find estimateReadFiltering output. Only the output from --table is supported.
### Response:
def parse_estimateReadFiltering(self):
"""Find estimateReadFiltering output. Only the output from --table is supported."""
self.deeptools_estimateReadFiltering = dict()
for f in self.find_log_files('deeptools/estimateReadFiltering'):
parsed_data = self.parseEstimateReadFilteringFile(f)
for k, v in parsed_data.items():
if k in self.deeptools_estimateReadFiltering:
log.warning("Replacing duplicate sample {}.".format(k))
self.deeptools_estimateReadFiltering[k] = v
if len(parsed_data) > 0:
self.add_data_source(f, section='estimateReadFiltering')
if len(self.deeptools_estimateReadFiltering) > 0:
header = OrderedDict()
header["M Entries"] = {
'title': 'M entries',
'description': 'Number of entries in the file (millions)'}
header["pct_Aligned"] = {
'title': '% Aligned',
'description': 'Percent of aligned entries',
'scale': 'YlGn',
'min': 0,
'max': 100
}
header["pct_Filtered"] = {
'title': '% Tot. Filtered',
'description': 'Percent of alignments that would be filtered for any reason.',
'scale': 'OrRd',
'min': 0,
'max': 100
}
header["pct_Blacklisted"] = {
'title': '% Blacklisted',
'description': 'Percent of alignments falling (at least partially) inside a blacklisted region',
'scale': 'YlOrRd',
'min': 0,
'max': 100
}
header["pct_MAPQ"] = {
'title': '% MAPQ', 'description':
'Percent of alignments having MAPQ scores below the specified threshold',
'scale': 'YlOrBn',
'min': 0,
'max': 100
}
header["pct_Missing_Flags"] = {
'title': '% Missing Flags',
'description': 'Percent of alignments lacking at least one flag specified by --samFlagInclude',
'scale': 'PuRd',
'min': 0,
'max': 100
}
header["pct_Forbidden_Flags"] = {
'title': '% Forbidden Flags',
'description': 'Percent of alignments having at least one flag specified by --samFlagExclude',
'scale': 'OrRd',
'min': 0,
'max': 100
}
header["pct_deepTools_Dupes"] = {
'title': '% deepTools Dupes',
'description': 'Percent of alignments marked by deepTools as being duplicates',
'scale': 'PuRd',
'min': 0,
'max': 100
}
header["pct_Duplication"] = {
'title': '% Duplication',
'description': 'Percent of alignments originally marked as being duplicates',
'scale': 'OrRd',
'min': 0,
'max': 100
}
header["pct_Singletons"] = {
'title': '% Singletons',
'description': 'Percent of alignments that are singletons (i.e., paired-end reads where the mates don\'t align as a pair',
'scale': 'OrRd',
'min': 0,
'max': 100
}
header["pct_Strand_Filtered"] = {
'title': '% Strand Filtered',
'description': 'Percent of alignments arising from the wrong strand',
'scale': 'OrRd',
'min': 0,
'max': 100
}
tdata = dict()
for k, v in self.deeptools_estimateReadFiltering.items():
tdata[k] = {
'M Entries': v['total'] / 1000000.0,
'pct_Aligned': 100. * v['mapped'] / float(v['total']),
'pct_Filtered': 100. * v['filtered'] / float(v['total']),
'pct_Blacklisted': 100. * v['blacklisted'] / float(v['total']),
'pct_Below_MAPQ': 100. * v['mapq'] / float(v['total']),
'pct_Missing_Flags': 100. * v['required flags'] / float(v['total']),
'pct_Forbidden_Flags': 100. * v['excluded flags'] / float(v['total']),
'pct_deepTools_Dupes': 100. * v['internal dupes'] / float(v['total']),
'pct_Duplication': 100. * v['dupes'] / float(v['total']),
'pct_Singletons': 100. * v['singletons'] / float(v['total']),
'pct_Strand_Filtered': 100. * v['strand'] / float(v['total'])
}
config = {'namespace': 'deepTools bamPEFragmentSize'}
self.add_section(
name = "Filtering metrics",
anchor = "estimateReadFiltering",
description = "Estimated percentages of alignments filtered independently for each setting in `estimateReadFiltering`",
plot = table.plot(tdata, header, config)
)
return len(self.deeptools_estimateReadFiltering) |
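The percentage table built at the end reduces to simple ratios over the 'total' count; a toy parsed-sample dict with invented counts makes the arithmetic explicit.
# Toy illustration of the percentage columns computed from one parsed sample.
parsed = {'total': 1_000_000, 'mapped': 950_000, 'filtered': 120_000,
          'blacklisted': 10_000, 'mapq': 50_000, 'dupes': 30_000}

row = {'M Entries': parsed['total'] / 1e6}
for label, key in [('pct_Aligned', 'mapped'), ('pct_Filtered', 'filtered'),
                   ('pct_Blacklisted', 'blacklisted'), ('pct_Below_MAPQ', 'mapq'),
                   ('pct_Duplication', 'dupes')]:
    row[label] = 100.0 * parsed[key] / float(parsed['total'])
print(row['pct_Aligned'], row['pct_Filtered'])  # 95.0 12.0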
def fromtabix(filename, reference=None, start=None, stop=None, region=None,
header=None):
"""
Extract rows from a tabix indexed file, e.g.::
>>> import petl as etl
>>> # activate bio extensions
... import petlx.bio
>>> table1 = etl.fromtabix('fixture/test.bed.gz',
... region='Pf3D7_02_v3')
>>> table1
+---------------+----------+----------+-----------------------------+
| #chrom | start | end | region |
+===============+==========+==========+=============================+
| 'Pf3D7_02_v3' | '0' | '23100' | 'SubtelomericRepeat' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '23100' | '105800' | 'SubtelomericHypervariable' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '447300' | '450450' | 'Centromere' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '450450' | '862500' | 'Core' |
+---------------+----------+----------+-----------------------------+
...
>>> table2 = etl.fromtabix('fixture/test.bed.gz',
... region='Pf3D7_02_v3:110000-120000')
>>> table2
+---------------+----------+----------+--------+
| #chrom | start | end | region |
+===============+==========+==========+========+
| 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' |
+---------------+----------+----------+--------+
"""
return TabixView(filename, reference, start, stop, region, header) | Extract rows from a tabix indexed file, e.g.::
>>> import petl as etl
>>> # activate bio extensions
... import petlx.bio
>>> table1 = etl.fromtabix('fixture/test.bed.gz',
... region='Pf3D7_02_v3')
>>> table1
+---------------+----------+----------+-----------------------------+
| #chrom | start | end | region |
+===============+==========+==========+=============================+
| 'Pf3D7_02_v3' | '0' | '23100' | 'SubtelomericRepeat' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '23100' | '105800' | 'SubtelomericHypervariable' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '447300' | '450450' | 'Centromere' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '450450' | '862500' | 'Core' |
+---------------+----------+----------+-----------------------------+
...
>>> table2 = etl.fromtabix('fixture/test.bed.gz',
... region='Pf3D7_02_v3:110000-120000')
>>> table2
+---------------+----------+----------+--------+
| #chrom | start | end | region |
+===============+==========+==========+========+
| 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' |
+---------------+----------+----------+--------+ | Below is the the instruction that describes the task:
### Input:
Extract rows from a tabix indexed file, e.g.::
>>> import petl as etl
>>> # activate bio extensions
... import petlx.bio
>>> table1 = etl.fromtabix('fixture/test.bed.gz',
... region='Pf3D7_02_v3')
>>> table1
+---------------+----------+----------+-----------------------------+
| #chrom | start | end | region |
+===============+==========+==========+=============================+
| 'Pf3D7_02_v3' | '0' | '23100' | 'SubtelomericRepeat' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '23100' | '105800' | 'SubtelomericHypervariable' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '447300' | '450450' | 'Centromere' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '450450' | '862500' | 'Core' |
+---------------+----------+----------+-----------------------------+
...
>>> table2 = etl.fromtabix('fixture/test.bed.gz',
... region='Pf3D7_02_v3:110000-120000')
>>> table2
+---------------+----------+----------+--------+
| #chrom | start | end | region |
+===============+==========+==========+========+
| 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' |
+---------------+----------+----------+--------+
### Response:
def fromtabix(filename, reference=None, start=None, stop=None, region=None,
header=None):
"""
Extract rows from a tabix indexed file, e.g.::
>>> import petl as etl
>>> # activate bio extensions
... import petlx.bio
>>> table1 = etl.fromtabix('fixture/test.bed.gz',
... region='Pf3D7_02_v3')
>>> table1
+---------------+----------+----------+-----------------------------+
| #chrom | start | end | region |
+===============+==========+==========+=============================+
| 'Pf3D7_02_v3' | '0' | '23100' | 'SubtelomericRepeat' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '23100' | '105800' | 'SubtelomericHypervariable' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '447300' | '450450' | 'Centromere' |
+---------------+----------+----------+-----------------------------+
| 'Pf3D7_02_v3' | '450450' | '862500' | 'Core' |
+---------------+----------+----------+-----------------------------+
...
>>> table2 = etl.fromtabix('fixture/test.bed.gz',
... region='Pf3D7_02_v3:110000-120000')
>>> table2
+---------------+----------+----------+--------+
| #chrom | start | end | region |
+===============+==========+==========+========+
| 'Pf3D7_02_v3' | '105800' | '447300' | 'Core' |
+---------------+----------+----------+--------+
"""
return TabixView(filename, reference, start, stop, region, header) |
def is_valid_project(self, path):
"""Check if a directory is a valid Spyder project"""
spy_project_dir = osp.join(path, '.spyproject')
if osp.isdir(path) and osp.isdir(spy_project_dir):
return True
else:
return False | Check if a directory is a valid Spyder project | Below is the the instruction that describes the task:
### Input:
Check if a directory is a valid Spyder project
### Response:
def is_valid_project(self, path):
"""Check if a directory is a valid Spyder project"""
spy_project_dir = osp.join(path, '.spyproject')
if osp.isdir(path) and osp.isdir(spy_project_dir):
return True
else:
return False |
def stop_data_frame_transform(self, transform_id, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-data-frame-transform.html>`_
:arg transform_id: The id of the transform to stop
:arg timeout: Controls the time to wait until the transform has stopped.
Default to 30 seconds
:arg wait_for_completion: Whether to wait for the transform to fully
stop before returning or not. Default to false
"""
if transform_id in SKIP_IN_PATH:
raise ValueError(
"Empty value passed for a required argument 'transform_id'."
)
return self.transport.perform_request(
"POST",
_make_path("_data_frame", "transforms", transform_id, "_stop"),
params=params,
) | `<https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-data-frame-transform.html>`_
:arg transform_id: The id of the transform to stop
:arg timeout: Controls the time to wait until the transform has stopped.
Default to 30 seconds
:arg wait_for_completion: Whether to wait for the transform to fully
stop before returning or not. Default to false | Below is the the instruction that describes the task:
### Input:
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-data-frame-transform.html>`_
:arg transform_id: The id of the transform to stop
:arg timeout: Controls the time to wait until the transform has stopped.
Default to 30 seconds
:arg wait_for_completion: Whether to wait for the transform to fully
stop before returning or not. Default to false
### Response:
def stop_data_frame_transform(self, transform_id, params=None):
"""
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/stop-data-frame-transform.html>`_
:arg transform_id: The id of the transform to stop
:arg timeout: Controls the time to wait until the transform has stopped.
Default to 30 seconds
:arg wait_for_completion: Whether to wait for the transform to fully
stop before returning or not. Default to false
"""
if transform_id in SKIP_IN_PATH:
raise ValueError(
"Empty value passed for a required argument 'transform_id'."
)
return self.transport.perform_request(
"POST",
_make_path("_data_frame", "transforms", transform_id, "_stop"),
params=params,
) |
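A minimal usage sketch for the method above; the `data_frame` namespace attribute, the transform id, and the parameter values are illustrative assumptions, not taken from the source:
from elasticsearch import Elasticsearch
es = Elasticsearch()
es.data_frame.stop_data_frame_transform(
    "ecommerce_transform",
    params={"wait_for_completion": "true", "timeout": "60s"},
)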
def delete_tag(self, tag_name, **kwargs):
"""delete a tag by name
Args:
tag_name (string): name of tag to delete
"""
resp = self._delete(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name),
**kwargs)
resp.raise_for_status()
# successful delete returns 204, which has no associated json
return resp | delete a tag by name
Args:
tag_name (string): name of tag to delete | Below is the the instruction that describes the task:
### Input:
delete a tag by name
Args:
tag_name (string): name of tag to delete
### Response:
def delete_tag(self, tag_name, **kwargs):
"""delete a tag by name
Args:
tag_name (string): name of tag to delete
"""
resp = self._delete(self._u(self._TAG_ENDPOINT_SUFFIX, tag_name),
**kwargs)
resp.raise_for_status()
# successful delete returns 204, which has no associated json
return resp |
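A brief usage sketch against a hypothetical client object exposing delete_tag (the tag name is made up):
resp = client.delete_tag("v1.0-rc1")
assert resp.status_code == 204   # successful delete returns no body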
def _segment_color(self, color_im, bounding_box, bgmodel, cfg, vis_segmentation=False):
""" Re-segments a color image to isolate an object of interest using foreground masking and kmeans """
# read params
foreground_mask_tolerance = cfg['foreground_mask_tolerance']
color_seg_rgb_weight = cfg['color_seg_rgb_weight']
color_seg_num_clusters = cfg['color_seg_num_clusters']
color_seg_hsv_weight = cfg['color_seg_hsv_weight']
color_seg_dist_pctile = cfg['color_seg_dist_pctile']
color_seg_dist_thresh = cfg['color_seg_dist_thresh']
color_seg_min_bg_dist = cfg['color_seg_min_bg_dist']
min_contour_area= cfg['min_contour_area']
contour_dist_thresh = cfg['contour_dist_thresh']
# foreground masking
binary_im = color_im.foreground_mask(foreground_mask_tolerance, bgmodel=bgmodel)
binary_im = binary_im.prune_contours(area_thresh=min_contour_area, dist_thresh=contour_dist_thresh)
if binary_im is None:
return None, None, None
color_im = color_im.mask_binary(binary_im)
# kmeans segmentation
segment_im = color_im.segment_kmeans(color_seg_rgb_weight,
color_seg_num_clusters,
hue_weight=color_seg_hsv_weight)
# keep the segment that is farthest from the background
bg_dists = []
hsv_bgmodel = 255 * np.array(colorsys.rgb_to_hsv(float(bgmodel[0]) / 255,
float(bgmodel[1]) / 255,
float(bgmodel[2]) / 255))
hsv_bgmodel = np.r_[color_seg_rgb_weight * np.array(bgmodel), color_seg_hsv_weight * hsv_bgmodel[:1]]
for k in range(segment_im.num_segments-1):
seg_mask = segment_im.segment_mask(k)
color_im_segment = color_im.mask_binary(seg_mask)
color_im_segment_data = color_im_segment.nonzero_data()
color_im_segment_data = np.c_[color_seg_rgb_weight * color_im_segment_data, color_seg_hsv_weight * color_im_segment.nonzero_hsv_data()[:,:1]]
# take the median distance from the background
bg_dist = np.median(np.linalg.norm(color_im_segment_data - hsv_bgmodel, axis=1))
if vis_segmentation:
logging.info('BG Dist for segment %d: %.4f' %(k, bg_dist))
bg_dists.append(bg_dist)
# sort by distance
dists_and_indices = zip(np.arange(len(bg_dists)), bg_dists)
dists_and_indices.sort(key = lambda x: x[1], reverse=True)
# mask out the segment in the binary image
if color_seg_num_clusters > 1 and abs(dists_and_indices[0][1] - dists_and_indices[1][1]) > color_seg_dist_thresh and dists_and_indices[1][1] < color_seg_min_bg_dist:
obj_segment = dists_and_indices[0][0]
obj_seg_mask = segment_im.segment_mask(obj_segment)
binary_im = binary_im.mask_binary(obj_seg_mask)
binary_im, diff_px = binary_im.center_nonzero()
bounding_box = Box(bounding_box.min_pt.astype(np.float32) - diff_px,
bounding_box.max_pt.astype(np.float32) - diff_px,
bounding_box.frame)
if vis_segmentation:
plt.figure()
plt.subplot(1,3,1)
plt.imshow(color_im.data)
plt.axis('off')
plt.subplot(1,3,2)
plt.imshow(segment_im.data)
plt.colorbar()
plt.axis('off')
plt.subplot(1,3,3)
plt.imshow(binary_im.data, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
return binary_im, segment_im, bounding_box | Re-segments a color image to isolate an object of interest using foreground masking and kmeans | Below is the the instruction that describes the task:
### Input:
Re-segments a color image to isolate an object of interest using foreground masking and kmeans
### Response:
def _segment_color(self, color_im, bounding_box, bgmodel, cfg, vis_segmentation=False):
""" Re-segments a color image to isolate an object of interest using foreground masking and kmeans """
# read params
foreground_mask_tolerance = cfg['foreground_mask_tolerance']
color_seg_rgb_weight = cfg['color_seg_rgb_weight']
color_seg_num_clusters = cfg['color_seg_num_clusters']
color_seg_hsv_weight = cfg['color_seg_hsv_weight']
color_seg_dist_pctile = cfg['color_seg_dist_pctile']
color_seg_dist_thresh = cfg['color_seg_dist_thresh']
color_seg_min_bg_dist = cfg['color_seg_min_bg_dist']
min_contour_area= cfg['min_contour_area']
contour_dist_thresh = cfg['contour_dist_thresh']
# foreground masking
binary_im = color_im.foreground_mask(foreground_mask_tolerance, bgmodel=bgmodel)
binary_im = binary_im.prune_contours(area_thresh=min_contour_area, dist_thresh=contour_dist_thresh)
if binary_im is None:
return None, None, None
color_im = color_im.mask_binary(binary_im)
# kmeans segmentation
segment_im = color_im.segment_kmeans(color_seg_rgb_weight,
color_seg_num_clusters,
hue_weight=color_seg_hsv_weight)
# keep the segment that is farthest from the background
bg_dists = []
hsv_bgmodel = 255 * np.array(colorsys.rgb_to_hsv(float(bgmodel[0]) / 255,
float(bgmodel[1]) / 255,
float(bgmodel[2]) / 255))
hsv_bgmodel = np.r_[color_seg_rgb_weight * np.array(bgmodel), color_seg_hsv_weight * hsv_bgmodel[:1]]
for k in range(segment_im.num_segments-1):
seg_mask = segment_im.segment_mask(k)
color_im_segment = color_im.mask_binary(seg_mask)
color_im_segment_data = color_im_segment.nonzero_data()
color_im_segment_data = np.c_[color_seg_rgb_weight * color_im_segment_data, color_seg_hsv_weight * color_im_segment.nonzero_hsv_data()[:,:1]]
# take the median distance from the background
bg_dist = np.median(np.linalg.norm(color_im_segment_data - hsv_bgmodel, axis=1))
if vis_segmentation:
logging.info('BG Dist for segment %d: %.4f' %(k, bg_dist))
bg_dists.append(bg_dist)
# sort by distance
dists_and_indices = zip(np.arange(len(bg_dists)), bg_dists)
dists_and_indices.sort(key = lambda x: x[1], reverse=True)
# mask out the segment in the binary image
if color_seg_num_clusters > 1 and abs(dists_and_indices[0][1] - dists_and_indices[1][1]) > color_seg_dist_thresh and dists_and_indices[1][1] < color_seg_min_bg_dist:
obj_segment = dists_and_indices[0][0]
obj_seg_mask = segment_im.segment_mask(obj_segment)
binary_im = binary_im.mask_binary(obj_seg_mask)
binary_im, diff_px = binary_im.center_nonzero()
bounding_box = Box(bounding_box.min_pt.astype(np.float32) - diff_px,
bounding_box.max_pt.astype(np.float32) - diff_px,
bounding_box.frame)
if vis_segmentation:
plt.figure()
plt.subplot(1,3,1)
plt.imshow(color_im.data)
plt.axis('off')
plt.subplot(1,3,2)
plt.imshow(segment_im.data)
plt.colorbar()
plt.axis('off')
plt.subplot(1,3,3)
plt.imshow(binary_im.data, cmap=plt.cm.gray)
plt.axis('off')
plt.show()
return binary_im, segment_im, bounding_box |
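The cfg argument must supply every key read at the top of _segment_color; a sketch with placeholder values (the numbers are illustrative assumptions, not tuned settings):
cfg = {
    'foreground_mask_tolerance': 40,
    'color_seg_rgb_weight': 0.5,
    'color_seg_num_clusters': 3,
    'color_seg_hsv_weight': 0.5,
    'color_seg_dist_pctile': 50,
    'color_seg_dist_thresh': 10.0,
    'color_seg_min_bg_dist': 50.0,
    'min_contour_area': 100.0,
    'contour_dist_thresh': 10.0,
}
binary_im, segment_im, bbox = detector._segment_color(color_im, bbox, bgmodel, cfg)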
def quitter():
"""Overide the psiTurk quitter route."""
exp = experiment(session)
exp.log("Quitter route was hit.")
return Response(
dumps({"status": "success"}),
status=200,
mimetype='application/json') | Override the psiTurk quitter route. | Below is the the instruction that describes the task:
### Input:
Override the psiTurk quitter route.
### Response:
def quitter():
"""Overide the psiTurk quitter route."""
exp = experiment(session)
exp.log("Quitter route was hit.")
return Response(
dumps({"status": "success"}),
status=200,
mimetype='application/json') |
def _load_json_file_or_url(self, json_path_or_url):
'''Return the JSON at the local path or URL as a dict
This method raises DataPackageRegistryException if there were any
errors.
'''
try:
if os.path.isfile(json_path_or_url):
with open(json_path_or_url, 'r') as f:
result = json.load(f)
else:
res = requests.get(json_path_or_url)
res.raise_for_status()
result = res.json()
return result
except (ValueError,
requests.exceptions.RequestException) as e:
six.raise_from(DataPackageRegistryException(e), e) | Return the JSON at the local path or URL as a dict
This method raises DataPackageRegistryException if there were any
errors. | Below is the the instruction that describes the task:
### Input:
Return the JSON at the local path or URL as a dict
This method raises DataPackageRegistryException if there were any
errors.
### Response:
def _load_json_file_or_url(self, json_path_or_url):
'''Return the JSON at the local path or URL as a dict
This method raises DataPackageRegistryException if there were any
errors.
'''
try:
if os.path.isfile(json_path_or_url):
with open(json_path_or_url, 'r') as f:
result = json.load(f)
else:
res = requests.get(json_path_or_url)
res.raise_for_status()
result = res.json()
return result
except (ValueError,
requests.exceptions.RequestException) as e:
six.raise_from(DataPackageRegistryException(e), e) |
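A short usage sketch; the registry instance and the path/URL are hypothetical:
local_profile = registry._load_json_file_or_url('schemas/data-package.json')
remote_profile = registry._load_json_file_or_url('https://example.com/registry.json')
# each call returns a dict, or raises DataPackageRegistryException on failure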
def set_edge_weight(self, edge, wt):
"""
Set the weight of an edge.
@type edge: edge
@param edge: One edge.
@type wt: number
@param wt: Edge weight.
"""
self.set_edge_properties(edge, weight=wt )
if not self.DIRECTED:
self.set_edge_properties((edge[1], edge[0]) , weight=wt ) | Set the weight of an edge.
@type edge: edge
@param edge: One edge.
@type wt: number
@param wt: Edge weight. | Below is the the instruction that describes the task:
### Input:
Set the weight of an edge.
@type edge: edge
@param edge: One edge.
@type wt: number
@param wt: Edge weight.
### Response:
def set_edge_weight(self, edge, wt):
"""
Set the weight of an edge.
@type edge: edge
@param edge: One edge.
@type wt: number
@param wt: Edge weight.
"""
self.set_edge_properties(edge, weight=wt )
if not self.DIRECTED:
self.set_edge_properties((edge[1], edge[0]) , weight=wt ) |
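A usage sketch against a hypothetical pygraph-style undirected graph built on this method:
g = graph()                        # hypothetical undirected graph class
g.add_nodes(['a', 'b'])
g.add_edge(('a', 'b'))
g.set_edge_weight(('a', 'b'), 3)   # DIRECTED is False, so ('b', 'a') is updated as well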
def get_file_info_web(self, fname, delim='<BR>\n'):
"""
gathers info on a python program in list and formats as string
"""
txt = ''
f = mod_file.File(fname[0])
txt += '<sup>' + f.name + '</sup>' + delim
txt += '<sup>' + fname[1] + '</sup>' + delim
txt += '<sub><sup><span white-space:nowrap;>' + f.GetDateAsString(f.date_modified)[2:10] + '</span></sup></sub>' + delim
txt += '<sup><sup>' + str(f.size) + '</sup></sup>' + delim
return txt + '\n' | gathers info on a python program in list and formats as string | Below is the the instruction that describes the task:
### Input:
gathers info on a python program in list and formats as string
### Response:
def get_file_info_web(self, fname, delim='<BR>\n'):
"""
gathers info on a python program in list and formats as string
"""
txt = ''
f = mod_file.File(fname[0])
txt += '<sup>' + f.name + '</sup>' + delim
txt += '<sup>' + fname[1] + '</sup>' + delim
txt += '<sub><sup><span white-space:nowrap;>' + f.GetDateAsString(f.date_modified)[2:10] + '</span></sup></sub>' + delim
txt += '<sup><sup>' + str(f.size) + '</sup></sup>' + delim
return txt + '\n' |
def from_dict(cls: typing.Type[T], dikt) -> T:
"""Returns the dict as a model"""
return util.deserialize_model(dikt, cls) | Returns the dict as a model | Below is the the instruction that describes the task:
### Input:
Returns the dict as a model
### Response:
def from_dict(cls: typing.Type[T], dikt) -> T:
"""Returns the dict as a model"""
return util.deserialize_model(dikt, cls) |
def set_unit(self, unit):
"""Set the GPS step scale
"""
# accept all core time units
if unit is None or (isinstance(unit, units.NamedUnit) and
unit.physical_type == 'time'):
self._unit = unit
return
# convert float to custom unit in seconds
if isinstance(unit, Number):
unit = units.Unit(unit * units.second)
# otherwise, should be able to convert to a time unit
try:
unit = units.Unit(unit)
except ValueError as exc:
# catch annoying plurals
try:
unit = units.Unit(str(unit).rstrip('s'))
except ValueError:
raise exc
# decompose and check that it's actually a time unit
dec = unit.decompose()
if dec.bases != [units.second]:
raise ValueError("Cannot set GPS unit to %s" % unit)
# check equivalent units
for other in TIME_UNITS:
if other.decompose().scale == dec.scale:
self._unit = other
return
raise ValueError("Unrecognised unit: %s" % unit) | Set the GPS step scale | Below is the the instruction that describes the task:
### Input:
Set the GPS step scale
### Response:
def set_unit(self, unit):
"""Set the GPS step scale
"""
# accept all core time units
if unit is None or (isinstance(unit, units.NamedUnit) and
unit.physical_type == 'time'):
self._unit = unit
return
# convert float to custom unit in seconds
if isinstance(unit, Number):
unit = units.Unit(unit * units.second)
# otherwise, should be able to convert to a time unit
try:
unit = units.Unit(unit)
except ValueError as exc:
# catch annoying plurals
try:
unit = units.Unit(str(unit).rstrip('s'))
except ValueError:
raise exc
# decompose and check that it's actually a time unit
dec = unit.decompose()
if dec.bases != [units.second]:
raise ValueError("Cannot set GPS unit to %s" % unit)
# check equivalent units
for other in TIME_UNITS:
if other.decompose().scale == dec.scale:
self._unit = other
return
raise ValueError("Unrecognised unit: %s" % unit) |
def industry_code(self):
"""
[str] National economic industry classification code; see the "Industry" list for details (stocks only)
"""
try:
return self.__dict__["industry_code"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'industry_code' ".format(self.order_book_id)
) | [str] National economic industry classification code; see the "Industry" list for details (stocks only) | Below is the the instruction that describes the task:
### Input:
[str] National economic industry classification code; see the "Industry" list for details (stocks only)
### Response:
def industry_code(self):
"""
[str] National economic industry classification code; see the "Industry" list for details (stocks only)
"""
try:
return self.__dict__["industry_code"]
except (KeyError, ValueError):
raise AttributeError(
"Instrument(order_book_id={}) has no attribute 'industry_code' ".format(self.order_book_id)
) |
def set_camera(self,
angles=None,
distance=None,
center=None,
resolution=None,
fov=None):
"""
Create a camera object for self.camera, and add
a transform to self.graph for it.
If arguments are not passed sane defaults will be figured
out which show the mesh roughly centered.
Parameters
-----------
angles : (3,) float
Initial euler angles in radians
distance : float
Distance from centroid
center : (3,) float
Point camera should be center on
camera : Camera object
Object that stores camera parameters
"""
if fov is None:
fov = np.array([60, 45])
# if no geometry nothing to set camera to
if len(self.geometry) == 0:
return
# set with no rotation by default
if angles is None:
angles = np.zeros(3)
rotation = transformations.euler_matrix(*angles)
transform = cameras.look_at(self.bounds_corners,
fov=fov,
rotation=rotation,
distance=distance,
center=center)
if hasattr(self, '_camera') and self._camera is not None:
self.camera.fov = fov
self.camera._scene = self
self.camera.transform = transform
else:
# create a new camera object
self.camera = cameras.Camera(fov=fov,
scene=self,
transform=transform)
return self.camera | Create a camera object for self.camera, and add
a transform to self.graph for it.
If arguments are not passed sane defaults will be figured
out which show the mesh roughly centered.
Parameters
-----------
angles : (3,) float
Initial euler angles in radians
distance : float
Distance from centroid
center : (3,) float
Point camera should be center on
camera : Camera object
Object that stores camera parameters | Below is the the instruction that describes the task:
### Input:
Create a camera object for self.camera, and add
a transform to self.graph for it.
If arguments are not passed sane defaults will be figured
out which show the mesh roughly centered.
Parameters
-----------
angles : (3,) float
Initial euler angles in radians
distance : float
Distance from centroid
center : (3,) float
Point camera should be center on
camera : Camera object
Object that stores camera parameters
### Response:
def set_camera(self,
angles=None,
distance=None,
center=None,
resolution=None,
fov=None):
"""
Create a camera object for self.camera, and add
a transform to self.graph for it.
If arguments are not passed sane defaults will be figured
out which show the mesh roughly centered.
Parameters
-----------
angles : (3,) float
Initial euler angles in radians
distance : float
Distance from centroid
center : (3,) float
Point camera should be center on
camera : Camera object
Object that stores camera parameters
"""
if fov is None:
fov = np.array([60, 45])
# if no geometry nothing to set camera to
if len(self.geometry) == 0:
return
# set with no rotation by default
if angles is None:
angles = np.zeros(3)
rotation = transformations.euler_matrix(*angles)
transform = cameras.look_at(self.bounds_corners,
fov=fov,
rotation=rotation,
distance=distance,
center=center)
if hasattr(self, '_camera') and self._camera is not None:
self.camera.fov = fov
self.camera._scene = self
self.camera.transform = transform
else:
# create a new camera object
self.camera = cameras.Camera(fov=fov,
scene=self,
transform=transform)
return self.camera |
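A usage sketch assuming the surrounding class is a trimesh-style Scene:
import numpy as np
import trimesh
scene = trimesh.Scene(trimesh.creation.box())
scene.set_camera(angles=(0.0, 0.0, np.radians(45.0)),
                 distance=4.0,
                 center=(0.0, 0.0, 0.0))
print(scene.camera.transform)   # 4x4 homogeneous camera pose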
def get_algorithm(alg: str) -> Callable:
"""
:param alg: The name of the requested `JSON Web Algorithm <https://tools.ietf.org/html/rfc7519#ref-JWA>`_. `RFC7518 <https://tools.ietf.org/html/rfc7518#section-3.2>`_ is related.
:type alg: str
:return: The requested algorithm.
:rtype: Callable
:raises: ValueError
"""
if alg not in algorithms:
raise ValueError('Invalid algorithm: {:s}'.format(alg))
return algorithms[alg] | :param alg: The name of the requested `JSON Web Algorithm <https://tools.ietf.org/html/rfc7519#ref-JWA>`_. `RFC7518 <https://tools.ietf.org/html/rfc7518#section-3.2>`_ is related.
:type alg: str
:return: The requested algorithm.
:rtype: Callable
:raises: ValueError | Below is the the instruction that describes the task:
### Input:
:param alg: The name of the requested `JSON Web Algorithm <https://tools.ietf.org/html/rfc7519#ref-JWA>`_. `RFC7518 <https://tools.ietf.org/html/rfc7518#section-3.2>`_ is related.
:type alg: str
:return: The requested algorithm.
:rtype: Callable
:raises: ValueError
### Response:
def get_algorithm(alg: str) -> Callable:
"""
:param alg: The name of the requested `JSON Web Algorithm <https://tools.ietf.org/html/rfc7519#ref-JWA>`_. `RFC7518 <https://tools.ietf.org/html/rfc7518#section-3.2>`_ is related.
:type alg: str
:return: The requested algorithm.
:rtype: Callable
:raises: ValueError
"""
if alg not in algorithms:
raise ValueError('Invalid algorithm: {:s}'.format(alg))
return algorithms[alg] |
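A small sketch of the lookup; the algorithm names are assumptions about what the `algorithms` registry holds:
sign = get_algorithm('HS256')   # returns the registered callable
try:
    get_algorithm('XYZ123')
except ValueError as err:
    print(err)                  # Invalid algorithm: XYZ123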
def beta_covariance(self):
"""
Estimates the covariance-matrix of the optimal beta.
Returns
-------
beta-covariance : ndarray
(Xᵀ(s((1-𝛿)K + 𝛿I))⁻¹X)⁻¹.
References
----------
.. Rencher, A. C., & Schaalje, G. B. (2008). Linear models in statistics. John
Wiley & Sons.
"""
from numpy_sugar.linalg import ddot
tX = self._X["tX"]
Q = concatenate(self._QS[0], axis=1)
S0 = self._QS[1]
D = self.v0 * S0 + self.v1
D = D.tolist() + [self.v1] * (len(self._y) - len(D))
D = asarray(D)
A = inv(tX.T @ (Q @ ddot(1 / D, Q.T @ tX)))
VT = self._X["VT"]
H = lstsq(VT, A, rcond=None)[0]
return lstsq(VT, H.T, rcond=None)[0] | Estimates the covariance-matrix of the optimal beta.
Returns
-------
beta-covariance : ndarray
(Xᵀ(s((1-𝛿)K + 𝛿I))⁻¹X)⁻¹.
References
----------
.. Rencher, A. C., & Schaalje, G. B. (2008). Linear models in statistics. John
Wiley & Sons. | Below is the the instruction that describes the task:
### Input:
Estimates the covariance-matrix of the optimal beta.
Returns
-------
beta-covariance : ndarray
(Xᵀ(s((1-𝛿)K + 𝛿I))⁻¹X)⁻¹.
References
----------
.. Rencher, A. C., & Schaalje, G. B. (2008). Linear models in statistics. John
Wiley & Sons.
### Response:
def beta_covariance(self):
"""
Estimates the covariance-matrix of the optimal beta.
Returns
-------
beta-covariance : ndarray
(Xᵀ(s((1-𝛿)K + 𝛿I))⁻¹X)⁻¹.
References
----------
.. Rencher, A. C., & Schaalje, G. B. (2008). Linear models in statistics. John
Wiley & Sons.
"""
from numpy_sugar.linalg import ddot
tX = self._X["tX"]
Q = concatenate(self._QS[0], axis=1)
S0 = self._QS[1]
D = self.v0 * S0 + self.v1
D = D.tolist() + [self.v1] * (len(self._y) - len(D))
D = asarray(D)
A = inv(tX.T @ (Q @ ddot(1 / D, Q.T @ tX)))
VT = self._X["VT"]
H = lstsq(VT, A, rcond=None)[0]
return lstsq(VT, H.T, rcond=None)[0] |
def prior_prior_model_dict(self):
"""
Returns
-------
prior_prior_model_dict: {Prior: PriorModel}
A dictionary mapping priors to associated prior models. Each prior will only have one prior model; if a
prior is shared by two prior models then one of those prior models will be in this dictionary.
"""
return {prior: prior_model[1] for prior_model in self.prior_model_tuples for _, prior in
prior_model[1].prior_tuples} | Returns
-------
prior_prior_model_dict: {Prior: PriorModel}
A dictionary mapping priors to associated prior models. Each prior will only have one prior model; if a
prior is shared by two prior models then one of those prior models will be in this dictionary. | Below is the the instruction that describes the task:
### Input:
Returns
-------
prior_prior_model_dict: {Prior: PriorModel}
A dictionary mapping priors to associated prior models. Each prior will only have one prior model; if a
prior is shared by two prior models then one of those prior models will be in this dictionary.
### Response:
def prior_prior_model_dict(self):
"""
Returns
-------
prior_prior_model_dict: {Prior: PriorModel}
A dictionary mapping priors to associated prior models. Each prior will only have one prior model; if a
prior is shared by two prior models then one of those prior models will be in this dictionary.
"""
return {prior: prior_model[1] for prior_model in self.prior_model_tuples for _, prior in
prior_model[1].prior_tuples} |
def _initial_guess(self):
"""
Quadratic fit to get an initial guess for the parameters.
Returns:
tuple: (e0, b0, b1, v0)
"""
a, b, c = np.polyfit(self.volumes, self.energies, 2)
self.eos_params = [a, b, c]
v0 = -b/(2*a)
e0 = a*(v0**2) + b*v0 + c
b0 = 2 * a * v0
b1 = 4 # b1 is usually a small number like 4
vmin, vmax = min(self.volumes), max(self.volumes)
if not (vmin < v0 and v0 < vmax):
raise EOSError('The minimum volume of a fitted parabola is '
'not in the input volumes\n.')
return e0, b0, b1, v0 | Quadratic fit to get an initial guess for the parameters.
Returns:
tuple: (e0, b0, b1, v0) | Below is the the instruction that describes the task:
### Input:
Quadratic fit to get an initial guess for the parameters.
Returns:
tuple: (e0, b0, b1, v0)
### Response:
def _initial_guess(self):
"""
Quadratic fit to get an initial guess for the parameters.
Returns:
tuple: (e0, b0, b1, v0)
"""
a, b, c = np.polyfit(self.volumes, self.energies, 2)
self.eos_params = [a, b, c]
v0 = -b/(2*a)
e0 = a*(v0**2) + b*v0 + c
b0 = 2 * a * v0
b1 = 4 # b1 is usually a small number like 4
vmin, vmax = min(self.volumes), max(self.volumes)
if not (vmin < v0 and v0 < vmax):
raise EOSError('The minimum volume of a fitted parabola is '
'not in the input volumes\n.')
return e0, b0, b1, v0 |
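The guess is simply the vertex of the fitted parabola; the same arithmetic on synthetic data (standalone sketch, not from the source):
import numpy as np
volumes = np.array([36.0, 38.0, 40.0, 42.0, 44.0])
energies = 0.01 * (volumes - 40.0) ** 2 - 10.0   # synthetic parabolic E(V)
a, b, c = np.polyfit(volumes, energies, 2)
v0 = -b / (2 * a)               # 40.0, the parabola minimum
e0 = a * v0 ** 2 + b * v0 + c   # -10.0, energy at the minimum
b0 = 2 * a * v0                 # V0 * d2E/dV2, the bulk-modulus guess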
def handle_one_command(self):
"""Process a single command. May have many lines."""
while True:
yield from self.write_prompt()
codeobj = yield from self.read_command()
if codeobj is not None:
yield from self.run_command(codeobj) | Process a single command. May have many lines. | Below is the the instruction that describes the task:
### Input:
Process a single command. May have many lines.
### Response:
def handle_one_command(self):
"""Process a single command. May have many lines."""
while True:
yield from self.write_prompt()
codeobj = yield from self.read_command()
if codeobj is not None:
yield from self.run_command(codeobj) |
def venn2(subsets, set_labels=('A', 'B'), set_colors=('r', 'g'), alpha=0.4, normalize_to=1.0, ax=None, subset_label_formatter=None):
'''Plots a 2-set area-weighted Venn diagram.
The subsets parameter can be one of the following:
- A list (or a tuple) containing two set objects.
- A dict, providing sizes of three diagram regions.
The regions are identified via two-letter binary codes ('10', '01', and '11'), hence a valid set could look like:
{'10': 10, '01': 20, '11': 40}. Unmentioned codes are considered to map to 0.
- A list (or a tuple) with three numbers, denoting the sizes of the regions in the following order:
(10, 01, 11)
``set_labels`` parameter is a list of two strings - set labels. Set it to None to disable set labels.
The ``set_colors`` parameter should be a list of two elements, specifying the "base colors" of the two circles.
The color of circle intersection will be computed based on those.
The ``normalize_to`` parameter specifies the total (on-axes) area of the circles to be drawn. Sometimes tuning it (together
with the overall figure size) may be useful to fit the text labels better.
The return value is a ``VennDiagram`` object, that keeps references to the ``Text`` and ``Patch`` objects used on the plot
and lets you know the centers and radii of the circles, if you need it.
The ``ax`` parameter specifies the axes on which the plot will be drawn (None means current axes).
The ``subset_label_formatter`` parameter is a function that can be passed to format the labels
that describe the size of each subset.
>>> from matplotlib_venn import *
>>> v = venn2(subsets={'10': 1, '01': 1, '11': 1}, set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=(1, 1, 1), linestyle='dashed')
>>> v.get_patch_by_id('10').set_alpha(1.0)
>>> v.get_patch_by_id('10').set_color('white')
>>> v.get_label_by_id('10').set_text('Unknown')
>>> v.get_label_by_id('A').set_text('Set A')
You can provide sets themselves rather than subset sizes:
>>> v = venn2(subsets=[set([1,2]), set([2,3,4,5])], set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=[set([1,2]), set([2,3,4,5])], linestyle='dashed')
>>> print("%0.2f" % (v.get_circle_radius(1)/v.get_circle_radius(0)))
1.41
'''
if isinstance(subsets, dict):
subsets = [subsets.get(t, 0) for t in ['10', '01', '11']]
elif len(subsets) == 2:
subsets = compute_venn2_subsets(*subsets)
if subset_label_formatter is None:
subset_label_formatter = str
areas = compute_venn2_areas(subsets, normalize_to)
centers, radii = solve_venn2_circles(areas)
regions = compute_venn2_regions(centers, radii)
colors = compute_venn2_colors(set_colors)
if ax is None:
ax = gca()
prepare_venn_axes(ax, centers, radii)
# Create and add patches and subset labels
patches = [r.make_patch() for r in regions]
for (p, c) in zip(patches, colors):
if p is not None:
p.set_facecolor(c)
p.set_edgecolor('none')
p.set_alpha(alpha)
ax.add_patch(p)
label_positions = [r.label_position() for r in regions]
subset_labels = [ax.text(lbl[0], lbl[1], subset_label_formatter(s), va='center', ha='center') if lbl is not None else None for (lbl, s) in zip(label_positions, subsets)]
# Position set labels
if set_labels is not None:
padding = np.mean([r * 0.1 for r in radii])
label_positions = [centers[0] + np.array([0.0, - radii[0] - padding]),
centers[1] + np.array([0.0, - radii[1] - padding])]
labels = [ax.text(pos[0], pos[1], txt, size='large', ha='right', va='top') for (pos, txt) in zip(label_positions, set_labels)]
labels[1].set_ha('left')
else:
labels = None
return VennDiagram(patches, subset_labels, labels, centers, radii) | Plots a 2-set area-weighted Venn diagram.
The subsets parameter can be one of the following:
- A list (or a tuple) containing two set objects.
- A dict, providing sizes of three diagram regions.
The regions are identified via two-letter binary codes ('10', '01', and '11'), hence a valid set could look like:
{'10': 10, '01': 20, '11': 40}. Unmentioned codes are considered to map to 0.
- A list (or a tuple) with three numbers, denoting the sizes of the regions in the following order:
(10, 01, 11)
``set_labels`` parameter is a list of two strings - set labels. Set it to None to disable set labels.
The ``set_colors`` parameter should be a list of two elements, specifying the "base colors" of the two circles.
The color of circle intersection will be computed based on those.
The ``normalize_to`` parameter specifies the total (on-axes) area of the circles to be drawn. Sometimes tuning it (together
with the overall figure size) may be useful to fit the text labels better.
The return value is a ``VennDiagram`` object, that keeps references to the ``Text`` and ``Patch`` objects used on the plot
and lets you know the centers and radii of the circles, if you need it.
The ``ax`` parameter specifies the axes on which the plot will be drawn (None means current axes).
The ``subset_label_formatter`` parameter is a function that can be passed to format the labels
that describe the size of each subset.
>>> from matplotlib_venn import *
>>> v = venn2(subsets={'10': 1, '01': 1, '11': 1}, set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=(1, 1, 1), linestyle='dashed')
>>> v.get_patch_by_id('10').set_alpha(1.0)
>>> v.get_patch_by_id('10').set_color('white')
>>> v.get_label_by_id('10').set_text('Unknown')
>>> v.get_label_by_id('A').set_text('Set A')
You can provide sets themselves rather than subset sizes:
>>> v = venn2(subsets=[set([1,2]), set([2,3,4,5])], set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=[set([1,2]), set([2,3,4,5])], linestyle='dashed')
>>> print("%0.2f" % (v.get_circle_radius(1)/v.get_circle_radius(0)))
1.41 | Below is the the instruction that describes the task:
### Input:
Plots a 2-set area-weighted Venn diagram.
The subsets parameter can be one of the following:
- A list (or a tuple) containing two set objects.
- A dict, providing sizes of three diagram regions.
The regions are identified via two-letter binary codes ('10', '01', and '11'), hence a valid set could look like:
{'10': 10, '01': 20, '11': 40}. Unmentioned codes are considered to map to 0.
- A list (or a tuple) with three numbers, denoting the sizes of the regions in the following order:
(10, 01, 11)
``set_labels`` parameter is a list of two strings - set labels. Set it to None to disable set labels.
The ``set_colors`` parameter should be a list of two elements, specifying the "base colors" of the two circles.
The color of circle intersection will be computed based on those.
The ``normalize_to`` parameter specifies the total (on-axes) area of the circles to be drawn. Sometimes tuning it (together
with the overall figure size) may be useful to fit the text labels better.
The return value is a ``VennDiagram`` object, that keeps references to the ``Text`` and ``Patch`` objects used on the plot
and lets you know the centers and radii of the circles, if you need it.
The ``ax`` parameter specifies the axes on which the plot will be drawn (None means current axes).
The ``subset_label_formatter`` parameter is a function that can be passed to format the labels
that describe the size of each subset.
>>> from matplotlib_venn import *
>>> v = venn2(subsets={'10': 1, '01': 1, '11': 1}, set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=(1, 1, 1), linestyle='dashed')
>>> v.get_patch_by_id('10').set_alpha(1.0)
>>> v.get_patch_by_id('10').set_color('white')
>>> v.get_label_by_id('10').set_text('Unknown')
>>> v.get_label_by_id('A').set_text('Set A')
You can provide sets themselves rather than subset sizes:
>>> v = venn2(subsets=[set([1,2]), set([2,3,4,5])], set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=[set([1,2]), set([2,3,4,5])], linestyle='dashed')
>>> print("%0.2f" % (v.get_circle_radius(1)/v.get_circle_radius(0)))
1.41
### Response:
def venn2(subsets, set_labels=('A', 'B'), set_colors=('r', 'g'), alpha=0.4, normalize_to=1.0, ax=None, subset_label_formatter=None):
'''Plots a 2-set area-weighted Venn diagram.
The subsets parameter can be one of the following:
- A list (or a tuple) containing two set objects.
- A dict, providing sizes of three diagram regions.
The regions are identified via two-letter binary codes ('10', '01', and '11'), hence a valid set could look like:
{'10': 10, '01': 20, '11': 40}. Unmentioned codes are considered to map to 0.
- A list (or a tuple) with three numbers, denoting the sizes of the regions in the following order:
(10, 01, 11)
``set_labels`` parameter is a list of two strings - set labels. Set it to None to disable set labels.
The ``set_colors`` parameter should be a list of two elements, specifying the "base colors" of the two circles.
The color of circle intersection will be computed based on those.
The ``normalize_to`` parameter specifies the total (on-axes) area of the circles to be drawn. Sometimes tuning it (together
with the overall figure size) may be useful to fit the text labels better.
The return value is a ``VennDiagram`` object, that keeps references to the ``Text`` and ``Patch`` objects used on the plot
and lets you know the centers and radii of the circles, if you need it.
The ``ax`` parameter specifies the axes on which the plot will be drawn (None means current axes).
The ``subset_label_formatter`` parameter is a function that can be passed to format the labels
that describe the size of each subset.
>>> from matplotlib_venn import *
>>> v = venn2(subsets={'10': 1, '01': 1, '11': 1}, set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=(1, 1, 1), linestyle='dashed')
>>> v.get_patch_by_id('10').set_alpha(1.0)
>>> v.get_patch_by_id('10').set_color('white')
>>> v.get_label_by_id('10').set_text('Unknown')
>>> v.get_label_by_id('A').set_text('Set A')
You can provide sets themselves rather than subset sizes:
>>> v = venn2(subsets=[set([1,2]), set([2,3,4,5])], set_labels = ('A', 'B'))
>>> c = venn2_circles(subsets=[set([1,2]), set([2,3,4,5])], linestyle='dashed')
>>> print("%0.2f" % (v.get_circle_radius(1)/v.get_circle_radius(0)))
1.41
'''
if isinstance(subsets, dict):
subsets = [subsets.get(t, 0) for t in ['10', '01', '11']]
elif len(subsets) == 2:
subsets = compute_venn2_subsets(*subsets)
if subset_label_formatter is None:
subset_label_formatter = str
areas = compute_venn2_areas(subsets, normalize_to)
centers, radii = solve_venn2_circles(areas)
regions = compute_venn2_regions(centers, radii)
colors = compute_venn2_colors(set_colors)
if ax is None:
ax = gca()
prepare_venn_axes(ax, centers, radii)
# Create and add patches and subset labels
patches = [r.make_patch() for r in regions]
for (p, c) in zip(patches, colors):
if p is not None:
p.set_facecolor(c)
p.set_edgecolor('none')
p.set_alpha(alpha)
ax.add_patch(p)
label_positions = [r.label_position() for r in regions]
subset_labels = [ax.text(lbl[0], lbl[1], subset_label_formatter(s), va='center', ha='center') if lbl is not None else None for (lbl, s) in zip(label_positions, subsets)]
# Position set labels
if set_labels is not None:
padding = np.mean([r * 0.1 for r in radii])
label_positions = [centers[0] + np.array([0.0, - radii[0] - padding]),
centers[1] + np.array([0.0, - radii[1] - padding])]
labels = [ax.text(pos[0], pos[1], txt, size='large', ha='right', va='top') for (pos, txt) in zip(label_positions, set_labels)]
labels[1].set_ha('left')
else:
labels = None
return VennDiagram(patches, subset_labels, labels, centers, radii) |
def trotterize(first_pauli_term, second_pauli_term, trotter_order=1,
trotter_steps=1):
"""
Create a Quil program that approximates exp( (A + B)t) where A and B are
PauliTerm operators.
:param PauliTerm first_pauli_term: PauliTerm denoted `A`
:param PauliTerm second_pauli_term: PauliTerm denoted `B`
:param int trotter_order: Optional argument indicating the Suzuki-Trotter
approximation order--only accepts orders 1, 2, 3, 4.
:param int trotter_steps: Optional argument indicating the number of products
to decompose the exponential into.
:return: Quil program
:rtype: Program
"""
if not (1 <= trotter_order < 5):
raise ValueError("trotterize only accepts trotter_order in {1, 2, 3, 4}.")
commutator = (first_pauli_term * second_pauli_term) + \
(-1 * second_pauli_term * first_pauli_term)
prog = Program()
if is_zero(commutator):
param_exp_prog_one = exponential_map(first_pauli_term)
exp_prog = param_exp_prog_one(1)
prog += exp_prog
param_exp_prog_two = exponential_map(second_pauli_term)
exp_prog = param_exp_prog_two(1)
prog += exp_prog
return prog
order_slices = suzuki_trotter(trotter_order, trotter_steps)
for coeff, operator in order_slices:
if operator == 0:
param_prog = exponential_map(coeff * first_pauli_term)
exp_prog = param_prog(1)
prog += exp_prog
else:
param_prog = exponential_map(coeff * second_pauli_term)
exp_prog = param_prog(1)
prog += exp_prog
return prog | Create a Quil program that approximates exp( (A + B)t) where A and B are
PauliTerm operators.
:param PauliTerm first_pauli_term: PauliTerm denoted `A`
:param PauliTerm second_pauli_term: PauliTerm denoted `B`
:param int trotter_order: Optional argument indicating the Suzuki-Trotter
approximation order--only accepts orders 1, 2, 3, 4.
:param int trotter_steps: Optional argument indicating the number of products
to decompose the exponential into.
:return: Quil program
:rtype: Program | Below is the the instruction that describes the task:
### Input:
Create a Quil program that approximates exp( (A + B)t) where A and B are
PauliTerm operators.
:param PauliTerm first_pauli_term: PauliTerm denoted `A`
:param PauliTerm second_pauli_term: PauliTerm denoted `B`
:param int trotter_order: Optional argument indicating the Suzuki-Trotter
approximation order--only accepts orders 1, 2, 3, 4.
:param int trotter_steps: Optional argument indicating the number of products
to decompose the exponential into.
:return: Quil program
:rtype: Program
### Response:
def trotterize(first_pauli_term, second_pauli_term, trotter_order=1,
trotter_steps=1):
"""
Create a Quil program that approximates exp( (A + B)t) where A and B are
PauliTerm operators.
:param PauliTerm first_pauli_term: PauliTerm denoted `A`
:param PauliTerm second_pauli_term: PauliTerm denoted `B`
:param int trotter_order: Optional argument indicating the Suzuki-Trotter
approximation order--only accepts orders 1, 2, 3, 4.
:param int trotter_steps: Optional argument indicating the number of products
to decompose the exponential into.
:return: Quil program
:rtype: Program
"""
if not (1 <= trotter_order < 5):
raise ValueError("trotterize only accepts trotter_order in {1, 2, 3, 4}.")
commutator = (first_pauli_term * second_pauli_term) + \
(-1 * second_pauli_term * first_pauli_term)
prog = Program()
if is_zero(commutator):
param_exp_prog_one = exponential_map(first_pauli_term)
exp_prog = param_exp_prog_one(1)
prog += exp_prog
param_exp_prog_two = exponential_map(second_pauli_term)
exp_prog = param_exp_prog_two(1)
prog += exp_prog
return prog
order_slices = suzuki_trotter(trotter_order, trotter_steps)
for coeff, operator in order_slices:
if operator == 0:
param_prog = exponential_map(coeff * first_pauli_term)
exp_prog = param_prog(1)
prog += exp_prog
else:
param_prog = exponential_map(coeff * second_pauli_term)
exp_prog = param_prog(1)
prog += exp_prog
return prog |
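A usage sketch with pyquil-style Pauli operators; the specific terms and coefficients are made up:
from pyquil.paulis import PauliTerm
A = PauliTerm('X', 0, 0.5)   # 0.5 * X_0
B = PauliTerm('Z', 0, 0.5)   # 0.5 * Z_0, does not commute with A
prog = trotterize(A, B, trotter_order=2, trotter_steps=2)
print(prog)                  # Quil program approximating exp((A + B)t) as described above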
def dump_stats(self, fdump, close=True):
"""
Dump the logged data to a file.
The argument `fdump` can be either a filename or an open file object
that requires write access. `close` controls if the file is closed
before leaving this method (the default behaviour).
"""
if self.tracker:
self.tracker.stop_periodic_snapshots()
if isinstance(fdump, type('')):
fdump = open(fdump, 'wb')
pickle.dump(self.index, fdump, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(self.snapshots, fdump, protocol=pickle.HIGHEST_PROTOCOL)
if close:
fdump.close() | Dump the logged data to a file.
The argument `fdump` can be either a filename or an open file object
that requires write access. `close` controls if the file is closed
before leaving this method (the default behaviour). | Below is the the instruction that describes the task:
### Input:
Dump the logged data to a file.
The argument `fdump` can be either a filename or an open file object
that requires write access. `close` controls if the file is closed
before leaving this method (the default behaviour).
### Response:
def dump_stats(self, fdump, close=True):
"""
Dump the logged data to a file.
The argument `fdump` can be either a filename or an open file object
that requires write access. `close` controls if the file is closed
before leaving this method (the default behaviour).
"""
if self.tracker:
self.tracker.stop_periodic_snapshots()
if isinstance(fdump, type('')):
fdump = open(fdump, 'wb')
pickle.dump(self.index, fdump, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(self.snapshots, fdump, protocol=pickle.HIGHEST_PROTOCOL)
if close:
fdump.close() |
def average(self, axis):
"""Returns d-1 dimensional histogram of (estimated) mean value of axis
NB this is very different from averaging over the axis!!!
"""
axis = self.get_axis_number(axis)
avg_hist = np.ma.average(self.all_axis_bin_centers(axis),
weights=self.histogram, axis=axis)
if self.dimensions == 2:
new_hist = Hist1d
else:
new_hist = Histdd
return new_hist.from_histogram(histogram=avg_hist,
bin_edges=itemgetter(*self.other_axes(axis))(self.bin_edges),
axis_names=self.axis_names_without(axis)) | Returns d-1 dimensional histogram of (estimated) mean value of axis
NB this is very different from averaging over the axis!!! | Below is the the instruction that describes the task:
### Input:
Returns d-1 dimensional histogram of (estimated) mean value of axis
NB this is very different from averaging over the axis!!!
### Response:
def average(self, axis):
"""Returns d-1 dimensional histogram of (estimated) mean value of axis
NB this is very different from averaging over the axis!!!
"""
axis = self.get_axis_number(axis)
avg_hist = np.ma.average(self.all_axis_bin_centers(axis),
weights=self.histogram, axis=axis)
if self.dimensions == 2:
new_hist = Hist1d
else:
new_hist = Histdd
return new_hist.from_histogram(histogram=avg_hist,
bin_edges=itemgetter(*self.other_axes(axis))(self.bin_edges),
axis_names=self.axis_names_without(axis)) |
def ints(self, qlist):
"""
Converts a sequence of chimera_index node labels into
linear_index node labels, preserving order
Parameters
----------
qlist : sequence of ints
The chimera_index node labels
Returns
-------
rlist : iterable of tuples
The linear_index node labels corresponding to qlist
"""
m, n, t = self.args
return (((n*i + j)*2 + u)*t + k for (i, j, u, k) in qlist) | Converts a sequence of chimera_index node labels into
linear_index node labels, preserving order
Parameters
----------
qlist : sequence of ints
The chimera_index node labels
Returns
-------
rlist : iterable of tuples
The linear_index node labels corresponding to qlist | Below is the the instruction that describes the task:
### Input:
Converts a sequence of chimera_index node labels into
linear_index node labels, preserving order
Parameters
----------
qlist : sequence of ints
The chimera_index node labels
Returns
-------
rlist : iterable of tuples
The linear_index node labels corresponding to qlist
### Response:
def ints(self, qlist):
"""
Converts a sequence of chimera_index node labels into
linear_index node labels, preserving order
Parameters
----------
qlist : sequence of ints
The chimera_index node labels
Returns
-------
rlist : iterable of tuples
The linear_index node labels corresponding to qlist
"""
m, n, t = self.args
return (((n*i + j)*2 + u)*t + k for (i, j, u, k) in qlist) |
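A worked instance of the formula with illustrative values: for m, n, t = 4, 4, 4 the chimera label (i, j, u, k) = (1, 2, 0, 3) maps to ((4*1 + 2)*2 + 0)*4 + 3 = 51, e.g. (assuming a dwave_networkx-style chimera_coordinates constructor):
coords = chimera_coordinates(4, 4, 4)
list(coords.ints([(1, 2, 0, 3)]))   # [51]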
def describe_token_expr(expr):
"""Like `describe_token` but for token expressions."""
if ':' in expr:
type, value = expr.split(':', 1)
if type == 'name':
return value
else:
type = expr
return _describe_token_type(type) | Like `describe_token` but for token expressions. | Below is the the instruction that describes the task:
### Input:
Like `describe_token` but for token expressions.
### Response:
def describe_token_expr(expr):
"""Like `describe_token` but for token expressions."""
if ':' in expr:
type, value = expr.split(':', 1)
if type == 'name':
return value
else:
type = expr
return _describe_token_type(type) |
def Plane(center=(0, 0, 0), direction=(0, 0, 1), i_size=1, j_size=1,
i_resolution=10, j_resolution=10):
"""
Create a plane
Parameters
----------
center : list or np.ndarray
Location of the centroid in [x, y, z]
direction : list or np.ndarray
Direction the plane points to in [x, y, z]
i_size : float
Size of the plane in the i direction.
j_size : float
Size of the plane in the j direction.
i_resolution : int
Number of points on the plane in the i direction.
j_resolution : int
Number of points on the plane in the j direction.
Returns
-------
plane : vtki.PolyData
Plane mesh
"""
planeSource = vtk.vtkPlaneSource()
planeSource.SetXResolution(i_resolution)
planeSource.SetYResolution(j_resolution)
planeSource.Update()
surf = PolyData(planeSource.GetOutput())
surf.points[:, 0] *= i_size
surf.points[:, 1] *= j_size
surf.rotate_y(-90)
translate(surf, center, direction)
return surf | Create a plane
Parameters
----------
center : list or np.ndarray
Location of the centroid in [x, y, z]
direction : list or np.ndarray
Direction the plane points to in [x, y, z]
i_size : float
Size of the plane in the i direction.
j_size : float
Size of the plane in the j direction.
i_resolution : int
Number of points on the plane in the i direction.
j_resolution : int
Number of points on the plane in the j direction.
Returns
-------
plane : vtki.PolyData
Plane mesh | Below is the the instruction that describes the task:
### Input:
Create a plane
Parameters
----------
center : list or np.ndarray
Location of the centroid in [x, y, z]
direction : list or np.ndarray
Direction the plane points to in [x, y, z]
i_size : float
Size of the plane in the i direction.
j_size : float
Size of the plane in the j direction.
i_resolution : int
Number of points on the plane in the i direction.
j_resolution : int
Number of points on the plane in the j direction.
Returns
-------
plane : vtki.PolyData
Plane mesh
### Response:
def Plane(center=(0, 0, 0), direction=(0, 0, 1), i_size=1, j_size=1,
i_resolution=10, j_resolution=10):
"""
Create a plane
Parameters
----------
center : list or np.ndarray
Location of the centroid in [x, y, z]
direction : list or np.ndarray
Direction the plane points to in [x, y, z]
i_size : float
Size of the plane in the i direction.
j_size : float
Size of the plane in the j direction.
i_resolution : int
Number of points on the plane in the i direction.
j_resolution : int
Number of points on the plane in the j direction.
Returns
-------
plane : vtki.PolyData
Plane mesh
"""
planeSource = vtk.vtkPlaneSource()
planeSource.SetXResolution(i_resolution)
planeSource.SetYResolution(j_resolution)
planeSource.Update()
surf = PolyData(planeSource.GetOutput())
surf.points[:, 0] *= i_size
surf.points[:, 1] *= j_size
surf.rotate_y(-90)
translate(surf, center, direction)
return surf |
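A usage sketch, assuming Plane is exposed at the package level as in vtki/pyvista releases of that era:
import vtki
plane = vtki.Plane(center=(0, 0, 1), direction=(0, 0, 1),
                   i_size=2, j_size=2,
                   i_resolution=20, j_resolution=20)
plane.plot(show_edges=True)   # renders a 21 x 21 point PolyData grid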
def p_inputunit(p):
'''inputunit : simple_list simple_list_terminator
| NEWLINE
| error NEWLINE
| EOF'''
# XXX
if p.lexer._parserstate & flags.parser.CMDSUBST:
p.lexer._parserstate.add(flags.parser.EOFTOKEN)
if isinstance(p[1], ast.node):
p[0] = p[1]
# accept right here in case the input contains more lines that are
# not part of the current command
p.accept() | inputunit : simple_list simple_list_terminator
| NEWLINE
| error NEWLINE
| EOF | Below is the the instruction that describes the task:
### Input:
inputunit : simple_list simple_list_terminator
| NEWLINE
| error NEWLINE
| EOF
### Response:
def p_inputunit(p):
'''inputunit : simple_list simple_list_terminator
| NEWLINE
| error NEWLINE
| EOF'''
# XXX
if p.lexer._parserstate & flags.parser.CMDSUBST:
p.lexer._parserstate.add(flags.parser.EOFTOKEN)
if isinstance(p[1], ast.node):
p[0] = p[1]
# accept right here in case the input contains more lines that are
# not part of the current command
p.accept() |
def generateCoincMatrix(nCoinc=10, length=500, activity=50):
"""
Generate a coincidence matrix. This is used to generate random inputs to the
temporal learner and to compare the predicted output against.
It generates a matrix of nCoinc rows, each row has length 'length' and has
a total of 'activity' bits on.
Parameters:
-----------------------------------------------
nCoinc: the number of rows to generate
length: the length of each row
activity: the number of ones to put into each row.
"""
coincMatrix0 = SM32(int(nCoinc), int(length))
theOnes = numpy.array([1.0] * activity, dtype=numpy.float32)
for rowIdx in xrange(nCoinc):
coinc = numpy.array(random.sample(xrange(length),
activity), dtype=numpy.uint32)
coinc.sort()
coincMatrix0.setRowFromSparse(rowIdx, coinc, theOnes)
# This is the right code to use, it's faster, but it derails the unit
# testing of the pooling for now.
coincMatrix = SM32(int(nCoinc), int(length))
coincMatrix.initializeWithFixedNNZR(activity)
return coincMatrix0 | Generate a coincidence matrix. This is used to generate random inputs to the
temporal learner and to compare the predicted output against.
It generates a matrix of nCoinc rows, each row has length 'length' and has
a total of 'activity' bits on.
Parameters:
-----------------------------------------------
nCoinc: the number of rows to generate
length: the length of each row
activity: the number of ones to put into each row. | Below is the the instruction that describes the task:
### Input:
Generate a coincidence matrix. This is used to generate random inputs to the
temporal learner and to compare the predicted output against.
It generates a matrix of nCoinc rows, each row has length 'length' and has
a total of 'activity' bits on.
Parameters:
-----------------------------------------------
nCoinc: the number of rows to generate
length: the length of each row
activity: the number of ones to put into each row.
### Response:
def generateCoincMatrix(nCoinc=10, length=500, activity=50):
"""
Generate a coincidence matrix. This is used to generate random inputs to the
temporal learner and to compare the predicted output against.
It generates a matrix of nCoinc rows, each row has length 'length' and has
a total of 'activity' bits on.
Parameters:
-----------------------------------------------
nCoinc: the number of rows to generate
length: the length of each row
activity: the number of ones to put into each row.
"""
coincMatrix0 = SM32(int(nCoinc), int(length))
theOnes = numpy.array([1.0] * activity, dtype=numpy.float32)
for rowIdx in xrange(nCoinc):
coinc = numpy.array(random.sample(xrange(length),
activity), dtype=numpy.uint32)
coinc.sort()
coincMatrix0.setRowFromSparse(rowIdx, coinc, theOnes)
# This is the right code to use, it's faster, but it derails the unit
# testing of the pooling for now.
coincMatrix = SM32(int(nCoinc), int(length))
coincMatrix.initializeWithFixedNNZR(activity)
return coincMatrix0 |
def max_pool(input_layer, kernel, stride, edges=PAD_SAME, name=PROVIDED):
"""Performs max pooling.
`kernel` is the patch that will be pooled and it describes the pooling along
each of the 4 dimensions. `stride` is how big to take each step.
Because more often than not, pooling is only done
on the width and height of the image, the following shorthands are supported:
* scalar (e.g. 3): Square pooling on the image
(`[b, c, r, d] = [1, 3, 3, 1]`).
* singleton list (e.g. [3]): Square pooling on the image
(`[b, c, r, d] = [1, 3, 3, 1]`).
* list of length 2 (e.g. [3, 2]): Square pooling on the image
(`[b, c, r, d] = [1, 3, 2, 1]`).
Args:
input_layer: The chainable object, supplied.
kernel: The size of the patch for the pool, either an int or a length 1 or
2 sequence (if length 1 or int, it is expanded).
stride: The strides as a length 1, 2 or 4 sequence or an integer. If an
int, length 1 or 2, the stride in the first and last dimensions are 1.
edges: Either `pt.PAD_SAME` or `pt.PAD_VALID` to control the padding.
name: The name for this operation is also used to create/find the
parameter variables.
Returns:
Handle to this layer.
"""
return _pool(input_layer, tf.nn.max_pool, kernel, stride, edges, name) | Performs max pooling.
`kernel` is the patch that will be pooled and it describes the pooling along
each of the 4 dimensions. `stride` is how big to take each step.
Because more often than not, pooling is only done
on the width and height of the image, the following shorthands are supported:
* scalar (e.g. 3): Square pooling on the image
(`[b, c, r, d] = [1, 3, 3, 1]`).
* singleton list (e.g. [3]): Square pooling on the image
(`[b, c, r, d] = [1, 3, 3, 1]`).
* list of length 2 (e.g. [3, 2]): Square pooling on the image
(`[b, c, r, d] = [1, 3, 2, 1]`).
Args:
input_layer: The chainable object, supplied.
kernel: The size of the patch for the pool, either an int or a length 1 or
2 sequence (if length 1 or int, it is expanded).
stride: The strides as a length 1, 2 or 4 sequence or an integer. If an
int, length 1 or 2, the stride in the first and last dimensions are 1.
edges: Either `pt.PAD_SAME` or `pt.PAD_VALID` to control the padding.
name: The name for this operation is also used to create/find the
parameter variables.
Returns:
    Handle to this layer. | Below is the instruction that describes the task:
### Input:
Performs max pooling.
`kernel` is the patch that will be pooled and it describes the pooling along
each of the 4 dimensions. `stride` is how big to take each step.
Because more often than not, pooling is only done
on the width and height of the image, the following shorthands are supported:
* scalar (e.g. 3): Square pooling on the image
(`[b, c, r, d] = [1, 3, 3, 1]`).
* singleton list (e.g. [3]): Square pooling on the image
(`[b, c, r, d] = [1, 3, 3, 1]`).
* list of length 2 (e.g. [3, 2]): Square pooling on the image
(`[b, c, r, d] = [1, 3, 2, 1]`).
Args:
input_layer: The chainable object, supplied.
kernel: The size of the patch for the pool, either an int or a length 1 or
2 sequence (if length 1 or int, it is expanded).
stride: The strides as a length 1, 2 or 4 sequence or an integer. If an
int, length 1 or 2, the stride in the first and last dimensions are 1.
edges: Either `pt.PAD_SAME` or `pt.PAD_VALID` to control the padding.
name: The name for this operation is also used to create/find the
parameter variables.
Returns:
Handle to this layer.
### Response:
def max_pool(input_layer, kernel, stride, edges=PAD_SAME, name=PROVIDED):
"""Performs max pooling.
`kernel` is the patch that will be pooled and it describes the pooling along
each of the 4 dimensions. `stride` is how big to take each step.
Because more often than not, pooling is only done
on the width and height of the image, the following shorthands are supported:
* scalar (e.g. 3): Square pooling on the image
(`[b, c, r, d] = [1, 3, 3, 1]`).
* singleton list (e.g. [3]): Square pooling on the image
(`[b, c, r, d] = [1, 3, 3, 1]`).
* list of length 2 (e.g. [3, 2]): Square pooling on the image
(`[b, c, r, d] = [1, 3, 2, 1]`).
Args:
input_layer: The chainable object, supplied.
kernel: The size of the patch for the pool, either an int or a length 1 or
2 sequence (if length 1 or int, it is expanded).
stride: The strides as a length 1, 2 or 4 sequence or an integer. If an
int, length 1 or 2, the stride in the first and last dimensions are 1.
edges: Either `pt.PAD_SAME` or `pt.PAD_VALID` to control the padding.
name: The name for this operation is also used to create/find the
parameter variables.
Returns:
Handle to this layer.
"""
return _pool(input_layer, tf.nn.max_pool, kernel, stride, edges, name) |
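The shorthand expansion described in the docstring happens inside the library's private _pool helper, which is not shown here. The sketch below is only a guess at how such an expansion could work, not prettytensor's actual implementation.

def expand_pool_shape(value):
    # Expand the documented shorthands into a full [batch, height, width, depth] list.
    if isinstance(value, int):
        value = [value]
    if len(value) == 1:
        return [1, value[0], value[0], 1]
    if len(value) == 2:
        return [1, value[0], value[1], 1]
    if len(value) == 4:
        return list(value)
    raise ValueError('Expected an int or a sequence of length 1, 2 or 4.')

assert expand_pool_shape(3) == [1, 3, 3, 1]
assert expand_pool_shape([3, 2]) == [1, 3, 2, 1]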
def get_context(self, string):
""" Assuming the cursor is at the end of the specified string, get the
context (a list of names) for the symbol at cursor position.
"""
context = []
reversed_tokens = list(self._lexer.get_tokens(string))
reversed_tokens.reverse()
# Pygments often tacks on a newline when none is specified in the input.
# Remove this newline.
if reversed_tokens and reversed_tokens[0][1].endswith('\n') and \
not string.endswith('\n'):
reversed_tokens.pop(0)
current_op = ''
for token, text in reversed_tokens:
if is_token_subtype(token, Token.Name):
# Handle a trailing separator, e.g 'foo.bar.'
if current_op in self._name_separators:
if not context:
context.insert(0, '')
                # Handle non-separator operators and punctuation.
elif current_op:
break
context.insert(0, text)
current_op = ''
# Pygments doesn't understand that, e.g., '->' is a single operator
# in C++. This is why we have to build up an operator from
# potentially several tokens.
elif token is Token.Operator or token is Token.Punctuation:
current_op = text + current_op
            # Break on anything that is not an Operator, Punctuation, or Name.
else:
break
return context | Assuming the cursor is at the end of the specified string, get the
        context (a list of names) for the symbol at cursor position. | Below is the instruction that describes the task:
### Input:
Assuming the cursor is at the end of the specified string, get the
context (a list of names) for the symbol at cursor position.
### Response:
def get_context(self, string):
""" Assuming the cursor is at the end of the specified string, get the
context (a list of names) for the symbol at cursor position.
"""
context = []
reversed_tokens = list(self._lexer.get_tokens(string))
reversed_tokens.reverse()
# Pygments often tacks on a newline when none is specified in the input.
# Remove this newline.
if reversed_tokens and reversed_tokens[0][1].endswith('\n') and \
not string.endswith('\n'):
reversed_tokens.pop(0)
current_op = ''
for token, text in reversed_tokens:
if is_token_subtype(token, Token.Name):
# Handle a trailing separator, e.g 'foo.bar.'
if current_op in self._name_separators:
if not context:
context.insert(0, '')
                # Handle non-separator operators and punctuation.
elif current_op:
break
context.insert(0, text)
current_op = ''
# Pygments doesn't understand that, e.g., '->' is a single operator
# in C++. This is why we have to build up an operator from
# potentially several tokens.
elif token is Token.Operator or token is Token.Punctuation:
current_op = text + current_op
            # Break on anything that is not an Operator, Punctuation, or Name.
else:
break
return context |
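A hedged usage sketch: get_context() appears to belong to a completion-lexer wrapper built around a Pygments lexer. The wrapper class name and its constructor below are assumptions; only the method itself is shown above.

from pygments.lexers import PythonLexer

completion_lexer = CompletionLexer(PythonLexer())   # hypothetical wrapper owning get_context()
print(completion_lexer.get_context('foo.bar.ba'))   # expected: ['foo', 'bar', 'ba']
print(completion_lexer.get_context('obj.method('))  # expected: [] (the '(' is not a name separator)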
def anopheles(self):
"""
:rtype: Anopheles
"""
list_of_anopheles = []
desc = self.et.find("description")
if desc is not None:
for anopheles in desc.findall("anopheles"):
list_of_anopheles.append(Anopheles(anopheles))
        return list_of_anopheles | :rtype: Anopheles | Below is the instruction that describes the task:
### Input:
:rtype: Anopheles
### Response:
def anopheles(self):
"""
:rtype: Anopheles
"""
list_of_anopheles = []
desc = self.et.find("description")
if desc is not None:
for anopheles in desc.findall("anopheles"):
list_of_anopheles.append(Anopheles(anopheles))
return list_of_anopheles |
def create_parser(self):
"""Create the CLI parser."""
parser = argparse.ArgumentParser(
description=PROGRAM_DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent(PROGRAM_EPILOG))
parser.add_argument(
'filename',
metavar='FILE_NAME',
nargs='*',
help='the I/O file name')
subparsers = parser.add_subparsers(
dest='parser', title='markdown parser')
subparsers.required = True
# github + cmark + gitlab + commonmarker.
github = subparsers.add_parser(
'github',
aliases=['cmark', 'gitlab', 'commonmarker'],
description='Use Commonmark rules to generate an output. If no \
option is selected, the default output will be an \
unordered list with the respective default values \
as listed below')
megroup = github.add_mutually_exclusive_group()
megroup.add_argument(
'-u',
'--unordered-list-marker',
choices=md_parser['github']['list']['unordered']['bullet_markers'],
nargs='?',
const=md_parser['github']['list']['unordered']['default_marker'],
help='set the marker and enables unordered list. Defaults to ' +
md_parser['github']['list']['unordered']['default_marker'])
megroup.add_argument(
'-o',
'--ordered-list-marker',
choices=md_parser['github']['list']['ordered']['closing_markers'],
nargs='?',
const=md_parser['github']['list']['ordered']
['default_closing_marker'],
help='set the marker and enables ordered lists. Defaults to ' +
md_parser['github']['list']['ordered']['default_closing_marker'])
github.add_argument(
'-l',
'--header-levels',
choices=[
str(i)
for i in range(1, md_parser['github']['header']['max_levels'] +
1)
],
nargs='?',
const=str(md_parser['github']['header']['default_keep_levels']),
help='set the maximum level of headers to be considered as part \
of the TOC. Defaults to ' + str(
md_parser['github']['header']['default_keep_levels']))
github.set_defaults(
header_levels=md_parser['github']['header']['default_keep_levels'])
# Redcarpet.
redcarpet = subparsers.add_parser(
'redcarpet',
description='Use Redcarpet rules to generate an output. If no \
option is selected, the default output will be an \
unordered list with the respective default values \
as listed below. Gitlab rules are the same as \
Redcarpet except that conflicts are avoided with \
duplicate headers.')
megroup = redcarpet.add_mutually_exclusive_group()
megroup.add_argument(
'-u',
'--unordered-list-marker',
choices=md_parser['redcarpet']['list']['unordered']
['bullet_markers'],
nargs='?',
const=md_parser['redcarpet']['list']['unordered']
['default_marker'],
help='set the marker and enables unordered list. Defaults to ' +
md_parser['redcarpet']['list']['unordered']['default_marker'])
megroup.add_argument(
'-o',
'--ordered-list-marker',
choices=md_parser['redcarpet']['list']['ordered']
['closing_markers'],
nargs='?',
const=md_parser['redcarpet']['list']['ordered']
['default_closing_marker'],
help='set the marker and enables ordered lists. Defaults to ' +
md_parser['redcarpet']['list']['ordered']['default_closing_marker']
)
redcarpet.add_argument(
'-l',
'--header-levels',
choices=[
str(i) for i in range(
1, md_parser['redcarpet']['header']['max_levels'] + 1)
],
nargs='?',
const=str(md_parser['redcarpet']['header']['default_keep_levels']),
help='set the maximum level of headers to be considered as part \
of the TOC. Defaults to ' + str(
md_parser['redcarpet']['header']['default_keep_levels']))
redcarpet.set_defaults(header_levels=md_parser['redcarpet']['header']
['default_keep_levels'])
c_or_i = parser.add_mutually_exclusive_group()
c_or_i.add_argument(
'-c',
'--no-list-coherence',
action='store_true',
help='avoids checking for TOC list coherence')
c_or_i.add_argument(
'-i',
'--no-indentation',
action='store_true',
help='avoids adding indentations to the TOC')
parser.add_argument(
'-l',
'--no-links',
action='store_true',
help='avoids adding links to the TOC')
parser.add_argument(
'-m',
'--toc-marker',
metavar='TOC_MARKER',
help='set the string to be used as the marker for positioning the \
table of contents. Defaults to ' +
common_defaults['toc_marker'])
parser.add_argument(
'-p',
'--in-place',
action='store_true',
help='overwrite the input file')
parser.add_argument(
'-v',
'--version',
action='version',
version=VERSION_NAME + ' ' + VERSION_NUMBER)
parser.set_defaults(toc_marker=common_defaults['toc_marker'])
parser.set_defaults(func=CliToApi().write_toc)
        return parser | Create the CLI parser. | Below is the instruction that describes the task:
### Input:
Create the CLI parser.
### Response:
def create_parser(self):
"""Create the CLI parser."""
parser = argparse.ArgumentParser(
description=PROGRAM_DESCRIPTION,
formatter_class=argparse.RawDescriptionHelpFormatter,
epilog=textwrap.dedent(PROGRAM_EPILOG))
parser.add_argument(
'filename',
metavar='FILE_NAME',
nargs='*',
help='the I/O file name')
subparsers = parser.add_subparsers(
dest='parser', title='markdown parser')
subparsers.required = True
# github + cmark + gitlab + commonmarker.
github = subparsers.add_parser(
'github',
aliases=['cmark', 'gitlab', 'commonmarker'],
description='Use Commonmark rules to generate an output. If no \
option is selected, the default output will be an \
unordered list with the respective default values \
as listed below')
megroup = github.add_mutually_exclusive_group()
megroup.add_argument(
'-u',
'--unordered-list-marker',
choices=md_parser['github']['list']['unordered']['bullet_markers'],
nargs='?',
const=md_parser['github']['list']['unordered']['default_marker'],
help='set the marker and enables unordered list. Defaults to ' +
md_parser['github']['list']['unordered']['default_marker'])
megroup.add_argument(
'-o',
'--ordered-list-marker',
choices=md_parser['github']['list']['ordered']['closing_markers'],
nargs='?',
const=md_parser['github']['list']['ordered']
['default_closing_marker'],
help='set the marker and enables ordered lists. Defaults to ' +
md_parser['github']['list']['ordered']['default_closing_marker'])
github.add_argument(
'-l',
'--header-levels',
choices=[
str(i)
for i in range(1, md_parser['github']['header']['max_levels'] +
1)
],
nargs='?',
const=str(md_parser['github']['header']['default_keep_levels']),
help='set the maximum level of headers to be considered as part \
of the TOC. Defaults to ' + str(
md_parser['github']['header']['default_keep_levels']))
github.set_defaults(
header_levels=md_parser['github']['header']['default_keep_levels'])
# Redcarpet.
redcarpet = subparsers.add_parser(
'redcarpet',
description='Use Redcarpet rules to generate an output. If no \
option is selected, the default output will be an \
unordered list with the respective default values \
as listed below. Gitlab rules are the same as \
Redcarpet except that conflicts are avoided with \
duplicate headers.')
megroup = redcarpet.add_mutually_exclusive_group()
megroup.add_argument(
'-u',
'--unordered-list-marker',
choices=md_parser['redcarpet']['list']['unordered']
['bullet_markers'],
nargs='?',
const=md_parser['redcarpet']['list']['unordered']
['default_marker'],
help='set the marker and enables unordered list. Defaults to ' +
md_parser['redcarpet']['list']['unordered']['default_marker'])
megroup.add_argument(
'-o',
'--ordered-list-marker',
choices=md_parser['redcarpet']['list']['ordered']
['closing_markers'],
nargs='?',
const=md_parser['redcarpet']['list']['ordered']
['default_closing_marker'],
help='set the marker and enables ordered lists. Defaults to ' +
md_parser['redcarpet']['list']['ordered']['default_closing_marker']
)
redcarpet.add_argument(
'-l',
'--header-levels',
choices=[
str(i) for i in range(
1, md_parser['redcarpet']['header']['max_levels'] + 1)
],
nargs='?',
const=str(md_parser['redcarpet']['header']['default_keep_levels']),
help='set the maximum level of headers to be considered as part \
of the TOC. Defaults to ' + str(
md_parser['redcarpet']['header']['default_keep_levels']))
redcarpet.set_defaults(header_levels=md_parser['redcarpet']['header']
['default_keep_levels'])
c_or_i = parser.add_mutually_exclusive_group()
c_or_i.add_argument(
'-c',
'--no-list-coherence',
action='store_true',
help='avoids checking for TOC list coherence')
c_or_i.add_argument(
'-i',
'--no-indentation',
action='store_true',
help='avoids adding indentations to the TOC')
parser.add_argument(
'-l',
'--no-links',
action='store_true',
help='avoids adding links to the TOC')
parser.add_argument(
'-m',
'--toc-marker',
metavar='TOC_MARKER',
help='set the string to be used as the marker for positioning the \
table of contents. Defaults to ' +
common_defaults['toc_marker'])
parser.add_argument(
'-p',
'--in-place',
action='store_true',
help='overwrite the input file')
parser.add_argument(
'-v',
'--version',
action='version',
version=VERSION_NAME + ' ' + VERSION_NUMBER)
parser.set_defaults(toc_marker=common_defaults['toc_marker'])
parser.set_defaults(func=CliToApi().write_toc)
return parser |
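A hedged usage sketch of the parser built above. The owning class is not shown in this snippet, so cli_interface stands in for an instance of it; the argument order relies on argparse matching the FILE_NAME positional before the required subcommand.

parser = cli_interface.create_parser()                       # cli_interface: assumed instance
args = parser.parse_args(['README.md', 'github', '-l', '3'])
print(args.filename, args.parser, args.header_levels)        # ['README.md'] 'github' '3'
args.func(args)   # set_defaults wired this to CliToApi().write_toc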
def cli(context, verbose, api_key, base_url, workers):
'''Planet API Client'''
configure_logging(verbose)
client_params.clear()
client_params['api_key'] = api_key
client_params['workers'] = workers
if base_url:
        client_params['base_url'] = base_url | Planet API Client | Below is the instruction that describes the task:
### Input:
Planet API Client
### Response:
def cli(context, verbose, api_key, base_url, workers):
'''Planet API Client'''
configure_logging(verbose)
client_params.clear()
client_params['api_key'] = api_key
client_params['workers'] = workers
if base_url:
client_params['base_url'] = base_url |
def PrepareHttpRequest(self, method_config, request, global_params=None,
upload=None, upload_config=None, download=None):
"""Prepares an HTTP request to be sent."""
request_type = _LoadClass(
method_config.request_type_name, self.__client.MESSAGES_MODULE)
util.Typecheck(request, request_type)
request = self.__client.ProcessRequest(method_config, request)
http_request = http_wrapper.Request(
http_method=method_config.http_method)
self.__SetBaseHeaders(http_request, self.__client)
self.__SetBody(http_request, method_config, request, upload)
url_builder = _UrlBuilder(
self.__client.url, relative_path=method_config.relative_path)
url_builder.query_params = self.__ConstructQueryParams(
method_config.query_params, request, global_params)
# It's important that upload and download go before we fill in the
# relative path, so that they can replace it.
if upload is not None:
upload.ConfigureRequest(upload_config, http_request, url_builder)
if download is not None:
download.ConfigureRequest(http_request, url_builder)
url_builder.relative_path = self.__ConstructRelativePath(
method_config, request, relative_path=url_builder.relative_path)
self.__FinalizeRequest(http_request, url_builder)
        return self.__client.ProcessHttpRequest(http_request) | Prepares an HTTP request to be sent. | Below is the instruction that describes the task:
### Input:
Prepares an HTTP request to be sent.
### Response:
def PrepareHttpRequest(self, method_config, request, global_params=None,
upload=None, upload_config=None, download=None):
"""Prepares an HTTP request to be sent."""
request_type = _LoadClass(
method_config.request_type_name, self.__client.MESSAGES_MODULE)
util.Typecheck(request, request_type)
request = self.__client.ProcessRequest(method_config, request)
http_request = http_wrapper.Request(
http_method=method_config.http_method)
self.__SetBaseHeaders(http_request, self.__client)
self.__SetBody(http_request, method_config, request, upload)
url_builder = _UrlBuilder(
self.__client.url, relative_path=method_config.relative_path)
url_builder.query_params = self.__ConstructQueryParams(
method_config.query_params, request, global_params)
# It's important that upload and download go before we fill in the
# relative path, so that they can replace it.
if upload is not None:
upload.ConfigureRequest(upload_config, http_request, url_builder)
if download is not None:
download.ConfigureRequest(http_request, url_builder)
url_builder.relative_path = self.__ConstructRelativePath(
method_config, request, relative_path=url_builder.relative_path)
self.__FinalizeRequest(http_request, url_builder)
return self.__client.ProcessHttpRequest(http_request) |
def get_userpass_value(cli_value, config, key, prompt_strategy):
"""Gets the username / password from config.
Uses the following rules:
1. If it is specified on the cli (`cli_value`), use that.
2. If `config[key]` is specified, use that.
3. Otherwise prompt using `prompt_strategy`.
:param cli_value: The value supplied from the command line or `None`.
:type cli_value: unicode or `None`
:param config: Config dictionary
:type config: dict
:param key: Key to find the config value.
:type key: unicode
:prompt_strategy: Argumentless function to return fallback value.
:type prompt_strategy: function
:returns: The value for the username / password
:rtype: unicode
"""
if cli_value is not None:
return cli_value
elif config.get(key):
return config[key]
else:
return prompt_strategy() | Gets the username / password from config.
Uses the following rules:
1. If it is specified on the cli (`cli_value`), use that.
2. If `config[key]` is specified, use that.
3. Otherwise prompt using `prompt_strategy`.
:param cli_value: The value supplied from the command line or `None`.
:type cli_value: unicode or `None`
:param config: Config dictionary
:type config: dict
:param key: Key to find the config value.
:type key: unicode
:prompt_strategy: Argumentless function to return fallback value.
:type prompt_strategy: function
:returns: The value for the username / password
    :rtype: unicode | Below is the instruction that describes the task:
### Input:
Gets the username / password from config.
Uses the following rules:
1. If it is specified on the cli (`cli_value`), use that.
2. If `config[key]` is specified, use that.
3. Otherwise prompt using `prompt_strategy`.
:param cli_value: The value supplied from the command line or `None`.
:type cli_value: unicode or `None`
:param config: Config dictionary
:type config: dict
:param key: Key to find the config value.
:type key: unicode
:prompt_strategy: Argumentless function to return fallback value.
:type prompt_strategy: function
:returns: The value for the username / password
:rtype: unicode
### Response:
def get_userpass_value(cli_value, config, key, prompt_strategy):
"""Gets the username / password from config.
Uses the following rules:
1. If it is specified on the cli (`cli_value`), use that.
2. If `config[key]` is specified, use that.
3. Otherwise prompt using `prompt_strategy`.
:param cli_value: The value supplied from the command line or `None`.
:type cli_value: unicode or `None`
:param config: Config dictionary
:type config: dict
:param key: Key to find the config value.
:type key: unicode
:prompt_strategy: Argumentless function to return fallback value.
:type prompt_strategy: function
:returns: The value for the username / password
:rtype: unicode
"""
if cli_value is not None:
return cli_value
elif config.get(key):
return config[key]
else:
return prompt_strategy() |
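A small usage sketch of the fallback chain (CLI value first, then config, then the prompt); the config dict and prompts are illustrative only.

import getpass

config = {'username': 'alice', 'password': None}
username = get_userpass_value(None, config, 'username',
                              prompt_strategy=lambda: input('username: '))
password = get_userpass_value(None, config, 'password',
                              prompt_strategy=lambda: getpass.getpass('password: '))
# username -> 'alice' via rule 2 (config); password falls through to rule 3 (prompt)
# because config['password'] is falsy; a non-None cli_value would win via rule 1.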
def get_language_tabs(request, current_language, available_languages, css_class=None):
"""
Determine the language tabs to show.
"""
tabs = TabsList(css_class=css_class)
get = request.GET.copy() # QueryDict object
tab_languages = []
site_id = getattr(settings, 'SITE_ID', None)
for lang_dict in appsettings.PARLER_LANGUAGES.get(site_id, ()):
code = lang_dict['code']
title = get_language_title(code)
get['language'] = code
url = '?{0}'.format(get.urlencode())
if code == current_language:
status = 'current'
elif code in available_languages:
status = 'available'
else:
status = 'empty'
tabs.append((url, title, code, status))
tab_languages.append(code)
# Additional stale translations in the database?
if appsettings.PARLER_SHOW_EXCLUDED_LANGUAGE_TABS:
for code in available_languages:
if code not in tab_languages:
get['language'] = code
url = '?{0}'.format(get.urlencode())
if code == current_language:
status = 'current'
else:
status = 'available'
tabs.append((url, get_language_title(code), code, status))
tabs.current_is_translated = current_language in available_languages
tabs.allow_deletion = len(available_languages) > 1
    return tabs | Determine the language tabs to show. | Below is the instruction that describes the task:
### Input:
Determine the language tabs to show.
### Response:
def get_language_tabs(request, current_language, available_languages, css_class=None):
"""
Determine the language tabs to show.
"""
tabs = TabsList(css_class=css_class)
get = request.GET.copy() # QueryDict object
tab_languages = []
site_id = getattr(settings, 'SITE_ID', None)
for lang_dict in appsettings.PARLER_LANGUAGES.get(site_id, ()):
code = lang_dict['code']
title = get_language_title(code)
get['language'] = code
url = '?{0}'.format(get.urlencode())
if code == current_language:
status = 'current'
elif code in available_languages:
status = 'available'
else:
status = 'empty'
tabs.append((url, title, code, status))
tab_languages.append(code)
# Additional stale translations in the database?
if appsettings.PARLER_SHOW_EXCLUDED_LANGUAGE_TABS:
for code in available_languages:
if code not in tab_languages:
get['language'] = code
url = '?{0}'.format(get.urlencode())
if code == current_language:
status = 'current'
else:
status = 'available'
tabs.append((url, get_language_title(code), code, status))
tabs.current_is_translated = current_language in available_languages
tabs.allow_deletion = len(available_languages) > 1
return tabs |
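A hedged sketch of the settings this helper reads. The dict below follows django-parler's documented PARLER_LANGUAGES convention (keyed by SITE_ID); it is an example configuration, not something mandated by the code above.

# settings.py (illustrative)
SITE_ID = 1
PARLER_LANGUAGES = {
    1: (                      # keyed by SITE_ID, matching the lookup above
        {'code': 'en'},
        {'code': 'nl'},
        {'code': 'fr'},
    ),
    'default': {
        'fallbacks': ['en'],
        'hide_untranslated': False,
    },
}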
def generate_challenges(self, num, root_seed):
""" Generate the specified number of hash challenges.
:param num: The number of hash challenges we want to generate.
:param root_seed: Some value that we use to generate our seeds from.
"""
# Generate a series of seeds
seeds = self.generate_seeds(num, root_seed, self.secret)
blocks = self.pick_blocks(num, root_seed)
# List of 2-tuples (seed, hash_response)
self.challenges = []
# Generate the corresponding hash for each seed
for i in range(num):
self.challenges.append(Challenge(blocks[i], seeds[i]))
response = self.meet_challenge(self.challenges[i])
self.challenges[i].response = response | Generate the specified number of hash challenges.
:param num: The number of hash challenges we want to generate.
    :param root_seed: Some value that we use to generate our seeds from. | Below is the instruction that describes the task:
### Input:
Generate the specified number of hash challenges.
:param num: The number of hash challenges we want to generate.
:param root_seed: Some value that we use to generate our seeds from.
### Response:
def generate_challenges(self, num, root_seed):
""" Generate the specified number of hash challenges.
:param num: The number of hash challenges we want to generate.
:param root_seed: Some value that we use to generate our seeds from.
"""
# Generate a series of seeds
seeds = self.generate_seeds(num, root_seed, self.secret)
blocks = self.pick_blocks(num, root_seed)
# List of 2-tuples (seed, hash_response)
self.challenges = []
# Generate the corresponding hash for each seed
for i in range(num):
self.challenges.append(Challenge(blocks[i], seeds[i]))
response = self.meet_challenge(self.challenges[i])
self.challenges[i].response = response |
def _set_af_vrf(self, v, load=False):
"""
Setter method for af_vrf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_af_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_af_vrf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("af_vrf_name",af_vrf.af_vrf, yang_name="af-vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='af-vrf-name', extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}), is_container='list', yang_name="af-vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """af_vrf must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("af_vrf_name",af_vrf.af_vrf, yang_name="af-vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='af-vrf-name', extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}), is_container='list', yang_name="af-vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)""",
})
self.__af_vrf = t
if hasattr(self, '_set'):
self._set() | Setter method for af_vrf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_af_vrf is considered as a private
method. Backends looking to populate this variable should
    do so via calling thisObj._set_af_vrf() directly. | Below is the instruction that describes the task:
### Input:
Setter method for af_vrf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_af_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_af_vrf() directly.
### Response:
def _set_af_vrf(self, v, load=False):
"""
Setter method for af_vrf, mapped from YANG variable /rbridge_id/router/router_bgp/address_family/ipv4/ipv4_unicast/af_vrf (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_af_vrf is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_af_vrf() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("af_vrf_name",af_vrf.af_vrf, yang_name="af-vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='af-vrf-name', extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}), is_container='list', yang_name="af-vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """af_vrf must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("af_vrf_name",af_vrf.af_vrf, yang_name="af-vrf", rest_name="vrf", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='af-vrf-name', extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}), is_container='list', yang_name="af-vrf", rest_name="vrf", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'VRF unicast', u'alt-name': u'vrf', u'cli-suppress-list-no': None, u'callpoint': u'AfIpv4UcastVrf', u'cli-full-command': None, u'cli-full-no': None, u'cli-mode-name': u'config-bgp-ipv4u-vrf'}}, namespace='urn:brocade.com:mgmt:brocade-bgp', defining_module='brocade-bgp', yang_type='list', is_config=True)""",
})
self.__af_vrf = t
if hasattr(self, '_set'):
self._set() |
def calcR2(predTst, yTest, axis=0):
"""calculate coefficient of determination. Assumes that axis=0 is time
Parameters
----------
    predTst : np.array, predicted response for yTest
    yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
R2
"""
rss = np.sum((yTest - predTst) ** 2, axis=axis)
tss = np.sum((yTest - yTest.mean()) ** 2, axis=axis)
return 1 - rss/tss | calculate coefficient of determination. Assumes that axis=0 is time
Parameters
----------
    predTst : np.array, predicted response for yTest
    yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
        R2 | Below is the instruction that describes the task:
### Input:
calculate coefficient of determination. Assumes that axis=0 is time
Parameters
----------
predTst : np.array, predicted response for yTest
yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
R2
### Response:
def calcR2(predTst, yTest, axis=0):
"""calculate coefficient of determination. Assumes that axis=0 is time
Parameters
----------
    predTst : np.array, predicted response for yTest
    yTest : np.array, actually observed response for yTest
Returns
-------
aryFunc : np.array
R2
"""
rss = np.sum((yTest - predTst) ** 2, axis=axis)
tss = np.sum((yTest - yTest.mean()) ** 2, axis=axis)
return 1 - rss/tss |
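A quick worked example; the values are chosen so the result can be cross-checked against scikit-learn's r2_score.

import numpy as np

yTest = np.array([3.0, -0.5, 2.0, 7.0])
predTst = np.array([2.5, 0.0, 2.0, 8.0])
print(calcR2(predTst, yTest))   # ~0.9486 (rss = 1.5, tss = 29.1875)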
def stop():
"""Stop the server, invalidating any viewer URLs.
This allows any previously-referenced data arrays to be garbage collected if there are no other
references to them.
"""
global global_server
if global_server is not None:
ioloop = global_server.ioloop
def stop_ioloop():
ioloop.stop()
ioloop.close()
global_server.ioloop.add_callback(stop_ioloop)
global_server = None | Stop the server, invalidating any viewer URLs.
This allows any previously-referenced data arrays to be garbage collected if there are no other
    references to them. | Below is the instruction that describes the task:
### Input:
Stop the server, invalidating any viewer URLs.
This allows any previously-referenced data arrays to be garbage collected if there are no other
references to them.
### Response:
def stop():
"""Stop the server, invalidating any viewer URLs.
This allows any previously-referenced data arrays to be garbage collected if there are no other
references to them.
"""
global global_server
if global_server is not None:
ioloop = global_server.ioloop
def stop_ioloop():
ioloop.stop()
ioloop.close()
global_server.ioloop.add_callback(stop_ioloop)
global_server = None |
def length_prefix(length, offset):
"""Construct the prefix to lists or strings denoting their length.
:param length: the length of the item in bytes
:param offset: ``0x80`` when encoding raw bytes, ``0xc0`` when encoding a
list
"""
if length < 56:
return ALL_BYTES[offset + length]
elif length < LONG_LENGTH:
length_string = int_to_big_endian(length)
return ALL_BYTES[offset + 56 - 1 + len(length_string)] + length_string
else:
raise ValueError('Length greater than 256**8') | Construct the prefix to lists or strings denoting their length.
:param length: the length of the item in bytes
:param offset: ``0x80`` when encoding raw bytes, ``0xc0`` when encoding a
        list | Below is the instruction that describes the task:
### Input:
Construct the prefix to lists or strings denoting their length.
:param length: the length of the item in bytes
:param offset: ``0x80`` when encoding raw bytes, ``0xc0`` when encoding a
list
### Response:
def length_prefix(length, offset):
"""Construct the prefix to lists or strings denoting their length.
:param length: the length of the item in bytes
:param offset: ``0x80`` when encoding raw bytes, ``0xc0`` when encoding a
list
"""
if length < 56:
return ALL_BYTES[offset + length]
elif length < LONG_LENGTH:
length_string = int_to_big_endian(length)
return ALL_BYTES[offset + 56 - 1 + len(length_string)] + length_string
else:
raise ValueError('Length greater than 256**8') |
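Worked values for both branches, following the RLP rules the function implements (ALL_BYTES, int_to_big_endian and LONG_LENGTH are module-level helpers assumed from the surrounding rlp codec):

length_prefix(5, 0x80)      # -> b'\x85'           short payload: 0x80 + 5 (raw bytes)
length_prefix(3, 0xc0)      # -> b'\xc3'           short payload: 0xc0 + 3 (list)
length_prefix(1024, 0x80)   # -> b'\xb9\x04\x00'   long payload: 0xb7 + 2, then 1024 big-endian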
def from_path(cls, path):
"""Creates rule instance from path.
:type path: pathlib.Path
:rtype: Rule
"""
name = path.name[:-3]
with logs.debug_time(u'Importing rule: {};'.format(name)):
rule_module = load_source(name, str(path))
priority = getattr(rule_module, 'priority', DEFAULT_PRIORITY)
return cls(name, rule_module.match,
rule_module.get_new_command,
getattr(rule_module, 'enabled_by_default', True),
getattr(rule_module, 'side_effect', None),
settings.priority.get(name, priority),
getattr(rule_module, 'requires_output', True)) | Creates rule instance from path.
:type path: pathlib.Path
        :rtype: Rule | Below is the instruction that describes the task:
### Input:
Creates rule instance from path.
:type path: pathlib.Path
:rtype: Rule
### Response:
def from_path(cls, path):
"""Creates rule instance from path.
:type path: pathlib.Path
:rtype: Rule
"""
name = path.name[:-3]
with logs.debug_time(u'Importing rule: {};'.format(name)):
rule_module = load_source(name, str(path))
priority = getattr(rule_module, 'priority', DEFAULT_PRIORITY)
return cls(name, rule_module.match,
rule_module.get_new_command,
getattr(rule_module, 'enabled_by_default', True),
getattr(rule_module, 'side_effect', None),
settings.priority.get(name, priority),
getattr(rule_module, 'requires_output', True)) |
def generate(self, src=None, identifier=None):
"""Generate static files for one source image."""
self.src = src
self.identifier = identifier
# Get image details and calculate tiles
im = self.manipulator_klass()
im.srcfile = self.src
im.set_max_image_pixels(self.max_image_pixels)
im.do_first()
width = im.width
height = im.height
scale_factors = im.scale_factors(self.tilesize)
# Setup destination and IIIF identifier
self.setup_destination()
# Write out images
for (region, size) in static_partial_tile_sizes(width, height, self.tilesize, scale_factors):
self.generate_tile(region, size)
sizes = []
for size in static_full_sizes(width, height, self.tilesize):
# See https://github.com/zimeon/iiif/issues/9
sizes.append({'width': size[0], 'height': size[1]})
self.generate_tile('full', size)
for request in self.extras:
request.identifier = self.identifier
if (request.is_scaled_full_image()):
sizes.append({'width': request.size_wh[0],
'height': request.size_wh[1]})
self.generate_file(request)
# Write info.json
qualities = ['default'] if (self.api_version > '1.1') else ['native']
info = IIIFInfo(level=0, server_and_prefix=self.prefix, identifier=self.identifier,
width=width, height=height, scale_factors=scale_factors,
tile_width=self.tilesize, tile_height=self.tilesize,
formats=['jpg'], qualities=qualities, sizes=sizes,
api_version=self.api_version)
json_file = os.path.join(self.dst, self.identifier, 'info.json')
if (self.dryrun):
self.logger.warning(
"dryrun mode, would write the following files:")
self.logger.warning("%s / %s/%s" %
(self.dst, self.identifier, 'info.json'))
else:
with open(json_file, 'w') as f:
f.write(info.as_json())
f.close()
self.logger.info("%s / %s/%s" %
(self.dst, self.identifier, 'info.json'))
        self.logger.debug("Written %s" % (json_file)) | Generate static files for one source image. | Below is the instruction that describes the task:
### Input:
Generate static files for one source image.
### Response:
def generate(self, src=None, identifier=None):
"""Generate static files for one source image."""
self.src = src
self.identifier = identifier
# Get image details and calculate tiles
im = self.manipulator_klass()
im.srcfile = self.src
im.set_max_image_pixels(self.max_image_pixels)
im.do_first()
width = im.width
height = im.height
scale_factors = im.scale_factors(self.tilesize)
# Setup destination and IIIF identifier
self.setup_destination()
# Write out images
for (region, size) in static_partial_tile_sizes(width, height, self.tilesize, scale_factors):
self.generate_tile(region, size)
sizes = []
for size in static_full_sizes(width, height, self.tilesize):
# See https://github.com/zimeon/iiif/issues/9
sizes.append({'width': size[0], 'height': size[1]})
self.generate_tile('full', size)
for request in self.extras:
request.identifier = self.identifier
if (request.is_scaled_full_image()):
sizes.append({'width': request.size_wh[0],
'height': request.size_wh[1]})
self.generate_file(request)
# Write info.json
qualities = ['default'] if (self.api_version > '1.1') else ['native']
info = IIIFInfo(level=0, server_and_prefix=self.prefix, identifier=self.identifier,
width=width, height=height, scale_factors=scale_factors,
tile_width=self.tilesize, tile_height=self.tilesize,
formats=['jpg'], qualities=qualities, sizes=sizes,
api_version=self.api_version)
json_file = os.path.join(self.dst, self.identifier, 'info.json')
if (self.dryrun):
self.logger.warning(
"dryrun mode, would write the following files:")
self.logger.warning("%s / %s/%s" %
(self.dst, self.identifier, 'info.json'))
else:
with open(json_file, 'w') as f:
f.write(info.as_json())
f.close()
self.logger.info("%s / %s/%s" %
(self.dst, self.identifier, 'info.json'))
self.logger.debug("Written %s" % (json_file)) |
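A hedged usage sketch: generate() appears to belong to the static tile generator in the 'iiif' package. The class name, constructor arguments and file paths below are assumptions, shown with dryrun=True so nothing is actually written.

from iiif.static import IIIFStatic   # assumed import path

sg = IIIFStatic(dst='htdocs/tiles', tilesize=512, api_version='2.1', dryrun=True)
sg.generate(src='testimages/starfish.jpg', identifier='starfish')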
def get_accept_license(request):
"""This produces JSON data for a user (at ``uid``) to view the license(s)
they have accepted or will need to accept for a publication (at ``id``).
"""
publication_id = request.matchdict['id']
user_id = request.matchdict['uid']
# FIXME Is this an active publication?
# TODO Verify the accepting user is the one making the request.
# For each pending document, accept the license.
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""
SELECT row_to_json(combined_rows) FROM (
SELECT
pd.uuid AS id,
ident_hash(pd.uuid, pd.major_version, pd.minor_version) \
AS ident_hash,
accepted AS is_accepted
FROM
pending_documents AS pd
NATURAL JOIN license_acceptances AS la
WHERE pd.publication_id = %s AND user_id = %s
) as combined_rows;""",
(publication_id, user_id))
user_documents = [r[0] for r in cursor.fetchall()]
return {'publication_id': publication_id,
'user_id': user_id,
'documents': user_documents,
} | This produces JSON data for a user (at ``uid``) to view the license(s)
    they have accepted or will need to accept for a publication (at ``id``). | Below is the instruction that describes the task:
### Input:
This produces JSON data for a user (at ``uid``) to view the license(s)
they have accepted or will need to accept for a publication (at ``id``).
### Response:
def get_accept_license(request):
"""This produces JSON data for a user (at ``uid``) to view the license(s)
they have accepted or will need to accept for a publication (at ``id``).
"""
publication_id = request.matchdict['id']
user_id = request.matchdict['uid']
# FIXME Is this an active publication?
# TODO Verify the accepting user is the one making the request.
# For each pending document, accept the license.
with db_connect() as db_conn:
with db_conn.cursor() as cursor:
cursor.execute("""
SELECT row_to_json(combined_rows) FROM (
SELECT
pd.uuid AS id,
ident_hash(pd.uuid, pd.major_version, pd.minor_version) \
AS ident_hash,
accepted AS is_accepted
FROM
pending_documents AS pd
NATURAL JOIN license_acceptances AS la
WHERE pd.publication_id = %s AND user_id = %s
) as combined_rows;""",
(publication_id, user_id))
user_documents = [r[0] for r in cursor.fetchall()]
return {'publication_id': publication_id,
'user_id': user_id,
'documents': user_documents,
} |