code | docstring | text
---|---|---|
def minmax(self, minimum=None, maximum=None):
"""Min/Max
Sets or gets the minimum and/or maximum values for the Node. For
getting, returns {"minimum":mixed,"maximum":mixed}
Arguments:
minimum {mixed} -- The minimum value
maximum {mixed} -- The maximum value
Raises:
TypeError, ValueError
Returns:
None | dict
"""
# If neither min or max is set, this is a getter
if minimum is None and maximum is None:
return {"minimum": self._minimum, "maximum": self._maximum}
# If the minimum is set
if minimum is not None:
# If the current type is a date, datetime, ip, or time
if self._type in ['base64', 'date', 'datetime', 'ip', 'time']:
# Make sure the value is valid for the type
if not isinstance(minimum, basestring) \
or not _typeToRegex[self._type].match(minimum):
raise ValueError('__minimum__')
# Else if the type is an int (unsigned, timestamp), or a string in
# which the min/max are lengths
elif self._type in ['int', 'string', 'timestamp', 'uint']:
# If the value is not a valid int or long
if not isinstance(minimum, (int, long)):
# If it's a valid representation of an integer
if isinstance(minimum, basestring) \
and _typeToRegex['int'].match(minimum):
# Convert it
minimum = int(minimum, 0)
# Else, raise an error
else:
raise ValueError('__minimum__')
# If the type is meant to be unsigned
if self._type in ['base64', 'string', 'timestamp', 'uint']:
# And it's below zero
if minimum < 0:
raise ValueError('__minimum__')
# Else if the type is decimal
elif self._type == 'decimal':
# Store it if it's valid, else throw a ValueError
try:
minimum = Decimal(minimum)
except ValueError:
raise ValueError('__minimum__')
# Else if the type is float
elif self._type == 'float':
# Store it if it's valid, else throw a ValueError
try:
minimum = float(minimum)
except ValueError:
raise ValueError('__minimum__')
# Else if the type is price
elif self._type == 'price':
# If it's not a valid representation of a price
if not isinstance(minimum, basestring) or not _typeToRegex['price'].match(minimum):
raise ValueError('__minimum__')
# Store it as a Decimal
minimum = Decimal(minimum)
# Else we can't have a minimum
else:
raise TypeError('can not set __minimum__ for ' + self._type)
# Store the minimum
self._minimum = minimum
# If the maximum is set
if maximum is not None:
# If the current type is a date, datetime, ip, or time
if self._type in ['date', 'datetime', 'ip', 'time']:
# Make sure the value is valid for the type
if not isinstance(maximum, basestring) \
or not _typeToRegex[self._type].match(maximum):
raise ValueError('__maximum__')
# Else if the type is an int (unsigned, timestamp), or a string in
# which the min/max are lengths
elif self._type in ['int', 'string', 'timestamp', 'uint']:
# If the value is not a valid int or long
if not isinstance(maximum, (int, long)):
# If it's a valid representation of an integer
if isinstance(maximum, basestring) \
and _typeToRegex['int'].match(maximum):
# Convert it
maximum = int(maximum, 0)
# Else, raise an error
else:
raise ValueError('__maximum__')
# If the type is meant to be unsigned
if self._type in ['string', 'timestamp', 'uint']:
# And it's below zero
if maximum < 0:
raise ValueError('__maximum__')
# Else if the type is decimal
elif self._type == 'decimal':
# Store it if it's valid, else throw a ValueError
try:
maximum = Decimal(maximum)
except ValueError:
raise ValueError('__maximum__')
# Else if the type is float
elif self._type == 'float':
# Store it if it's valid, else throw a ValueError
try:
maximum = float(maximum)
except ValueError:
raise ValueError('__maximum__')
# Else if the type is price
elif self._type == 'price':
# If it's not a valid representation of a price
if not isinstance(maximum, basestring) or not _typeToRegex['price'].match(maximum):
raise ValueError('__maximum__')
# Store it as a Decimal
maximum = Decimal(maximum)
# Else we can't have a maximum
else:
raise TypeError('can not set __maximum__ for ' + self._type)
# If we also have a minimum
if self._minimum is not None:
# If the type is an IP
if self._type == 'ip':
# If the min is above the max, we have a problem
if self.__compare_ips(self._minimum, maximum) == 1:
raise ValueError('__maximum__')
# Else any other data type
else:
# If the min is above the max, we have a problem
if self._minimum > maximum:
raise ValueError('__maximum__')
# Store the maximum
self._maximum = maximum | Min/Max
Sets or gets the minimum and/or maximum values for the Node. For
getting, returns {"minimum":mixed,"maximum":mixed}
Arguments:
minimum {mixed} -- The minimum value
maximum {mixed} -- The maximum value
Raises:
TypeError, ValueError
Returns:
None | dict | Below is the instruction that describes the task:
### Input:
Min/Max
Sets or gets the minimum and/or maximum values for the Node. For
getting, returns {"minimum":mixed,"maximum":mixed}
Arguments:
minimum {mixed} -- The minimum value
maximum {mixed} -- The maximum value
Raises:
TypeError, ValueError
Returns:
None | dict
### Response:
def minmax(self, minimum=None, maximum=None):
"""Min/Max
Sets or gets the minimum and/or maximum values for the Node. For
getting, returns {"minimum":mixed,"maximum":mixed}
Arguments:
minimum {mixed} -- The minimum value
maximum {mixed} -- The maximum value
Raises:
TypeError, ValueError
Returns:
None | dict
"""
# If neither min or max is set, this is a getter
if minimum is None and maximum is None:
return {"minimum": self._minimum, "maximum": self._maximum}
# If the minimum is set
if minimum is not None:
# If the current type is a date, datetime, ip, or time
if self._type in ['base64', 'date', 'datetime', 'ip', 'time']:
# Make sure the value is valid for the type
if not isinstance(minimum, basestring) \
or not _typeToRegex[self._type].match(minimum):
raise ValueError('__minimum__')
# Else if the type is an int (unsigned, timestamp), or a string in
# which the min/max are lengths
elif self._type in ['int', 'string', 'timestamp', 'uint']:
# If the value is not a valid int or long
if not isinstance(minimum, (int, long)):
# If it's a valid representation of an integer
if isinstance(minimum, basestring) \
and _typeToRegex['int'].match(minimum):
# Convert it
minimum = int(minimum, 0)
# Else, raise an error
else:
raise ValueError('__minimum__')
# If the type is meant to be unsigned
if self._type in ['base64', 'string', 'timestamp', 'uint']:
# And it's below zero
if minimum < 0:
raise ValueError('__minimum__')
# Else if the type is decimal
elif self._type == 'decimal':
# Store it if it's valid, else throw a ValueError
try:
minimum = Decimal(minimum)
except ValueError:
raise ValueError('__minimum__')
# Else if the type is float
elif self._type == 'float':
# Store it if it's valid, else throw a ValueError
try:
minimum = float(minimum)
except ValueError:
raise ValueError('__minimum__')
# Else if the type is price
elif self._type == 'price':
# If it's not a valid representation of a price
if not isinstance(minimum, basestring) or not _typeToRegex['price'].match(minimum):
raise ValueError('__minimum__')
# Store it as a Decimal
minimum = Decimal(minimum)
# Else we can't have a minimum
else:
raise TypeError('can not set __minimum__ for ' + self._type)
# Store the minimum
self._minimum = minimum
# If the maximum is set
if maximum is not None:
# If the current type is a date, datetime, ip, or time
if self._type in ['date', 'datetime', 'ip', 'time']:
# Make sure the value is valid for the type
if not isinstance(maximum, basestring) \
or not _typeToRegex[self._type].match(maximum):
raise ValueError('__maximum__')
# Else if the type is an int (unsigned, timestamp), or a string in
# which the min/max are lengths
elif self._type in ['int', 'string', 'timestamp', 'uint']:
# If the value is not a valid int or long
if not isinstance(maximum, (int, long)):
# If it's a valid representation of an integer
if isinstance(maximum, basestring) \
and _typeToRegex['int'].match(maximum):
# Convert it
maximum = int(maximum, 0)
# Else, raise an error
else:
raise ValueError('__maximum__')
# If the type is meant to be unsigned
if self._type in ['string', 'timestamp', 'uint']:
# And it's below zero
if maximum < 0:
raise ValueError('__maximum__')
# Else if the type is decimal
elif self._type == 'decimal':
# Store it if it's valid, else throw a ValueError
try:
maximum = Decimal(maximum)
except ValueError:
raise ValueError('__maximum__')
# Else if the type is float
elif self._type == 'float':
# Store it if it's valid, else throw a ValueError
try:
maximum = float(maximum)
except ValueError:
raise ValueError('__maximum__')
# Else if the type is price
elif self._type == 'price':
# If it's not a valid representation of a price
if not isinstance(maximum, basestring) or not _typeToRegex['price'].match(maximum):
raise ValueError('__maximum__')
# Store it as a Decimal
maximum = Decimal(maximum)
# Else we can't have a maximum
else:
raise TypeError('can not set __maximum__ for ' + self._type)
# If we also have a minimum
if self._minimum is not None:
# If the type is an IP
if self._type == 'ip':
# If the min is above the max, we have a problem
if self.__compare_ips(self._minimum, maximum) == 1:
raise ValueError('__maximum__')
# Else any other data type
else:
# If the min is above the max, we have a problem
if self._minimum > maximum:
raise ValueError('__maximum__')
# Store the maximum
self._maximum = maximum |
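A minimal usage sketch of the method above, assuming a `Node` class that exposes `minmax()` and was built for an unsigned-integer type; the constructor call shown here is hypothetical, since only the method body appears in this row.

```python
# Hypothetical usage; only minmax() itself is taken from the source above.
node = Node({"__type__": "uint"})      # assumed constructor signature
node.minmax(minimum=0, maximum=100)    # setter: validates and stores both bounds
print(node.minmax())                   # getter: {'minimum': 0, 'maximum': 100}
```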
def set_properties(self, properties, recursive=True):
"""
Adds new or modifies existing properties listed in properties
properties - is a dict which contains the property names and values to set.
Property values can be a list or tuple to set multiple values
for a key.
recursive - on folders property attachment is recursive by default. It is
possible to force recursive behavior.
"""
if not properties:
return
# If the URL exceeds ~13KB, nginx by default raises '414 Request-URI Too Large'
MAX_SIZE = 50
if len(properties) > MAX_SIZE:
for chunk in chunks(properties, MAX_SIZE):
self._accessor.set_properties(self, chunk, recursive)
else:
self._accessor.set_properties(self, properties, recursive) | Adds new or modifies existing properties listed in properties
properties - is a dict which contains the property names and values to set.
Property values can be a list or tuple to set multiple values
for a key.
recursive - on folders property attachment is recursive by default. It is
possible to force recursive behavior. | Below is the instruction that describes the task:
### Input:
Adds new or modifies existing properties listed in properties
properties - is a dict which contains the property names and values to set.
Property values can be a list or tuple to set multiple values
for a key.
recursive - on folders property attachment is recursive by default. It is
possible to force recursive behavior.
### Response:
def set_properties(self, properties, recursive=True):
"""
Adds new or modifies existing properties listed in properties
properties - is a dict which contains the property names and values to set.
Property values can be a list or tuple to set multiple values
for a key.
recursive - on folders property attachment is recursive by default. It is
possible to force recursive behavior.
"""
if not properties:
return
# If the URL exceeds ~13KB, nginx by default raises '414 Request-URI Too Large'
MAX_SIZE = 50
if len(properties) > MAX_SIZE:
for chunk in chunks(properties, MAX_SIZE):
self._accessor.set_properties(self, chunk, recursive)
else:
self._accessor.set_properties(self, properties, recursive) |
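The method relies on a `chunks()` helper that is not shown in this row. A plausible minimal implementation, assuming it splits a property dict into sub-dicts of at most `size` entries, could look like this:

```python
def chunks(d, size):
    """Yield successive dicts of at most `size` items from `d`.
    This is an assumed implementation of the helper referenced above,
    not the library's own definition."""
    items = list(d.items())
    for i in range(0, len(items), size):
        yield dict(items[i:i + size])

# Example: a 120-key property dict would be sent in chunks of 50, 50 and 20.
```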
def remove(self, element):
"""
Return a new PSet with element removed. Raises KeyError if element is not present.
>>> s1 = s(1, 2)
>>> s1.remove(2)
pset([1])
"""
if element in self._map:
return self.evolver().remove(element).persistent()
raise KeyError("Element '%s' not present in PSet" % element) | Return a new PSet with element removed. Raises KeyError if element is not present.
>>> s1 = s(1, 2)
>>> s1.remove(2)
pset([1]) | Below is the instruction that describes the task:
### Input:
Return a new PSet with element removed. Raises KeyError if element is not present.
>>> s1 = s(1, 2)
>>> s1.remove(2)
pset([1])
### Response:
def remove(self, element):
"""
Return a new PSet with element removed. Raises KeyError if element is not present.
>>> s1 = s(1, 2)
>>> s1.remove(2)
pset([1])
"""
if element in self._map:
return self.evolver().remove(element).persistent()
raise KeyError("Element '%s' not present in PSet" % element) |
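A short interactive sketch of the behaviour described in the docstring, using pyrsistent's `s()` constructor (the library this method appears to belong to; treat the import as an assumption):

```python
from pyrsistent import s

s1 = s(1, 2)
print(s1.remove(2))       # pset([1]) -- a new set; s1 itself is unchanged
try:
    s1.remove(3)          # element not present
except KeyError as err:
    print(err)            # the KeyError carries "Element '3' not present in PSet"
```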
def expected_counts_stationary(T, n, mu=None):
r"""Expected transition counts for Markov chain in equilibrium.
Since mu is stationary for T we have
.. math::
E(C^{(n)})=n diag(mu)*T.
Parameters
----------
T : (M, M) sparse matrix
Transition matrix.
n : int
Number of steps for chain.
mu : (M,) ndarray (optional)
Stationary distribution for T. If mu is not specified it will be
computed via diagonalization of T.
Returns
-------
EC : (M, M) sparse matrix
Expected value for transition counts after N steps.
"""
if (n <= 0):
EC = coo_matrix(T.shape, dtype=float)
return EC
else:
if mu is None:
mu = stationary_distribution(T)
D_mu = diags(mu, 0)
EC = n * D_mu.dot(T)
return EC | r"""Expected transition counts for Markov chain in equilibrium.
Since mu is stationary for T we have
.. math::
E(C^{(n)})=n diag(mu)*T.
Parameters
----------
T : (M, M) sparse matrix
Transition matrix.
n : int
Number of steps for chain.
mu : (M,) ndarray (optional)
Stationary distribution for T. If mu is not specified it will be
computed via diagonalization of T.
Returns
-------
EC : (M, M) sparse matrix
Expected value for transition counts after N steps. | Below is the instruction that describes the task:
### Input:
r"""Expected transition counts for Markov chain in equilibrium.
Since mu is stationary for T we have
.. math::
E(C^{(n)})=n diag(mu)*T.
Parameters
----------
T : (M, M) sparse matrix
Transition matrix.
n : int
Number of steps for chain.
mu : (M,) ndarray (optional)
Stationary distribution for T. If mu is not specified it will be
computed via diagonalization of T.
Returns
-------
EC : (M, M) sparse matrix
Expected value for transition counts after N steps.
### Response:
def expected_counts_stationary(T, n, mu=None):
r"""Expected transition counts for Markov chain in equilibrium.
Since mu is stationary for T we have
.. math::
E(C^{(n)})=n diag(mu)*T.
Parameters
----------
T : (M, M) sparse matrix
Transition matrix.
n : int
Number of steps for chain.
mu : (M,) ndarray (optional)
Stationary distribution for T. If mu is not specified it will be
computed via diagonalization of T.
Returns
-------
EC : (M, M) sparse matrix
Expected value for transition counts after N steps.
"""
if (n <= 0):
EC = coo_matrix(T.shape, dtype=float)
return EC
else:
if mu is None:
mu = stationary_distribution(T)
D_mu = diags(mu, 0)
EC = n * D_mu.dot(T)
return EC |
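A dense NumPy sketch of the identity E(C^(n)) = n·diag(mu)·T on a two-state chain; this is illustrative only and does not use the sparse routine above.

```python
import numpy as np

T = np.array([[0.9, 0.1],
              [0.2, 0.8]])
# Stationary distribution: left eigenvector of T for eigenvalue 1.
w, v = np.linalg.eig(T.T)
mu = np.real(v[:, np.argmax(np.real(w))])
mu /= mu.sum()
n = 100
EC = n * np.diag(mu) @ T           # expected transition counts after n steps
print(mu)                          # approx [0.6667, 0.3333]
print(EC)
```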
def arcs_missing(self):
"""Returns a sorted list of the arcs in the code not executed."""
possible = self.arc_possibilities()
executed = self.arcs_executed()
missing = [
p for p in possible
if p not in executed
and p[0] not in self.no_branch
]
return sorted(missing) | Returns a sorted list of the arcs in the code not executed. | Below is the instruction that describes the task:
### Input:
Returns a sorted list of the arcs in the code not executed.
### Response:
def arcs_missing(self):
"""Returns a sorted list of the arcs in the code not executed."""
possible = self.arc_possibilities()
executed = self.arcs_executed()
missing = [
p for p in possible
if p not in executed
and p[0] not in self.no_branch
]
return sorted(missing) |
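The same filtering logic on plain data, as a standalone sketch (the real method obtains `possible`, `executed` and `no_branch` from coverage analysis objects):

```python
possible = [(1, 2), (2, 3), (2, 4), (4, 5)]
executed = [(1, 2), (2, 3)]
no_branch = {4}                      # source lines excluded from branch analysis

missing = sorted(p for p in possible
                 if p not in executed and p[0] not in no_branch)
print(missing)                       # [(2, 4)] -- (4, 5) is skipped via no_branch
```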
def make_perfect_cd(wcs):
""" Create a perfect (square, orthogonal, undistorted) CD matrix from the
input WCS.
"""
def_scale = (wcs.pscale) / 3600.
def_orientat = np.deg2rad(wcs.orientat)
perfect_cd = def_scale * np.array(
[[-np.cos(def_orientat),np.sin(def_orientat)],
[np.sin(def_orientat),np.cos(def_orientat)]]
)
return perfect_cd | Create a perfect (square, orthogonal, undistorted) CD matrix from the
input WCS. | Below is the instruction that describes the task:
### Input:
Create a perfect (square, orthogonal, undistorted) CD matrix from the
input WCS.
### Response:
def make_perfect_cd(wcs):
""" Create a perfect (square, orthogonal, undistorted) CD matrix from the
input WCS.
"""
def_scale = (wcs.pscale) / 3600.
def_orientat = np.deg2rad(wcs.orientat)
perfect_cd = def_scale * np.array(
[[-np.cos(def_orientat),np.sin(def_orientat)],
[np.sin(def_orientat),np.cos(def_orientat)]]
)
return perfect_cd |
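A standalone NumPy version of the same construction that takes the pixel scale and orientation directly instead of a WCS object; it assumes `wcs.pscale` is in arcsec/pixel and `wcs.orientat` in degrees, as the /3600 and deg2rad conversions above suggest.

```python
import numpy as np

def perfect_cd_from_scale(pscale_arcsec, orientat_deg):
    """Square, orthogonal, undistorted CD matrix from a pixel scale
    (arcsec/pixel) and an orientation angle (degrees)."""
    scale = pscale_arcsec / 3600.0            # degrees per pixel
    theta = np.deg2rad(orientat_deg)
    return scale * np.array([[-np.cos(theta), np.sin(theta)],
                             [ np.sin(theta), np.cos(theta)]])

print(perfect_cd_from_scale(0.05, 30.0))
```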
def load(path=None, **kwargs):
'''
Loads the configuration from the file provided onto the device.
path (required)
Path where the configuration/template file is present. If the file has
a ``.conf`` extension, the content is treated as text format. If the
file has a ``.xml`` extension, the content is treated as XML format. If
the file has a ``.set`` extension, the content is treated as Junos OS
``set`` commands.
overwrite : False
Set to ``True`` if you want this file to completely replace the
configuration file.
replace : False
Specify whether the configuration file uses ``replace:`` statements. If
``True``, only those statements under the ``replace`` tag will be
changed.
format
Determines the format of the contents
update : False
Compare a complete loaded configuration against the candidate
configuration. For each hierarchy level or configuration object that is
different in the two configurations, the version in the loaded
configuration replaces the version in the candidate configuration. When
the configuration is later committed, only system processes that are
affected by the changed configuration elements parse the new
configuration. This action is supported from PyEZ 2.1.
template_vars
Variables to be passed into the template processing engine in addition to
those present in pillar, the minion configuration, grains, etc. You may
reference these variables in your template like so:
.. code-block:: jinja
{{ template_vars["var_name"] }}
CLI Examples:
.. code-block:: bash
salt 'device_name' junos.load 'salt://production/network/routers/config.set'
salt 'device_name' junos.load 'salt://templates/replace_config.conf' replace=True
salt 'device_name' junos.load 'salt://my_new_configuration.conf' overwrite=True
salt 'device_name' junos.load 'salt://syslog_template.conf' template_vars='{"syslog_host": "10.180.222.7"}'
'''
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
if path is None:
ret['message'] = \
'Please provide the salt path where the configuration is present'
ret['out'] = False
return ret
op = {}
if '__pub_arg' in kwargs:
if kwargs['__pub_arg']:
if isinstance(kwargs['__pub_arg'][-1], dict):
op.update(kwargs['__pub_arg'][-1])
else:
op.update(kwargs)
template_vars = {}
if "template_vars" in op:
template_vars = op["template_vars"]
template_cached_path = salt.utils.files.mkstemp()
__salt__['cp.get_template'](
path,
template_cached_path,
template_vars=template_vars)
if not os.path.isfile(template_cached_path):
ret['message'] = 'Invalid file path.'
ret['out'] = False
return ret
if os.path.getsize(template_cached_path) == 0:
ret['message'] = 'Template failed to render'
ret['out'] = False
return ret
op['path'] = template_cached_path
if 'format' not in op:
if path.endswith('set'):
template_format = 'set'
elif path.endswith('xml'):
template_format = 'xml'
else:
template_format = 'text'
op['format'] = template_format
if 'replace' in op and op['replace']:
op['merge'] = False
del op['replace']
elif 'overwrite' in op and op['overwrite']:
op['overwrite'] = True
elif 'overwrite' in op and not op['overwrite']:
op['merge'] = True
del op['overwrite']
try:
conn.cu.load(**op)
ret['message'] = "Successfully loaded the configuration."
except Exception as exception:
ret['message'] = 'Could not load configuration due to : "{0}"'.format(
exception)
ret['format'] = op['format']
ret['out'] = False
return ret
finally:
salt.utils.files.safe_rm(template_cached_path)
return ret | Loads the configuration from the file provided onto the device.
path (required)
Path where the configuration/template file is present. If the file has
a ``.conf`` extension, the content is treated as text format. If the
file has a ``.xml`` extension, the content is treated as XML format. If
the file has a ``.set`` extension, the content is treated as Junos OS
``set`` commands.
overwrite : False
Set to ``True`` if you want this file to completely replace the
configuration file.
replace : False
Specify whether the configuration file uses ``replace:`` statements. If
``True``, only those statements under the ``replace`` tag will be
changed.
format
Determines the format of the contents
update : False
Compare a complete loaded configuration against the candidate
configuration. For each hierarchy level or configuration object that is
different in the two configurations, the version in the loaded
configuration replaces the version in the candidate configuration. When
the configuration is later committed, only system processes that are
affected by the changed configuration elements parse the new
configuration. This action is supported from PyEZ 2.1.
template_vars
Variables to be passed into the template processing engine in addition to
those present in pillar, the minion configuration, grains, etc. You may
reference these variables in your template like so:
.. code-block:: jinja
{{ template_vars["var_name"] }}
CLI Examples:
.. code-block:: bash
salt 'device_name' junos.load 'salt://production/network/routers/config.set'
salt 'device_name' junos.load 'salt://templates/replace_config.conf' replace=True
salt 'device_name' junos.load 'salt://my_new_configuration.conf' overwrite=True
salt 'device_name' junos.load 'salt://syslog_template.conf' template_vars='{"syslog_host": "10.180.222.7"}' | Below is the instruction that describes the task:
### Input:
Loads the configuration from the file provided onto the device.
path (required)
Path where the configuration/template file is present. If the file has
a ``.conf`` extension, the content is treated as text format. If the
file has a ``.xml`` extension, the content is treated as XML format. If
the file has a ``.set`` extension, the content is treated as Junos OS
``set`` commands.
overwrite : False
Set to ``True`` if you want this file to completely replace the
configuration file.
replace : False
Specify whether the configuration file uses ``replace:`` statements. If
``True``, only those statements under the ``replace`` tag will be
changed.
format
Determines the format of the contents
update : False
Compare a complete loaded configuration against the candidate
configuration. For each hierarchy level or configuration object that is
different in the two configurations, the version in the loaded
configuration replaces the version in the candidate configuration. When
the configuration is later committed, only system processes that are
affected by the changed configuration elements parse the new
configuration. This action is supported from PyEZ 2.1.
template_vars
Variables to be passed into the template processing engine in addition to
those present in pillar, the minion configuration, grains, etc. You may
reference these variables in your template like so:
.. code-block:: jinja
{{ template_vars["var_name"] }}
CLI Examples:
.. code-block:: bash
salt 'device_name' junos.load 'salt://production/network/routers/config.set'
salt 'device_name' junos.load 'salt://templates/replace_config.conf' replace=True
salt 'device_name' junos.load 'salt://my_new_configuration.conf' overwrite=True
salt 'device_name' junos.load 'salt://syslog_template.conf' template_vars='{"syslog_host": "10.180.222.7"}'
### Response:
def load(path=None, **kwargs):
'''
Loads the configuration from the file provided onto the device.
path (required)
Path where the configuration/template file is present. If the file has
a ``.conf`` extension, the content is treated as text format. If the
file has a ``.xml`` extension, the content is treated as XML format. If
the file has a ``.set`` extension, the content is treated as Junos OS
``set`` commands.
overwrite : False
Set to ``True`` if you want this file to completely replace the
configuration file.
replace : False
Specify whether the configuration file uses ``replace:`` statements. If
``True``, only those statements under the ``replace`` tag will be
changed.
format
Determines the format of the contents
update : False
Compare a complete loaded configuration against the candidate
configuration. For each hierarchy level or configuration object that is
different in the two configurations, the version in the loaded
configuration replaces the version in the candidate configuration. When
the configuration is later committed, only system processes that are
affected by the changed configuration elements parse the new
configuration. This action is supported from PyEZ 2.1.
template_vars
Variables to be passed into the template processing engine in addition to
those present in pillar, the minion configuration, grains, etc. You may
reference these variables in your template like so:
.. code-block:: jinja
{{ template_vars["var_name"] }}
CLI Examples:
.. code-block:: bash
salt 'device_name' junos.load 'salt://production/network/routers/config.set'
salt 'device_name' junos.load 'salt://templates/replace_config.conf' replace=True
salt 'device_name' junos.load 'salt://my_new_configuration.conf' overwrite=True
salt 'device_name' junos.load 'salt://syslog_template.conf' template_vars='{"syslog_host": "10.180.222.7"}'
'''
conn = __proxy__['junos.conn']()
ret = {}
ret['out'] = True
if path is None:
ret['message'] = \
'Please provide the salt path where the configuration is present'
ret['out'] = False
return ret
op = {}
if '__pub_arg' in kwargs:
if kwargs['__pub_arg']:
if isinstance(kwargs['__pub_arg'][-1], dict):
op.update(kwargs['__pub_arg'][-1])
else:
op.update(kwargs)
template_vars = {}
if "template_vars" in op:
template_vars = op["template_vars"]
template_cached_path = salt.utils.files.mkstemp()
__salt__['cp.get_template'](
path,
template_cached_path,
template_vars=template_vars)
if not os.path.isfile(template_cached_path):
ret['message'] = 'Invalid file path.'
ret['out'] = False
return ret
if os.path.getsize(template_cached_path) == 0:
ret['message'] = 'Template failed to render'
ret['out'] = False
return ret
op['path'] = template_cached_path
if 'format' not in op:
if path.endswith('set'):
template_format = 'set'
elif path.endswith('xml'):
template_format = 'xml'
else:
template_format = 'text'
op['format'] = template_format
if 'replace' in op and op['replace']:
op['merge'] = False
del op['replace']
elif 'overwrite' in op and op['overwrite']:
op['overwrite'] = True
elif 'overwrite' in op and not op['overwrite']:
op['merge'] = True
del op['overwrite']
try:
conn.cu.load(**op)
ret['message'] = "Successfully loaded the configuration."
except Exception as exception:
ret['message'] = 'Could not load configuration due to : "{0}"'.format(
exception)
ret['format'] = op['format']
ret['out'] = False
return ret
finally:
salt.utils.files.safe_rm(template_cached_path)
return ret |
def graph_structure(self, x, standalone=True):
"""
Architecture of FlowNetSimple in Figure 2 of FlowNet 1.0.
Args:
x: 2CHW if standalone==True, else NCHW where C=12 is a concatenation
of 5 tensors of [3, 3, 3, 2, 1] channels.
standalone: If True, this model is used to predict flow from two inputs.
If False, this model is used as part of the FlowNet2.
"""
if standalone:
x = tf.concat(tf.split(x, 2, axis=0), axis=1)
with argscope([tf.layers.conv2d], activation=lambda x: tf.nn.leaky_relu(x, 0.1),
padding='valid', strides=2, kernel_size=3,
data_format='channels_first'), \
argscope([tf.layers.conv2d_transpose], padding='same', activation=tf.identity,
data_format='channels_first', strides=2, kernel_size=4):
x = tf.layers.conv2d(pad(x, 3), 64, kernel_size=7, name='conv1')
conv2 = tf.layers.conv2d(pad(x, 2), 128, kernel_size=5, name='conv2')
x = tf.layers.conv2d(pad(conv2, 2), 256, kernel_size=5, name='conv3')
conv3 = tf.layers.conv2d(pad(x, 1), 256, name='conv3_1', strides=1)
x = tf.layers.conv2d(pad(conv3, 1), 512, name='conv4')
conv4 = tf.layers.conv2d(pad(x, 1), 512, name='conv4_1', strides=1)
x = tf.layers.conv2d(pad(conv4, 1), 512, name='conv5')
conv5 = tf.layers.conv2d(pad(x, 1), 512, name='conv5_1', strides=1)
x = tf.layers.conv2d(pad(conv5, 1), 1024, name='conv6')
conv6 = tf.layers.conv2d(pad(x, 1), 1024, name='conv6_1', strides=1)
flow6 = tf.layers.conv2d(pad(conv6, 1), 2, name='predict_flow6', strides=1, activation=tf.identity)
flow6_up = tf.layers.conv2d_transpose(flow6, 2, name='upsampled_flow6_to_5', use_bias=False)
x = tf.layers.conv2d_transpose(conv6, 512, name='deconv5', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
concat5 = tf.concat([conv5, x, flow6_up], axis=1, name='concat5')
flow5 = tf.layers.conv2d(pad(concat5, 1), 2, name='predict_flow5', strides=1, activation=tf.identity)
flow5_up = tf.layers.conv2d_transpose(flow5, 2, name='upsampled_flow5_to_4', use_bias=False)
x = tf.layers.conv2d_transpose(concat5, 256, name='deconv4', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
concat4 = tf.concat([conv4, x, flow5_up], axis=1, name='concat4')
flow4 = tf.layers.conv2d(pad(concat4, 1), 2, name='predict_flow4', strides=1, activation=tf.identity)
flow4_up = tf.layers.conv2d_transpose(flow4, 2, name='upsampled_flow4_to_3', use_bias=False)
x = tf.layers.conv2d_transpose(concat4, 128, name='deconv3', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
concat3 = tf.concat([conv3, x, flow4_up], axis=1, name='concat3')
flow3 = tf.layers.conv2d(pad(concat3, 1), 2, name='predict_flow3', strides=1, activation=tf.identity)
flow3_up = tf.layers.conv2d_transpose(flow3, 2, name='upsampled_flow3_to_2', use_bias=False)
x = tf.layers.conv2d_transpose(concat3, 64, name='deconv2', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
concat2 = tf.concat([conv2, x, flow3_up], axis=1, name='concat2')
flow2 = tf.layers.conv2d(pad(concat2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity)
return tf.identity(flow2, name='flow2') | Architecture of FlowNetSimple in Figure 2 of FlowNet 1.0.
Args:
x: 2CHW if standalone==True, else NCHW where C=12 is a concatenation
of 5 tensors of [3, 3, 3, 2, 1] channels.
standalone: If True, this model is used to predict flow from two inputs.
If False, this model is used as part of the FlowNet2. | Below is the instruction that describes the task:
### Input:
Architecture of FlowNetSimple in Figure 2 of FlowNet 1.0.
Args:
x: 2CHW if standalone==True, else NCHW where C=12 is a concatenation
of 5 tensors of [3, 3, 3, 2, 1] channels.
standalone: If True, this model is used to predict flow from two inputs.
If False, this model is used as part of the FlowNet2.
### Response:
def graph_structure(self, x, standalone=True):
"""
Architecture of FlowNetSimple in Figure 2 of FlowNet 1.0.
Args:
x: 2CHW if standalone==True, else NCHW where C=12 is a concatenation
of 5 tensors of [3, 3, 3, 2, 1] channels.
standalone: If True, this model is used to predict flow from two inputs.
If False, this model is used as part of the FlowNet2.
"""
if standalone:
x = tf.concat(tf.split(x, 2, axis=0), axis=1)
with argscope([tf.layers.conv2d], activation=lambda x: tf.nn.leaky_relu(x, 0.1),
padding='valid', strides=2, kernel_size=3,
data_format='channels_first'), \
argscope([tf.layers.conv2d_transpose], padding='same', activation=tf.identity,
data_format='channels_first', strides=2, kernel_size=4):
x = tf.layers.conv2d(pad(x, 3), 64, kernel_size=7, name='conv1')
conv2 = tf.layers.conv2d(pad(x, 2), 128, kernel_size=5, name='conv2')
x = tf.layers.conv2d(pad(conv2, 2), 256, kernel_size=5, name='conv3')
conv3 = tf.layers.conv2d(pad(x, 1), 256, name='conv3_1', strides=1)
x = tf.layers.conv2d(pad(conv3, 1), 512, name='conv4')
conv4 = tf.layers.conv2d(pad(x, 1), 512, name='conv4_1', strides=1)
x = tf.layers.conv2d(pad(conv4, 1), 512, name='conv5')
conv5 = tf.layers.conv2d(pad(x, 1), 512, name='conv5_1', strides=1)
x = tf.layers.conv2d(pad(conv5, 1), 1024, name='conv6')
conv6 = tf.layers.conv2d(pad(x, 1), 1024, name='conv6_1', strides=1)
flow6 = tf.layers.conv2d(pad(conv6, 1), 2, name='predict_flow6', strides=1, activation=tf.identity)
flow6_up = tf.layers.conv2d_transpose(flow6, 2, name='upsampled_flow6_to_5', use_bias=False)
x = tf.layers.conv2d_transpose(conv6, 512, name='deconv5', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
concat5 = tf.concat([conv5, x, flow6_up], axis=1, name='concat5')
flow5 = tf.layers.conv2d(pad(concat5, 1), 2, name='predict_flow5', strides=1, activation=tf.identity)
flow5_up = tf.layers.conv2d_transpose(flow5, 2, name='upsampled_flow5_to_4', use_bias=False)
x = tf.layers.conv2d_transpose(concat5, 256, name='deconv4', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
concat4 = tf.concat([conv4, x, flow5_up], axis=1, name='concat4')
flow4 = tf.layers.conv2d(pad(concat4, 1), 2, name='predict_flow4', strides=1, activation=tf.identity)
flow4_up = tf.layers.conv2d_transpose(flow4, 2, name='upsampled_flow4_to_3', use_bias=False)
x = tf.layers.conv2d_transpose(concat4, 128, name='deconv3', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
concat3 = tf.concat([conv3, x, flow4_up], axis=1, name='concat3')
flow3 = tf.layers.conv2d(pad(concat3, 1), 2, name='predict_flow3', strides=1, activation=tf.identity)
flow3_up = tf.layers.conv2d_transpose(flow3, 2, name='upsampled_flow3_to_2', use_bias=False)
x = tf.layers.conv2d_transpose(concat3, 64, name='deconv2', activation=lambda x: tf.nn.leaky_relu(x, 0.1))
concat2 = tf.concat([conv2, x, flow3_up], axis=1, name='concat2')
flow2 = tf.layers.conv2d(pad(concat2, 1), 2, name='predict_flow2', strides=1, activation=tf.identity)
return tf.identity(flow2, name='flow2') |
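The network uses a `pad()` helper that is not defined in this row. Since every convolution above uses `padding='valid'` on NCHW tensors, one plausible definition (an assumption, not the original code) is explicit zero-padding of the two spatial dimensions:

```python
import tensorflow as tf

def pad(x, p):
    """Assumed helper: zero-pad an NCHW tensor by `p` pixels on each side of
    the height and width dimensions, so the 'valid' convolutions above see
    explicitly padded inputs."""
    return tf.pad(x, [[0, 0], [0, 0], [p, p], [p, p]])
```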
def handle_request(self):
"""Handles an HTTP request.The actual HTTP request is handled using a
different thread.
"""
timeout = self.socket.gettimeout()
if timeout is None:
timeout = self.timeout
elif self.timeout is not None:
timeout = min(timeout, self.timeout)
ctime = get_time()
done_req = False
shutdown_latency = self.shutdown_latency
if timeout is not None:
shutdown_latency = min(shutdown_latency, timeout) \
if shutdown_latency is not None else timeout
while not (self.done or done_req) and (timeout is None or
timeout == 0 or
(get_time() - ctime) < timeout):
try:
fd_sets = select.select([self], [], [], shutdown_latency)
except (OSError, select.error) as e:
if e.args[0] != errno.EINTR:
raise
# treat EINTR as shutdown_latency timeout
fd_sets = [[], [], []]
for _fd in fd_sets[0]:
done_req = True
self._handle_request_noblock()
if timeout == 0:
break
if not (self.done or done_req):
# don't handle timeouts if we should shut down the server instead
self.handle_timeout() | Handles an HTTP request. The actual HTTP request is handled using a
different thread. | Below is the instruction that describes the task:
### Input:
Handles an HTTP request. The actual HTTP request is handled using a
different thread.
### Response:
def handle_request(self):
"""Handles an HTTP request.The actual HTTP request is handled using a
different thread.
"""
timeout = self.socket.gettimeout()
if timeout is None:
timeout = self.timeout
elif self.timeout is not None:
timeout = min(timeout, self.timeout)
ctime = get_time()
done_req = False
shutdown_latency = self.shutdown_latency
if timeout is not None:
shutdown_latency = min(shutdown_latency, timeout) \
if shutdown_latency is not None else timeout
while not (self.done or done_req) and (timeout is None or
timeout == 0 or
(get_time() - ctime) < timeout):
try:
fd_sets = select.select([self], [], [], shutdown_latency)
except (OSError, select.error) as e:
if e.args[0] != errno.EINTR:
raise
# treat EINTR as shutdown_latency timeout
fd_sets = [[], [], []]
for _fd in fd_sets[0]:
done_req = True
self._handle_request_noblock()
if timeout == 0:
break
if not (self.done or done_req):
# don't handle timeouts if we should shut down the server instead
self.handle_timeout() |
def paragraphs(self):
"""
Immutable sequence of |_Paragraph| instances corresponding to the
paragraphs in this text frame. A text frame always contains at least
one paragraph.
"""
return tuple([_Paragraph(p, self) for p in self._txBody.p_lst]) | Immutable sequence of |_Paragraph| instances corresponding to the
paragraphs in this text frame. A text frame always contains at least
one paragraph. | Below is the instruction that describes the task:
### Input:
Immutable sequence of |_Paragraph| instances corresponding to the
paragraphs in this text frame. A text frame always contains at least
one paragraph.
### Response:
def paragraphs(self):
"""
Immutable sequence of |_Paragraph| instances corresponding to the
paragraphs in this text frame. A text frame always contains at least
one paragraph.
"""
return tuple([_Paragraph(p, self) for p in self._txBody.p_lst]) |
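A usage sketch, assuming this property lives on python-pptx's text frame object (the enclosing class is not shown in this row); the file name is hypothetical:

```python
from pptx import Presentation

prs = Presentation("deck.pptx")                 # hypothetical file
for slide in prs.slides:
    for shape in slide.shapes:
        if not shape.has_text_frame:
            continue
        for paragraph in shape.text_frame.paragraphs:
            print(paragraph.text)
```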
def addDraftThingType(self, thingTypeId, name = None, description = None, schemaId = None, metadata = None):
"""
Creates a thing type.
It accepts thingTypeId (string), name (string), description (string), schemaId (string) and metadata (dict) as parameters
In case of failure it throws APIException
"""
draftThingTypesUrl = ApiClient.draftThingTypesUrl % (self.host)
payload = {'id' : thingTypeId, 'name' : name, 'description' : description, 'schemaId' : schemaId, 'metadata': metadata}
r = requests.post(draftThingTypesUrl, auth=self.credentials, data=json.dumps(payload), headers = {'content-type': 'application/json'}, verify=self.verify)
status = r.status_code
if status == 201:
self.logger.debug("The draft thing Type is created")
return r.json()
elif status == 400:
raise ibmiotf.APIException(400, "Invalid request (No body, invalid JSON, unexpected key, bad value)", r.json())
elif status == 401:
raise ibmiotf.APIException(401, "The authentication token is empty or invalid", None)
elif status == 403:
raise ibmiotf.APIException(403, "The authentication method is invalid or the api key used does not exist", None)
elif status == 409:
raise ibmiotf.APIException(409, "The draft thing type already exists", r.json())
elif status == 500:
raise ibmiotf.APIException(500, "Unexpected error", None)
else:
raise ibmiotf.APIException(None, "Unexpected error", None) | Creates a thing type.
It accepts thingTypeId (string), name (string), description (string), schemaId (string) and metadata (dict) as parameters
In case of failure it throws APIException | Below is the instruction that describes the task:
### Input:
Creates a thing type.
It accepts thingTypeId (string), name (string), description (string), schemaId (string) and metadata (dict) as parameters
In case of failure it throws APIException
### Response:
def addDraftThingType(self, thingTypeId, name = None, description = None, schemaId = None, metadata = None):
"""
Creates a thing type.
It accepts thingTypeId (string), name (string), description (string), schemaId (string) and metadata (dict) as parameters
In case of failure it throws APIException
"""
draftThingTypesUrl = ApiClient.draftThingTypesUrl % (self.host)
payload = {'id' : thingTypeId, 'name' : name, 'description' : description, 'schemaId' : schemaId, 'metadata': metadata}
r = requests.post(draftThingTypesUrl, auth=self.credentials, data=json.dumps(payload), headers = {'content-type': 'application/json'}, verify=self.verify)
status = r.status_code
if status == 201:
self.logger.debug("The draft thing Type is created")
return r.json()
elif status == 400:
raise ibmiotf.APIException(400, "Invalid request (No body, invalid JSON, unexpected key, bad value)", r.json())
elif status == 401:
raise ibmiotf.APIException(401, "The authentication token is empty or invalid", None)
elif status == 403:
raise ibmiotf.APIException(403, "The authentication method is invalid or the api key used does not exist", None)
elif status == 409:
raise ibmiotf.APIException(409, "The draft thing type already exists", r.json())
elif status == 500:
raise ibmiotf.APIException(500, "Unexpected error", None)
else:
raise ibmiotf.APIException(None, "Unexpected error", None) |
def _parse_ldap(ldap_filter):
# type: (str) -> Optional[LDAPFilter]
"""
Parses the given LDAP filter string
:param ldap_filter: An LDAP filter string
:return: An LDAPFilter object, None if the filter was empty
:raise ValueError: The LDAP filter string is invalid
"""
if ldap_filter is None:
# Nothing to do
return None
assert is_string(ldap_filter)
# Remove surrounding spaces
ldap_filter = ldap_filter.strip()
if not ldap_filter:
# Empty string
return None
escaped = False
filter_len = len(ldap_filter)
root = None
stack = []
subfilter_stack = []
idx = 0
while idx < filter_len:
if not escaped:
if ldap_filter[idx] == "(":
# Opening filter : get the operator
idx = _skip_spaces(ldap_filter, idx + 1)
if idx == -1:
raise ValueError(
"Missing filter operator: {0}".format(ldap_filter)
)
operator = _compute_operation(ldap_filter, idx)
if operator is not None:
# New sub-filter
stack.append(LDAPFilter(operator))
else:
# Sub-filter content
subfilter_stack.append(idx)
elif ldap_filter[idx] == ")":
# Ending filter : store it in its parent
if subfilter_stack:
# criterion finished
start_idx = subfilter_stack.pop()
criterion = _parse_ldap_criteria(
ldap_filter, start_idx, idx
)
if stack:
top = stack.pop()
top.append(criterion)
stack.append(top)
else:
# No parent : filter contains only one criterion
# Make a parent to stay homogeneous
root = LDAPFilter(AND)
root.append(criterion)
elif stack:
# Sub filter finished
ended_filter = stack.pop()
if stack:
top = stack.pop()
top.append(ended_filter)
stack.append(top)
else:
# End of the parse
root = ended_filter
else:
raise ValueError(
"Too many end of parenthesis:{0}: {1}".format(
idx, ldap_filter[idx:]
)
)
elif ldap_filter[idx] == "\\":
# Next character must be ignored
escaped = True
else:
# Escaped character ignored
escaped = False
# Don't forget to increment...
idx += 1
# No root : invalid content
if root is None:
raise ValueError("Invalid filter string: {0}".format(ldap_filter))
# Return the root of the filter
return root.normalize() | Parses the given LDAP filter string
:param ldap_filter: An LDAP filter string
:return: An LDAPFilter object, None if the filter was empty
:raise ValueError: The LDAP filter string is invalid | Below is the instruction that describes the task:
### Input:
Parses the given LDAP filter string
:param ldap_filter: An LDAP filter string
:return: An LDAPFilter object, None if the filter was empty
:raise ValueError: The LDAP filter string is invalid
### Response:
def _parse_ldap(ldap_filter):
# type: (str) -> Optional[LDAPFilter]
"""
Parses the given LDAP filter string
:param ldap_filter: An LDAP filter string
:return: An LDAPFilter object, None if the filter was empty
:raise ValueError: The LDAP filter string is invalid
"""
if ldap_filter is None:
# Nothing to do
return None
assert is_string(ldap_filter)
# Remove surrounding spaces
ldap_filter = ldap_filter.strip()
if not ldap_filter:
# Empty string
return None
escaped = False
filter_len = len(ldap_filter)
root = None
stack = []
subfilter_stack = []
idx = 0
while idx < filter_len:
if not escaped:
if ldap_filter[idx] == "(":
# Opening filter : get the operator
idx = _skip_spaces(ldap_filter, idx + 1)
if idx == -1:
raise ValueError(
"Missing filter operator: {0}".format(ldap_filter)
)
operator = _compute_operation(ldap_filter, idx)
if operator is not None:
# New sub-filter
stack.append(LDAPFilter(operator))
else:
# Sub-filter content
subfilter_stack.append(idx)
elif ldap_filter[idx] == ")":
# Ending filter : store it in its parent
if subfilter_stack:
# criterion finished
start_idx = subfilter_stack.pop()
criterion = _parse_ldap_criteria(
ldap_filter, start_idx, idx
)
if stack:
top = stack.pop()
top.append(criterion)
stack.append(top)
else:
# No parent : filter contains only one criterion
# Make a parent to stay homogeneous
root = LDAPFilter(AND)
root.append(criterion)
elif stack:
# Sub filter finished
ended_filter = stack.pop()
if stack:
top = stack.pop()
top.append(ended_filter)
stack.append(top)
else:
# End of the parse
root = ended_filter
else:
raise ValueError(
"Too many end of parenthesis:{0}: {1}".format(
idx, ldap_filter[idx:]
)
)
elif ldap_filter[idx] == "\\":
# Next character must be ignored
escaped = True
else:
# Escaped character ignored
escaped = False
# Don't forget to increment...
idx += 1
# No root : invalid content
if root is None:
raise ValueError("Invalid filter string: {0}".format(ldap_filter))
# Return the root of the filter
return root.normalize() |
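A short usage sketch of the parser; the `LDAPFilter` and criterion classes come from the enclosing module and are not shown in this row, so the printed representations are indicative only:

```python
flt = _parse_ldap("(&(objectClass=service)(instance.name=toto))")
print(flt)                    # an AND LDAPFilter wrapping two criteria

print(_parse_ldap("   "))     # None -- a blank filter parses to nothing

try:
    _parse_ldap("(&(a=1)")    # unbalanced parentheses
except ValueError as err:
    print("invalid filter:", err)
```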
def list_firewall_rules(self, server_name):
'''
Retrieves the set of firewall rules for an Azure SQL Database Server.
server_name:
Name of the server.
'''
_validate_not_none('server_name', server_name)
response = self._perform_get(self._get_firewall_rules_path(server_name),
None)
return _MinidomXmlToObject.parse_service_resources_response(
response, FirewallRule) | Retrieves the set of firewall rules for an Azure SQL Database Server.
server_name:
Name of the server. | Below is the instruction that describes the task:
### Input:
Retrieves the set of firewall rules for an Azure SQL Database Server.
server_name:
Name of the server.
### Response:
def list_firewall_rules(self, server_name):
'''
Retrieves the set of firewall rules for an Azure SQL Database Server.
server_name:
Name of the server.
'''
_validate_not_none('server_name', server_name)
response = self._perform_get(self._get_firewall_rules_path(server_name),
None)
return _MinidomXmlToObject.parse_service_resources_response(
response, FirewallRule) |
def orient_import2(self, event):
"""
initialize window to import an AzDip format file into the working directory
"""
pmag_menu_dialogs.ImportAzDipFile(self.parent, self.parent.WD) | initialize window to import an AzDip format file into the working directory | Below is the the instruction that describes the task:
### Input:
initialize window to import an AzDip format file into the working directory
### Response:
def orient_import2(self, event):
"""
initialize window to import an AzDip format file into the working directory
"""
pmag_menu_dialogs.ImportAzDipFile(self.parent, self.parent.WD) |
def parse_case_snake_to_camel(snake, upper_first=True):
"""
Convert a string from snake_case to CamelCase.
:param str snake: The snake_case string to convert.
:param bool upper_first: Whether or not to capitalize the first
character of the string.
:return: The CamelCase version of string.
:rtype: str
"""
snake = snake.split('_')
first_part = snake[0]
if upper_first:
first_part = first_part.title()
return first_part + ''.join(word.title() for word in snake[1:]) | Convert a string from snake_case to CamelCase.
:param str snake: The snake_case string to convert.
:param bool upper_first: Whether or not to capitalize the first
character of the string.
:return: The CamelCase version of string.
:rtype: str | Below is the instruction that describes the task:
### Input:
Convert a string from snake_case to CamelCase.
:param str snake: The snake_case string to convert.
:param bool upper_first: Whether or not to capitalize the first
character of the string.
:return: The CamelCase version of string.
:rtype: str
### Response:
def parse_case_snake_to_camel(snake, upper_first=True):
"""
Convert a string from snake_case to CamelCase.
:param str snake: The snake_case string to convert.
:param bool upper_first: Whether or not to capitalize the first
character of the string.
:return: The CamelCase version of string.
:rtype: str
"""
snake = snake.split('_')
first_part = snake[0]
if upper_first:
first_part = first_part.title()
return first_part + ''.join(word.title() for word in snake[1:]) |
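Two quick examples of the conversion:

```python
print(parse_case_snake_to_camel("remote_host_name"))                     # RemoteHostName
print(parse_case_snake_to_camel("remote_host_name", upper_first=False))  # remoteHostName
```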
def solve_minimize(
self,
func,
weights,
constraints,
lower_bound=0.0,
upper_bound=1.0,
func_deriv=False
):
"""
Returns the solution to a minimization problem.
"""
bounds = ((lower_bound, upper_bound), ) * len(self.SUPPORTED_COINS)
return minimize(
fun=func, x0=weights, jac=func_deriv, bounds=bounds,
constraints=constraints, method='SLSQP', options={'disp': False}
) | Returns the solution to a minimization problem. | Below is the instruction that describes the task:
### Input:
Returns the solution to a minimization problem.
### Response:
def solve_minimize(
self,
func,
weights,
constraints,
lower_bound=0.0,
upper_bound=1.0,
func_deriv=False
):
"""
Returns the solution to a minimization problem.
"""
bounds = ((lower_bound, upper_bound), ) * len(self.SUPPORTED_COINS)
return minimize(
fun=func, x0=weights, jac=func_deriv, bounds=bounds,
constraints=constraints, method='SLSQP', options={'disp': False}
) |
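A self-contained sketch of the same SLSQP call pattern, with a toy two-asset variance objective and a sum-to-one constraint; the real objective, derivative and `SUPPORTED_COINS` live on the enclosing class, so everything below is an assumption for illustration:

```python
import numpy as np
from scipy.optimize import minimize

cov = np.array([[0.10, 0.02],
                [0.02, 0.08]])

def objective(w):
    return w @ cov @ w                                  # toy portfolio variance

constraints = ({'type': 'eq', 'fun': lambda w: np.sum(w) - 1.0},)
bounds = ((0.0, 1.0),) * 2                              # one (lower, upper) pair per asset

result = minimize(fun=objective, x0=np.array([0.5, 0.5]), jac=False,
                  bounds=bounds, constraints=constraints,
                  method='SLSQP', options={'disp': False})
print(result.x)                                         # optimal weights
```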
def create(self, input=None, live_stream=False, outputs=None, options=None):
""" Creates a transcoding job. Here are some examples::
job.create('s3://zencodertesting/test.mov')
job.create(live_stream=True)
job.create(input='http://example.com/input.mov',
outputs=({'label': 'test output'},))
https://app.zencoder.com/docs/api/jobs/create
"""
data = {"input": input, "test": self.test}
if outputs:
data['outputs'] = outputs
if options:
data.update(options)
if live_stream:
data['live_stream'] = live_stream
return self.post(self.base_url, body=json.dumps(data)) | Creates a transcoding job. Here are some examples::
job.create('s3://zencodertesting/test.mov')
job.create(live_stream=True)
job.create(input='http://example.com/input.mov',
outputs=({'label': 'test output'},))
https://app.zencoder.com/docs/api/jobs/create | Below is the instruction that describes the task:
### Input:
Creates a transcoding job. Here are some examples::
job.create('s3://zencodertesting/test.mov')
job.create(live_stream=True)
job.create(input='http://example.com/input.mov',
outputs=({'label': 'test output'},))
https://app.zencoder.com/docs/api/jobs/create
### Response:
def create(self, input=None, live_stream=False, outputs=None, options=None):
""" Creates a transcoding job. Here are some examples::
job.create('s3://zencodertesting/test.mov')
job.create(live_stream=True)
job.create(input='http://example.com/input.mov',
outputs=({'label': 'test output'},))
https://app.zencoder.com/docs/api/jobs/create
"""
data = {"input": input, "test": self.test}
if outputs:
data['outputs'] = outputs
if options:
data.update(options)
if live_stream:
data['live_stream'] = live_stream
return self.post(self.base_url, body=json.dumps(data)) |
def rdkitmol_Hs(self):
r'''RDKit object of the chemical, with hydrogen. If RDKit is not
available, holds None.
For examples of what can be done with RDKit, see
`their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_.
'''
if self.__rdkitmol_Hs:
return self.__rdkitmol_Hs
else:
try:
self.__rdkitmol_Hs = Chem.AddHs(self.rdkitmol)
return self.__rdkitmol_Hs
except:
return None | r'''RDKit object of the chemical, with hydrogen. If RDKit is not
available, holds None.
For examples of what can be done with RDKit, see
`their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_. | Below is the instruction that describes the task:
### Input:
r'''RDKit object of the chemical, with hydrogen. If RDKit is not
available, holds None.
For examples of what can be done with RDKit, see
`their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_.
### Response:
def rdkitmol_Hs(self):
r'''RDKit object of the chemical, with hydrogen. If RDKit is not
available, holds None.
For examples of what can be done with RDKit, see
`their website <http://www.rdkit.org/docs/GettingStartedInPython.html>`_.
'''
if self.__rdkitmol_Hs:
return self.__rdkitmol_Hs
else:
try:
self.__rdkitmol_Hs = Chem.AddHs(self.rdkitmol)
return self.__rdkitmol_Hs
except:
return None |
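The underlying RDKit call, shown standalone (ethanol is an arbitrary example molecule):

```python
from rdkit import Chem

mol = Chem.MolFromSmiles("CCO")        # ethanol, hydrogens implicit
mol_h = Chem.AddHs(mol)                # explicit-hydrogen copy, as cached above
print(mol.GetNumAtoms(), mol_h.GetNumAtoms())   # 3 9
```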
def makeObjectFeed(
paginator, objectToXMLFunction, feedId, title, webRoot,
idAttr="id", nameAttr="name", dateAttr=None, request=None, page=1,
count=20, author=APP_AUTHOR):
"""
Take a list of some kind of object, a conversion function, an id and a
title. Return XML representing an ATOM feed
"""
listSize = paginator.count
if listSize:
object_list = paginator.page(page).object_list
else:
object_list = []
count = int(count)
originalId = feedId
idParts = feedId.split("?", 1)
if len(idParts) == 2:
feedId = idParts[0]
if request:
GETStruct = request.GET
else:
GETStruct = False
feedTag = etree.Element(ATOM + "feed", nsmap=ATOM_NSMAP)
# The id tag is very similar to the 'self' link
idTag = etree.SubElement(feedTag, ATOM + "id")
idTag.text = "%s/%s" % (webRoot, feedId)
# The title is passed in from the calling function
titleTag = etree.SubElement(feedTag, ATOM + "title")
titleTag.text = title
# The author is passed in from the calling function and required to be valid ATOM
if author:
authorTag = etree.SubElement(feedTag, ATOM + "author")
nameTag = etree.SubElement(authorTag, ATOM + "name")
urlTag = etree.SubElement(authorTag, ATOM + "uri")
nameTag.text = author.get('name', 'UNT')
urlTag.text = author.get('uri', 'http://library.unt.edu/')
# The updated tag holds the feed's last-updated timestamp
updatedTag = etree.SubElement(feedTag, ATOM + "updated")
updatedTag.text = xsDateTime_format(localize_datetime(datetime.now()))
# We will always show the link to the current 'self' page
linkTag = etree.SubElement(feedTag, ATOM + "link")
linkTag.set("rel", "self")
if not request or not request.META['QUERY_STRING']:
linkTag.set("href", "%s/%s" % (webRoot, feedId))
else:
linkTag.set(
"href", "%s/%s?%s" % (
webRoot, feedId, urllib.urlencode(request.GET, doseq=True)
)
)
# We always have a last page
endLink = etree.SubElement(feedTag, ATOM + "link")
endLink.set("rel", "last")
if GETStruct:
endLinkGS = GETStruct.copy()
else:
endLinkGS = {}
endLinkGS.update({"page": paginator.num_pages})
endLink.set(
"href", "%s/%s?%s" % (
webRoot, feedId, urllib.urlencode(endLinkGS, doseq=True)
)
)
# We always have a first page
startLink = etree.SubElement(feedTag, ATOM + "link")
startLink.set("rel", "first")
if GETStruct:
startLinkGS = GETStruct.copy()
else:
startLinkGS = {}
startLinkGS.update({"page": paginator.page_range[0]})
startLink.set(
"href", "%s/%s?%s" % (
webRoot, feedId, urllib.urlencode(startLinkGS, doseq=True)
)
)
# Potentially there is a previous page, list its details
if paginator.page(page).has_previous():
prevLink = etree.SubElement(feedTag, ATOM + "link")
prevLink.set("rel", "previous")
if GETStruct:
prevLinkGS = GETStruct.copy()
else:
prevLinkGS = {}
prevLinkGS.update(
{"page": paginator.page(page).previous_page_number()}
)
prevLinkText = "%s/%s?%s" % (
webRoot, feedId, urllib.urlencode(prevLinkGS, doseq=True)
)
prevLink.set("href", prevLinkText)
# Potentially there is a next page, fill in its details
if paginator.page(page).has_next():
nextLink = etree.SubElement(feedTag, ATOM + "link")
nextLink.set("rel", "next")
if GETStruct:
nextLinkGS = GETStruct.copy()
else:
nextLinkGS = {}
nextLinkGS.update({"page": paginator.page(page).next_page_number()})
nextLinkText = "%s/%s?%s" % (
webRoot, feedId, urllib.urlencode(nextLinkGS, doseq=True)
)
nextLink.set("href", nextLinkText)
for o in object_list:
objectXML = objectToXMLFunction(o)
if dateAttr:
dateStamp = getattr(o, dateAttr)
else:
dateStamp = None
althref = feedId.strip('/').split('/')[-1]
althref = '%s/%s/%s/' % (
webRoot, althref, getattr(o, idAttr)
)
objectEntry = wrapAtom(
xml=objectXML,
id='%s/%s%s/' % (webRoot, originalId, getattr(o, idAttr)),
title=getattr(o, nameAttr),
updated=dateStamp,
alt=althref
)
feedTag.append(objectEntry)
return feedTag | Take a list of some kind of object, a conversion function, an id and a
title. Return XML representing an ATOM feed | Below is the instruction that describes the task:
### Input:
Take a list of some kind of object, a conversion function, an id and a
title. Return XML representing an ATOM feed
### Response:
def makeObjectFeed(
paginator, objectToXMLFunction, feedId, title, webRoot,
idAttr="id", nameAttr="name", dateAttr=None, request=None, page=1,
count=20, author=APP_AUTHOR):
"""
Take a list of some kind of object, a conversion function, an id and a
title. Return XML representing an ATOM feed
"""
listSize = paginator.count
if listSize:
object_list = paginator.page(page).object_list
else:
object_list = []
count = int(count)
originalId = feedId
idParts = feedId.split("?", 1)
if len(idParts) == 2:
feedId = idParts[0]
if request:
GETStruct = request.GET
else:
GETStruct = False
feedTag = etree.Element(ATOM + "feed", nsmap=ATOM_NSMAP)
# The id tag is very similar to the 'self' link
idTag = etree.SubElement(feedTag, ATOM + "id")
idTag.text = "%s/%s" % (webRoot, feedId)
# The title is passed in from the calling function
titleTag = etree.SubElement(feedTag, ATOM + "title")
titleTag.text = title
# The author is passed in from the calling function and required to be valid ATOM
if author:
authorTag = etree.SubElement(feedTag, ATOM + "author")
nameTag = etree.SubElement(authorTag, ATOM + "name")
urlTag = etree.SubElement(authorTag, ATOM + "uri")
nameTag.text = author.get('name', 'UNT')
urlTag.text = author.get('uri', 'http://library.unt.edu/')
# The updated tag holds the feed's last-updated timestamp
updatedTag = etree.SubElement(feedTag, ATOM + "updated")
updatedTag.text = xsDateTime_format(localize_datetime(datetime.now()))
# We will always show the link to the current 'self' page
linkTag = etree.SubElement(feedTag, ATOM + "link")
linkTag.set("rel", "self")
if not request or not request.META['QUERY_STRING']:
linkTag.set("href", "%s/%s" % (webRoot, feedId))
else:
linkTag.set(
"href", "%s/%s?%s" % (
webRoot, feedId, urllib.urlencode(request.GET, doseq=True)
)
)
# We always have a last page
endLink = etree.SubElement(feedTag, ATOM + "link")
endLink.set("rel", "last")
if GETStruct:
endLinkGS = GETStruct.copy()
else:
endLinkGS = {}
endLinkGS.update({"page": paginator.num_pages})
endLink.set(
"href", "%s/%s?%s" % (
webRoot, feedId, urllib.urlencode(endLinkGS, doseq=True)
)
)
# We always have a first page
startLink = etree.SubElement(feedTag, ATOM + "link")
startLink.set("rel", "first")
if GETStruct:
startLinkGS = GETStruct.copy()
else:
startLinkGS = {}
startLinkGS.update({"page": paginator.page_range[0]})
startLink.set(
"href", "%s/%s?%s" % (
webRoot, feedId, urllib.urlencode(startLinkGS, doseq=True)
)
)
# Potentially there is a previous page, list its details
if paginator.page(page).has_previous():
prevLink = etree.SubElement(feedTag, ATOM + "link")
prevLink.set("rel", "previous")
if GETStruct:
prevLinkGS = GETStruct.copy()
else:
prevLinkGS = {}
prevLinkGS.update(
{"page": paginator.page(page).previous_page_number()}
)
prevLinkText = "%s/%s?%s" % (
webRoot, feedId, urllib.urlencode(prevLinkGS, doseq=True)
)
prevLink.set("href", prevLinkText)
# Potentially there is a next page, fill in its details
if paginator.page(page).has_next():
nextLink = etree.SubElement(feedTag, ATOM + "link")
nextLink.set("rel", "next")
if GETStruct:
nextLinkGS = GETStruct.copy()
else:
nextLinkGS = {}
nextLinkGS.update({"page": paginator.page(page).next_page_number()})
nextLinkText = "%s/%s?%s" % (
webRoot, feedId, urllib.urlencode(nextLinkGS, doseq=True)
)
nextLink.set("href", nextLinkText)
for o in object_list:
objectXML = objectToXMLFunction(o)
if dateAttr:
dateStamp = getattr(o, dateAttr)
else:
dateStamp = None
althref = feedId.strip('/').split('/')[-1]
althref = '%s/%s/%s/' % (
webRoot, althref, getattr(o, idAttr)
)
objectEntry = wrapAtom(
xml=objectXML,
id='%s/%s%s/' % (webRoot, originalId, getattr(o, idAttr)),
title=getattr(o, nameAttr),
updated=dateStamp,
alt=althref
)
feedTag.append(objectEntry)
return feedTag |
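A hedged usage sketch for makeObjectFeed as defined above. MyModel, the webRoot URL and the record_to_xml converter are assumptions (the model is assumed to expose id, name and modified attributes); wrapAtom and APP_AUTHOR are expected to come from the same module.
from django.core.paginator import Paginator
from lxml import etree

def record_to_xml(obj):
    # Hypothetical converter: wrap each model instance in a small XML element.
    el = etree.Element("record")
    el.set("id", str(obj.id))
    return el

paginator = Paginator(MyModel.objects.order_by("id"), 20)  # MyModel is an assumption
feed = makeObjectFeed(
    paginator, record_to_xml, feedId="records/", title="Records",
    webRoot="http://example.com", dateAttr="modified", page=1, count=20)
print(etree.tostring(feed, pretty_print=True).decode())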
def canonical_order(self):
"""The vertices in a canonical or normalized order.
This routine will return a list of vertices in an order that does not
depend on the initial order, but only depends on the connectivity and
the return values of the function self.get_vertex_string.
Only the vertices that are involved in edges will be included. The
result can be given as first argument to self.get_subgraph, with
reduce=True as second argument. This will return a complete canonical
graph.
The routine is designed not to use symmetry relations that are
obtained with the GraphSearch routine. We also tried to create an
ordering that feels like natural, i.e. starting in the center and
pushing vertices with few equivalents to the front. If necessary, the
nature of the vertices and their bonds to atoms closer to the center
will also play a role, but only as a last resort.
"""
# A) find an appropriate starting vertex.
# Here we take a central vertex that has a minimal number of symmetrical
# equivalents, 'the highest atom number', and the highest fingerprint.
# Note that the symmetrical equivalents are computed from the vertex
# fingerprints, i.e. without the GraphSearch.
starting_vertex = max(
(
-len(self.equivalent_vertices[vertex]),
self.get_vertex_string(vertex),
self.vertex_fingerprints[vertex].tobytes(),
vertex
) for vertex in self.central_vertices
)[-1]
# B) sort all vertices based on
# 1) distance from central vertex
# 2) number of equivalent vertices
# 3) vertex string, (higher atom numbers come first)
# 4) fingerprint
# 5) vertex index
# The last field is only included to collect the result of the sort.
# The fingerprint on itself would be sufficient, but the three first are
# there to have a naturally appealing result.
l = [
[
-distance,
-len(self.equivalent_vertices[vertex]),
self.get_vertex_string(vertex),
self.vertex_fingerprints[vertex].tobytes(),
vertex
] for vertex, distance in self.iter_breadth_first(starting_vertex)
if len(self.neighbors[vertex]) > 0
]
l.sort(reverse=True)
# C) The order of some vertices is still not completely set. e.g.
# consider the case of allene. The four hydrogen atoms are equivalent,
# but one can have two different orders: make geminals consecutive or
# don't. It is more tricky than one would think at first sight. In the
# case of allene, geminality could easily solve the problem. Consider a
# big flat rotationally symmetric molecule (order 2). The first five
# shells are order 4 and one would just give a random order to four
# segments in the first shell. Only when one reaches the outer part that
# has order two, it turns out that the arbitrary choices in the inner
# shell play a role. So it does not help to look at relations with
# vertices at inner or current shells only. One has to consider the
# whole picture. (unit testing reveals troubles like these)
# I need some sleep now. The code below checks for potential fuzz and
# will raise an error if the ordering is not fully determined yet. One
# day, I'll need this code more than I do now, and I'll fix things up.
# I know how to do this, but I don't care enough right now.
# -- Toon
for i in range(1, len(l)):
if l[i][:-1] == l[i-1][:-1]:
raise NotImplementedError
# D) Return only the vertex indexes.
return [record[-1] for record in l] | The vertices in a canonical or normalized order.
This routine will return a list of vertices in an order that does not
depend on the initial order, but only depends on the connectivity and
the return values of the function self.get_vertex_string.
Only the vertices that are involved in edges will be included. The
result can be given as first argument to self.get_subgraph, with
reduce=True as second argument. This will return a complete canonical
graph.
The routine is designed not to use symmetry relations that are
obtained with the GraphSearch routine. We also tried to create an
ordering that feels like natural, i.e. starting in the center and
pushing vertices with few equivalents to the front. If necessary, the
nature of the vertices and their bonds to atoms closer to the center
will also play a role, but only as a last resort. | Below is the instruction that describes the task:
### Input:
The vertices in a canonical or normalized order.
This routine will return a list of vertices in an order that does not
depend on the initial order, but only depends on the connectivity and
the return values of the function self.get_vertex_string.
Only the vertices that are involved in edges will be included. The
result can be given as first argument to self.get_subgraph, with
reduce=True as second argument. This will return a complete canonical
graph.
The routine is designed not to use symmetry relations that are
obtained with the GraphSearch routine. We also tried to create an
ordering that feels like natural, i.e. starting in the center and
pushing vertices with few equivalents to the front. If necessary, the
nature of the vertices and their bonds to atoms closer to the center
will also play a role, but only as a last resort.
### Response:
def canonical_order(self):
"""The vertices in a canonical or normalized order.
This routine will return a list of vertices in an order that does not
depend on the initial order, but only depends on the connectivity and
the return values of the function self.get_vertex_string.
Only the vertices that are involved in edges will be included. The
result can be given as first argument to self.get_subgraph, with
reduce=True as second argument. This will return a complete canonical
graph.
The routine is designed not to use symmetry relations that are
obtained with the GraphSearch routine. We also tried to create an
ordering that feels like natural, i.e. starting in the center and
pushing vertices with few equivalents to the front. If necessary, the
nature of the vertices and their bonds to atoms closer to the center
will also play a role, but only as a last resort.
"""
# A) find an appropriate starting vertex.
# Here we take a central vertex that has a minimal number of symmetrical
# equivalents, 'the highest atom number', and the highest fingerprint.
# Note that the symmetrical equivalents are computed from the vertex
# fingerprints, i.e. without the GraphSearch.
starting_vertex = max(
(
-len(self.equivalent_vertices[vertex]),
self.get_vertex_string(vertex),
self.vertex_fingerprints[vertex].tobytes(),
vertex
) for vertex in self.central_vertices
)[-1]
# B) sort all vertices based on
# 1) distance from central vertex
# 2) number of equivalent vertices
# 3) vertex string, (higher atom numbers come first)
# 4) fingerprint
# 5) vertex index
# The last field is only included to collect the result of the sort.
# The fingerprint on itself would be sufficient, but the three first are
# there to have a naturally appealing result.
l = [
[
-distance,
-len(self.equivalent_vertices[vertex]),
self.get_vertex_string(vertex),
self.vertex_fingerprints[vertex].tobytes(),
vertex
] for vertex, distance in self.iter_breadth_first(starting_vertex)
if len(self.neighbors[vertex]) > 0
]
l.sort(reverse=True)
# C) The order of some vertices is still not completely set. e.g.
# consider the case of allene. The four hydrogen atoms are equivalent,
# but one can have two different orders: make geminals consecutive or
# don't. It is more tricky than one would think at first sight. In the
# case of allene, geminality could easily solve the problem. Consider a
# big flat rotationally symmetric molecule (order 2). The first five
# shells are order 4 and one would just give a random order to four
# segments in the first shell. Only when one reaches the outer part that
# has order two, it turns out that the arbitrary choices in the inner
# shell play a role. So it does not help to look at relations with
# vertices at inner or current shells only. One has to consider the
# whole picture. (unit testing reveals troubles like these)
# I need some sleep now. The code below checks for potential fuzz and
# will raise an error if the ordering is not fully determined yet. One
# day, I'll need this code more than I do now, and I'll fix things up.
# I know how to do this, but I don't care enough right now.
# -- Toon
for i in range(1, len(l)):
if l[i][:-1] == l[i-1][:-1]:
raise NotImplementedError
# D) Return only the vertex indexes.
return [record[-1] for record in l] |
def _get_upper_bound(self):
r"""Return an upper bound on the eigenvalues of the Laplacian."""
if self.lap_type == 'normalized':
return 2 # Equal iff the graph is bipartite.
elif self.lap_type == 'combinatorial':
bounds = []
# Equal for full graphs.
bounds += [self.n_vertices * np.max(self.W)]
# Gershgorin circle theorem. Equal for regular bipartite graphs.
# Special case of the below bound.
bounds += [2 * np.max(self.dw)]
# Anderson, Morley, Eigenvalues of the Laplacian of a graph.
# Equal for regular bipartite graphs.
if self.n_edges > 0:
sources, targets, _ = self.get_edge_list()
bounds += [np.max(self.dw[sources] + self.dw[targets])]
# Merris, A note on Laplacian graph eigenvalues.
if not self.is_directed():
W = self.W
else:
W = utils.symmetrize(self.W, method='average')
m = W.dot(self.dw) / self.dw # Mean degree of adjacent vertices.
bounds += [np.max(self.dw + m)]
# Good review: On upper bounds for Laplacian graph eigenvalues.
return min(bounds)
else:
raise ValueError('Unknown Laplacian type '
'{}'.format(self.lap_type)) | r"""Return an upper bound on the eigenvalues of the Laplacian. | Below is the instruction that describes the task:
### Input:
r"""Return an upper bound on the eigenvalues of the Laplacian.
### Response:
def _get_upper_bound(self):
r"""Return an upper bound on the eigenvalues of the Laplacian."""
if self.lap_type == 'normalized':
return 2 # Equal iff the graph is bipartite.
elif self.lap_type == 'combinatorial':
bounds = []
# Equal for full graphs.
bounds += [self.n_vertices * np.max(self.W)]
# Gershgorin circle theorem. Equal for regular bipartite graphs.
# Special case of the below bound.
bounds += [2 * np.max(self.dw)]
# Anderson, Morley, Eigenvalues of the Laplacian of a graph.
# Equal for regular bipartite graphs.
if self.n_edges > 0:
sources, targets, _ = self.get_edge_list()
bounds += [np.max(self.dw[sources] + self.dw[targets])]
# Merris, A note on Laplacian graph eigenvalues.
if not self.is_directed():
W = self.W
else:
W = utils.symmetrize(self.W, method='average')
m = W.dot(self.dw) / self.dw # Mean degree of adjacent vertices.
bounds += [np.max(self.dw + m)]
# Good review: On upper bounds for Laplacian graph eigenvalues.
return min(bounds)
else:
raise ValueError('Unknown Laplacian type '
'{}'.format(self.lap_type)) |
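The combinatorial branch above compares several classical bounds; the following is a small self-contained NumPy illustration of the same quantities on a toy symmetric weight matrix (an assumption, not the Graph class itself).
import numpy as np

W = np.array([[0., 1., 1.],
              [1., 0., 2.],
              [1., 2., 0.]])          # toy undirected weight matrix
dw = W.sum(axis=1)                    # weighted degrees
bounds = [W.shape[0] * W.max(),       # full-graph bound
          2 * dw.max()]               # Gershgorin-style bound
sources, targets = np.nonzero(W)
bounds.append((dw[sources] + dw[targets]).max())   # Anderson-Morley bound
m = W.dot(dw) / dw                    # mean degree of adjacent vertices
bounds.append((dw + m).max())         # Merris bound
print(min(bounds))                    # upper bound on the largest Laplacian eigenvalue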
def connect(self, f, mode=None):
"""
Connect an object `f` to the signal. The type the object needs to have
depends on `mode`, but usually it needs to be a callable.
:meth:`connect` returns an opaque token which can be used with
:meth:`disconnect` to disconnect the object from the signal.
The default value for `mode` is :attr:`STRONG`. Any decorator can be
used as argument for `mode` and it is applied to `f`. The result is
stored internally and is what will be called when the signal is being
emitted.
If the result of `mode` returns a false value during emission, the
connection is removed.
.. note::
The return values required by the callable returned by `mode` and
the one required by a callable passed to `f` using the predefined
modes are complementary!
A callable `f` needs to return true to be removed from the
connections, while a callable returned by the `mode` decorator needs
to return false.
Existing modes are listed below.
"""
mode = mode or self.STRONG
self.logger.debug("connecting %r with mode %r", f, mode)
return self._connect(mode(f)) | Connect an object `f` to the signal. The type the object needs to have
depends on `mode`, but usually it needs to be a callable.
:meth:`connect` returns an opaque token which can be used with
:meth:`disconnect` to disconnect the object from the signal.
The default value for `mode` is :attr:`STRONG`. Any decorator can be
used as argument for `mode` and it is applied to `f`. The result is
stored internally and is what will be called when the signal is being
emitted.
If the result of `mode` returns a false value during emission, the
connection is removed.
.. note::
The return values required by the callable returned by `mode` and
the one required by a callable passed to `f` using the predefined
modes are complementary!
A callable `f` needs to return true to be removed from the
connections, while a callable returned by the `mode` decorator needs
to return false.
Existing modes are listed below. | Below is the instruction that describes the task:
### Input:
Connect an object `f` to the signal. The type the object needs to have
depends on `mode`, but usually it needs to be a callable.
:meth:`connect` returns an opaque token which can be used with
:meth:`disconnect` to disconnect the object from the signal.
The default value for `mode` is :attr:`STRONG`. Any decorator can be
used as argument for `mode` and it is applied to `f`. The result is
stored internally and is what will be called when the signal is being
emitted.
If the result of `mode` returns a false value during emission, the
connection is removed.
.. note::
The return values required by the callable returned by `mode` and
the one required by a callable passed to `f` using the predefined
modes are complementary!
A callable `f` needs to return true to be removed from the
connections, while a callable returned by the `mode` decorator needs
to return false.
Existing modes are listed below.
### Response:
def connect(self, f, mode=None):
"""
Connect an object `f` to the signal. The type the object needs to have
depends on `mode`, but usually it needs to be a callable.
:meth:`connect` returns an opaque token which can be used with
:meth:`disconnect` to disconnect the object from the signal.
The default value for `mode` is :attr:`STRONG`. Any decorator can be
used as argument for `mode` and it is applied to `f`. The result is
stored internally and is what will be called when the signal is being
emitted.
If the result of `mode` returns a false value during emission, the
connection is removed.
.. note::
The return values required by the callable returned by `mode` and
the one required by a callable passed to `f` using the predefined
modes are complementary!
A callable `f` needs to return true to be removed from the
connections, while a callable returned by the `mode` decorator needs
to return false.
Existing modes are listed below.
"""
mode = mode or self.STRONG
self.logger.debug("connecting %r with mode %r", f, mode)
return self._connect(mode(f)) |
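A hedged usage sketch; `signal` stands for whatever object exposes the connect() method shown above, and the handler is purely illustrative.
def on_message(text):
    print("received:", text)
    return False   # a true return value would ask to be removed from the connections

token = signal.connect(on_message)   # STRONG mode by default
# ... later, when the handler is no longer wanted:
signal.disconnect(token)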
def accountSummary(self, account: str = '') -> List[AccountValue]:
"""
List of account values for the given account,
or of all accounts if account is left blank.
This method is blocking on first run, non-blocking after that.
Args:
account: If specified, filter for this account name.
"""
if not self.wrapper.acctSummary:
# loaded on demand since it takes ca. 250 ms
self.reqAccountSummary()
if account:
return [v for v in self.wrapper.acctSummary.values()
if v.account == account]
else:
return list(self.wrapper.acctSummary.values()) | List of account values for the given account,
or of all accounts if account is left blank.
This method is blocking on first run, non-blocking after that.
Args:
account: If specified, filter for this account name. | Below is the instruction that describes the task:
### Input:
List of account values for the given account,
or of all accounts if account is left blank.
This method is blocking on first run, non-blocking after that.
Args:
account: If specified, filter for this account name.
### Response:
def accountSummary(self, account: str = '') -> List[AccountValue]:
"""
List of account values for the given account,
or of all accounts if account is left blank.
This method is blocking on first run, non-blocking after that.
Args:
account: If specified, filter for this account name.
"""
if not self.wrapper.acctSummary:
# loaded on demand since it takes ca. 250 ms
self.reqAccountSummary()
if account:
return [v for v in self.wrapper.acctSummary.values()
if v.account == account]
else:
return list(self.wrapper.acctSummary.values()) |
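A hedged usage sketch assuming the ib_insync-style IB client this method appears to belong to; host, port, clientId and the account name are assumptions.
from ib_insync import IB

ib = IB()
ib.connect('127.0.0.1', 7497, clientId=1)    # paper-trading defaults, adjust as needed
for item in ib.accountSummary():             # values for all accounts
    print(item.tag, item.value, item.currency)
print(ib.accountSummary('DU123456'))         # filter to one (hypothetical) account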
def lpush(self, key, *values):
"""
Insert all the specified values at the head of the list stored at key.
:param key: The list's key
:type key: :class:`str`, :class:`bytes`
:param values: One or more positional arguments to insert at the
beginning of the list. Each value is inserted at the beginning
of the list individually (see discussion below).
:returns: the length of the list after push operations
:rtype: int
:raises: :exc:`~tredis.exceptions.TRedisException`
If `key` does not exist, it is created as empty list before
performing the push operations. When key holds a value that is not a
list, an error is returned.
It is possible to push multiple elements using a single command call
just specifying multiple arguments at the end of the command.
Elements are inserted one after the other to the head of the list,
from the leftmost element to the rightmost element. So for instance
``client.lpush('mylist', 'a', 'b', 'c')`` will result into a list
containing ``c`` as first element, ``b`` as second element and ``a``
as third element.
.. note::
**Time complexity**: ``O(1)``
"""
return self._execute([b'LPUSH', key] + list(values)) | Insert all the specified values at the head of the list stored at key.
:param key: The list's key
:type key: :class:`str`, :class:`bytes`
:param values: One or more positional arguments to insert at the
beginning of the list. Each value is inserted at the beginning
of the list individually (see discussion below).
:returns: the length of the list after push operations
:rtype: int
:raises: :exc:`~tredis.exceptions.TRedisException`
If `key` does not exist, it is created as empty list before
performing the push operations. When key holds a value that is not a
list, an error is returned.
It is possible to push multiple elements using a single command call
just specifying multiple arguments at the end of the command.
Elements are inserted one after the other to the head of the list,
from the leftmost element to the rightmost element. So for instance
``client.lpush('mylist', 'a', 'b', 'c')`` will result into a list
containing ``c`` as first element, ``b`` as second element and ``a``
as third element.
.. note::
**Time complexity**: ``O(1)`` | Below is the instruction that describes the task:
### Input:
Insert all the specified values at the head of the list stored at key.
:param key: The list's key
:type key: :class:`str`, :class:`bytes`
:param values: One or more positional arguments to insert at the
beginning of the list. Each value is inserted at the beginning
of the list individually (see discussion below).
:returns: the length of the list after push operations
:rtype: int
:raises: :exc:`~tredis.exceptions.TRedisException`
If `key` does not exist, it is created as empty list before
performing the push operations. When key holds a value that is not a
list, an error is returned.
It is possible to push multiple elements using a single command call
just specifying multiple arguments at the end of the command.
Elements are inserted one after the other to the head of the list,
from the leftmost element to the rightmost element. So for instance
``client.lpush('mylist', 'a', 'b', 'c')`` will result into a list
containing ``c`` as first element, ``b`` as second element and ``a``
as third element.
.. note::
**Time complexity**: ``O(1)``
### Response:
def lpush(self, key, *values):
"""
Insert all the specified values at the head of the list stored at key.
:param key: The list's key
:type key: :class:`str`, :class:`bytes`
:param values: One or more positional arguments to insert at the
beginning of the list. Each value is inserted at the beginning
of the list individually (see discussion below).
:returns: the length of the list after push operations
:rtype: int
:raises: :exc:`~tredis.exceptions.TRedisException`
If `key` does not exist, it is created as empty list before
performing the push operations. When key holds a value that is not a
list, an error is returned.
It is possible to push multiple elements using a single command call
just specifying multiple arguments at the end of the command.
Elements are inserted one after the other to the head of the list,
from the leftmost element to the rightmost element. So for instance
``client.lpush('mylist', 'a', 'b', 'c')`` will result into a list
containing ``c`` as first element, ``b`` as second element and ``a``
as third element.
.. note::
**Time complexity**: ``O(1)``
"""
return self._execute([b'LPUSH', key] + list(values)) |
def verify(path):
"""Verify that `path` has the qpimage series file format"""
valid = False
try:
h5 = h5py.File(path, mode="r")
qpi0 = h5["qpi_0"]
except (OSError, KeyError):
pass
else:
if ("qpimage version" in qpi0.attrs and
"phase" in qpi0 and
"amplitude" in qpi0 and
"bg_data" in qpi0["phase"] and
"bg_data" in qpi0["amplitude"]):
valid = True
return valid | Verify that `path` has the qpimage series file format | Below is the instruction that describes the task:
### Input:
Verify that `path` has the qpimage series file format
### Response:
def verify(path):
"""Verify that `path` has the qpimage series file format"""
valid = False
try:
h5 = h5py.File(path, mode="r")
qpi0 = h5["qpi_0"]
except (OSError, KeyError):
pass
else:
if ("qpimage version" in qpi0.attrs and
"phase" in qpi0 and
"amplitude" in qpi0 and
"bg_data" in qpi0["phase"] and
"bg_data" in qpi0["amplitude"]):
valid = True
return valid |
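A short, hedged usage sketch; the file path is an assumption.
path = "/data/measurement_series.h5"    # hypothetical path
if verify(path):
    print("recognized as a qpimage series file")
else:
    print("not a qpimage series file")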
def _pfp__build(self, stream=None, save_offset=False):
"""Build the field and write the result into the stream
:stream: An IO stream that can be written to
:returns: None
"""
if stream is not None and save_offset:
self._pfp__offset = stream.tell()
if self.bitsize is None:
data = struct.pack(
"{}{}".format(self.endian, self.format),
self._pfp__value
)
if stream is not None:
stream.write(data)
return len(data)
else:
return data
else:
data = struct.pack(
"{}{}".format(BIG_ENDIAN, self.format),
self._pfp__value
)
num_bytes = int(math.ceil(self.bitsize / 8.0))
bit_data = data[-num_bytes:]
raw_bits = bitwrap.bytes_to_bits(bit_data)
bits = raw_bits[-self.bitsize:]
if stream is not None:
self.bitfield_rw.write_bits(stream, bits, self.bitfield_padded, self.bitfield_left_right, self.endian)
return len(bits) // 8
else:
# TODO this can't be right....
return bits | Build the field and write the result into the stream
:stream: An IO stream that can be written to
:returns: None | Below is the instruction that describes the task:
### Input:
Build the field and write the result into the stream
:stream: An IO stream that can be written to
:returns: None
### Response:
def _pfp__build(self, stream=None, save_offset=False):
"""Build the field and write the result into the stream
:stream: An IO stream that can be written to
:returns: None
"""
if stream is not None and save_offset:
self._pfp__offset = stream.tell()
if self.bitsize is None:
data = struct.pack(
"{}{}".format(self.endian, self.format),
self._pfp__value
)
if stream is not None:
stream.write(data)
return len(data)
else:
return data
else:
data = struct.pack(
"{}{}".format(BIG_ENDIAN, self.format),
self._pfp__value
)
num_bytes = int(math.ceil(self.bitsize / 8.0))
bit_data = data[-num_bytes:]
raw_bits = bitwrap.bytes_to_bits(bit_data)
bits = raw_bits[-self.bitsize:]
if stream is not None:
self.bitfield_rw.write_bits(stream, bits, self.bitfield_padded, self.bitfield_left_right, self.endian)
return len(bits) // 8
else:
# TODO this can't be right....
return bits |
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(
~errors.ImportError.filename.in_(self._file_paths)
)
query.delete(synchronize_session='fetch')
session.commit() | Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session | Below is the instruction that describes the task:
### Input:
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
### Response:
def clear_nonexistent_import_errors(self, session):
"""
Clears import errors for files that no longer exist.
:param session: session for ORM operations
:type session: sqlalchemy.orm.session.Session
"""
query = session.query(errors.ImportError)
if self._file_paths:
query = query.filter(
~errors.ImportError.filename.in_(self._file_paths)
)
query.delete(synchronize_session='fetch')
session.commit() |
def regex_in(pl,regex):
'''
regex = re.compile("^[a-z]+$")
pl = ['b1c3d','xab15cxx','1x','y2']
regex_in(pl,regex)
regex = re.compile("^[0-9a-z]+$")
pl = ['b1c3d','xab15cxx','1x','y2']
regex_in(pl,regex)
'''
def cond_func(ele,regex):
m = regex.search(ele)
if(m == None):
return(False)
else:
return(True)
cond = some(pl,cond_func,regex)['cond']
return(cond) | regex = re.compile("^[a-z]+$")
pl = ['b1c3d','xab15cxx','1x','y2']
regex_in(pl,regex)
regex = re.compile("^[0-9a-z]+$")
pl = ['b1c3d','xab15cxx','1x','y2']
regex_in(pl,regex) | Below is the instruction that describes the task:
### Input:
regex = re.compile("^[a-z]+$")
pl = ['b1c3d','xab15cxx','1x','y2']
regex_in(pl,regex)
regex = re.compile("^[0-9a-z]+$")
pl = ['b1c3d','xab15cxx','1x','y2']
regex_in(pl,regex)
### Response:
def regex_in(pl,regex):
'''
regex = re.compile("^[a-z]+$")
pl = ['b1c3d','xab15cxx','1x','y2']
regex_in(pl,regex)
regex = re.compile("^[0-9a-z]+$")
pl = ['b1c3d','xab15cxx','1x','y2']
regex_in(pl,regex)
'''
def cond_func(ele,regex):
m = regex.search(ele)
if(m == None):
return(False)
else:
return(True)
cond = some(pl,cond_func,regex)['cond']
return(cond) |
def get_dashboard_info(adapter, institute_id=None, slice_query=None):
"""Returns cases with phenotype
If phenotypes are provided search for only those
Args:
adapter(adapter.MongoAdapter)
institute_id(str): an institute _id
slice_query(str): query to filter cases to obtain statistics for.
Returns:
data(dict): Dictionary with relevant information
"""
LOG.debug("General query with institute_id {}.".format(institute_id))
# if institute_id == 'None' or None, all cases and general stats will be returned
if institute_id == 'None':
institute_id = None
# If a slice_query is present then numbers in "General statistics" and "Case statistics" will
# reflect the data available for the query
general_sliced_info = get_general_case_info(adapter, institute_id=institute_id,
slice_query=slice_query)
total_sliced_cases = general_sliced_info['total_cases']
data = {'total_cases': total_sliced_cases}
if total_sliced_cases == 0:
return data
data['pedigree'] = []
for ped_info in general_sliced_info['pedigree'].values():
ped_info['percent'] = ped_info['count'] / total_sliced_cases
data['pedigree'].append(ped_info)
data['cases'] = get_case_groups(adapter, total_sliced_cases,
institute_id=institute_id, slice_query=slice_query)
data['analysis_types'] = get_analysis_types(adapter, total_sliced_cases,
institute_id=institute_id, slice_query=slice_query)
overview = [
{
'title': 'Phenotype terms',
'count': general_sliced_info['phenotype_cases'],
'percent': general_sliced_info['phenotype_cases'] / total_sliced_cases,
},
{
'title': 'Causative variants',
'count': general_sliced_info['causative_cases'],
'percent': general_sliced_info['causative_cases'] / total_sliced_cases,
},
{
'title': 'Pinned variants',
'count': general_sliced_info['pinned_cases'],
'percent': general_sliced_info['pinned_cases'] / total_sliced_cases,
},
{
'title': 'Cohort tag',
'count': general_sliced_info['cohort_cases'],
'percent': general_sliced_info['cohort_cases'] / total_sliced_cases,
}
]
# Data from "Variant statistics tab" is not filtered by slice_query and numbers will
# reflect verified variants in all available cases for an institute
general_info = get_general_case_info(adapter, institute_id=institute_id)
total_cases = general_info['total_cases']
sliced_case_ids = general_sliced_info['case_ids']
verified_query = {
'verb' : 'validate',
}
if institute_id: # filter by institute if the user wishes so
verified_query['institute'] = institute_id
# Case level information
sliced_validation_cases = set()
sliced_validated_cases = set()
# Variant level information
validated_tp = set()
validated_fp = set()
var_valid_orders = 0 # use this counter to count 'True Positive', 'False positive' and 'Not validated' vars
validate_events = adapter.event_collection.find(verified_query)
for validate_event in list(validate_events):
case_id = validate_event.get('case')
var_obj = adapter.variant(case_id=case_id, document_id=validate_event['variant_id'])
if var_obj: # Don't take into account variants which have been removed from db
var_valid_orders += 1
if case_id in sliced_case_ids:
sliced_validation_cases.add(case_id) # add to the set. Can't add same id twice since it's a set
validation = var_obj.get('validation')
if validation and validation in ['True positive', 'False positive']:
if case_id in sliced_case_ids:
sliced_validated_cases.add(case_id)
if validation == 'True positive':
validated_tp.add(var_obj['_id'])
elif validation == 'False positive':
validated_fp.add(var_obj['_id'])
n_validation_cases = len(sliced_validation_cases)
n_validated_cases = len(sliced_validated_cases)
# append
overview.append(
{
'title': 'Validation ordered',
'count': n_validation_cases,
'percent': n_validation_cases / total_sliced_cases,
})
overview.append(
{
'title': 'Validated cases (TP + FP)',
'count': n_validated_cases,
'percent': n_validated_cases / total_sliced_cases,
})
data['overview'] = overview
variants = []
nr_validated = len(validated_tp) + len(validated_fp)
variants.append(
{
'title': 'Validation ordered',
'count': var_valid_orders,
'percent': 1
}
)
# taking into account that var_valid_orders might be 0:
percent_validated_tp = 0
percent_validated_fp = 0
if var_valid_orders:
percent_validated_tp = len(validated_tp) / var_valid_orders
percent_validated_fp = len(validated_fp) / var_valid_orders
variants.append(
{
'title': 'Validated True Positive',
'count': len(validated_tp),
'percent': percent_validated_tp,
}
)
variants.append(
{
'title': 'Validated False Positive',
'count': len(validated_fp),
'percent': percent_validated_fp,
}
)
data['variants'] = variants
return data | Returns cases with phenotype
If phenotypes are provided search for only those
Args:
adapter(adapter.MongoAdapter)
institute_id(str): an institute _id
slice_query(str): query to filter cases to obtain statistics for.
Returns:
data(dict): Dictionary with relevant information | Below is the instruction that describes the task:
### Input:
Returns cases with phenotype
If phenotypes are provided search for only those
Args:
adapter(adapter.MongoAdapter)
institute_id(str): an institute _id
slice_query(str): query to filter cases to obtain statistics for.
Returns:
data(dict): Dictionary with relevant information
### Response:
def get_dashboard_info(adapter, institute_id=None, slice_query=None):
"""Returns cases with phenotype
If phenotypes are provided search for only those
Args:
adapter(adapter.MongoAdapter)
institute_id(str): an institute _id
slice_query(str): query to filter cases to obtain statistics for.
Returns:
data(dict): Dictionary with relevant information
"""
LOG.debug("General query with institute_id {}.".format(institute_id))
# if institute_id == 'None' or None, all cases and general stats will be returned
if institute_id == 'None':
institute_id = None
# If a slice_query is present then numbers in "General statistics" and "Case statistics" will
# reflect the data available for the query
general_sliced_info = get_general_case_info(adapter, institute_id=institute_id,
slice_query=slice_query)
total_sliced_cases = general_sliced_info['total_cases']
data = {'total_cases': total_sliced_cases}
if total_sliced_cases == 0:
return data
data['pedigree'] = []
for ped_info in general_sliced_info['pedigree'].values():
ped_info['percent'] = ped_info['count'] / total_sliced_cases
data['pedigree'].append(ped_info)
data['cases'] = get_case_groups(adapter, total_sliced_cases,
institute_id=institute_id, slice_query=slice_query)
data['analysis_types'] = get_analysis_types(adapter, total_sliced_cases,
institute_id=institute_id, slice_query=slice_query)
overview = [
{
'title': 'Phenotype terms',
'count': general_sliced_info['phenotype_cases'],
'percent': general_sliced_info['phenotype_cases'] / total_sliced_cases,
},
{
'title': 'Causative variants',
'count': general_sliced_info['causative_cases'],
'percent': general_sliced_info['causative_cases'] / total_sliced_cases,
},
{
'title': 'Pinned variants',
'count': general_sliced_info['pinned_cases'],
'percent': general_sliced_info['pinned_cases'] / total_sliced_cases,
},
{
'title': 'Cohort tag',
'count': general_sliced_info['cohort_cases'],
'percent': general_sliced_info['cohort_cases'] / total_sliced_cases,
}
]
# Data from "Variant statistics tab" is not filtered by slice_query and numbers will
# reflect verified variants in all available cases for an institute
general_info = get_general_case_info(adapter, institute_id=institute_id)
total_cases = general_info['total_cases']
sliced_case_ids = general_sliced_info['case_ids']
verified_query = {
'verb' : 'validate',
}
if institute_id: # filter by institute if the user wishes so
verified_query['institute'] = institute_id
# Case level information
sliced_validation_cases = set()
sliced_validated_cases = set()
# Variant level information
validated_tp = set()
validated_fp = set()
var_valid_orders = 0 # use this counter to count 'True Positive', 'False positive' and 'Not validated' vars
validate_events = adapter.event_collection.find(verified_query)
for validate_event in list(validate_events):
case_id = validate_event.get('case')
var_obj = adapter.variant(case_id=case_id, document_id=validate_event['variant_id'])
if var_obj: # Don't take into account variants which have been removed from db
var_valid_orders += 1
if case_id in sliced_case_ids:
sliced_validation_cases.add(case_id) # add to the set. Can't add same id twice since it's a set
validation = var_obj.get('validation')
if validation and validation in ['True positive', 'False positive']:
if case_id in sliced_case_ids:
sliced_validated_cases.add(case_id)
if validation == 'True positive':
validated_tp.add(var_obj['_id'])
elif validation == 'False positive':
validated_fp.add(var_obj['_id'])
n_validation_cases = len(sliced_validation_cases)
n_validated_cases = len(sliced_validated_cases)
# append
overview.append(
{
'title': 'Validation ordered',
'count': n_validation_cases,
'percent': n_validation_cases / total_sliced_cases,
})
overview.append(
{
'title': 'Validated cases (TP + FP)',
'count': n_validated_cases,
'percent': n_validated_cases / total_sliced_cases,
})
data['overview'] = overview
variants = []
nr_validated = len(validated_tp) + len(validated_fp)
variants.append(
{
'title': 'Validation ordered',
'count': var_valid_orders,
'percent': 1
}
)
# taking into account that var_valid_orders might be 0:
percent_validated_tp = 0
percent_validated_fp = 0
if var_valid_orders:
percent_validated_tp = len(validated_tp) / var_valid_orders
percent_validated_fp = len(validated_fp) / var_valid_orders
variants.append(
{
'title': 'Validated True Positive',
'count': len(validated_tp),
'percent': percent_validated_tp,
}
)
variants.append(
{
'title': 'Validated False Positive',
'count': len(validated_fp),
'percent': percent_validated_fp,
}
)
data['variants'] = variants
return data |
def fdf(self, x):
"""Calculate the value of the functional for the specified arguments,
and the derivatives with respect to the parameters (taking any
specified mask into account).
:param x: the value(s) to evaluate at
"""
x = self._flatten(x)
n = 1
if hasattr(x, "__len__"):
n = len(x)
if self._dtype == 0:
retval = _functional._fdf(self, x)
else:
retval = _functional._fdfc(self, x)
if len(retval) == n:
return numpy.array(retval)
return numpy.array(retval).reshape(self.npar() + 1,
n // self.ndim()).transpose() | Calculate the value of the functional for the specified arguments,
and the derivatives with respect to the parameters (taking any
specified mask into account).
:param x: the value(s) to evaluate at | Below is the instruction that describes the task:
### Input:
Calculate the value of the functional for the specified arguments,
and the derivatives with respect to the parameters (taking any
specified mask into account).
:param x: the value(s) to evaluate at
### Response:
def fdf(self, x):
"""Calculate the value of the functional for the specified arguments,
and the derivatives with respect to the parameters (taking any
specified mask into account).
:param x: the value(s) to evaluate at
"""
x = self._flatten(x)
n = 1
if hasattr(x, "__len__"):
n = len(x)
if self._dtype == 0:
retval = _functional._fdf(self, x)
else:
retval = _functional._fdfc(self, x)
if len(retval) == n:
return numpy.array(retval)
return numpy.array(retval).reshape(self.npar() + 1,
n // self.ndim()).transpose() |
def darken(self, amount):
''' Darken (reduce the luminance) of this color.
Args:
amount (float) :
Amount to reduce the luminance by (clamped above zero)
Returns:
Color
'''
hsl = self.to_hsl()
hsl.l = self.clamp(hsl.l - amount)
return self.from_hsl(hsl) | Darken (reduce the luminance) of this color.
Args:
amount (float) :
Amount to reduce the luminance by (clamped above zero)
Returns:
Color | Below is the instruction that describes the task:
### Input:
Darken (reduce the luminance) of this color.
Args:
amount (float) :
Amount to reduce the luminance by (clamped above zero)
Returns:
Color
### Response:
def darken(self, amount):
''' Darken (reduce the luminance) of this color.
Args:
amount (float) :
Amount to reduce the luminance by (clamped above zero)
Returns:
Color
'''
hsl = self.to_hsl()
hsl.l = self.clamp(hsl.l - amount)
return self.from_hsl(hsl) |
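A hedged usage sketch assuming a concrete Color subclass such as bokeh.colors.RGB, which supplies to_hsl()/from_hsl(); the starting color is an assumption.
from bokeh.colors import RGB   # assumption about the concrete Color class

c = RGB(200, 30, 30)
darker = c.darken(0.2)         # new color with luminance reduced by 0.2 (clamped at 0)
print(darker.to_css())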
def copy_to(source, dest, engine_or_conn, **flags):
"""Export a query or select to a file. For flags, see the PostgreSQL
documentation at http://www.postgresql.org/docs/9.5/static/sql-copy.html.
Examples: ::
select = MyTable.select()
with open('/path/to/file.tsv', 'w') as fp:
copy_to(select, fp, conn)
query = session.query(MyModel)
with open('/path/to/file/csv', 'w') as fp:
copy_to(query, fp, engine, format='csv', null='.')
:param source: SQLAlchemy query or select
:param dest: Destination file pointer, in write mode
:param engine_or_conn: SQLAlchemy engine, connection, or raw_connection
:param **flags: Options passed through to COPY
If an existing connection is passed to `engine_or_conn`, it is the caller's
responsibility to commit and close.
"""
dialect = postgresql.dialect()
statement = getattr(source, 'statement', source)
compiled = statement.compile(dialect=dialect)
conn, autoclose = raw_connection_from(engine_or_conn)
cursor = conn.cursor()
query = cursor.mogrify(compiled.string, compiled.params).decode()
formatted_flags = '({})'.format(format_flags(flags)) if flags else ''
copy = 'COPY ({}) TO STDOUT {}'.format(query, formatted_flags)
cursor.copy_expert(copy, dest)
if autoclose:
conn.close() | Export a query or select to a file. For flags, see the PostgreSQL
documentation at http://www.postgresql.org/docs/9.5/static/sql-copy.html.
Examples: ::
select = MyTable.select()
with open('/path/to/file.tsv', 'w') as fp:
copy_to(select, fp, conn)
query = session.query(MyModel)
with open('/path/to/file/csv', 'w') as fp:
copy_to(query, fp, engine, format='csv', null='.')
:param source: SQLAlchemy query or select
:param dest: Destination file pointer, in write mode
:param engine_or_conn: SQLAlchemy engine, connection, or raw_connection
:param **flags: Options passed through to COPY
If an existing connection is passed to `engine_or_conn`, it is the caller's
responsibility to commit and close. | Below is the instruction that describes the task:
### Input:
Export a query or select to a file. For flags, see the PostgreSQL
documentation at http://www.postgresql.org/docs/9.5/static/sql-copy.html.
Examples: ::
select = MyTable.select()
with open('/path/to/file.tsv', 'w') as fp:
copy_to(select, fp, conn)
query = session.query(MyModel)
with open('/path/to/file/csv', 'w') as fp:
copy_to(query, fp, engine, format='csv', null='.')
:param source: SQLAlchemy query or select
:param dest: Destination file pointer, in write mode
:param engine_or_conn: SQLAlchemy engine, connection, or raw_connection
:param **flags: Options passed through to COPY
If an existing connection is passed to `engine_or_conn`, it is the caller's
responsibility to commit and close.
### Response:
def copy_to(source, dest, engine_or_conn, **flags):
"""Export a query or select to a file. For flags, see the PostgreSQL
documentation at http://www.postgresql.org/docs/9.5/static/sql-copy.html.
Examples: ::
select = MyTable.select()
with open('/path/to/file.tsv', 'w') as fp:
copy_to(select, fp, conn)
query = session.query(MyModel)
with open('/path/to/file/csv', 'w') as fp:
copy_to(query, fp, engine, format='csv', null='.')
:param source: SQLAlchemy query or select
:param dest: Destination file pointer, in write mode
:param engine_or_conn: SQLAlchemy engine, connection, or raw_connection
:param **flags: Options passed through to COPY
If an existing connection is passed to `engine_or_conn`, it is the caller's
responsibility to commit and close.
"""
dialect = postgresql.dialect()
statement = getattr(source, 'statement', source)
compiled = statement.compile(dialect=dialect)
conn, autoclose = raw_connection_from(engine_or_conn)
cursor = conn.cursor()
query = cursor.mogrify(compiled.string, compiled.params).decode()
formatted_flags = '({})'.format(format_flags(flags)) if flags else ''
copy = 'COPY ({}) TO STDOUT {}'.format(query, formatted_flags)
cursor.copy_expert(copy, dest)
if autoclose:
conn.close() |
def hasattrs(object, *names):
"""
Takes in an object and a variable length amount of named attributes,
and checks to see if the object has each property. If any of the
attributes are missing, this returns false.
:param object: an object that may or may not contain the listed attributes
:param names: a variable amount of attribute names to check for
:return: True if the object contains each named attribute, false otherwise
"""
for name in names:
if not hasattr(object, name):
return False
return True | Takes in an object and a variable length amount of named attributes,
and checks to see if the object has each property. If any of the
attributes are missing, this returns false.
:param object: an object that may or may not contain the listed attributes
:param names: a variable amount of attribute names to check for
:return: True if the object contains each named attribute, false otherwise | Below is the instruction that describes the task:
### Input:
Takes in an object and a variable length amount of named attributes,
and checks to see if the object has each property. If any of the
attributes are missing, this returns false.
:param object: an object that may or may not contain the listed attributes
:param names: a variable amount of attribute names to check for
:return: True if the object contains each named attribute, false otherwise
### Response:
def hasattrs(object, *names):
"""
Takes in an object and a variable length amount of named attributes,
and checks to see if the object has each property. If any of the
attributes are missing, this returns false.
:param object: an object that may or may not contain the listed attributes
:param names: a variable amount of attribute names to check for
:return: True if the object contains each named attribute, false otherwise
"""
for name in names:
if not hasattr(object, name):
return False
return True |
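A minimal usage sketch in plain Python; the Point class is only illustrative.
class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

p = Point(1, 2)
print(hasattrs(p, "x", "y"))   # True: both attributes exist
print(hasattrs(p, "x", "z"))   # False: 'z' is missing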
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.maplesat and self.status == False:
return pysolvers.maplesat_core(self.maplesat) | Get an unsatisfiable core if the formula was previously
unsatisfied. | Below is the instruction that describes the task:
### Input:
Get an unsatisfiable core if the formula was previously
unsatisfied.
### Response:
def get_core(self):
"""
Get an unsatisfiable core if the formula was previously
unsatisfied.
"""
if self.maplesat and self.status == False:
return pysolvers.maplesat_core(self.maplesat) |
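A hedged sketch using the public PySAT interface that appears to wrap this method; the clause set and assumptions are illustrative only.
from pysat.solvers import Maplesat

with Maplesat(bootstrap_with=[[-1, 2], [-2, 3]]) as solver:
    if not solver.solve(assumptions=[1, -3]):
        print(solver.get_core())   # a subset of the failed assumptions, e.g. [1, -3]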
def insert(cls, cur, table: str, values: dict):
"""
Creates an insert statement with only chosen fields
Args:
table: a string indicating the name of the table
values: a dict of fields and values to be inserted
Returns:
A 'Record' object with table columns as properties
"""
keys = cls._COMMA.join(values.keys())
value_place_holder = cls._PLACEHOLDER * len(values)
query = cls._insert_string.format(table, keys, value_place_holder[:-1])
yield from cur.execute(query, tuple(values.values()))
return (yield from cur.fetchone()) | Creates an insert statement with only chosen fields
Args:
table: a string indicating the name of the table
values: a dict of fields and values to be inserted
Returns:
A 'Record' object with table columns as properties | Below is the instruction that describes the task:
### Input:
Creates an insert statement with only chosen fields
Args:
table: a string indicating the name of the table
values: a dict of fields and values to be inserted
Returns:
A 'Record' object with table columns as properties
### Response:
def insert(cls, cur, table: str, values: dict):
"""
Creates an insert statement with only chosen fields
Args:
table: a string indicating the name of the table
values: a dict of fields and values to be inserted
Returns:
A 'Record' object with table columns as properties
"""
keys = cls._COMMA.join(values.keys())
value_place_holder = cls._PLACEHOLDER * len(values)
query = cls._insert_string.format(table, keys, value_place_holder[:-1])
yield from cur.execute(query, tuple(values.values()))
return (yield from cur.fetchone()) |
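A hedged usage sketch in the same yield-from style as the coroutine above; PostgresStore is a stand-in name for the class that defines insert(), and the table and column names are assumptions.
import asyncio

@asyncio.coroutine
def add_user(cur):
    row = yield from PostgresStore.insert(cur, 'users', {'name': 'ada', 'age': 36})
    return row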
def constraint_matches(self, c, m):
"""
Return dict noting the substitution values (or False for no match)
"""
if isinstance(m, tuple):
d = {}
if isinstance(c, Operator) and c._op_name == m[0]:
for c1, m1 in zip(c._args, m[1:]):
r = self.constraint_matches(c1, m1)
if r is False:
return r
d.update(r)
return d
return False
return m.match(c) | Return dict noting the substitution values (or False for no match) | Below is the instruction that describes the task:
### Input:
Return dict noting the substitution values (or False for no match)
### Response:
def constraint_matches(self, c, m):
"""
Return dict noting the substitution values (or False for no match)
"""
if isinstance(m, tuple):
d = {}
if isinstance(c, Operator) and c._op_name == m[0]:
for c1, m1 in zip(c._args, m[1:]):
r = self.constraint_matches(c1, m1)
if r is False:
return r
d.update(r)
return d
return False
return m.match(c) |
def paginate_sources(owner=None, page=1, page_size=DEFAULT_PAGE_SIZE):
'''Paginate harvest sources'''
sources = _sources_queryset(owner=owner)
page = max(page or 1, 1)
return sources.paginate(page, page_size) | Paginate harvest sources | Below is the instruction that describes the task:
### Input:
Paginate harvest sources
### Response:
def paginate_sources(owner=None, page=1, page_size=DEFAULT_PAGE_SIZE):
'''Paginate harvest sources'''
sources = _sources_queryset(owner=owner)
page = max(page or 1, 1)
return sources.paginate(page, page_size) |
def sort_sam(sam, sort):
"""
sort sam file
"""
tempdir = '%s/' % (os.path.abspath(sam).rsplit('/', 1)[0])
if sort is True:
mapping = '%s.sorted.sam' % (sam.rsplit('.', 1)[0])
if sam != '-':
if os.path.exists(mapping) is False:
os.system("\
sort -k1 --buffer-size=%sG -T %s -o %s %s\
" % (sbuffer, tempdir, mapping, sam))
else:
mapping = 'stdin-sam.sorted.sam'
p = Popen("sort -k1 --buffer-size=%sG -T %s -o %s" \
% (sbuffer, tempdir, mapping), stdin = sys.stdin, shell = True)
p.communicate()
mapping = open(mapping)
else:
if sam == '-':
mapping = sys.stdin
else:
mapping = open(sam)
return mapping | sort sam file | Below is the instruction that describes the task:
### Input:
sort sam file
### Response:
def sort_sam(sam, sort):
"""
sort sam file
"""
tempdir = '%s/' % (os.path.abspath(sam).rsplit('/', 1)[0])
if sort is True:
mapping = '%s.sorted.sam' % (sam.rsplit('.', 1)[0])
if sam != '-':
if os.path.exists(mapping) is False:
os.system("\
sort -k1 --buffer-size=%sG -T %s -o %s %s\
" % (sbuffer, tempdir, mapping, sam))
else:
mapping = 'stdin-sam.sorted.sam'
p = Popen("sort -k1 --buffer-size=%sG -T %s -o %s" \
% (sbuffer, tempdir, mapping), stdin = sys.stdin, shell = True)
p.communicate()
mapping = open(mapping)
else:
if sam == '-':
mapping = sys.stdin
else:
mapping = open(sam)
return mapping |
def get_gateway_info(self):
"""
Return the gateway info.
Returns a Command.
"""
def process_result(result):
return GatewayInfo(result)
return Command('get',
[ROOT_GATEWAY, ATTR_GATEWAY_INFO],
process_result=process_result) | Return the gateway info.
Returns a Command. | Below is the instruction that describes the task:
### Input:
Return the gateway info.
Returns a Command.
### Response:
def get_gateway_info(self):
"""
Return the gateway info.
Returns a Command.
"""
def process_result(result):
return GatewayInfo(result)
return Command('get',
[ROOT_GATEWAY, ATTR_GATEWAY_INFO],
process_result=process_result) |
def _locateConvergencePoint(stats, minOverlap, maxOverlap):
"""
Walk backwards through stats until you locate the first point that diverges
from target overlap values. We need this to handle cases where it might get
to target values, diverge, and then get back again. We want the last
convergence point.
"""
for i, v in enumerate(stats[::-1]):
if not (v >= minOverlap and v <= maxOverlap):
return len(stats) - i + 1
# Never differs - converged in one iteration
return 1 | Walk backwards through stats until you locate the first point that diverges
from target overlap values. We need this to handle cases where it might get
to target values, diverge, and then get back again. We want the last
convergence point. | Below is the instruction that describes the task:
### Input:
Walk backwards through stats until you locate the first point that diverges
from target overlap values. We need this to handle cases where it might get
to target values, diverge, and then get back again. We want the last
convergence point.
### Response:
def _locateConvergencePoint(stats, minOverlap, maxOverlap):
"""
Walk backwards through stats until you locate the first point that diverges
from target overlap values. We need this to handle cases where it might get
to target values, diverge, and then get back again. We want the last
convergence point.
"""
for i, v in enumerate(stats[::-1]):
if not (v >= minOverlap and v <= maxOverlap):
return len(stats) - i + 1
# Never differs - converged in one iteration
return 1 |
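A minimal illustration; the stats list and the 3..5 overlap band are assumptions, and the expected result follows by tracing the backward walk in the code above.
stats = [0, 1, 2, 6, 4, 5, 4, 4]   # enters and stays in the 3..5 band from index 4 on
print(_locateConvergencePoint(stats, minOverlap=3, maxOverlap=5))   # -> 5 (1-based position)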
def getall(self, table):
"""
Get all rows values for a table
"""
try:
self._check_db()
except Exception as e:
self.err(e, "Can not connect to database")
return
if table not in self.db.tables:
self.warning("The table " + table + " does not exist")
return
try:
res = self.db[table].all()
df = pd.DataFrame(list(res))
return df
except Exception as e:
self.err(e, "Error retrieving data in table") | Get all rows values for a table | Below is the instruction that describes the task:
### Input:
Get all rows values for a table
### Response:
def getall(self, table):
"""
Get all rows values for a table
"""
try:
self._check_db()
except Exception as e:
self.err(e, "Can not connect to database")
return
if table not in self.db.tables:
self.warning("The table " + table + " does not exist")
return
try:
res = self.db[table].all()
df = pd.DataFrame(list(res))
return df
except Exception as e:
self.err(e, "Error retrieving data in table") |
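A hedged usage sketch; `ds` stands for an instance of the (unshown) class that owns getall() and is already connected to a database, and the table name is an assumption.
df = ds.getall("users")    # pandas DataFrame with every row of 'users', or None on error
if df is not None:
    print(df.head())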
def _key_values(self, sn: "SequenceNode") -> Union[EntryKeys, EntryValue]:
"""Parse leaf-list value or list keys."""
try:
keys = self.up_to("/")
except EndOfInput:
keys = self.remaining()
if not keys:
raise UnexpectedInput(self, "entry value or keys")
if isinstance(sn, LeafListNode):
return EntryValue(unquote(keys))
ks = keys.split(",")
try:
if len(ks) != len(sn.keys):
raise UnexpectedInput(self, f"exactly {len(sn.keys)} keys")
except AttributeError:
raise BadSchemaNodeType(sn.qual_name, "list")
sel = {}
for j in range(len(ks)):
knod = sn.get_data_child(*sn.keys[j])
val = unquote(ks[j])
sel[(knod.name, None if knod.ns == sn.ns else knod.ns)] = val
return EntryKeys(sel) | Parse leaf-list value or list keys. | Below is the instruction that describes the task:
### Input:
Parse leaf-list value or list keys.
### Response:
def _key_values(self, sn: "SequenceNode") -> Union[EntryKeys, EntryValue]:
"""Parse leaf-list value or list keys."""
try:
keys = self.up_to("/")
except EndOfInput:
keys = self.remaining()
if not keys:
raise UnexpectedInput(self, "entry value or keys")
if isinstance(sn, LeafListNode):
return EntryValue(unquote(keys))
ks = keys.split(",")
try:
if len(ks) != len(sn.keys):
raise UnexpectedInput(self, f"exactly {len(sn.keys)} keys")
except AttributeError:
raise BadSchemaNodeType(sn.qual_name, "list")
sel = {}
for j in range(len(ks)):
knod = sn.get_data_child(*sn.keys[j])
val = unquote(ks[j])
sel[(knod.name, None if knod.ns == sn.ns else knod.ns)] = val
return EntryKeys(sel) |
def set_sig_figs(n=4):
"""Set the number of significant figures used to print Pint, Pandas, and
NumPy quantities.
Args:
n (int): Number of significant figures to display.
"""
u.default_format = '.' + str(n) + 'g'
pd.options.display.float_format = ('{:,.' + str(n) + '}').format | Set the number of significant figures used to print Pint, Pandas, and
NumPy quantities.
Args:
n (int): Number of significant figures to display. | Below is the instruction that describes the task:
### Input:
Set the number of significant figures used to print Pint, Pandas, and
NumPy quantities.
Args:
n (int): Number of significant figures to display.
### Response:
def set_sig_figs(n=4):
"""Set the number of significant figures used to print Pint, Pandas, and
NumPy quantities.
Args:
n (int): Number of significant figures to display.
"""
u.default_format = '.' + str(n) + 'g'
pd.options.display.float_format = ('{:,.' + str(n) + '}').format |
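A hedged usage sketch; `u` is the pint unit registry already imported by this module, and the quantity is an assumption.
set_sig_figs(3)
flow = 0.123456 * u.m ** 3 / u.s
print(flow)    # rendered with 3 significant figures, e.g. 0.123 meter ** 3 / second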
def vectorize(density_matrix, method='col'):
"""Flatten an operator to a vector in a specified basis.
Args:
density_matrix (ndarray): a density matrix.
method (str): the method of vectorization. Allowed values are
- 'col' (default) flattens to column-major vector.
- 'row' flattens to row-major vector.
- 'pauli' flattens in the n-qubit Pauli basis.
- 'pauli_weights': flattens in the n-qubit Pauli basis ordered by
weight.
Returns:
ndarray: the resulting vector.
Raises:
Exception: if input state is not an n-qubit state
"""
density_matrix = np.array(density_matrix)
if method == 'col':
return density_matrix.flatten(order='F')
elif method == 'row':
return density_matrix.flatten(order='C')
elif method in ['pauli', 'pauli_weights']:
num = int(np.log2(len(density_matrix))) # number of qubits
if len(density_matrix) != 2**num:
raise Exception('Input state must be n-qubit state')
if method == 'pauli_weights':
pgroup = pauli_group(num, case='weight')
else:
pgroup = pauli_group(num, case='tensor')
vals = [np.trace(np.dot(p.to_matrix(), density_matrix))
for p in pgroup]
return np.array(vals)
return None | Flatten an operator to a vector in a specified basis.
Args:
density_matrix (ndarray): a density matrix.
method (str): the method of vectorization. Allowed values are
- 'col' (default) flattens to column-major vector.
- 'row' flattens to row-major vector.
- 'pauli' flattens in the n-qubit Pauli basis.
- 'pauli-weights': flattens in the n-qubit Pauli basis ordered by
weight.
Returns:
ndarray: the resulting vector.
Raises:
Exception: if input state is not an n-qubit state | Below is the instruction that describes the task:
### Input:
Flatten an operator to a vector in a specified basis.
Args:
density_matrix (ndarray): a density matrix.
method (str): the method of vectorization. Allowed values are
- 'col' (default) flattens to column-major vector.
- 'row' flattens to row-major vector.
- 'pauli' flattens in the n-qubit Pauli basis.
- 'pauli-weights': flattens in the n-qubit Pauli basis ordered by
weight.
Returns:
ndarray: the resulting vector.
Raises:
Exception: if input state is not an n-qubit state
### Response:
def vectorize(density_matrix, method='col'):
"""Flatten an operator to a vector in a specified basis.
Args:
density_matrix (ndarray): a density matrix.
method (str): the method of vectorization. Allowed values are
- 'col' (default) flattens to column-major vector.
- 'row' flattens to row-major vector.
- 'pauli' flattens in the n-qubit Pauli basis.
- 'pauli-weights': flattens in the n-qubit Pauli basis ordered by
weight.
Returns:
ndarray: the resulting vector.
Raises:
Exception: if input state is not an n-qubit state
"""
density_matrix = np.array(density_matrix)
if method == 'col':
return density_matrix.flatten(order='F')
elif method == 'row':
return density_matrix.flatten(order='C')
elif method in ['pauli', 'pauli_weights']:
num = int(np.log2(len(density_matrix))) # number of qubits
if len(density_matrix) != 2**num:
raise Exception('Input state must be n-qubit state')
if method == 'pauli_weights':
pgroup = pauli_group(num, case='weight')
else:
pgroup = pauli_group(num, case='tensor')
vals = [np.trace(np.dot(p.to_matrix(), density_matrix))
for p in pgroup]
return np.array(vals)
return None |
def add(self, years=0, months=0, weeks=0, days=0):
"""
Add duration to the instance.
:param years: The number of years
:type years: int
:param months: The number of months
:type months: int
:param weeks: The number of weeks
:type weeks: int
:param days: The number of days
:type days: int
:rtype: Date
"""
dt = add_duration(
date(self.year, self.month, self.day),
years=years,
months=months,
weeks=weeks,
days=days,
)
return self.__class__(dt.year, dt.month, dt.day) | Add duration to the instance.
:param years: The number of years
:type years: int
:param months: The number of months
:type months: int
:param weeks: The number of weeks
:type weeks: int
:param days: The number of days
:type days: int
:rtype: Date | Below is the instruction that describes the task:
### Input:
Add duration to the instance.
:param years: The number of years
:type years: int
:param months: The number of months
:type months: int
:param weeks: The number of weeks
:type weeks: int
:param days: The number of days
:type days: int
:rtype: Date
### Response:
def add(self, years=0, months=0, weeks=0, days=0):
"""
Add duration to the instance.
:param years: The number of years
:type years: int
:param months: The number of months
:type months: int
:param weeks: The number of weeks
:type weeks: int
:param days: The number of days
:type days: int
:rtype: Date
"""
dt = add_duration(
date(self.year, self.month, self.day),
years=years,
months=months,
weeks=weeks,
days=days,
)
return self.__class__(dt.year, dt.month, dt.day) |
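The weeks/days part of this arithmetic can be sketched with the standard library alone; add_duration itself also handles calendar-aware years and months, which datetime.timedelta does not, so this is only a partial illustration.
from datetime import date, timedelta
d = date(2020, 1, 31)
print(d + timedelta(weeks=2, days=3))  # 2020-02-17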
def MultiDelete(self, urns, token=None):
"""Drop all the information about given objects.
DANGEROUS! This recursively deletes all objects contained within the
specified URN.
Args:
urns: Urns of objects to remove.
token: The Security Token to use for opening this item.
Raises:
ValueError: If one of the urns is too short. This is a safety check to
ensure the root is not removed.
"""
urns = [rdfvalue.RDFURN(urn) for urn in urns]
if token is None:
token = data_store.default_token
for urn in urns:
if urn.Path() == "/":
raise ValueError("Can't delete root URN. Please enter a valid URN")
deletion_pool = DeletionPool(token=token)
deletion_pool.MultiMarkForDeletion(urns)
marked_root_urns = deletion_pool.root_urns_for_deletion
marked_urns = deletion_pool.urns_for_deletion
logging.debug(u"Found %d objects to remove when removing %s",
len(marked_urns), urns)
logging.debug(u"Removing %d root objects when removing %s: %s",
len(marked_root_urns), urns, marked_root_urns)
pool = data_store.DB.GetMutationPool()
for root in marked_root_urns:
# Only the index of the parent object should be updated. Everything
# below the target object (along with indexes) is going to be
# deleted.
self._DeleteChildFromIndex(root, mutation_pool=pool)
for urn_to_delete in marked_urns:
try:
self.intermediate_cache.ExpireObject(urn_to_delete.Path())
except KeyError:
pass
pool.DeleteSubjects(marked_urns)
pool.Flush()
# Ensure this is removed from the cache as well.
self.Flush()
logging.debug("Removed %d objects", len(marked_urns)) | Drop all the information about given objects.
DANGEROUS! This recursively deletes all objects contained within the
specified URN.
Args:
urns: Urns of objects to remove.
token: The Security Token to use for opening this item.
Raises:
ValueError: If one of the urns is too short. This is a safety check to
ensure the root is not removed. | Below is the instruction that describes the task:
### Input:
Drop all the information about given objects.
DANGEROUS! This recursively deletes all objects contained within the
specified URN.
Args:
urns: Urns of objects to remove.
token: The Security Token to use for opening this item.
Raises:
ValueError: If one of the urns is too short. This is a safety check to
ensure the root is not removed.
### Response:
def MultiDelete(self, urns, token=None):
"""Drop all the information about given objects.
DANGEROUS! This recursively deletes all objects contained within the
specified URN.
Args:
urns: Urns of objects to remove.
token: The Security Token to use for opening this item.
Raises:
ValueError: If one of the urns is too short. This is a safety check to
ensure the root is not removed.
"""
urns = [rdfvalue.RDFURN(urn) for urn in urns]
if token is None:
token = data_store.default_token
for urn in urns:
if urn.Path() == "/":
raise ValueError("Can't delete root URN. Please enter a valid URN")
deletion_pool = DeletionPool(token=token)
deletion_pool.MultiMarkForDeletion(urns)
marked_root_urns = deletion_pool.root_urns_for_deletion
marked_urns = deletion_pool.urns_for_deletion
logging.debug(u"Found %d objects to remove when removing %s",
len(marked_urns), urns)
logging.debug(u"Removing %d root objects when removing %s: %s",
len(marked_root_urns), urns, marked_root_urns)
pool = data_store.DB.GetMutationPool()
for root in marked_root_urns:
# Only the index of the parent object should be updated. Everything
# below the target object (along with indexes) is going to be
# deleted.
self._DeleteChildFromIndex(root, mutation_pool=pool)
for urn_to_delete in marked_urns:
try:
self.intermediate_cache.ExpireObject(urn_to_delete.Path())
except KeyError:
pass
pool.DeleteSubjects(marked_urns)
pool.Flush()
# Ensure this is removed from the cache as well.
self.Flush()
logging.debug("Removed %d objects", len(marked_urns)) |
def fetch_fieldnames(self, sql: str, *args) -> List[str]:
"""Executes SQL; returns just the output fieldnames."""
self.ensure_db_open()
cursor = self.db.cursor()
self.db_exec_with_cursor(cursor, sql, *args)
try:
return [i[0] for i in cursor.description]
except: # nopep8
log.exception("fetch_fieldnames: SQL was: " + sql)
raise | Executes SQL; returns just the output fieldnames. | Below is the instruction that describes the task:
### Input:
Executes SQL; returns just the output fieldnames.
### Response:
def fetch_fieldnames(self, sql: str, *args) -> List[str]:
"""Executes SQL; returns just the output fieldnames."""
self.ensure_db_open()
cursor = self.db.cursor()
self.db_exec_with_cursor(cursor, sql, *args)
try:
return [i[0] for i in cursor.description]
except: # nopep8
log.exception("fetch_fieldnames: SQL was: " + sql)
raise |
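A self-contained sketch of reading column names from cursor.description, using an in-memory SQLite database so it runs without an external server; the real method goes through the class's own connection handling and error logging.
import sqlite3
conn = sqlite3.connect(":memory:")
cur = conn.cursor()
cur.execute("CREATE TABLE t (id INTEGER, name TEXT)")
cur.execute("SELECT * FROM t")
# description is populated after a SELECT even when no rows come back
print([col[0] for col in cur.description])  # ['id', 'name']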
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self | Specify the header of the table | Below is the instruction that describes the task:
### Input:
Specify the header of the table
### Response:
def header(self, array):
"""Specify the header of the table
"""
self._check_row_size(array)
self._header = list(map(obj2unicode, array))
return self |
def rst_to_notebook(infile, outfile):
"""Convert an rst file to a notebook file."""
# Read infile into a string
with open(infile, 'r') as fin:
rststr = fin.read()
# Convert string from rst to markdown
mdfmt = 'markdown_github+tex_math_dollars+fenced_code_attributes'
mdstr = pypandoc.convert_text(rststr, mdfmt, format='rst',
extra_args=['--atx-headers'])
# In links, replace .py extensions with .ipynb
mdstr = re.sub(r'\(([^\)]+).py\)', r'(\1.ipynb)', mdstr)
# Enclose the markdown within triple quotes and convert from
# python to notebook
mdstr = '"""' + mdstr + '"""'
nb = py2jn.py_string_to_notebook(mdstr)
py2jn.tools.write_notebook(nb, outfile, nbver=4) | Convert an rst file to a notebook file. | Below is the instruction that describes the task:
### Input:
Convert an rst file to a notebook file.
### Response:
def rst_to_notebook(infile, outfile):
"""Convert an rst file to a notebook file."""
# Read infile into a string
with open(infile, 'r') as fin:
rststr = fin.read()
# Convert string from rst to markdown
mdfmt = 'markdown_github+tex_math_dollars+fenced_code_attributes'
mdstr = pypandoc.convert_text(rststr, mdfmt, format='rst',
extra_args=['--atx-headers'])
# In links, replace .py extensions with .ipynb
mdstr = re.sub(r'\(([^\)]+).py\)', r'(\1.ipynb)', mdstr)
# Enclose the markdown within triple quotes and convert from
# python to notebook
mdstr = '"""' + mdstr + '"""'
nb = py2jn.py_string_to_notebook(mdstr)
py2jn.tools.write_notebook(nb, outfile, nbver=4) |
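The link-rewriting step can be checked in isolation with the standard re module; pypandoc and py2jn are left out since they need external tools. The sample markdown string is illustrative.
import re
mdstr = "See [the example](scripts/example.py) for details."
mdstr = re.sub(r'\(([^\)]+).py\)', r'(\1.ipynb)', mdstr)
print(mdstr)  # See [the example](scripts/example.ipynb) for details.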
def status(self):
""" Status of this SMS. Can be ENROUTE, DELIVERED or FAILED
The actual status report object may be accessed via the 'report' attribute
if status is 'DELIVERED' or 'FAILED'
"""
if self.report == None:
return SentSms.ENROUTE
else:
return SentSms.DELIVERED if self.report.deliveryStatus == StatusReport.DELIVERED else SentSms.FAILED | Status of this SMS. Can be ENROUTE, DELIVERED or FAILED
The actual status report object may be accessed via the 'report' attribute
if status is 'DELIVERED' or 'FAILED' | Below is the the instruction that describes the task:
### Input:
Status of this SMS. Can be ENROUTE, DELIVERED or FAILED
The actual status report object may be accessed via the 'report' attribute
if status is 'DELIVERED' or 'FAILED' | Below is the instruction that describes the task:
### Response:
def status(self):
""" Status of this SMS. Can be ENROUTE, DELIVERED or FAILED
The actual status report object may be accessed via the 'report' attribute
if status is 'DELIVERED' or 'FAILED'
"""
if self.report == None:
return SentSms.ENROUTE
else:
return SentSms.DELIVERED if self.report.deliveryStatus == StatusReport.DELIVERED else SentSms.FAILED |
def configuration(self, plugin):
"""
Get plugin configuration.
Return a tuple of (on|off|default, args)
"""
conf = self.config.get(plugin, "default;").split(';')
if len(conf) == 1:
conf.append('')
return tuple(conf) | Get plugin configuration.
Return a tuple of (on|off|default, args) | Below is the the instruction that describes the task:
### Input:
Get plugin configuration.
Return a tuple of (on|off|default, args) | Below is the instruction that describes the task:
### Response:
def configuration(self, plugin):
"""
Get plugin configuration.
Return a tuple of (on|off|default, args)
"""
conf = self.config.get(plugin, "default;").split(';')
if len(conf) == 1:
conf.append('')
return tuple(conf) |
def _to_pandas(ob):
"""Convert an array-like to a pandas object.
Parameters
----------
ob : array-like
The object to convert.
Returns
-------
pandas_structure : pd.Series or pd.DataFrame
The correct structure based on the dimensionality of the data.
"""
if isinstance(ob, (pd.Series, pd.DataFrame)):
return ob
if ob.ndim == 1:
return pd.Series(ob)
elif ob.ndim == 2:
return pd.DataFrame(ob)
else:
raise ValueError(
'cannot convert array of dim > 2 to a pandas structure',
) | Convert an array-like to a pandas object.
Parameters
----------
ob : array-like
The object to convert.
Returns
-------
pandas_structure : pd.Series or pd.DataFrame
The correct structure based on the dimensionality of the data. | Below is the instruction that describes the task:
### Input:
Convert an array-like to a pandas object.
Parameters
----------
ob : array-like
The object to convert.
Returns
-------
pandas_structure : pd.Series or pd.DataFrame
The correct structure based on the dimensionality of the data.
### Response:
def _to_pandas(ob):
"""Convert an array-like to a pandas object.
Parameters
----------
ob : array-like
The object to convert.
Returns
-------
pandas_structure : pd.Series or pd.DataFrame
The correct structure based on the dimensionality of the data.
"""
if isinstance(ob, (pd.Series, pd.DataFrame)):
return ob
if ob.ndim == 1:
return pd.Series(ob)
elif ob.ndim == 2:
return pd.DataFrame(ob)
else:
raise ValueError(
'cannot convert array of dim > 2 to a pandas structure',
) |
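A short check of the dimensionality dispatch, assuming NumPy and pandas: a 1-D array becomes a Series and a 2-D array a DataFrame, exactly the two branches above.
import numpy as np
import pandas as pd
print(type(pd.Series(np.arange(3))).__name__)          # Series
print(type(pd.DataFrame(np.zeros((2, 2)))).__name__)   # DataFrame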
def get_rate_limits(response):
"""Returns a list of rate limit information from a given response's headers."""
periods = response.headers['X-RateLimit-Period']
if not periods:
return []
rate_limits = []
periods = periods.split(',')
limits = response.headers['X-RateLimit-Limit'].split(',')
remaining = response.headers['X-RateLimit-Remaining'].split(',')
reset = response.headers['X-RateLimit-Reset'].split(',')
for idx, period in enumerate(periods):
rate_limit = {}
limit_period = get_readable_time_string(period)
rate_limit["period"] = limit_period
rate_limit["period_seconds"] = period
rate_limit["request_limit"] = limits[idx]
rate_limit["requests_remaining"] = remaining[idx]
reset_datetime = get_datetime_from_timestamp(reset[idx])
rate_limit["reset"] = reset_datetime
right_now = datetime.now()
if (reset_datetime is not None) and (right_now < reset_datetime):
# add 1 second because of rounding
seconds_remaining = (reset_datetime - right_now).seconds + 1
else:
seconds_remaining = 0
rate_limit["reset_in_seconds"] = seconds_remaining
rate_limit["time_to_reset"] = get_readable_time_string(seconds_remaining)
rate_limits.append(rate_limit)
return rate_limits | Returns a list of rate limit information from a given response's headers. | Below is the instruction that describes the task:
### Input:
Returns a list of rate limit information from a given response's headers.
### Response:
def get_rate_limits(response):
"""Returns a list of rate limit information from a given response's headers."""
periods = response.headers['X-RateLimit-Period']
if not periods:
return []
rate_limits = []
periods = periods.split(',')
limits = response.headers['X-RateLimit-Limit'].split(',')
remaining = response.headers['X-RateLimit-Remaining'].split(',')
reset = response.headers['X-RateLimit-Reset'].split(',')
for idx, period in enumerate(periods):
rate_limit = {}
limit_period = get_readable_time_string(period)
rate_limit["period"] = limit_period
rate_limit["period_seconds"] = period
rate_limit["request_limit"] = limits[idx]
rate_limit["requests_remaining"] = remaining[idx]
reset_datetime = get_datetime_from_timestamp(reset[idx])
rate_limit["reset"] = reset_datetime
right_now = datetime.now()
if (reset_datetime is not None) and (right_now < reset_datetime):
# add 1 second because of rounding
seconds_remaining = (reset_datetime - right_now).seconds + 1
else:
seconds_remaining = 0
rate_limit["reset_in_seconds"] = seconds_remaining
rate_limit["time_to_reset"] = get_readable_time_string(seconds_remaining)
rate_limits.append(rate_limit)
return rate_limits |
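The per-period bookkeeping can be sketched without an HTTP response object, assuming only the standard library: split the comma-separated header strings in parallel and compute the seconds left until each reset. The header values are made up, and the original helpers (get_readable_time_string, get_datetime_from_timestamp) are replaced by plain datetime calls.
from datetime import datetime, timedelta
periods = "60,3600".split(",")
limits = "10,100".split(",")
remaining = "7,42".split(",")
resets = [datetime.now() + timedelta(seconds=30)] * 2   # stand-in reset times
for period, limit, left, reset in zip(periods, limits, remaining, resets):
    secs = max(0, int((reset - datetime.now()).total_seconds()) + 1)  # +1 second for rounding
    print(f"{left}/{limit} requests left in the {period}s window, resets in ~{secs}s")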
def get_all_not_wh_regions(db_connection):
""" Gets a list of all regions that are not WH regions.
:return: A list of all regions not including wormhole regions. Results have regionID and regionName.
:rtype: list
"""
if not hasattr(get_all_not_wh_regions, '_results'):
sql = 'CALL get_all_not_wh_regions();'
results = execute_sql(sql, db_connection)
get_all_not_wh_regions._results = results
return get_all_not_wh_regions._results | Gets a list of all regions that are not WH regions.
:return: A list of all regions not including wormhole regions. Results have regionID and regionName.
:rtype: list | Below is the the instruction that describes the task:
### Input:
Gets a list of all regions that are not WH regions.
:return: A list of all regions not including wormhole regions. Results have regionID and regionName.
:rtype: list | Below is the instruction that describes the task:
### Response:
def get_all_not_wh_regions(db_connection):
""" Gets a list of all regions that are not WH regions.
:return: A list of all regions not including wormhole regions. Results have regionID and regionName.
:rtype: list
"""
if not hasattr(get_all_not_wh_regions, '_results'):
sql = 'CALL get_all_not_wh_regions();'
results = execute_sql(sql, db_connection)
get_all_not_wh_regions._results = results
return get_all_not_wh_regions._results |
def rescan_images(registry):
'''Update the kernel image metadata from all configured docker registries.'''
with Session() as session:
try:
result = session.Image.rescanImages(registry)
except Exception as e:
print_error(e)
sys.exit(1)
if result['ok']:
print("kernel image metadata updated")
else:
print("rescanning failed: {0}".format(result['msg'])) | Update the kernel image metadata from all configured docker registries. | Below is the the instruction that describes the task:
### Input:
Update the kernel image metadata from all configured docker registries.
### Response:
def rescan_images(registry):
'''Update the kernel image metadata from all configured docker registries.'''
with Session() as session:
try:
result = session.Image.rescanImages(registry)
except Exception as e:
print_error(e)
sys.exit(1)
if result['ok']:
print("kernel image metadata updated")
else:
print("rescanning failed: {0}".format(result['msg'])) |
def convert_to_file(file, ndarr):
"""
Writes the contents of the numpy.ndarray ndarr to file in IDX format.
file is a file-like object (with write() method) or a file name.
"""
if isinstance(file, six_string_types):
with open(file, 'wb') as fp:
_internal_write(fp, ndarr)
else:
_internal_write(file, ndarr) | Writes the contents of the numpy.ndarray ndarr to file in IDX format.
file is a file-like object (with write() method) or a file name. | Below is the the instruction that describes the task:
### Input:
Writes the contents of the numpy.ndarray ndarr to file in IDX format.
file is a file-like object (with write() method) or a file name. | Below is the instruction that describes the task:
### Response:
def convert_to_file(file, ndarr):
"""
Writes the contents of the numpy.ndarray ndarr to file in IDX format.
file is a file-like object (with write() method) or a file name.
"""
if isinstance(file, six_string_types):
with open(file, 'wb') as fp:
_internal_write(fp, ndarr)
else:
_internal_write(file, ndarr) |
def conference_undeaf(self, call_params):
"""REST Conference Undeaf helper
"""
path = '/' + self.api_version + '/ConferenceUndeaf/'
method = 'POST'
return self.request(path, method, call_params) | REST Conference Undeaf helper | Below is the instruction that describes the task:
### Input:
REST Conference Undeaf helper
### Response:
def conference_undeaf(self, call_params):
"""REST Conference Undeaf helper
"""
path = '/' + self.api_version + '/ConferenceUndeaf/'
method = 'POST'
return self.request(path, method, call_params) |
def requestPdpContextActivation(AccessPointName_presence=0):
"""REQUEST PDP CONTEXT ACTIVATION Section 9.5.4"""
a = TpPd(pd=0x8)
b = MessageType(mesType=0x44) # 01000100
c = PacketDataProtocolAddress()
packet = a / b / c
if AccessPointName_presence is 1:
d = AccessPointName(ieiAPN=0x28)
packet = packet / d
return packet | REQUEST PDP CONTEXT ACTIVATION Section 9.5.4 | Below is the instruction that describes the task:
### Input:
REQUEST PDP CONTEXT ACTIVATION Section 9.5.4
### Response:
def requestPdpContextActivation(AccessPointName_presence=0):
"""REQUEST PDP CONTEXT ACTIVATION Section 9.5.4"""
a = TpPd(pd=0x8)
b = MessageType(mesType=0x44) # 01000100
c = PacketDataProtocolAddress()
packet = a / b / c
if AccessPointName_presence is 1:
d = AccessPointName(ieiAPN=0x28)
packet = packet / d
return packet |
def smart_account(app):
"""尝试使用内置方式构建账户"""
if os.environ['FANTASY_ACTIVE_ACCOUNT'] == 'no':
return
from flask_security import SQLAlchemyUserDatastore, Security
account_module_name, account_class_name = os.environ[
'FANTASY_ACCOUNT_MODEL'].rsplit('.', 1)
account_module = importlib.import_module(account_module_name)
account_class = getattr(account_module, account_class_name)
role_module_name, role_class_name = os.environ[
'FANTASY_ROLE_MODEL'].rsplit('.', 1)
role_module = importlib.import_module(role_module_name)
role_class = getattr(role_module, role_class_name)
r = True if os.environ[
'FANTASY_ACCOUNT_SECURITY_MODE'] != 'no' else False
Security(app,
SQLAlchemyUserDatastore(
app.db, account_class, role_class),
register_blueprint=r)
pass | Try to build the account using the built-in approach | Below is the instruction that describes the task:
### Input:
Try to build the account using the built-in approach
### Response:
def smart_account(app):
"""尝试使用内置方式构建账户"""
if os.environ['FANTASY_ACTIVE_ACCOUNT'] == 'no':
return
from flask_security import SQLAlchemyUserDatastore, Security
account_module_name, account_class_name = os.environ[
'FANTASY_ACCOUNT_MODEL'].rsplit('.', 1)
account_module = importlib.import_module(account_module_name)
account_class = getattr(account_module, account_class_name)
role_module_name, role_class_name = os.environ[
'FANTASY_ROLE_MODEL'].rsplit('.', 1)
role_module = importlib.import_module(role_module_name)
role_class = getattr(role_module, role_class_name)
r = True if os.environ[
'FANTASY_ACCOUNT_SECURITY_MODE'] != 'no' else False
Security(app,
SQLAlchemyUserDatastore(
app.db, account_class, role_class),
register_blueprint=r)
pass |
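The rsplit-plus-importlib pattern used to resolve FANTASY_ACCOUNT_MODEL can be demonstrated with a class that ships with Python; 'collections.OrderedDict' stands in for a dotted model path.
import importlib
dotted_path = "collections.OrderedDict"   # stand-in for os.environ['FANTASY_ACCOUNT_MODEL']
module_name, class_name = dotted_path.rsplit(".", 1)
module = importlib.import_module(module_name)
cls = getattr(module, class_name)
print(cls.__name__)                       # OrderedDict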
def log_url (self, url_data):
"""Write url checking info."""
self.writeln()
if self.has_part('url'):
self.write_url(url_data)
if url_data.name and self.has_part('name'):
self.write_name(url_data)
if url_data.parent_url and self.has_part('parenturl'):
self.write_parent(url_data)
if url_data.base_ref and self.has_part('base'):
self.write_base(url_data)
if url_data.url and self.has_part('realurl'):
self.write_real(url_data)
if url_data.checktime and self.has_part('checktime'):
self.write_checktime(url_data)
if url_data.dltime >= 0 and self.has_part('dltime'):
self.write_dltime(url_data)
if url_data.size >= 0 and self.has_part('dlsize'):
self.write_size(url_data)
if url_data.info and self.has_part('info'):
self.write_info(url_data)
if url_data.modified and self.has_part('modified'):
self.write_modified(url_data)
if url_data.warnings and self.has_part('warning'):
self.write_warning(url_data)
if self.has_part('result'):
self.write_result(url_data)
self.flush() | Write url checking info. | Below is the instruction that describes the task:
### Input:
Write url checking info.
### Response:
def log_url (self, url_data):
"""Write url checking info."""
self.writeln()
if self.has_part('url'):
self.write_url(url_data)
if url_data.name and self.has_part('name'):
self.write_name(url_data)
if url_data.parent_url and self.has_part('parenturl'):
self.write_parent(url_data)
if url_data.base_ref and self.has_part('base'):
self.write_base(url_data)
if url_data.url and self.has_part('realurl'):
self.write_real(url_data)
if url_data.checktime and self.has_part('checktime'):
self.write_checktime(url_data)
if url_data.dltime >= 0 and self.has_part('dltime'):
self.write_dltime(url_data)
if url_data.size >= 0 and self.has_part('dlsize'):
self.write_size(url_data)
if url_data.info and self.has_part('info'):
self.write_info(url_data)
if url_data.modified and self.has_part('modified'):
self.write_modified(url_data)
if url_data.warnings and self.has_part('warning'):
self.write_warning(url_data)
if self.has_part('result'):
self.write_result(url_data)
self.flush() |
def create(callback=None, path=None, method=Method.POST, resource=None, tags=None, summary="Create a new resource",
middleware=None):
# type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation
"""
Decorator to configure an operation that creates a resource.
"""
def inner(c):
op = ResourceOperation(c, path or NoPath, method, resource, tags, summary, middleware)
op.responses.add(Response(HTTPStatus.CREATED, "{name} has been created"))
op.responses.add(Response(HTTPStatus.BAD_REQUEST, "Validation failed.", Error))
return op
return inner(callback) if callback else inner | Decorator to configure an operation that creates a resource. | Below is the instruction that describes the task:
### Input:
Decorator to configure an operation that creates a resource.
### Response:
def create(callback=None, path=None, method=Method.POST, resource=None, tags=None, summary="Create a new resource",
middleware=None):
# type: (Callable, Path, Methods, Resource, Tags, str, List[Any]) -> Operation
"""
Decorator to configure an operation that creates a resource.
"""
def inner(c):
op = ResourceOperation(c, path or NoPath, method, resource, tags, summary, middleware)
op.responses.add(Response(HTTPStatus.CREATED, "{name} has been created"))
op.responses.add(Response(HTTPStatus.BAD_REQUEST, "Validation failed.", Error))
return op
return inner(callback) if callback else inner |
def _create_affine_multiframe(multiframe_dicom):
"""
Function to create the affine matrix for a siemens mosaic dataset
This will work for siemens dti and 4D if in mosaic format
"""
first_frame = multiframe_dicom[Tag(0x5200, 0x9230)][0]
last_frame = multiframe_dicom[Tag(0x5200, 0x9230)][-1]
# Create affine matrix (http://nipy.sourceforge.net/nibabel/dicom/dicom_orientation.html#dicom-slice-affine)
image_orient1 = numpy.array(first_frame.PlaneOrientationSequence[0].ImageOrientationPatient)[0:3].astype(float)
image_orient2 = numpy.array(first_frame.PlaneOrientationSequence[0].ImageOrientationPatient)[3:6].astype(float)
normal = numpy.cross(image_orient1, image_orient2)
delta_r = float(first_frame[0x2005, 0x140f][0].PixelSpacing[0])
delta_c = float(first_frame[0x2005, 0x140f][0].PixelSpacing[1])
image_pos = numpy.array(first_frame.PlanePositionSequence[0].ImagePositionPatient).astype(float)
last_image_pos = numpy.array(last_frame.PlanePositionSequence[0].ImagePositionPatient).astype(float)
number_of_stack_slices = int(common.get_ss_value(multiframe_dicom[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)]))
delta_s = abs(numpy.linalg.norm(last_image_pos - image_pos)) / (number_of_stack_slices - 1)
return numpy.array(
[[-image_orient1[0] * delta_c, -image_orient2[0] * delta_r, -delta_s * normal[0], -image_pos[0]],
[-image_orient1[1] * delta_c, -image_orient2[1] * delta_r, -delta_s * normal[1], -image_pos[1]],
[image_orient1[2] * delta_c, image_orient2[2] * delta_r, delta_s * normal[2], image_pos[2]],
[0, 0, 0, 1]]) | Function to create the affine matrix for a siemens mosaic dataset
This will work for siemens dti and 4D if in mosaic format | Below is the instruction that describes the task:
### Input:
Function to create the affine matrix for a siemens mosaic dataset
This will work for siemens dti and 4D if in mosaic format
### Response:
def _create_affine_multiframe(multiframe_dicom):
"""
Function to create the affine matrix for a siemens mosaic dataset
This will work for siemens dti and 4D if in mosaic format
"""
first_frame = multiframe_dicom[Tag(0x5200, 0x9230)][0]
last_frame = multiframe_dicom[Tag(0x5200, 0x9230)][-1]
# Create affine matrix (http://nipy.sourceforge.net/nibabel/dicom/dicom_orientation.html#dicom-slice-affine)
image_orient1 = numpy.array(first_frame.PlaneOrientationSequence[0].ImageOrientationPatient)[0:3].astype(float)
image_orient2 = numpy.array(first_frame.PlaneOrientationSequence[0].ImageOrientationPatient)[3:6].astype(float)
normal = numpy.cross(image_orient1, image_orient2)
delta_r = float(first_frame[0x2005, 0x140f][0].PixelSpacing[0])
delta_c = float(first_frame[0x2005, 0x140f][0].PixelSpacing[1])
image_pos = numpy.array(first_frame.PlanePositionSequence[0].ImagePositionPatient).astype(float)
last_image_pos = numpy.array(last_frame.PlanePositionSequence[0].ImagePositionPatient).astype(float)
number_of_stack_slices = int(common.get_ss_value(multiframe_dicom[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)]))
delta_s = abs(numpy.linalg.norm(last_image_pos - image_pos)) / (number_of_stack_slices - 1)
return numpy.array(
[[-image_orient1[0] * delta_c, -image_orient2[0] * delta_r, -delta_s * normal[0], -image_pos[0]],
[-image_orient1[1] * delta_c, -image_orient2[1] * delta_r, -delta_s * normal[1], -image_pos[1]],
[image_orient1[2] * delta_c, image_orient2[2] * delta_r, delta_s * normal[2], image_pos[2]],
[0, 0, 0, 1]]) |
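A reduced numeric sketch of the geometry, assuming NumPy: the slice normal is the cross product of the two in-plane orientation vectors, and the 4x4 affine packs scaled direction columns plus the origin. The orientation, spacing and position values are invented, and the LPS-to-RAS sign flips applied by the original are omitted.
import numpy as np
orient1 = np.array([1.0, 0.0, 0.0])   # row direction (illustrative)
orient2 = np.array([0.0, 1.0, 0.0])   # column direction (illustrative)
normal = np.cross(orient1, orient2)   # slice direction
delta_r, delta_c, delta_s = 0.5, 0.5, 1.0
origin = np.array([-100.0, -100.0, 50.0])
affine = np.eye(4)
affine[:3, 0] = orient1 * delta_c
affine[:3, 1] = orient2 * delta_r
affine[:3, 2] = normal * delta_s
affine[:3, 3] = origin
print(affine)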
def spectra(i, **kwargs):
"""
Define colours by number.
Can be plotted either in order of gray scale or in the 'best' order for
having a strong gray contrast for only three or four lines
:param i: the index to access a colour
"""
ordered = kwargs.get('ordered', False)
options = kwargs.get('options', 'best')
gray = kwargs.get('gray', False)
CD = {}
CD['dark blue'] = (1.0, 0.0, 0.55) # 0
CD['dark green'] = (0.15, 0.35, 0.0) # 1
CD['dark red'] = (0.73, 0.0, 0.0) # 2
CD['dark purple'] = (0.8, 0.0, 0.8) # 3
CD['light green'] = (0.49, 0.64, 0.0) # 4
CD['orange'] = (1.0, 0.5, 0.0) # 5
CD['light blue'] = (0.5, 0.85, 1.0) # 6
CD['pink'] = (1.0, 0.8, 0.8) # 7
CD['brown'] = (0.5, 0.3, 0.0) # 8
CD['red'] = (0.9, 0.0, 0.0) # 9
CD['greenish blue'] = (0.12, .8, .8) # 10
CD['bluey purple'] = (0.8, 0.85, 1.0) # 12
CD['yellow'] = (1.0, 1.0, 0.0) # 6
CD['dark gray'] = (0.25, 0.25, 0.25) #
CD['mid gray'] = (0.5, 0.5, 0.5) #
CD['light gray'] = (0.75, 0.75, 0.75) #
CD['black5'] = (0.05, 0.05, 0.05) #
CD['black'] = (0.0, 0.0, 0.0) #
CD['white'] = (1.0, 1.0, 1.0) #
if isinstance(i, int):
i = i
elif isinstance(i, float):
i = int(i)
elif isinstance(i, str):
dat = CD[i]
return dat
DtoL = ['dark blue', 'dark green', 'dark red', 'brown',
'light green', 'orange', 'light blue', 'pink', 'dark purple',
'red', 'greenish blue', 'bluey purple', 'yellow',
'dark gray', 'mid gray', 'light gray']
Best = ['dark blue', 'orange', 'light blue', 'dark purple', 'dark green',
'bluey purple', 'dark red', 'light green', 'pink', 'brown',
'red', 'yellow', 'greenish blue', 'dark gray',
'mid gray', 'light gray']
Dots = ['dark blue', 'yellow', 'light blue', 'dark purple', 'dark green', 'orange',
'bluey purple', 'dark red', 'light green', 'pink', 'brown',
'red', 'greenish blue', 'dark gray',
'mid gray', 'light gray']
# ll = [0, 5, 2, 4, 1, 6, 3, 7, 8, 11, 9, 12, 10, 13, 14, 15] # change 11 w 5
ind = i % len(Best)
dat = CD[Best[ind]]
col = Best[ind]
if ordered: # if ordered is true then the colours are accessed from darkest to lightest
ind = i % len(DtoL)
dat = CD[DtoL[ind]]
col = DtoL[ind]
if options == "dots":
ind = i % len(Dots)
dat = CD[Dots[ind]]
col = Dots[ind]
if options == "ordered":
ind = i % len(DtoL)
dat = CD[DtoL[ind]]
col = DtoL[ind]
gray_value = 0.299 * dat[0] + 0.587 * dat[1] + 0.114 * dat[2] # calculate the gray scale value
if gray:
return gray_value, gray_value, gray_value
return dat | Define colours by number.
Can be plotted either in order of gray scale or in the 'best' order for
having a strong gray contrast for only three or four lines
:param i: the index to access a colour | Below is the instruction that describes the task:
### Input:
Define colours by number.
Can be plotted either in order of gray scale or in the 'best' order for
having a strong gray contrast for only three or four lines
:param i: the index to access a colour
### Response:
def spectra(i, **kwargs):
"""
Define colours by number.
Can be plotted either in order of gray scale or in the 'best' order for
having a strong gray contrast for only three or four lines
:param i: the index to access a colour
"""
ordered = kwargs.get('ordered', False)
options = kwargs.get('options', 'best')
gray = kwargs.get('gray', False)
CD = {}
CD['dark blue'] = (1.0, 0.0, 0.55) # 0
CD['dark green'] = (0.15, 0.35, 0.0) # 1
CD['dark red'] = (0.73, 0.0, 0.0) # 2
CD['dark purple'] = (0.8, 0.0, 0.8) # 3
CD['light green'] = (0.49, 0.64, 0.0) # 4
CD['orange'] = (1.0, 0.5, 0.0) # 5
CD['light blue'] = (0.5, 0.85, 1.0) # 6
CD['pink'] = (1.0, 0.8, 0.8) # 7
CD['brown'] = (0.5, 0.3, 0.0) # 8
CD['red'] = (0.9, 0.0, 0.0) # 9
CD['greenish blue'] = (0.12, .8, .8) # 10
CD['bluey purple'] = (0.8, 0.85, 1.0) # 12
CD['yellow'] = (1.0, 1.0, 0.0) # 6
CD['dark gray'] = (0.25, 0.25, 0.25) #
CD['mid gray'] = (0.5, 0.5, 0.5) #
CD['light gray'] = (0.75, 0.75, 0.75) #
CD['black5'] = (0.05, 0.05, 0.05) #
CD['black'] = (0.0, 0.0, 0.0) #
CD['white'] = (1.0, 1.0, 1.0) #
if isinstance(i, int):
i = i
elif isinstance(i, float):
i = int(i)
elif isinstance(i, str):
dat = CD[i]
return dat
DtoL = ['dark blue', 'dark green', 'dark red', 'brown',
'light green', 'orange', 'light blue', 'pink', 'dark purple',
'red', 'greenish blue', 'bluey purple', 'yellow',
'dark gray', 'mid gray', 'light gray']
Best = ['dark blue', 'orange', 'light blue', 'dark purple', 'dark green',
'bluey purple', 'dark red', 'light green', 'pink', 'brown',
'red', 'yellow', 'greenish blue', 'dark gray',
'mid gray', 'light gray']
Dots = ['dark blue', 'yellow', 'light blue', 'dark purple', 'dark green', 'orange',
'bluey purple', 'dark red', 'light green', 'pink', 'brown',
'red', 'greenish blue', 'dark gray',
'mid gray', 'light gray']
# ll = [0, 5, 2, 4, 1, 6, 3, 7, 8, 11, 9, 12, 10, 13, 14, 15] # change 11 w 5
ind = i % len(Best)
dat = CD[Best[ind]]
col = Best[ind]
if ordered: # if ordered is true then the colours are accessed from darkest to lightest
ind = i % len(DtoL)
dat = CD[DtoL[ind]]
col = DtoL[ind]
if options == "dots":
ind = i % len(Dots)
dat = CD[Dots[ind]]
col = Dots[ind]
if options == "ordered":
ind = i % len(DtoL)
dat = CD[DtoL[ind]]
col = DtoL[ind]
gray_value = 0.299 * dat[0] + 0.587 * dat[1] + 0.114 * dat[2] # calculate the gray scale value
if gray:
return gray_value, gray_value, gray_value
return dat |
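The gray-scale conversion at the end of spectra is the standard Rec. 601 luma weighting; a two-line check, assuming nothing beyond the standard library:
r, g, b = 1.0, 0.5, 0.0                      # the 'orange' entry above
gray = 0.299 * r + 0.587 * g + 0.114 * b
print(gray)                                  # about 0.5925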
def clear(self, *args, **kwargs):
"""Clear only drafts.
Status required: ``'draft'``.
Meta information inside `_deposit` are preserved.
"""
super(Deposit, self).clear(*args, **kwargs) | Clear only drafts.
Status required: ``'draft'``.
Meta information inside `_deposit` are preserved. | Below is the instruction that describes the task:
### Input:
Clear only drafts.
Status required: ``'draft'``.
Meta information inside `_deposit` are preserved.
### Response:
def clear(self, *args, **kwargs):
"""Clear only drafts.
Status required: ``'draft'``.
Meta information inside `_deposit` are preserved.
"""
super(Deposit, self).clear(*args, **kwargs) |
def str_to_num(i, exact_match=True):
"""
Attempts to convert a str to either an int or float
"""
# TODO: Cleanup -- this is really ugly
if not isinstance(i, str):
return i
try:
if not exact_match:
return int(i)
elif str(int(i)) == i:
return int(i)
elif str(float(i)) == i:
return float(i)
else:
pass
except ValueError:
pass
return i | Attempts to convert a str to either an int or float | Below is the instruction that describes the task:
### Input:
Attempts to convert a str to either an int or float
### Response:
def str_to_num(i, exact_match=True):
"""
Attempts to convert a str to either an int or float
"""
# TODO: Cleanup -- this is really ugly
if not isinstance(i, str):
return i
try:
if not exact_match:
return int(i)
elif str(int(i)) == i:
return int(i)
elif str(float(i)) == i:
return float(i)
else:
pass
except ValueError:
pass
return i |
def _customized_loader(container, loader=Loader, mapping_tag=_MAPPING_TAG):
"""
Create or update loader with making given callble 'container' to make
mapping objects such as dict and OrderedDict, used to construct python
object from yaml mapping node internally.
:param container: Set container used internally
"""
def construct_mapping(loader, node, deep=False):
"""Construct python object from yaml mapping node, based on
:meth:`yaml.BaseConstructor.construct_mapping` in PyYAML (MIT).
"""
loader.flatten_mapping(node)
if not isinstance(node, yaml.MappingNode):
msg = "expected a mapping node, but found %s" % node.id
raise yaml.constructor.ConstructorError(None, None, msg,
node.start_mark)
mapping = container()
for key_node, value_node in node.value:
key = loader.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
eargs = ("while constructing a mapping",
node.start_mark,
"found unacceptable key (%s)" % exc,
key_node.start_mark)
raise yaml.constructor.ConstructorError(*eargs)
value = loader.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
tag = "tag:yaml.org,2002:python/unicode"
def construct_ustr(loader, node):
"""Unicode string constructor"""
return loader.construct_scalar(node)
try:
loader.add_constructor(tag, construct_ustr)
except NameError:
pass
if type(container) != dict:
loader.add_constructor(mapping_tag, construct_mapping)
return loader | Create or update loader so that the given callable 'container' is used to make
mapping objects such as dict and OrderedDict, used to construct python
object from yaml mapping node internally.
:param container: Set container used internally | Below is the instruction that describes the task:
### Input:
Create or update loader so that the given callable 'container' is used to make
mapping objects such as dict and OrderedDict, used to construct python
object from yaml mapping node internally.
:param container: Set container used internally
### Response:
def _customized_loader(container, loader=Loader, mapping_tag=_MAPPING_TAG):
"""
Create or update loader so that the given callable 'container' is used to make
mapping objects such as dict and OrderedDict, used to construct python
object from yaml mapping node internally.
:param container: Set container used internally
"""
def construct_mapping(loader, node, deep=False):
"""Construct python object from yaml mapping node, based on
:meth:`yaml.BaseConstructor.construct_mapping` in PyYAML (MIT).
"""
loader.flatten_mapping(node)
if not isinstance(node, yaml.MappingNode):
msg = "expected a mapping node, but found %s" % node.id
raise yaml.constructor.ConstructorError(None, None, msg,
node.start_mark)
mapping = container()
for key_node, value_node in node.value:
key = loader.construct_object(key_node, deep=deep)
try:
hash(key)
except TypeError as exc:
eargs = ("while constructing a mapping",
node.start_mark,
"found unacceptable key (%s)" % exc,
key_node.start_mark)
raise yaml.constructor.ConstructorError(*eargs)
value = loader.construct_object(value_node, deep=deep)
mapping[key] = value
return mapping
tag = "tag:yaml.org,2002:python/unicode"
def construct_ustr(loader, node):
"""Unicode string constructor"""
return loader.construct_scalar(node)
try:
loader.add_constructor(tag, construct_ustr)
except NameError:
pass
if type(container) != dict:
loader.add_constructor(mapping_tag, construct_mapping)
return loader |
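A compact, self-contained variant of the same idea using PyYAML's SafeLoader, assuming PyYAML is installed: register a mapping constructor on a loader subclass so every YAML mapping comes back as an OrderedDict. This is the common recipe the function above generalises.
from collections import OrderedDict
import yaml
class OrderedLoader(yaml.SafeLoader):
    pass
def _construct_mapping(loader, node):
    loader.flatten_mapping(node)
    return OrderedDict(loader.construct_pairs(node))
OrderedLoader.add_constructor(
    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, _construct_mapping)
print(yaml.load("b: 1\na: 2\n", Loader=OrderedLoader))  # OrderedDict([('b', 1), ('a', 2)])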
def create_widget(self):
""" Create the underlying widget.
A dialog is not a subclass of view, hence we don't set name as widget
or children will try to use it as their parent.
"""
d = self.declaration
self.dialog = BottomSheetDialog(self.get_context(), d.style) | Create the underlying widget.
A dialog is not a subclass of view, hence we don't set name as widget
or children will try to use it as their parent. | Below is the instruction that describes the task:
### Input:
Create the underlying widget.
A dialog is not a subclass of view, hence we don't set name as widget
or children will try to use it as their parent.
### Response:
def create_widget(self):
""" Create the underlying widget.
A dialog is not a subclass of view, hence we don't set name as widget
or children will try to use it as their parent.
"""
d = self.declaration
self.dialog = BottomSheetDialog(self.get_context(), d.style) |
def euler_trans_matrix(etheta, elongan, eincl):
"""
Get the transformation matrix R to translate/rotate a mesh according to
euler angles.
The matrix is
R(long,incl,theta) =
Rz(pi).Rz(long).Rx(incl).Rz(theta)
Rz(long).Rx(-incl).Rz(theta).Rz(pi)
where
Rx(u) = 1, 0, 0
0, cos(u), -sin(u)
0, sin(u), cos(u)
Ry(u) = cos(u), 0, sin(u)
0, 1, 0
-sin(u), 0, cos(u)
Rz(u) = cos(u), -sin(u), 0
sin(u), cos(u), 0
0, 0, 1
Rz(pi) = reflection across z-axis
Note:
R(0,0,0) = -1, 0, 0
0, -1, 0
0, 0, 1
:parameter float etheta: euler theta angle
:parameter float elongan: euler long of asc node angle
:parameter float eincl: euler inclination angle
:return: matrix with size 3x3
"""
s1 = sin(eincl);
c1 = cos(eincl);
s2 = sin(elongan);
c2 = cos(elongan);
s3 = sin(etheta);
c3 = cos(etheta);
c1s3 = c1*s3;
c1c3 = c1*c3;
return np.array([
[-c2*c3+s2*c1s3, c2*s3+s2*c1c3, -s2*s1],
[-s2*c3-c2*c1s3, s2*s3-c2*c1c3, c2*s1],
[s1*s3, s1*c3, c1]
]) | Get the transformation matrix R to translate/rotate a mesh according to
euler angles.
The matrix is
R(long,incl,theta) =
Rz(pi).Rz(long).Rx(incl).Rz(theta)
Rz(long).Rx(-incl).Rz(theta).Rz(pi)
where
Rx(u) = 1, 0, 0
0, cos(u), -sin(u)
0, sin(u), cos(u)
Ry(u) = cos(u), 0, sin(u)
0, 1, 0
-sin(u), 0, cos(u)
Rz(u) = cos(u), -sin(u), 0
sin(u), cos(u), 0
0, 0, 1
Rz(pi) = reflection across z-axis
Note:
R(0,0,0) = -1, 0, 0
0, -1, 0
0, 0, 1
:parameter float etheta: euler theta angle
:parameter float elongan: euler long of asc node angle
:parameter float eincl: euler inclination angle
:return: matrix with size 3x3 | Below is the instruction that describes the task:
### Input:
Get the transformation matrix R to translate/rotate a mesh according to
euler angles.
The matrix is
R(long,incl,theta) =
Rz(pi).Rz(long).Rx(incl).Rz(theta)
Rz(long).Rx(-incl).Rz(theta).Rz(pi)
where
Rx(u) = 1, 0, 0
0, cos(u), -sin(u)
0, sin(u), cos(u)
Ry(u) = cos(u), 0, sin(u)
0, 1, 0
-sin(u), 0, cos(u)
Rz(u) = cos(u), -sin(u), 0
sin(u), cos(u), 0
0, 0, 1
Rz(pi) = reflection across z-axis
Note:
R(0,0,0) = -1, 0, 0
0, -1, 0
0, 0, 1
:parameter float etheta: euler theta angle
:parameter float elongan: euler long of asc node angle
:parameter float eincl: euler inclination angle
:return: matrix with size 3x3
### Response:
def euler_trans_matrix(etheta, elongan, eincl):
"""
Get the transformation matrix R to translate/rotate a mesh according to
euler angles.
The matrix is
R(long,incl,theta) =
Rz(pi).Rz(long).Rx(incl).Rz(theta)
Rz(long).Rx(-incl).Rz(theta).Rz(pi)
where
Rx(u) = 1, 0, 0
0, cos(u), -sin(u)
0, sin(u), cos(u)
Ry(u) = cos(u), 0, sin(u)
0, 1, 0
-sin(u), 0, cos(u)
Rz(u) = cos(u), -sin(u), 0
sin(u), cos(u), 0
0, 0, 1
Rz(pi) = reflection across z-axis
Note:
R(0,0,0) = -1, 0, 0
0, -1, 0
0, 0, 1
:parameter float etheta: euler theta angle
:parameter float elongan: euler long of asc node angle
:parameter float eincl: euler inclination angle
:return: matrix with size 3x3
"""
s1 = sin(eincl);
c1 = cos(eincl);
s2 = sin(elongan);
c2 = cos(elongan);
s3 = sin(etheta);
c3 = cos(etheta);
c1s3 = c1*s3;
c1c3 = c1*c3;
return np.array([
[-c2*c3+s2*c1s3, c2*s3+s2*c1c3, -s2*s1],
[-s2*c3-c2*c1s3, s2*s3-c2*c1c3, c2*s1],
[s1*s3, s1*c3, c1]
]) |
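A numeric spot check of the closed form, assuming NumPy: evaluating the same entries at zero angles reproduces the diag(-1, -1, 1) matrix quoted in the docstring.
from math import sin, cos
import numpy as np
etheta = elongan = eincl = 0.0
s1, c1 = sin(eincl), cos(eincl)
s2, c2 = sin(elongan), cos(elongan)
s3, c3 = sin(etheta), cos(etheta)
R = np.array([
    [-c2*c3 + s2*c1*s3, c2*s3 + s2*c1*c3, -s2*s1],
    [-s2*c3 - c2*c1*s3, s2*s3 - c2*c1*c3, c2*s1],
    [s1*s3, s1*c3, c1],
])
print(R)   # diag(-1, -1, 1), matching the R(0,0,0) note in the docstring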
def user_twitter_list_bag_of_words(twitter_list_corpus,
sent_tokenize, _treebank_word_tokenize,
tagger, lemmatizer, lemmatize, stopset,
first_cap_re, all_cap_re, digits_punctuation_whitespace_re,
pos_set):
"""
Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user.
Inputs: - twitter_list_corpus: A python list of Twitter lists in json format.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Output: - bag_of_words: A bag-of-words in python dictionary format.
- lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
"""
# Extract a bag-of-words from a list of Twitter lists.
# May result in empty sets
list_of_keyword_sets, list_of_lemma_to_keywordbags = clean_list_of_twitter_list(twitter_list_corpus,
sent_tokenize, _treebank_word_tokenize,
tagger, lemmatizer, lemmatize, stopset,
first_cap_re, all_cap_re, digits_punctuation_whitespace_re,
pos_set)
# Reduce keyword sets.
bag_of_words = reduce_list_of_bags_of_words(list_of_keyword_sets)
# Reduce lemma to keywordbag maps.
lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int))
for lemma_to_keywordbag in list_of_lemma_to_keywordbags:
for lemma, keywordbag in lemma_to_keywordbag.items():
for keyword, multiplicity in keywordbag.items():
lemma_to_keywordbag_total[lemma][keyword] += multiplicity
return bag_of_words, lemma_to_keywordbag_total | Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user.
Inputs: - twitter_list_corpus: A python list of Twitter lists in json format.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Output: - bag_of_words: A bag-of-words in python dictionary format.
- lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords. | Below is the instruction that describes the task:
### Input:
Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user.
Inputs: - twitter_list_corpus: A python list of Twitter lists in json format.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Output: - bag_of_words: A bag-of-words in python dictionary format.
- lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
### Response:
def user_twitter_list_bag_of_words(twitter_list_corpus,
sent_tokenize, _treebank_word_tokenize,
tagger, lemmatizer, lemmatize, stopset,
first_cap_re, all_cap_re, digits_punctuation_whitespace_re,
pos_set):
"""
Extract a bag-of-words for a corpus of Twitter lists pertaining to a Twitter user.
Inputs: - twitter_list_corpus: A python list of Twitter lists in json format.
- lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
Output: - bag_of_words: A bag-of-words in python dictionary format.
- lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
"""
# Extract a bag-of-words from a list of Twitter lists.
# May result in empty sets
list_of_keyword_sets, list_of_lemma_to_keywordbags = clean_list_of_twitter_list(twitter_list_corpus,
sent_tokenize, _treebank_word_tokenize,
tagger, lemmatizer, lemmatize, stopset,
first_cap_re, all_cap_re, digits_punctuation_whitespace_re,
pos_set)
# Reduce keyword sets.
bag_of_words = reduce_list_of_bags_of_words(list_of_keyword_sets)
# Reduce lemma to keywordbag maps.
lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int))
for lemma_to_keywordbag in list_of_lemma_to_keywordbags:
for lemma, keywordbag in lemma_to_keywordbag.items():
for keyword, multiplicity in keywordbag.items():
lemma_to_keywordbag_total[lemma][keyword] += multiplicity
return bag_of_words, lemma_to_keywordbag_total |
def _populate_inputs(self, total):
"""Request the names for all active, configured inputs on the device.
Once we learn how many inputs are configured, this function is called
which will ask for the name of each active input.
"""
total = total + 1
for input_number in range(1, total):
self.query('ISN'+str(input_number).zfill(2)) | Request the names for all active, configured inputs on the device.
Once we learn how many inputs are configured, this function is called
which will ask for the name of each active input. | Below is the instruction that describes the task:
### Input:
Request the names for all active, configured inputs on the device.
Once we learn how many inputs are configured, this function is called
which will ask for the name of each active input.
### Response:
def _populate_inputs(self, total):
"""Request the names for all active, configured inputs on the device.
Once we learn how many inputs are configured, this function is called
which will ask for the name of each active input.
"""
total = total + 1
for input_number in range(1, total):
self.query('ISN'+str(input_number).zfill(2)) |
def service_create(auth=None, **kwargs):
'''
Create a service
CLI Example:
.. code-block:: bash
salt '*' keystoneng.service_create name=glance type=image
salt '*' keystoneng.service_create name=glance type=image description="Image"
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(keep_name=True, **kwargs)
return cloud.create_service(**kwargs) | Create a service
CLI Example:
.. code-block:: bash
salt '*' keystoneng.service_create name=glance type=image
salt '*' keystoneng.service_create name=glance type=image description="Image" | Below is the instruction that describes the task:
### Input:
Create a service
CLI Example:
.. code-block:: bash
salt '*' keystoneng.service_create name=glance type=image
salt '*' keystoneng.service_create name=glance type=image description="Image"
### Response:
def service_create(auth=None, **kwargs):
'''
Create a service
CLI Example:
.. code-block:: bash
salt '*' keystoneng.service_create name=glance type=image
salt '*' keystoneng.service_create name=glance type=image description="Image"
'''
cloud = get_operator_cloud(auth)
kwargs = _clean_kwargs(keep_name=True, **kwargs)
return cloud.create_service(**kwargs) |
def path_locations(home_dir):
"""Return the path locations for the environment (where libraries are,
where scripts go, etc)"""
# XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
# prefix arg is broken: http://bugs.python.org/issue3386
if sys.platform == 'win32':
# Windows has lots of problems with executables with spaces in
# the name; this function will remove them (using the ~1
# format):
mkdir(home_dir)
if ' ' in home_dir:
try:
import win32api
except ImportError:
print 'Error: the path "%s" has a space in it' % home_dir
print 'To handle these kinds of paths, the win32api module must be installed:'
print ' http://sourceforge.net/projects/pywin32/'
sys.exit(3)
home_dir = win32api.GetShortPathName(home_dir)
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'Scripts')
elif is_jython:
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'bin')
else:
lib_dir = join(home_dir, 'lib', py_version)
inc_dir = join(home_dir, 'include', py_version)
bin_dir = join(home_dir, 'bin')
return home_dir, lib_dir, inc_dir, bin_dir | Return the path locations for the environment (where libraries are,
where scripts go, etc) | Below is the instruction that describes the task:
### Input:
Return the path locations for the environment (where libraries are,
where scripts go, etc)
### Response:
def path_locations(home_dir):
"""Return the path locations for the environment (where libraries are,
where scripts go, etc)"""
# XXX: We'd use distutils.sysconfig.get_python_inc/lib but its
# prefix arg is broken: http://bugs.python.org/issue3386
if sys.platform == 'win32':
# Windows has lots of problems with executables with spaces in
# the name; this function will remove them (using the ~1
# format):
mkdir(home_dir)
if ' ' in home_dir:
try:
import win32api
except ImportError:
print 'Error: the path "%s" has a space in it' % home_dir
print 'To handle these kinds of paths, the win32api module must be installed:'
print ' http://sourceforge.net/projects/pywin32/'
sys.exit(3)
home_dir = win32api.GetShortPathName(home_dir)
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'Scripts')
elif is_jython:
lib_dir = join(home_dir, 'Lib')
inc_dir = join(home_dir, 'Include')
bin_dir = join(home_dir, 'bin')
else:
lib_dir = join(home_dir, 'lib', py_version)
inc_dir = join(home_dir, 'include', py_version)
bin_dir = join(home_dir, 'bin')
return home_dir, lib_dir, inc_dir, bin_dir |
def extend_expiration_date(self, days=KEY_EXPIRATION_DELTA):
"""
Extend expiration date a number of given years
"""
delta = timedelta_days(days)
self.expiration_date = self.expiration_date + delta
self.save() | Extend expiration date a number of given years | Below is the instruction that describes the task:
### Input:
Extend expiration date a number of given years
### Response:
def extend_expiration_date(self, days=KEY_EXPIRATION_DELTA):
"""
Extend expiration date a number of given years
"""
delta = timedelta_days(days)
self.expiration_date = self.expiration_date + delta
self.save() |
def from_dict(data, ctx):
"""
Instantiate a new AccountChangesState from a dict (generally from
loading a JSON response). The data used to instantiate the
AccountChangesState is a shallow copy of the dict passed in, with any
complex child types instantiated appropriately.
"""
data = data.copy()
if data.get('unrealizedPL') is not None:
data['unrealizedPL'] = ctx.convert_decimal_number(
data.get('unrealizedPL')
)
if data.get('NAV') is not None:
data['NAV'] = ctx.convert_decimal_number(
data.get('NAV')
)
if data.get('marginUsed') is not None:
data['marginUsed'] = ctx.convert_decimal_number(
data.get('marginUsed')
)
if data.get('marginAvailable') is not None:
data['marginAvailable'] = ctx.convert_decimal_number(
data.get('marginAvailable')
)
if data.get('positionValue') is not None:
data['positionValue'] = ctx.convert_decimal_number(
data.get('positionValue')
)
if data.get('marginCloseoutUnrealizedPL') is not None:
data['marginCloseoutUnrealizedPL'] = ctx.convert_decimal_number(
data.get('marginCloseoutUnrealizedPL')
)
if data.get('marginCloseoutNAV') is not None:
data['marginCloseoutNAV'] = ctx.convert_decimal_number(
data.get('marginCloseoutNAV')
)
if data.get('marginCloseoutMarginUsed') is not None:
data['marginCloseoutMarginUsed'] = ctx.convert_decimal_number(
data.get('marginCloseoutMarginUsed')
)
if data.get('marginCloseoutPercent') is not None:
data['marginCloseoutPercent'] = ctx.convert_decimal_number(
data.get('marginCloseoutPercent')
)
if data.get('marginCloseoutPositionValue') is not None:
data['marginCloseoutPositionValue'] = ctx.convert_decimal_number(
data.get('marginCloseoutPositionValue')
)
if data.get('withdrawalLimit') is not None:
data['withdrawalLimit'] = ctx.convert_decimal_number(
data.get('withdrawalLimit')
)
if data.get('marginCallMarginUsed') is not None:
data['marginCallMarginUsed'] = ctx.convert_decimal_number(
data.get('marginCallMarginUsed')
)
if data.get('marginCallPercent') is not None:
data['marginCallPercent'] = ctx.convert_decimal_number(
data.get('marginCallPercent')
)
if data.get('orders') is not None:
data['orders'] = [
ctx.order.DynamicOrderState.from_dict(d, ctx)
for d in data.get('orders')
]
if data.get('trades') is not None:
data['trades'] = [
ctx.trade.CalculatedTradeState.from_dict(d, ctx)
for d in data.get('trades')
]
if data.get('positions') is not None:
data['positions'] = [
ctx.position.CalculatedPositionState.from_dict(d, ctx)
for d in data.get('positions')
]
return AccountChangesState(**data) | Instantiate a new AccountChangesState from a dict (generally from
loading a JSON response). The data used to instantiate the
AccountChangesState is a shallow copy of the dict passed in, with any
complex child types instantiated appropriately. | Below is the instruction that describes the task:
### Input:
Instantiate a new AccountChangesState from a dict (generally from
loading a JSON response). The data used to instantiate the
AccountChangesState is a shallow copy of the dict passed in, with any
complex child types instantiated appropriately.
### Response:
def from_dict(data, ctx):
"""
Instantiate a new AccountChangesState from a dict (generally from
loading a JSON response). The data used to instantiate the
AccountChangesState is a shallow copy of the dict passed in, with any
complex child types instantiated appropriately.
"""
data = data.copy()
if data.get('unrealizedPL') is not None:
data['unrealizedPL'] = ctx.convert_decimal_number(
data.get('unrealizedPL')
)
if data.get('NAV') is not None:
data['NAV'] = ctx.convert_decimal_number(
data.get('NAV')
)
if data.get('marginUsed') is not None:
data['marginUsed'] = ctx.convert_decimal_number(
data.get('marginUsed')
)
if data.get('marginAvailable') is not None:
data['marginAvailable'] = ctx.convert_decimal_number(
data.get('marginAvailable')
)
if data.get('positionValue') is not None:
data['positionValue'] = ctx.convert_decimal_number(
data.get('positionValue')
)
if data.get('marginCloseoutUnrealizedPL') is not None:
data['marginCloseoutUnrealizedPL'] = ctx.convert_decimal_number(
data.get('marginCloseoutUnrealizedPL')
)
if data.get('marginCloseoutNAV') is not None:
data['marginCloseoutNAV'] = ctx.convert_decimal_number(
data.get('marginCloseoutNAV')
)
if data.get('marginCloseoutMarginUsed') is not None:
data['marginCloseoutMarginUsed'] = ctx.convert_decimal_number(
data.get('marginCloseoutMarginUsed')
)
if data.get('marginCloseoutPercent') is not None:
data['marginCloseoutPercent'] = ctx.convert_decimal_number(
data.get('marginCloseoutPercent')
)
if data.get('marginCloseoutPositionValue') is not None:
data['marginCloseoutPositionValue'] = ctx.convert_decimal_number(
data.get('marginCloseoutPositionValue')
)
if data.get('withdrawalLimit') is not None:
data['withdrawalLimit'] = ctx.convert_decimal_number(
data.get('withdrawalLimit')
)
if data.get('marginCallMarginUsed') is not None:
data['marginCallMarginUsed'] = ctx.convert_decimal_number(
data.get('marginCallMarginUsed')
)
if data.get('marginCallPercent') is not None:
data['marginCallPercent'] = ctx.convert_decimal_number(
data.get('marginCallPercent')
)
if data.get('orders') is not None:
data['orders'] = [
ctx.order.DynamicOrderState.from_dict(d, ctx)
for d in data.get('orders')
]
if data.get('trades') is not None:
data['trades'] = [
ctx.trade.CalculatedTradeState.from_dict(d, ctx)
for d in data.get('trades')
]
if data.get('positions') is not None:
data['positions'] = [
ctx.position.CalculatedPositionState.from_dict(d, ctx)
for d in data.get('positions')
]
return AccountChangesState(**data) |
def _run_web_container(self, port, command, address, log_syslog=False,
datapusher=True, interactive=False):
"""
Start web container on port with command
"""
if is_boot2docker():
ro = {}
volumes_from = self._get_container_name('venv')
else:
ro = {self.datadir + '/venv': '/usr/lib/ckan'}
volumes_from = None
links = {
self._get_container_name('solr'): 'solr',
self._get_container_name('postgres'): 'db'
}
links.update({self._get_container_name(container): container
for container in self.extra_containers})
if datapusher:
if 'datapusher' not in self.containers_running():
raise DatacatsError(container_logs(self._get_container_name('datapusher'), "all",
False, False))
links[self._get_container_name('datapusher')] = 'datapusher'
ro = dict({
self.target: '/project/',
scripts.get_script_path('web.sh'): '/scripts/web.sh',
scripts.get_script_path('adjust_devini.py'): '/scripts/adjust_devini.py'},
**ro)
rw = {
self.sitedir + '/files': '/var/www/storage',
self.sitedir + '/run/development.ini': '/project/development.ini'
}
try:
if not interactive:
run_container(
name=self._get_container_name('web'),
image='datacats/web',
rw=rw,
ro=ro,
links=links,
volumes_from=volumes_from,
command=command,
port_bindings={
5000: port if is_boot2docker() else (address, port)},
log_syslog=log_syslog
)
else:
# FIXME: share more code with interactive_shell
if is_boot2docker():
switches = ['--volumes-from',
self._get_container_name('pgdata'), '--volumes-from',
self._get_container_name('venv')]
else:
switches = []
switches += ['--volume={}:{}:ro'.format(vol, ro[vol]) for vol in ro]
switches += ['--volume={}:{}'.format(vol, rw[vol]) for vol in rw]
links = ['--link={}:{}'.format(link, links[link]) for link in links]
args = ['docker', 'run', '-it', '--name', self._get_container_name('web'),
'-p', '{}:5000'.format(port) if is_boot2docker()
else '{}:{}:5000'.format(address, port)] + \
switches + links + ['datacats/web', ] + command
subprocess.call(args)
except APIError as e:
if '409' in str(e):
raise DatacatsError('Web container already running. '
'Please stop_web before running.')
else:
                raise | Start web container on port with command | Below is the instruction that describes the task:
### Input:
Start web container on port with command
### Response:
def _run_web_container(self, port, command, address, log_syslog=False,
datapusher=True, interactive=False):
"""
Start web container on port with command
"""
if is_boot2docker():
ro = {}
volumes_from = self._get_container_name('venv')
else:
ro = {self.datadir + '/venv': '/usr/lib/ckan'}
volumes_from = None
links = {
self._get_container_name('solr'): 'solr',
self._get_container_name('postgres'): 'db'
}
links.update({self._get_container_name(container): container
for container in self.extra_containers})
if datapusher:
if 'datapusher' not in self.containers_running():
raise DatacatsError(container_logs(self._get_container_name('datapusher'), "all",
False, False))
links[self._get_container_name('datapusher')] = 'datapusher'
ro = dict({
self.target: '/project/',
scripts.get_script_path('web.sh'): '/scripts/web.sh',
scripts.get_script_path('adjust_devini.py'): '/scripts/adjust_devini.py'},
**ro)
rw = {
self.sitedir + '/files': '/var/www/storage',
self.sitedir + '/run/development.ini': '/project/development.ini'
}
try:
if not interactive:
run_container(
name=self._get_container_name('web'),
image='datacats/web',
rw=rw,
ro=ro,
links=links,
volumes_from=volumes_from,
command=command,
port_bindings={
5000: port if is_boot2docker() else (address, port)},
log_syslog=log_syslog
)
else:
# FIXME: share more code with interactive_shell
if is_boot2docker():
switches = ['--volumes-from',
self._get_container_name('pgdata'), '--volumes-from',
self._get_container_name('venv')]
else:
switches = []
switches += ['--volume={}:{}:ro'.format(vol, ro[vol]) for vol in ro]
switches += ['--volume={}:{}'.format(vol, rw[vol]) for vol in rw]
links = ['--link={}:{}'.format(link, links[link]) for link in links]
args = ['docker', 'run', '-it', '--name', self._get_container_name('web'),
'-p', '{}:5000'.format(port) if is_boot2docker()
else '{}:{}:5000'.format(address, port)] + \
switches + links + ['datacats/web', ] + command
subprocess.call(args)
except APIError as e:
if '409' in str(e):
raise DatacatsError('Web container already running. '
'Please stop_web before running.')
else:
raise |
def ensure_dir(self, *path_parts):
"""Ensures a subdirectory of the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory.
"""
path = self.getpath(*path_parts)
ensure_directory(path)
return path | Ensures a subdirectory of the working directory.
Parameters
----------
path_parts : iterable[str]
            The parts of the path after the working directory. | Below is the instruction that describes the task:
### Input:
Ensures a subdirectory of the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory.
### Response:
def ensure_dir(self, *path_parts):
"""Ensures a subdirectory of the working directory.
Parameters
----------
path_parts : iterable[str]
The parts of the path after the working directory.
"""
path = self.getpath(*path_parts)
ensure_directory(path)
return path |
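The ensure_dir record above delegates to getpath and ensure_directory from its surrounding class; as a hedged, stand-alone sketch of the same idea using only the standard library (the names below are illustrative, not taken from that record):

import os

def ensure_dir_standalone(working_dir, *path_parts):
    # Join the parts under the working directory and create the directory if it is missing.
    path = os.path.join(working_dir, *path_parts)
    os.makedirs(path, exist_ok=True)
    return path

print(ensure_dir_standalone("/tmp/demo", "cache", "v1"))  # -> /tmp/demo/cache/v1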
def position_target_global_int_encode(self, time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate):
'''
Reports the current commanded vehicle position, velocity, and
acceleration as specified by the autopilot. This
should match the commands sent in
SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being
controlled this way.
time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t)
coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t)
lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t)
alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float)
'''
return MAVLink_position_target_global_int_message(time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate) | Reports the current commanded vehicle position, velocity, and
acceleration as specified by the autopilot. This
should match the commands sent in
SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being
controlled this way.
time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t)
coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t)
lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t)
alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
                yaw_rate : yaw rate setpoint in rad/s (float) | Below is the instruction that describes the task:
### Input:
Reports the current commanded vehicle position, velocity, and
acceleration as specified by the autopilot. This
should match the commands sent in
SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being
controlled this way.
time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t)
coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t)
lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t)
alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float)
### Response:
def position_target_global_int_encode(self, time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate):
'''
Reports the current commanded vehicle position, velocity, and
acceleration as specified by the autopilot. This
should match the commands sent in
SET_POSITION_TARGET_GLOBAL_INT if the vehicle is being
controlled this way.
time_boot_ms : Timestamp in milliseconds since system boot. The rationale for the timestamp in the setpoint is to allow the system to compensate for the transport delay of the setpoint. This allows the system to compensate processing latency. (uint32_t)
coordinate_frame : Valid options are: MAV_FRAME_GLOBAL_INT = 5, MAV_FRAME_GLOBAL_RELATIVE_ALT_INT = 6, MAV_FRAME_GLOBAL_TERRAIN_ALT_INT = 11 (uint8_t)
type_mask : Bitmask to indicate which dimensions should be ignored by the vehicle: a value of 0b0000000000000000 or 0b0000001000000000 indicates that none of the setpoint dimensions should be ignored. If bit 10 is set the floats afx afy afz should be interpreted as force instead of acceleration. Mapping: bit 1: x, bit 2: y, bit 3: z, bit 4: vx, bit 5: vy, bit 6: vz, bit 7: ax, bit 8: ay, bit 9: az, bit 10: is force setpoint, bit 11: yaw, bit 12: yaw rate (uint16_t)
lat_int : X Position in WGS84 frame in 1e7 * meters (int32_t)
lon_int : Y Position in WGS84 frame in 1e7 * meters (int32_t)
alt : Altitude in meters in AMSL altitude, not WGS84 if absolute or relative, above terrain if GLOBAL_TERRAIN_ALT_INT (float)
vx : X velocity in NED frame in meter / s (float)
vy : Y velocity in NED frame in meter / s (float)
vz : Z velocity in NED frame in meter / s (float)
afx : X acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afy : Y acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
afz : Z acceleration or force (if bit 10 of type_mask is set) in NED frame in meter / s^2 or N (float)
yaw : yaw setpoint in rad (float)
yaw_rate : yaw rate setpoint in rad/s (float)
'''
return MAVLink_position_target_global_int_message(time_boot_ms, coordinate_frame, type_mask, lat_int, lon_int, alt, vx, vy, vz, afx, afy, afz, yaw, yaw_rate) |
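The encode helper in the record above only constructs the message object; a hedged usage sketch, assuming `mav` is a pymavlink-generated MAVLink object (e.g. mavutil.mavlink_connection(...).mav) and using placeholder field values:

# Illustrative only: `mav` and all field values are assumptions, not part of the record above.
msg = mav.position_target_global_int_encode(
    time_boot_ms=0,
    coordinate_frame=5,              # MAV_FRAME_GLOBAL_INT
    type_mask=0b0000111111111000,    # example mask; see the bit mapping in the docstring above
    lat_int=int(47.3977419e7),       # WGS84 degrees * 1e7
    lon_int=int(8.5455938e7),
    alt=488.0,
    vx=0.0, vy=0.0, vz=0.0,
    afx=0.0, afy=0.0, afz=0.0,
    yaw=0.0, yaw_rate=0.0,
)
packed = msg.pack(mav)               # serialize to wire bytes; a matching *_send helper also exists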
def inception_v3(pretrained=False, **kwargs):
r"""Inception v3 model architecture from
`"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
.. note::
**Important**: In contrast to the other models the inception_v3 expects tensors with a size of
N x 3 x 299 x 299, so ensure your images are sized accordingly.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
aux_logits (bool): If True, add an auxiliary branch that can improve training.
Default: *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
if 'aux_logits' in kwargs:
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
else:
original_aux_logits = True
model = Inception3(**kwargs)
model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google']))
if not original_aux_logits:
model.aux_logits = False
del model.AuxLogits
return model
return Inception3(**kwargs) | r"""Inception v3 model architecture from
`"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
.. note::
**Important**: In contrast to the other models the inception_v3 expects tensors with a size of
N x 3 x 299 x 299, so ensure your images are sized accordingly.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
aux_logits (bool): If True, add an auxiliary branch that can improve training.
Default: *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
            was trained on ImageNet. Default: *False* | Below is the instruction that describes the task:
### Input:
r"""Inception v3 model architecture from
`"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
.. note::
**Important**: In contrast to the other models the inception_v3 expects tensors with a size of
N x 3 x 299 x 299, so ensure your images are sized accordingly.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
aux_logits (bool): If True, add an auxiliary branch that can improve training.
Default: *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
### Response:
def inception_v3(pretrained=False, **kwargs):
r"""Inception v3 model architecture from
`"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_.
.. note::
**Important**: In contrast to the other models the inception_v3 expects tensors with a size of
N x 3 x 299 x 299, so ensure your images are sized accordingly.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
aux_logits (bool): If True, add an auxiliary branch that can improve training.
Default: *True*
transform_input (bool): If True, preprocesses the input according to the method with which it
was trained on ImageNet. Default: *False*
"""
if pretrained:
if 'transform_input' not in kwargs:
kwargs['transform_input'] = True
if 'aux_logits' in kwargs:
original_aux_logits = kwargs['aux_logits']
kwargs['aux_logits'] = True
else:
original_aux_logits = True
model = Inception3(**kwargs)
model.load_state_dict(model_zoo.load_url(model_urls['inception_v3_google']))
if not original_aux_logits:
model.aux_logits = False
del model.AuxLogits
return model
return Inception3(**kwargs) |
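A hedged usage sketch for the factory above, assuming the usual torchvision packaging (torchvision.models.inception_v3) and an older release where the pretrained flag is still accepted; note the 299x299 input size called out in the docstring:

import torch
from torchvision.models import inception_v3

model = inception_v3(pretrained=True)   # downloads the ImageNet weights on first use
model.eval()                            # inference mode: aux branch and dropout behave deterministically

# Inception v3 expects N x 3 x 299 x 299 inputs, unlike most other torchvision models.
dummy = torch.randn(1, 3, 299, 299)
with torch.no_grad():
    logits = model(dummy)               # shape (1, 1000): ImageNet class scores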
def get_load_balancer(self, id):
"""
Returns a Load Balancer object by its ID.
Args:
id (str): Load Balancer ID
"""
return LoadBalancer.get_object(api_token=self.token, id=id) | Returns a Load Balancer object by its ID.
Args:
            id (str): Load Balancer ID | Below is the instruction that describes the task:
### Input:
Returns a Load Balancer object by its ID.
Args:
id (str): Load Balancer ID
### Response:
def get_load_balancer(self, id):
"""
Returns a Load Balancer object by its ID.
Args:
id (str): Load Balancer ID
"""
return LoadBalancer.get_object(api_token=self.token, id=id) |
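A minimal usage sketch, assuming this method sits on the python-digitalocean Manager class; the token and load-balancer ID below are placeholders:

import digitalocean

manager = digitalocean.Manager(token="YOUR_DO_TOKEN")                       # placeholder token
lb = manager.get_load_balancer(id="4de7ac8b-495b-4884-9a69-1050c6793cd6")   # placeholder ID
print(lb.name, lb.ip)                                                       # attributes populated from the API response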
def enable_audit_device(self, device_type, description=None, options=None, path=None):
"""Enable a new audit device at the supplied path.
The path can be a single word name or a more complex, nested path.
Supported methods:
PUT: /sys/audit/{path}. Produces: 204 (empty body)
:param device_type: Specifies the type of the audit device.
:type device_type: str | unicode
:param description: Human-friendly description of the audit device.
:type description: str | unicode
:param options: Configuration options to pass to the audit device itself. This is
dependent on the audit device type.
:type options: str | unicode
:param path: Specifies the path in which to enable the audit device. This is part of
the request URL.
:type path: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
if path is None:
path = device_type
params = {
'type': device_type,
'description': description,
'options': options,
}
api_path = '/v1/sys/audit/{path}'.format(path=path)
return self._adapter.post(
url=api_path,
json=params
) | Enable a new audit device at the supplied path.
The path can be a single word name or a more complex, nested path.
Supported methods:
PUT: /sys/audit/{path}. Produces: 204 (empty body)
:param device_type: Specifies the type of the audit device.
:type device_type: str | unicode
:param description: Human-friendly description of the audit device.
:type description: str | unicode
:param options: Configuration options to pass to the audit device itself. This is
dependent on the audit device type.
:type options: str | unicode
:param path: Specifies the path in which to enable the audit device. This is part of
the request URL.
:type path: str | unicode
:return: The response of the request.
        :rtype: requests.Response | Below is the instruction that describes the task:
### Input:
Enable a new audit device at the supplied path.
The path can be a single word name or a more complex, nested path.
Supported methods:
PUT: /sys/audit/{path}. Produces: 204 (empty body)
:param device_type: Specifies the type of the audit device.
:type device_type: str | unicode
:param description: Human-friendly description of the audit device.
:type description: str | unicode
:param options: Configuration options to pass to the audit device itself. This is
dependent on the audit device type.
:type options: str | unicode
:param path: Specifies the path in which to enable the audit device. This is part of
the request URL.
:type path: str | unicode
:return: The response of the request.
:rtype: requests.Response
### Response:
def enable_audit_device(self, device_type, description=None, options=None, path=None):
"""Enable a new audit device at the supplied path.
The path can be a single word name or a more complex, nested path.
Supported methods:
PUT: /sys/audit/{path}. Produces: 204 (empty body)
:param device_type: Specifies the type of the audit device.
:type device_type: str | unicode
:param description: Human-friendly description of the audit device.
:type description: str | unicode
:param options: Configuration options to pass to the audit device itself. This is
dependent on the audit device type.
:type options: str | unicode
:param path: Specifies the path in which to enable the audit device. This is part of
the request URL.
:type path: str | unicode
:return: The response of the request.
:rtype: requests.Response
"""
if path is None:
path = device_type
params = {
'type': device_type,
'description': description,
'options': options,
}
api_path = '/v1/sys/audit/{path}'.format(path=path)
return self._adapter.post(
url=api_path,
json=params
) |
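A hedged usage sketch, assuming the hvac client layout in which this method is reachable as client.sys.enable_audit_device; the Vault address, token and file path are placeholders:

import hvac

client = hvac.Client(url="http://127.0.0.1:8200", token="VAULT_TOKEN")  # placeholders

# Enable a file audit device at the default path ("file"); options are passed
# straight through to the audit backend.
client.sys.enable_audit_device(
    device_type="file",
    description="log all requests to a local file",
    options={"file_path": "/var/log/vault_audit.log"},
)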
def sort_ranges(inranges):
"""from an array of ranges, make a sorted array of ranges
:param inranges: List of GenomicRange data
:type inranges: GenomicRange[]
:returns: a new sorted GenomicRange list
:rtype: GenomicRange[]
"""
return sorted(inranges,key=lambda x: (x.chr,x.start,x.end,x.direction)) | from an array of ranges, make a sorted array of ranges
:param inranges: List of GenomicRange data
:type inranges: GenomicRange[]
:returns: a new sorted GenomicRange list
  :rtype: GenomicRange[] | Below is the instruction that describes the task:
### Input:
from an array of ranges, make a sorted array of ranges
:param inranges: List of GenomicRange data
:type inranges: GenomicRange[]
:returns: a new sorted GenomicRange list
:rtype: GenomicRange[]
### Response:
def sort_ranges(inranges):
"""from an array of ranges, make a sorted array of ranges
:param inranges: List of GenomicRange data
:type inranges: GenomicRange[]
:returns: a new sorted GenomicRange list
:rtype: GenomicRange[]
"""
return sorted(inranges,key=lambda x: (x.chr,x.start,x.end,x.direction)) |
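Because GenomicRange is defined elsewhere, the sketch below uses a stand-in namedtuple (purely illustrative) to show the (chr, start, end, direction) sort key of sort_ranges in action:

from collections import namedtuple

# Stand-in for the real GenomicRange class; only the attributes read by the sort key are modelled.
GR = namedtuple("GR", ["chr", "start", "end", "direction"])

ranges = [
    GR("chr2", 500, 900, "+"),
    GR("chr1", 700, 800, "-"),
    GR("chr1", 100, 400, "+"),
]
for r in sort_ranges(ranges):
    print(r.chr, r.start, r.end, r.direction)
# chr1 100 400 +   then   chr1 700 800 -   then   chr2 500 900 +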
def main(cls):
"""Main entry point of Laniakea.
"""
args = cls.parse_args()
if args.focus:
Focus.init()
else:
Focus.disable()
logging.basicConfig(format='[Laniakea] %(asctime)s %(levelname)s: %(message)s',
level=args.verbosity * 10,
datefmt='%Y-%m-%d %H:%M:%S')
# Laniakea base configuration
logger.info('Loading Laniakea configuration from %s', Focus.data(args.settings.name))
try:
settings = json.loads(args.settings.read())
except ValueError as msg:
logger.error('Unable to parse %s: %s', args.settings.name, msg)
return 1
# UserData
userdata = ''
if args.userdata:
logger.info('Reading user data script content from %s', Focus.info(args.userdata.name))
try:
userdata = UserData.handle_import_tags(args.userdata.read(),
os.path.dirname(args.userdata.name))
except UserDataException as msg:
logging.error(msg)
return 1
if args.list_userdata_macros:
UserData.list_tags(userdata)
return 0
if args.userdata_macros:
args.userdata_macros = UserData.convert_pair_to_dict(args.userdata_macros or '')
userdata = UserData.handle_tags(userdata, args.userdata_macros)
if args.print_userdata:
logger.info('Combined UserData script:\n%s', userdata)
return 0
if args.provider:
provider = getattr(globals()[args.provider], args.provider.title() + 'CommandLine')
provider().main(args, settings, userdata)
        return 0 | Main entry point of Laniakea. | Below is the instruction that describes the task:
### Input:
Main entry point of Laniakea.
### Response:
def main(cls):
"""Main entry point of Laniakea.
"""
args = cls.parse_args()
if args.focus:
Focus.init()
else:
Focus.disable()
logging.basicConfig(format='[Laniakea] %(asctime)s %(levelname)s: %(message)s',
level=args.verbosity * 10,
datefmt='%Y-%m-%d %H:%M:%S')
# Laniakea base configuration
logger.info('Loading Laniakea configuration from %s', Focus.data(args.settings.name))
try:
settings = json.loads(args.settings.read())
except ValueError as msg:
logger.error('Unable to parse %s: %s', args.settings.name, msg)
return 1
# UserData
userdata = ''
if args.userdata:
logger.info('Reading user data script content from %s', Focus.info(args.userdata.name))
try:
userdata = UserData.handle_import_tags(args.userdata.read(),
os.path.dirname(args.userdata.name))
except UserDataException as msg:
logging.error(msg)
return 1
if args.list_userdata_macros:
UserData.list_tags(userdata)
return 0
if args.userdata_macros:
args.userdata_macros = UserData.convert_pair_to_dict(args.userdata_macros or '')
userdata = UserData.handle_tags(userdata, args.userdata_macros)
if args.print_userdata:
logger.info('Combined UserData script:\n%s', userdata)
return 0
if args.provider:
provider = getattr(globals()[args.provider], args.provider.title() + 'CommandLine')
provider().main(args, settings, userdata)
return 0 |
def process(self, context, internal_response):
"""
Manage consent and attribute filtering
:type context: satosa.context.Context
:type internal_response: satosa.internal.InternalData
:rtype: satosa.response.Response
:param context: response context
:param internal_response: the response
:return: response
"""
consent_state = context.state[STATE_KEY]
internal_response.attributes = self._filter_attributes(internal_response.attributes, consent_state["filter"])
id_hash = self._get_consent_id(internal_response.requester, internal_response.subject_id,
internal_response.attributes)
try:
# Check if consent is already given
consent_attributes = self._verify_consent(id_hash)
except requests.exceptions.ConnectionError as e:
satosa_logging(logger, logging.ERROR,
"Consent service is not reachable, no consent given.", context.state)
# Send an internal_response without any attributes
internal_response.attributes = {}
return self._end_consent(context, internal_response)
# Previous consent was given
if consent_attributes is not None:
satosa_logging(logger, logging.DEBUG, "Previous consent was given", context.state)
internal_response.attributes = self._filter_attributes(internal_response.attributes, consent_attributes)
return self._end_consent(context, internal_response)
# No previous consent, request consent by user
return self._approve_new_consent(context, internal_response, id_hash) | Manage consent and attribute filtering
:type context: satosa.context.Context
:type internal_response: satosa.internal.InternalData
:rtype: satosa.response.Response
:param context: response context
:param internal_response: the response
        :return: response | Below is the instruction that describes the task:
### Input:
Manage consent and attribute filtering
:type context: satosa.context.Context
:type internal_response: satosa.internal.InternalData
:rtype: satosa.response.Response
:param context: response context
:param internal_response: the response
:return: response
### Response:
def process(self, context, internal_response):
"""
Manage consent and attribute filtering
:type context: satosa.context.Context
:type internal_response: satosa.internal.InternalData
:rtype: satosa.response.Response
:param context: response context
:param internal_response: the response
:return: response
"""
consent_state = context.state[STATE_KEY]
internal_response.attributes = self._filter_attributes(internal_response.attributes, consent_state["filter"])
id_hash = self._get_consent_id(internal_response.requester, internal_response.subject_id,
internal_response.attributes)
try:
# Check if consent is already given
consent_attributes = self._verify_consent(id_hash)
except requests.exceptions.ConnectionError as e:
satosa_logging(logger, logging.ERROR,
"Consent service is not reachable, no consent given.", context.state)
# Send an internal_response without any attributes
internal_response.attributes = {}
return self._end_consent(context, internal_response)
# Previous consent was given
if consent_attributes is not None:
satosa_logging(logger, logging.DEBUG, "Previous consent was given", context.state)
internal_response.attributes = self._filter_attributes(internal_response.attributes, consent_attributes)
return self._end_consent(context, internal_response)
# No previous consent, request consent by user
return self._approve_new_consent(context, internal_response, id_hash) |
def gtr(self, value):
"""
Set a new GTR object
Parameters
-----------
value : GTR
the new GTR object
"""
if not (isinstance(value, GTR) or isinstance(value, GTR_site_specific)):
raise TypeError(" GTR instance expected")
self._gtr = value | Set a new GTR object
Parameters
-----------
value : GTR
         the new GTR object | Below is the instruction that describes the task:
### Input:
Set a new GTR object
Parameters
-----------
value : GTR
the new GTR object
### Response:
def gtr(self, value):
"""
Set a new GTR object
Parameters
-----------
value : GTR
the new GTR object
"""
if not (isinstance(value, GTR) or isinstance(value, GTR_site_specific)):
raise TypeError(" GTR instance expected")
self._gtr = value |
def send_eth_to(self, private_key: str, to: str, gas_price: int, value: int, gas: int=22000,
retry: bool = False, block_identifier=None, max_eth_to_send: int = 0) -> bytes:
"""
Send ether using configured account
:param to: to
:param gas_price: gas_price
:param value: value(wei)
:param gas: gas, defaults to 22000
:param retry: Retry if a problem is found
:param block_identifier: None default, 'pending' not confirmed txs
:return: tx_hash
"""
assert check_checksum(to)
if max_eth_to_send and value > self.w3.toWei(max_eth_to_send, 'ether'):
raise EtherLimitExceeded('%d is bigger than %f' % (value, max_eth_to_send))
tx = {
'to': to,
'value': value,
'gas': gas,
'gasPrice': gas_price,
}
return self.send_unsigned_transaction(tx, private_key=private_key, retry=retry,
block_identifier=block_identifier) | Send ether using configured account
:param to: to
:param gas_price: gas_price
:param value: value(wei)
:param gas: gas, defaults to 22000
:param retry: Retry if a problem is found
:param block_identifier: None default, 'pending' not confirmed txs
        :return: tx_hash | Below is the instruction that describes the task:
### Input:
Send ether using configured account
:param to: to
:param gas_price: gas_price
:param value: value(wei)
:param gas: gas, defaults to 22000
:param retry: Retry if a problem is found
:param block_identifier: None default, 'pending' not confirmed txs
:return: tx_hash
### Response:
def send_eth_to(self, private_key: str, to: str, gas_price: int, value: int, gas: int=22000,
retry: bool = False, block_identifier=None, max_eth_to_send: int = 0) -> bytes:
"""
Send ether using configured account
:param to: to
:param gas_price: gas_price
:param value: value(wei)
:param gas: gas, defaults to 22000
:param retry: Retry if a problem is found
:param block_identifier: None default, 'pending' not confirmed txs
:return: tx_hash
"""
assert check_checksum(to)
if max_eth_to_send and value > self.w3.toWei(max_eth_to_send, 'ether'):
raise EtherLimitExceeded('%d is bigger than %f' % (value, max_eth_to_send))
tx = {
'to': to,
'value': value,
'gas': gas,
'gasPrice': gas_price,
}
return self.send_unsigned_transaction(tx, private_key=private_key, retry=retry,
block_identifier=block_identifier) |
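A hedged usage sketch, assuming send_eth_to belongs to an ethereum-client style wrapper (called ethereum_client here, which is an assumption) that also exposes a web3 instance as .w3; the key and addresses are placeholders:

# All values are placeholders; value is given in wei, max_eth_to_send in ether.
tx_hash = ethereum_client.send_eth_to(
    private_key="0x" + "11" * 32,
    to="0x68b3465833fb72A70ecDF485E0e4C7bD8665Fc45",
    gas_price=2_000_000_000,                          # 2 gwei
    value=ethereum_client.w3.toWei(0.05, "ether"),    # 0.05 ETH expressed in wei
    max_eth_to_send=1,                                # guard: refuse to send more than 1 ETH
)
print(tx_hash.hex())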
def in_unit_of(self, unit, as_quantity=False):
"""
Return the current value transformed to the new units
:param unit: either an astropy.Unit instance, or a string which can be converted to an astropy.Unit
instance, like "1 / (erg cm**2 s)"
:param as_quantity: if True, the method return an astropy.Quantity, if False just a floating point number.
Default is False
:return: either a floating point or a astropy.Quantity depending on the value of "as_quantity"
"""
new_unit = u.Unit(unit)
new_quantity = self.as_quantity.to(new_unit)
if as_quantity:
return new_quantity
else:
return new_quantity.value | Return the current value transformed to the new units
:param unit: either an astropy.Unit instance, or a string which can be converted to an astropy.Unit
instance, like "1 / (erg cm**2 s)"
:param as_quantity: if True, the method return an astropy.Quantity, if False just a floating point number.
Default is False
        :return: either a floating point or a astropy.Quantity depending on the value of "as_quantity" | Below is the instruction that describes the task:
### Input:
Return the current value transformed to the new units
:param unit: either an astropy.Unit instance, or a string which can be converted to an astropy.Unit
instance, like "1 / (erg cm**2 s)"
:param as_quantity: if True, the method return an astropy.Quantity, if False just a floating point number.
Default is False
:return: either a floating point or a astropy.Quantity depending on the value of "as_quantity"
### Response:
def in_unit_of(self, unit, as_quantity=False):
"""
Return the current value transformed to the new units
:param unit: either an astropy.Unit instance, or a string which can be converted to an astropy.Unit
instance, like "1 / (erg cm**2 s)"
:param as_quantity: if True, the method return an astropy.Quantity, if False just a floating point number.
Default is False
:return: either a floating point or a astropy.Quantity depending on the value of "as_quantity"
"""
new_unit = u.Unit(unit)
new_quantity = self.as_quantity.to(new_unit)
if as_quantity:
return new_quantity
else:
return new_quantity.value |
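The conversion underneath in_unit_of is plain astropy; a minimal stand-alone sketch of the same call chain (independent of the wrapping class) looks like this:

import astropy.units as u

# Equivalent of in_unit_of("J / (m2 s)") on a value already held as an astropy Quantity:
# parse the target unit string, then call .to() on the quantity.
flux = 3.2e-11 * u.Unit("erg / (cm2 s)")
converted = flux.to(u.Unit("J / (m2 s)"))
print(converted)        # Quantity in the new unit (what as_quantity=True returns)
print(converted.value)  # bare float (what as_quantity=False returns)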
def serializeTransform(transformObj):
"""
Reserializes the transform data with some cleanups.
"""
return ' '.join([command + '(' + ' '.join([scourUnitlessLength(number) for number in numbers]) + ')'
                     for command, numbers in transformObj]) | Reserializes the transform data with some cleanups. | Below is the instruction that describes the task:
### Input:
Reserializes the transform data with some cleanups.
### Response:
def serializeTransform(transformObj):
"""
Reserializes the transform data with some cleanups.
"""
return ' '.join([command + '(' + ' '.join([scourUnitlessLength(number) for number in numbers]) + ')'
for command, numbers in transformObj]) |
def department_update(self, department_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/departments#update-department-by-id"
api_path = "/api/v2/departments/{department_id}"
api_path = api_path.format(department_id=department_id)
        return self.call(api_path, method="PUT", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/chat/departments#update-department-by-id | Below is the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/chat/departments#update-department-by-id
### Response:
def department_update(self, department_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/chat/departments#update-department-by-id"
api_path = "/api/v2/departments/{department_id}"
api_path = api_path.format(department_id=department_id)
return self.call(api_path, method="PUT", data=data, **kwargs) |
def _mine_flush(self, load, skip_verify=False):
'''
Allow the minion to delete all of its own mine contents
'''
if not skip_verify and 'id' not in load:
return False
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
return self.cache.flush('minions/{0}'.format(load['id']), 'mine')
        return True | Allow the minion to delete all of its own mine contents | Below is the instruction that describes the task:
### Input:
Allow the minion to delete all of its own mine contents
### Response:
def _mine_flush(self, load, skip_verify=False):
'''
Allow the minion to delete all of its own mine contents
'''
if not skip_verify and 'id' not in load:
return False
if self.opts.get('minion_data_cache', False) or self.opts.get('enforce_mine_cache', False):
return self.cache.flush('minions/{0}'.format(load['id']), 'mine')
return True |
def validate(self, dct):
"""
Choose a schema for client request operation and validate
the operation field. If the schema is not found skips validation.
:param dct: an operation field from client request
:return: raises exception if invalid request
"""
if not isinstance(dct, dict):
            # TODO this check should be inside of the validator not here
self._raise_invalid_fields('', dct, 'wrong type')
txn_type = dct.get(TXN_TYPE)
if txn_type is None:
self._raise_missed_fields(TXN_TYPE)
if txn_type in self.operations:
# check only if the schema is defined
op = self.operations[txn_type]
op.validate(dct) | Choose a schema for client request operation and validate
the operation field. If the schema is not found skips validation.
:param dct: an operation field from client request
        :return: raises exception if invalid request | Below is the instruction that describes the task:
### Input:
Choose a schema for client request operation and validate
the operation field. If the schema is not found skips validation.
:param dct: an operation field from client request
:return: raises exception if invalid request
### Response:
def validate(self, dct):
"""
Choose a schema for client request operation and validate
the operation field. If the schema is not found skips validation.
:param dct: an operation field from client request
:return: raises exception if invalid request
"""
if not isinstance(dct, dict):
            # TODO this check should be inside of the validator not here
self._raise_invalid_fields('', dct, 'wrong type')
txn_type = dct.get(TXN_TYPE)
if txn_type is None:
self._raise_missed_fields(TXN_TYPE)
if txn_type in self.operations:
# check only if the schema is defined
op = self.operations[txn_type]
op.validate(dct) |
def outLineReceived(self, line):
"""
Handle data via stdout linewise. This is useful if you turned off
buffering.
In your subclass, override this if you want to handle the line as a
protocol line in addition to logging it. (You may upcall this function
safely.)
"""
log_debug('<<< {name} stdout >>> {line}',
name=self.name,
line=self.outFilter(line)) | Handle data via stdout linewise. This is useful if you turned off
buffering.
In your subclass, override this if you want to handle the line as a
protocol line in addition to logging it. (You may upcall this function
        safely.) | Below is the instruction that describes the task:
### Input:
Handle data via stdout linewise. This is useful if you turned off
buffering.
In your subclass, override this if you want to handle the line as a
protocol line in addition to logging it. (You may upcall this function
safely.)
### Response:
def outLineReceived(self, line):
"""
Handle data via stdout linewise. This is useful if you turned off
buffering.
In your subclass, override this if you want to handle the line as a
protocol line in addition to logging it. (You may upcall this function
safely.)
"""
log_debug('<<< {name} stdout >>> {line}',
name=self.name,
line=self.outFilter(line)) |
def validate_authentication(self, username, password, handler):
"""authenticate user with password
"""
user = authenticate(
**{self.username_field: username, 'password': password}
)
account = self.get_account(username)
if not (user and account):
            raise AuthenticationFailed("Authentication failed.") | authenticate user with password | Below is the instruction that describes the task:
### Input:
authenticate user with password
### Response:
def validate_authentication(self, username, password, handler):
"""authenticate user with password
"""
user = authenticate(
**{self.username_field: username, 'password': password}
)
account = self.get_account(username)
if not (user and account):
raise AuthenticationFailed("Authentication failed.") |
def get_patient_expression(job, patient_dict):
"""
Convenience function to get the expression from the patient dict
:param dict patient_dict: dict of patient info
:return: The gene and isoform expression
:rtype: toil.fileStore.FileID
"""
expression_archive = job.fileStore.readGlobalFile(patient_dict['expression_files'])
expression_archive = untargz(expression_archive, os.getcwd())
output_dict = {}
for filename in 'rsem.genes.results', 'rsem.isoforms.results':
output_dict[filename] = job.fileStore.writeGlobalFile(os.path.join(expression_archive,
filename))
return output_dict | Convenience function to get the expression from the patient dict
:param dict patient_dict: dict of patient info
:return: The gene and isoform expression
    :rtype: toil.fileStore.FileID | Below is the instruction that describes the task:
### Input:
Convenience function to get the expression from the patient dict
:param dict patient_dict: dict of patient info
:return: The gene and isoform expression
:rtype: toil.fileStore.FileID
### Response:
def get_patient_expression(job, patient_dict):
"""
Convenience function to get the expression from the patient dict
:param dict patient_dict: dict of patient info
:return: The gene and isoform expression
:rtype: toil.fileStore.FileID
"""
expression_archive = job.fileStore.readGlobalFile(patient_dict['expression_files'])
expression_archive = untargz(expression_archive, os.getcwd())
output_dict = {}
for filename in 'rsem.genes.results', 'rsem.isoforms.results':
output_dict[filename] = job.fileStore.writeGlobalFile(os.path.join(expression_archive,
filename))
return output_dict |
def run(self, cmd, timeout=None, key=None):
"""
Run a command on the phablet device using ssh
:param cmd:
a list of strings to execute as a command
:param timeout:
a timeout (in seconds) for device discovery
:param key:
a path to a public ssh key to use for connection
:returns:
the exit code of the command
This method will not allow you to capture stdout/stderr from the target
process. If you wish to do that please consider switching to one of
subprocess functions along with. :meth:`cmdline()`.
"""
if not isinstance(cmd, list):
raise TypeError("cmd needs to be a list")
if not all(isinstance(item, str) for item in cmd):
raise TypeError("cmd needs to be a list of strings")
self.connect(timeout, key)
return self._run_ssh(cmd) | Run a command on the phablet device using ssh
:param cmd:
a list of strings to execute as a command
:param timeout:
a timeout (in seconds) for device discovery
:param key:
a path to a public ssh key to use for connection
:returns:
the exit code of the command
This method will not allow you to capture stdout/stderr from the target
process. If you wish to do that please consider switching to one of
        subprocess functions along with. :meth:`cmdline()`. | Below is the instruction that describes the task:
### Input:
Run a command on the phablet device using ssh
:param cmd:
a list of strings to execute as a command
:param timeout:
a timeout (in seconds) for device discovery
:param key:
a path to a public ssh key to use for connection
:returns:
the exit code of the command
This method will not allow you to capture stdout/stderr from the target
process. If you wish to do that please consider switching to one of
subprocess functions along with. :meth:`cmdline()`.
### Response:
def run(self, cmd, timeout=None, key=None):
"""
Run a command on the phablet device using ssh
:param cmd:
a list of strings to execute as a command
:param timeout:
a timeout (in seconds) for device discovery
:param key:
a path to a public ssh key to use for connection
:returns:
the exit code of the command
This method will not allow you to capture stdout/stderr from the target
process. If you wish to do that please consider switching to one of
subprocess functions along with. :meth:`cmdline()`.
"""
if not isinstance(cmd, list):
raise TypeError("cmd needs to be a list")
if not all(isinstance(item, str) for item in cmd):
raise TypeError("cmd needs to be a list of strings")
self.connect(timeout, key)
return self._run_ssh(cmd) |
def result(self, psd_state):
"""Return freqs and averaged PSD for given center frequency"""
freq_array = numpy.fft.fftshift(psd_state['freq_array'])
pwr_array = numpy.fft.fftshift(psd_state['pwr_array'])
if self._crop_factor:
crop_bins_half = round((self._crop_factor * self._bins) / 2)
freq_array = freq_array[crop_bins_half:-crop_bins_half]
pwr_array = pwr_array[crop_bins_half:-crop_bins_half]
if psd_state['repeats'] > 1:
pwr_array = pwr_array / psd_state['repeats']
if self._log_scale:
pwr_array = 10 * numpy.log10(pwr_array)
        return (freq_array, pwr_array) | Return freqs and averaged PSD for given center frequency | Below is the instruction that describes the task:
### Input:
Return freqs and averaged PSD for given center frequency
### Response:
def result(self, psd_state):
"""Return freqs and averaged PSD for given center frequency"""
freq_array = numpy.fft.fftshift(psd_state['freq_array'])
pwr_array = numpy.fft.fftshift(psd_state['pwr_array'])
if self._crop_factor:
crop_bins_half = round((self._crop_factor * self._bins) / 2)
freq_array = freq_array[crop_bins_half:-crop_bins_half]
pwr_array = pwr_array[crop_bins_half:-crop_bins_half]
if psd_state['repeats'] > 1:
pwr_array = pwr_array / psd_state['repeats']
if self._log_scale:
pwr_array = 10 * numpy.log10(pwr_array)
return (freq_array, pwr_array) |
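The post-processing in result is standard numpy; a small self-contained sketch with a fabricated psd_state dict reproduces the fftshift, averaging and dB steps:

import numpy as np

# Fabricated accumulator state: 8 frequency bins, 4 summed sweeps of power 4.0 per bin.
bins = 8
psd_state = {
    "freq_array": np.fft.fftfreq(bins, d=1.0 / bins),
    "pwr_array": np.full(bins, 4.0),
    "repeats": 4,
}

freqs = np.fft.fftshift(psd_state["freq_array"])                        # put DC in the middle
power = np.fft.fftshift(psd_state["pwr_array"]) / psd_state["repeats"]  # average over sweeps
power_db = 10 * np.log10(power)                                         # optional log scale, as in the method
print(freqs, power_db)                                                  # every bin averages to 1.0 -> 0 dB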
def validate(self):
""" method starts validation of the tree.
@see TreeNode.validate """
for timeperiod, child in self.root.children.items():
child.validate()
self.validation_timestamp = datetime.utcnow() | method starts validation of the tree.
        @see TreeNode.validate | Below is the instruction that describes the task:
### Input:
method starts validation of the tree.
@see TreeNode.validate
### Response:
def validate(self):
""" method starts validation of the tree.
@see TreeNode.validate """
for timeperiod, child in self.root.children.items():
child.validate()
self.validation_timestamp = datetime.utcnow() |
def mismatches(s1, s2, context=0, eq=operator.eq):
'''extract mismatched segments from aligned strings
>>> list(mismatches(*align('pharmacy', 'farmácia'), context=1))
[('pha', ' fa'), ('mac', 'mác'), ('c y', 'cia')]
>>> list(mismatches(*align('constitution', 'constituição'), context=1))
[('ution', 'uição')]
>>> list(mismatches(*align('idea', 'ideia'), context=1))
[('e a', 'eia')]
>>> list(mismatches(*align('instructed', 'instruído'), context=1))
[('ucted', 'u ído')]
>>> list(mismatches(*align('concluded', 'concluído'), context=1))
[('uded', 'uído')]
'''
n = len(s1)
assert(len(s2) == n)
    lct, rct = (context, context) if isinstance(context, int) else context
i = None
for j in range(n):
if eq(s1[j], s2[j]):
if i is not None:
# report mismatch segment [i:j] with lct chars of left context
# and rct chars of right context
p, q = max(0, i-lct), min(j+rct, n)
yield s1[p:q], s2[p:q]
i = None
elif i is None:
i = j
if i is not None:
p = max(i-lct, 0)
yield s1[p:], s2[p:] | extract mismatched segments from aligned strings
>>> list(mismatches(*align('pharmacy', 'farmácia'), context=1))
[('pha', ' fa'), ('mac', 'mác'), ('c y', 'cia')]
>>> list(mismatches(*align('constitution', 'constituição'), context=1))
[('ution', 'uição')]
>>> list(mismatches(*align('idea', 'ideia'), context=1))
[('e a', 'eia')]
>>> list(mismatches(*align('instructed', 'instruído'), context=1))
[('ucted', 'u ído')]
>>> list(mismatches(*align('concluded', 'concluído'), context=1))
    [('uded', 'uído')] | Below is the instruction that describes the task:
### Input:
extract mismatched segments from aligned strings
>>> list(mismatches(*align('pharmacy', 'farmácia'), context=1))
[('pha', ' fa'), ('mac', 'mác'), ('c y', 'cia')]
>>> list(mismatches(*align('constitution', 'constituição'), context=1))
[('ution', 'uição')]
>>> list(mismatches(*align('idea', 'ideia'), context=1))
[('e a', 'eia')]
>>> list(mismatches(*align('instructed', 'instruído'), context=1))
[('ucted', 'u ído')]
>>> list(mismatches(*align('concluded', 'concluído'), context=1))
[('uded', 'uído')]
### Response:
def mismatches(s1, s2, context=0, eq=operator.eq):
'''extract mismatched segments from aligned strings
>>> list(mismatches(*align('pharmacy', 'farmácia'), context=1))
[('pha', ' fa'), ('mac', 'mác'), ('c y', 'cia')]
>>> list(mismatches(*align('constitution', 'constituição'), context=1))
[('ution', 'uição')]
>>> list(mismatches(*align('idea', 'ideia'), context=1))
[('e a', 'eia')]
>>> list(mismatches(*align('instructed', 'instruído'), context=1))
[('ucted', 'u ído')]
>>> list(mismatches(*align('concluded', 'concluído'), context=1))
[('uded', 'uído')]
'''
n = len(s1)
assert(len(s2) == n)
    lct, rct = (context, context) if isinstance(context, int) else context
i = None
for j in range(n):
if eq(s1[j], s2[j]):
if i is not None:
# report mismatch segment [i:j] with lct chars of left context
# and rct chars of right context
p, q = max(0, i-lct), min(j+rct, n)
yield s1[p:q], s2[p:q]
i = None
elif i is None:
i = j
if i is not None:
p = max(i-lct, 0)
yield s1[p:], s2[p:] |
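Beyond the doctests above, the eq parameter lets the character comparison be relaxed; a short sketch on two hand-aligned, equal-length strings shows a case-insensitive comparison (reusing the mismatches function defined in this record):

s1 = "Colour"
s2 = "colori"   # same length as s1, aligned by hand so align() is not needed

# Default equality reports the capital C as a mismatch; a case-insensitive eq does not.
print(list(mismatches(s1, s2, context=1)))
print(list(mismatches(s1, s2, context=1, eq=lambda a, b: a.lower() == b.lower())))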
def inferMainPropertyType(uriref):
"""
Attempt to reduce the property types to 4 main types
(without the OWL ontology - which would be the propert way)
In [3]: for x in g.all_properties:
...: print x.rdftype
...:
http://www.w3.org/2002/07/owl#FunctionalProperty
http://www.w3.org/2002/07/owl#FunctionalProperty
http://www.w3.org/2002/07/owl#InverseFunctionalProperty
http://www.w3.org/2002/07/owl#ObjectProperty
http://www.w3.org/2002/07/owl#ObjectProperty
http://www.w3.org/2002/07/owl#TransitiveProperty
http://www.w3.org/2002/07/owl#TransitiveProperty
etc.....
"""
if uriref:
if uriref == rdflib.OWL.DatatypeProperty:
return uriref
elif uriref == rdflib.OWL.AnnotationProperty:
return uriref
elif uriref == rdflib.RDF.Property:
return uriref
else: # hack..
return rdflib.OWL.ObjectProperty
else:
return None | Attempt to reduce the property types to 4 main types
(without the OWL ontology - which would be the propert way)
In [3]: for x in g.all_properties:
...: print x.rdftype
...:
http://www.w3.org/2002/07/owl#FunctionalProperty
http://www.w3.org/2002/07/owl#FunctionalProperty
http://www.w3.org/2002/07/owl#InverseFunctionalProperty
http://www.w3.org/2002/07/owl#ObjectProperty
http://www.w3.org/2002/07/owl#ObjectProperty
http://www.w3.org/2002/07/owl#TransitiveProperty
http://www.w3.org/2002/07/owl#TransitiveProperty
    etc..... | Below is the instruction that describes the task:
### Input:
Attempt to reduce the property types to 4 main types
(without the OWL ontology - which would be the propert way)
In [3]: for x in g.all_properties:
...: print x.rdftype
...:
http://www.w3.org/2002/07/owl#FunctionalProperty
http://www.w3.org/2002/07/owl#FunctionalProperty
http://www.w3.org/2002/07/owl#InverseFunctionalProperty
http://www.w3.org/2002/07/owl#ObjectProperty
http://www.w3.org/2002/07/owl#ObjectProperty
http://www.w3.org/2002/07/owl#TransitiveProperty
http://www.w3.org/2002/07/owl#TransitiveProperty
etc.....
### Response:
def inferMainPropertyType(uriref):
"""
Attempt to reduce the property types to 4 main types
(without the OWL ontology - which would be the propert way)
In [3]: for x in g.all_properties:
...: print x.rdftype
...:
http://www.w3.org/2002/07/owl#FunctionalProperty
http://www.w3.org/2002/07/owl#FunctionalProperty
http://www.w3.org/2002/07/owl#InverseFunctionalProperty
http://www.w3.org/2002/07/owl#ObjectProperty
http://www.w3.org/2002/07/owl#ObjectProperty
http://www.w3.org/2002/07/owl#TransitiveProperty
http://www.w3.org/2002/07/owl#TransitiveProperty
etc.....
"""
if uriref:
if uriref == rdflib.OWL.DatatypeProperty:
return uriref
elif uriref == rdflib.OWL.AnnotationProperty:
return uriref
elif uriref == rdflib.RDF.Property:
return uriref
else: # hack..
return rdflib.OWL.ObjectProperty
else:
return None |
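A quick sketch of the collapsing behaviour, using rdflib's bundled OWL and RDF namespace constants (the function itself is defined in the record above):

import rdflib

# Datatype, annotation and plain RDF properties pass through unchanged...
print(inferMainPropertyType(rdflib.OWL.DatatypeProperty))    # owl:DatatypeProperty
print(inferMainPropertyType(rdflib.RDF.Property))            # rdf:Property
# ...while every other property flavour is folded into owl:ObjectProperty.
print(inferMainPropertyType(rdflib.OWL.TransitiveProperty))  # owl:ObjectProperty
print(inferMainPropertyType(None))                           # None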
def expose(dists):
"""Exposes vendored code in isolated chroots.
Any vendored distributions listed in ``dists`` will be unpacked to individual chroots for addition
to the ``sys.path``; ie: ``expose(['setuptools', 'wheel'])`` will unpack these vendored
distributions and yield the two chroot paths they were unpacked to.
:param dists: A list of vendored distribution names to expose.
:type dists: list of str
:raise: :class:`ValueError` if any distributions to expose cannot be found.
:returns: An iterator of exposed vendored distribution chroot paths.
"""
from pex.common import safe_delete
for path in VendorImporter.expose(dists, root=isolated()):
safe_delete(os.path.join(path, '__init__.py'))
yield path | Exposes vendored code in isolated chroots.
Any vendored distributions listed in ``dists`` will be unpacked to individual chroots for addition
to the ``sys.path``; ie: ``expose(['setuptools', 'wheel'])`` will unpack these vendored
distributions and yield the two chroot paths they were unpacked to.
:param dists: A list of vendored distribution names to expose.
:type dists: list of str
:raise: :class:`ValueError` if any distributions to expose cannot be found.
  :returns: An iterator of exposed vendored distribution chroot paths. | Below is the instruction that describes the task:
### Input:
Exposes vendored code in isolated chroots.
Any vendored distributions listed in ``dists`` will be unpacked to individual chroots for addition
to the ``sys.path``; ie: ``expose(['setuptools', 'wheel'])`` will unpack these vendored
distributions and yield the two chroot paths they were unpacked to.
:param dists: A list of vendored distribution names to expose.
:type dists: list of str
:raise: :class:`ValueError` if any distributions to expose cannot be found.
:returns: An iterator of exposed vendored distribution chroot paths.
### Response:
def expose(dists):
"""Exposes vendored code in isolated chroots.
Any vendored distributions listed in ``dists`` will be unpacked to individual chroots for addition
to the ``sys.path``; ie: ``expose(['setuptools', 'wheel'])`` will unpack these vendored
distributions and yield the two chroot paths they were unpacked to.
:param dists: A list of vendored distribution names to expose.
:type dists: list of str
:raise: :class:`ValueError` if any distributions to expose cannot be found.
:returns: An iterator of exposed vendored distribution chroot paths.
"""
from pex.common import safe_delete
for path in VendorImporter.expose(dists, root=isolated()):
safe_delete(os.path.join(path, '__init__.py'))
yield path |
def _should_fetch_reason_with_robots(self, request: Request) -> Tuple[bool, str]:
'''Return info whether the URL should be fetched including checking
robots.txt.
Coroutine.
'''
result = yield from \
self._fetch_rule.check_initial_web_request(self._item_session, request)
return result | Return info whether the URL should be fetched including checking
robots.txt.
        Coroutine. | Below is the instruction that describes the task:
### Input:
Return info whether the URL should be fetched including checking
robots.txt.
Coroutine.
### Response:
def _should_fetch_reason_with_robots(self, request: Request) -> Tuple[bool, str]:
'''Return info whether the URL should be fetched including checking
robots.txt.
Coroutine.
'''
result = yield from \
self._fetch_rule.check_initial_web_request(self._item_session, request)
return result |