code (strings, 75 to 104k chars) | docstring (strings, 1 to 46.9k chars) | text (strings, 164 to 112k chars)
---|---|---|
def post_customer_preferences(self, **kwargs): # noqa: E501
"""Update selected fields of customer preferences # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_customer_preferences(async_req=True)
>>> result = thread.get()
:param async_req bool
:param CustomerPreferencesUpdating body:
:return: CustomerPreferences
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_customer_preferences_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.post_customer_preferences_with_http_info(**kwargs) # noqa: E501
return data | Update selected fields of customer preferences # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_customer_preferences(async_req=True)
>>> result = thread.get()
:param async_req bool
:param CustomerPreferencesUpdating body:
:return: CustomerPreferences
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
Update selected fields of customer preferences # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_customer_preferences(async_req=True)
>>> result = thread.get()
:param async_req bool
:param CustomerPreferencesUpdating body:
:return: CustomerPreferences
If the method is called asynchronously,
returns the request thread.
### Response:
def post_customer_preferences(self, **kwargs): # noqa: E501
"""Update selected fields of customer preferences # noqa: E501
# noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_customer_preferences(async_req=True)
>>> result = thread.get()
:param async_req bool
:param CustomerPreferencesUpdating body:
:return: CustomerPreferences
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_customer_preferences_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.post_customer_preferences_with_http_info(**kwargs) # noqa: E501
return data |
def most_populated(adf):
"""
Looks at each column, using the one with the most values
Honours the Trump override/failsafe logic. """
# just look at the feeds, ignore overrides and failsafes:
feeds_only = adf[adf.columns[1:-1]]
# find the most populated feed
cnt_df = feeds_only.count()
cnt = cnt_df.max()
selected_feeds = cnt_df[cnt_df == cnt]
# if there aren't any feeds, the first feed will work...
if len(selected_feeds) == 0:
pre_final = adf['feed001'] # if they are all empty
# they should all be
# equally empty
else:
#if there's one or more, take the highest priority one
pre_final = adf[selected_feeds.index[0]]
# create the final, applying the override and failsafe logic...
final_df = pd.concat([adf.override_feed000,
pre_final,
adf.failsafe_feed999], axis=1)
final_df = final_df.apply(_row_wise_priority, axis=1)
return final_df | Looks at each column, using the one with the most values
Honours the Trump override/failsafe logic. | Below is the instruction that describes the task:
### Input:
Looks at each column, using the one with the most values
Honours the Trump override/failsafe logic.
### Response:
def most_populated(adf):
"""
Looks at each column, using the one with the most values
Honours the Trump override/failsafe logic. """
# just look at the feeds, ignore overrides and failsafes:
feeds_only = adf[adf.columns[1:-1]]
# find the most populated feed
cnt_df = feeds_only.count()
cnt = cnt_df.max()
selected_feeds = cnt_df[cnt_df == cnt]
# if there aren't any feeds, the first feed will work...
if len(selected_feeds) == 0:
pre_final = adf['feed001'] # if they are all empty
# they should all be
# equally empty
else:
#if there's one or more, take the highest priority one
pre_final = adf[selected_feeds.index[0]]
# create the final, applying the override and failsafe logic...
final_df = pd.concat([adf.override_feed000,
pre_final,
adf.failsafe_feed999], axis=1)
final_df = final_df.apply(_row_wise_priority, axis=1)
return final_df |
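A minimal usage sketch for `most_populated`, assuming a Trump-style frame whose first column is `override_feed000`, last column is `failsafe_feed999`, and middle columns are feeds; `_row_wise_priority` is not shown in the record, so a simple first-non-NaN stand-in is defined here.

```python
import numpy as np
import pandas as pd

def _row_wise_priority(row):
    # Hypothetical stand-in: first non-NaN value wins, in
    # override -> selected feed -> failsafe column order.
    for value in row:
        if not pd.isnull(value):
            return value
    return np.nan

adf = pd.DataFrame({
    'override_feed000': [np.nan, 9.0, np.nan],
    'feed001':          [1.0, np.nan, np.nan],
    'feed002':          [2.0, 2.5, 3.0],   # most populated feed
    'failsafe_feed999': [0.0, 0.0, 0.0],
})
print(most_populated(adf))   # 2.0, 9.0 (override wins), 3.0
```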
async def download(resource_url):
'''
Download given resource_url
'''
scheme = resource_url.parsed.scheme
if scheme in ('http', 'https'):
await download_http(resource_url)
elif scheme in ('git', 'git+https', 'git+http'):
await download_git(resource_url)
else:
raise ValueError('Unknown URL scheme: "%s"' % scheme) | Download given resource_url | Below is the instruction that describes the task:
### Input:
Download given resource_url
### Response:
async def download(resource_url):
'''
Download given resource_url
'''
scheme = resource_url.parsed.scheme
if scheme in ('http', 'https'):
await download_http(resource_url)
elif scheme in ('git', 'git+https', 'git+http'):
await download_git(resource_url)
else:
raise ValueError('Unknown URL scheme: "%s"' % scheme) |
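The dispatcher expects a `resource_url` object with a `.parsed` attribute, presumably a `urllib.parse` result; a hedged driver sketch with a hypothetical `ResourceUrl` wrapper standing in for whatever the surrounding package actually provides:

```python
import asyncio
from urllib.parse import urlparse

class ResourceUrl:
    """Hypothetical wrapper exposing the .parsed attribute used above."""
    def __init__(self, url):
        self.url = url
        self.parsed = urlparse(url)

# download_http / download_git are assumed to live in the same package.
asyncio.run(download(ResourceUrl('https://example.com/archive.tar.gz')))
```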
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
# NA values are represented in self._codes with -1
# np.take causes NA values to take final element in new_categories
if np.any(self._codes == -1):
new_categories = new_categories.insert(len(new_categories),
np.nan)
return np.take(new_categories, self._codes) | Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object') | Below is the instruction that describes the task:
### Input:
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
### Response:
def map(self, mapper):
"""
Map categories using input correspondence (dict, Series, or function).
Maps the categories to new categories. If the mapping correspondence is
one-to-one the result is a :class:`~pandas.Categorical` which has the
same order property as the original, otherwise a :class:`~pandas.Index`
is returned. NaN values are unaffected.
If a `dict` or :class:`~pandas.Series` is used any unmapped category is
mapped to `NaN`. Note that if this happens an :class:`~pandas.Index`
will be returned.
Parameters
----------
mapper : function, dict, or Series
Mapping correspondence.
Returns
-------
pandas.Categorical or pandas.Index
Mapped categorical.
See Also
--------
CategoricalIndex.map : Apply a mapping correspondence on a
:class:`~pandas.CategoricalIndex`.
Index.map : Apply a mapping correspondence on an
:class:`~pandas.Index`.
Series.map : Apply a mapping correspondence on a
:class:`~pandas.Series`.
Series.apply : Apply more complex functions on a
:class:`~pandas.Series`.
Examples
--------
>>> cat = pd.Categorical(['a', 'b', 'c'])
>>> cat
[a, b, c]
Categories (3, object): [a, b, c]
>>> cat.map(lambda x: x.upper())
[A, B, C]
Categories (3, object): [A, B, C]
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'third'})
[first, second, third]
Categories (3, object): [first, second, third]
If the mapping is one-to-one the ordering of the categories is
preserved:
>>> cat = pd.Categorical(['a', 'b', 'c'], ordered=True)
>>> cat
[a, b, c]
Categories (3, object): [a < b < c]
>>> cat.map({'a': 3, 'b': 2, 'c': 1})
[3, 2, 1]
Categories (3, int64): [3 < 2 < 1]
If the mapping is not one-to-one an :class:`~pandas.Index` is returned:
>>> cat.map({'a': 'first', 'b': 'second', 'c': 'first'})
Index(['first', 'second', 'first'], dtype='object')
If a `dict` is used, all unmapped categories are mapped to `NaN` and
the result is an :class:`~pandas.Index`:
>>> cat.map({'a': 'first', 'b': 'second'})
Index(['first', 'second', nan], dtype='object')
"""
new_categories = self.categories.map(mapper)
try:
return self.from_codes(self._codes.copy(),
categories=new_categories,
ordered=self.ordered)
except ValueError:
# NA values are represented in self._codes with -1
# np.take causes NA values to take final element in new_categories
if np.any(self._codes == -1):
new_categories = new_categories.insert(len(new_categories),
np.nan)
return np.take(new_categories, self._codes) |
def getImageForBulkExpressions(self, retina_name, body, get_fingerprint=None, image_scalar=2, plot_shape="circle", sparsity=1.0):
"""Bulk get images for expressions
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
image_scalar, int: The scale of the image (optional)
plot_shape, str: The image shape (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: Array[Image]
"""
resourcePath = '/image/bulk'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['image_scalar'] = image_scalar
queryParams['plot_shape'] = plot_shape
queryParams['sparsity'] = sparsity
queryParams['get_fingerprint'] = get_fingerprint
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [image.Image(**r) for r in response.json()] | Bulk get images for expressions
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
image_scalar, int: The scale of the image (optional)
plot_shape, str: The image shape (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: Array[Image] | Below is the instruction that describes the task:
### Input:
Bulk get images for expressions
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
image_scalar, int: The scale of the image (optional)
plot_shape, str: The image shape (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: Array[Image]
### Response:
def getImageForBulkExpressions(self, retina_name, body, get_fingerprint=None, image_scalar=2, plot_shape="circle", sparsity=1.0):
"""Bulk get images for expressions
Args:
retina_name, str: The retina name (required)
body, ExpressionOperation: The JSON encoded expression to be evaluated (required)
get_fingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional)
image_scalar, int: The scale of the image (optional)
plot_shape, str: The image shape (optional)
sparsity, float: Sparsify the resulting expression to this percentage (optional)
Returns: Array[Image]
"""
resourcePath = '/image/bulk'
method = 'POST'
queryParams = {}
headerParams = {'Accept': 'Application/json', 'Content-Type': 'application/json'}
postData = None
queryParams['retina_name'] = retina_name
queryParams['image_scalar'] = image_scalar
queryParams['plot_shape'] = plot_shape
queryParams['sparsity'] = sparsity
queryParams['get_fingerprint'] = get_fingerprint
postData = body
response = self.apiClient._callAPI(resourcePath, method, queryParams, postData, headerParams)
return [image.Image(**r) for r in response.json()] |
def add_spark_slave(self, master, slave, configure):
"""
add spark slave
:return:
"""
# go to master server, add config
self.reset_server_env(master, configure)
with cd(bigdata_conf.spark_home):
if not exists('conf/spark-env.sh'):
sudo('cp conf/spark-env.sh.template conf/spark-env.sh')
spark_env = bigdata_conf.spark_env.format(
spark_home=bigdata_conf.spark_home,
hadoop_home=bigdata_conf.hadoop_home,
host=env.host_string,
SPARK_WORKER_MEMORY=configure[master].get(
'SPARK_WORKER_MEMORY', '512M'
)
)
put(StringIO(spark_env), 'conf/spark-env.sh', use_sudo=True)
if not exists('conf/slaves'):
sudo('cp conf/slaves.template conf/slaves')
# comment slaves localhost
comment('{0}/conf/slaves'.format(bigdata_conf.spark_home),
'localhost', use_sudo=True)
# add slave into config
append('{0}/conf/slaves'.format(bigdata_conf.spark_home),
'\n{0}'.format(configure[slave]['host']), use_sudo=True)
run('scp -r {0} {1}@{2}:/opt'.format(
bigdata_conf.spark_home,
configure[slave]['user'],
configure[slave]['host']
))
# go to slave server
self.reset_server_env(slave, configure)
append(bigdata_conf.global_env_home, 'export SPARK_LOCAL_IP={0}'.format(
configure[slave]['host']
), use_sudo=True)
run('source {0}'.format(bigdata_conf.global_env_home))
# go to master server, restart server
self.reset_server_env(master, configure)
with cd(bigdata_conf.spark_home):
run('./sbin/stop-master.sh')
run('./sbin/stop-slaves.sh')
run('./sbin/start-master.sh')
run('./sbin/start-slaves.sh') | add spark slave
:return: | Below is the instruction that describes the task:
### Input:
add spark slave
:return:
### Response:
def add_spark_slave(self, master, slave, configure):
"""
add spark slave
:return:
"""
# go to master server, add config
self.reset_server_env(master, configure)
with cd(bigdata_conf.spark_home):
if not exists('conf/spark-env.sh'):
sudo('cp conf/spark-env.sh.template conf/spark-env.sh')
spark_env = bigdata_conf.spark_env.format(
spark_home=bigdata_conf.spark_home,
hadoop_home=bigdata_conf.hadoop_home,
host=env.host_string,
SPARK_WORKER_MEMORY=configure[master].get(
'SPARK_WORKER_MEMORY', '512M'
)
)
put(StringIO(spark_env), 'conf/spark-env.sh', use_sudo=True)
if not exists('conf/slaves'):
sudo('cp conf/slaves.template conf/slaves')
# comment slaves localhost
comment('{0}/conf/slaves'.format(bigdata_conf.spark_home),
'localhost', use_sudo=True)
# add slave into config
append('{0}/conf/slaves'.format(bigdata_conf.spark_home),
'\n{0}'.format(configure[slave]['host']), use_sudo=True)
run('scp -r {0} {1}@{2}:/opt'.format(
bigdata_conf.spark_home,
configure[slave]['user'],
configure[slave]['host']
))
# go to slave server
self.reset_server_env(slave, configure)
append(bigdata_conf.global_env_home, 'export SPARK_LOCAL_IP={0}'.format(
configure[slave]['host']
), use_sudo=True)
run('source {0}'.format(bigdata_conf.global_env_home))
# go to master server, restart server
self.reset_server_env(master, configure)
with cd(bigdata_conf.spark_home):
run('./sbin/stop-master.sh')
run('./sbin/stop-slaves.sh')
run('./sbin/start-master.sh')
run('./sbin/start-slaves.sh') |
def from_path(cls, spec_path):
"""
Load a specification from a path.
:param FilePath spec_path: The location of the specification to read.
"""
with spec_path.open() as spec_file:
return cls.from_document(load(spec_file)) | Load a specification from a path.
:param FilePath spec_path: The location of the specification to read. | Below is the instruction that describes the task:
### Input:
Load a specification from a path.
:param FilePath spec_path: The location of the specification to read.
### Response:
def from_path(cls, spec_path):
"""
Load a specification from a path.
:param FilePath spec_path: The location of the specification to read.
"""
with spec_path.open() as spec_file:
return cls.from_document(load(spec_file)) |
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name] | Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove. | Below is the instruction that describes the task:
### Input:
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
### Response:
def remove_distribution(self, dist):
"""
Remove a distribution from the finder. This will update internal
information about who provides what.
:param dist: The distribution to remove.
"""
logger.debug('removing distribution %s', dist)
name = dist.key
del self.dists_by_name[name]
del self.dists[(name, dist.version)]
for p in dist.provides:
name, version = parse_name_and_version(p)
logger.debug('Remove from provided: %s, %s, %s', name, version, dist)
s = self.provided[name]
s.remove((version, dist))
if not s:
del self.provided[name] |
def _year_month_delta_from_elements(elements):
"""
Return a tuple of (years, months) from a dict of date elements.
Accepts a dict containing any of the following:
- years
- months
Example:
>>> _year_month_delta_from_elements({'years': 2, 'months': 14})
(3, 2)
"""
return divmod(
(int(elements.get('years', 0)) * MONTHS_IN_YEAR) +
elements.get('months', 0),
MONTHS_IN_YEAR
) | Return a tuple of (years, months) from a dict of date elements.
Accepts a dict containing any of the following:
- years
- months
Example:
>>> _year_month_delta_from_elements({'years': 2, 'months': 14})
(3, 2) | Below is the instruction that describes the task:
### Input:
Return a tuple of (years, months) from a dict of date elements.
Accepts a dict containing any of the following:
- years
- months
Example:
>>> _year_month_delta_from_elements({'years': 2, 'months': 14})
(3, 2)
### Response:
def _year_month_delta_from_elements(elements):
"""
Return a tuple of (years, months) from a dict of date elements.
Accepts a dict containing any of the following:
- years
- months
Example:
>>> _year_month_delta_from_elements({'years': 2, 'months': 14})
(3, 2)
"""
return divmod(
(int(elements.get('years', 0)) * MONTHS_IN_YEAR) +
elements.get('months', 0),
MONTHS_IN_YEAR
) |
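The `divmod` call does the normalisation: total months are computed first, then split into whole years and leftover months. Assuming `MONTHS_IN_YEAR = 12`, the docstring example reduces to:

```python
MONTHS_IN_YEAR = 12

# 2 years + 14 months -> 38 months -> (3 years, 2 months)
total_months = 2 * MONTHS_IN_YEAR + 14
assert divmod(total_months, MONTHS_IN_YEAR) == (3, 2)
```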
def _nuclear_factor(self, Tp):
"""
Compute nuclear enhancement factor
"""
sigmaRpp = 10 * np.pi * 1e-27
sigmainel = self._sigma_inel(Tp)
sigmainel0 = self._sigma_inel(1e3) # at 1e3 GeV
f = sigmainel / sigmainel0
f2 = np.where(f > 1, f, 1.0)
G = 1.0 + np.log(f2)
# epsilon factors computed from Eqs 21 to 23 with local ISM abundances
epsC = 1.37
eps1 = 0.29
eps2 = 0.1
epstotal = np.where(
Tp > self._Tth,
epsC + (eps1 + eps2) * sigmaRpp * G / sigmainel,
0.0,
)
if np.any(Tp < 1.0):
# nuclear enhancement factor diverges towards Tp = Tth, fix Tp<1 to
# eps(1.0) = 1.91
loE = np.where((Tp > self._Tth) * (Tp < 1.0))
epstotal[loE] = 1.9141
return epstotal | Compute nuclear enhancement factor | Below is the instruction that describes the task:
### Input:
Compute nuclear enhancement factor
### Response:
def _nuclear_factor(self, Tp):
"""
Compute nuclear enhancement factor
"""
sigmaRpp = 10 * np.pi * 1e-27
sigmainel = self._sigma_inel(Tp)
sigmainel0 = self._sigma_inel(1e3) # at 1e3 GeV
f = sigmainel / sigmainel0
f2 = np.where(f > 1, f, 1.0)
G = 1.0 + np.log(f2)
# epsilon factors computed from Eqs 21 to 23 with local ISM abundances
epsC = 1.37
eps1 = 0.29
eps2 = 0.1
epstotal = np.where(
Tp > self._Tth,
epsC + (eps1 + eps2) * sigmaRpp * G / sigmainel,
0.0,
)
if np.any(Tp < 1.0):
# nuclear enhancement factor diverges towards Tp = Tth, fix Tp<1 to
# eps(1.0) = 1.91
loE = np.where((Tp > self._Tth) * (Tp < 1.0))
epstotal[loE] = 1.9141
return epstotal |
def metric_crud(client, to_delete):
"""Metric CRUD."""
METRIC_NAME = "robots-%d" % (_millis(),)
DESCRIPTION = "Robots all up in your server"
FILTER = "logName:apache-access AND textPayload:robot"
UPDATED_FILTER = "textPayload:robot"
UPDATED_DESCRIPTION = "Danger, Will Robinson!"
# [START client_list_metrics]
for metric in client.list_metrics(): # API call(s)
do_something_with(metric)
# [END client_list_metrics]
# [START metric_create]
metric = client.metric(METRIC_NAME, filter_=FILTER, description=DESCRIPTION)
assert not metric.exists() # API call
metric.create() # API call
assert metric.exists() # API call
# [END metric_create]
to_delete.append(metric)
# [START metric_reload]
existing_metric = client.metric(METRIC_NAME)
existing_metric.reload() # API call
# [END metric_reload]
assert existing_metric.filter_ == FILTER
assert existing_metric.description == DESCRIPTION
# [START metric_update]
existing_metric.filter_ = UPDATED_FILTER
existing_metric.description = UPDATED_DESCRIPTION
existing_metric.update() # API call
# [END metric_update]
existing_metric.reload()
assert existing_metric.filter_ == UPDATED_FILTER
assert existing_metric.description == UPDATED_DESCRIPTION
def _metric_delete():
# [START metric_delete]
metric.delete()
# [END metric_delete]
_backoff_not_found(_metric_delete)
to_delete.remove(metric) | Metric CRUD. | Below is the instruction that describes the task:
### Input:
Metric CRUD.
### Response:
def metric_crud(client, to_delete):
"""Metric CRUD."""
METRIC_NAME = "robots-%d" % (_millis(),)
DESCRIPTION = "Robots all up in your server"
FILTER = "logName:apache-access AND textPayload:robot"
UPDATED_FILTER = "textPayload:robot"
UPDATED_DESCRIPTION = "Danger, Will Robinson!"
# [START client_list_metrics]
for metric in client.list_metrics(): # API call(s)
do_something_with(metric)
# [END client_list_metrics]
# [START metric_create]
metric = client.metric(METRIC_NAME, filter_=FILTER, description=DESCRIPTION)
assert not metric.exists() # API call
metric.create() # API call
assert metric.exists() # API call
# [END metric_create]
to_delete.append(metric)
# [START metric_reload]
existing_metric = client.metric(METRIC_NAME)
existing_metric.reload() # API call
# [END metric_reload]
assert existing_metric.filter_ == FILTER
assert existing_metric.description == DESCRIPTION
# [START metric_update]
existing_metric.filter_ = UPDATED_FILTER
existing_metric.description = UPDATED_DESCRIPTION
existing_metric.update() # API call
# [END metric_update]
existing_metric.reload()
assert existing_metric.filter_ == UPDATED_FILTER
assert existing_metric.description == UPDATED_DESCRIPTION
def _metric_delete():
# [START metric_delete]
metric.delete()
# [END metric_delete]
_backoff_not_found(_metric_delete)
to_delete.remove(metric) |
def _on_ws_message(self, ws, message):
"""
on_message callback of websocket class, load the message into a dict and then
update an Ack Object with the results
:param ws: web socket connection that the message was received on
:param message: web socket message in text form
:return: None
"""
logging.debug(message)
json_list = json.loads(message)
for rx_ack in json_list:
ack = EventHub_pb2.Ack()
for key, value in rx_ack.items():
setattr(ack, key, value)
self._publisher_callback(ack) | on_message callback of websocket class, load the message into a dict and then
update an Ack Object with the results
:param ws: web socket connection that the message was received on
:param message: web socket message in text form
:return: None | Below is the instruction that describes the task:
### Input:
on_message callback of websocket class, load the message into a dict and then
update an Ack Object with the results
:param ws: web socket connection that the message was received on
:param message: web socket message in text form
:return: None
### Response:
def _on_ws_message(self, ws, message):
"""
on_message callback of websocket class, load the message into a dict and then
update an Ack Object with the results
:param ws: web socket connection that the message was received on
:param message: web socket message in text form
:return: None
"""
logging.debug(message)
json_list = json.loads(message)
for rx_ack in json_list:
ack = EventHub_pb2.Ack()
for key, value in rx_ack.items():
setattr(ack, key, value)
self._publisher_callback(ack) |
def _parse_complement(self, tokens):
""" Parses a complement
Complement ::= 'complement' '(' SuperRange ')'
"""
tokens.pop(0) # Pop 'complement'
tokens.pop(0) # Pop '('
res = self._parse_nested_interval(tokens)
tokens.pop(0) # Pop ')'
res.switch_strand()
return res | Parses a complement
Complement ::= 'complement' '(' SuperRange ')' | Below is the instruction that describes the task:
### Input:
Parses a complement
Complement ::= 'complement' '(' SuperRange ')'
### Response:
def _parse_complement(self, tokens):
""" Parses a complement
Complement ::= 'complement' '(' SuperRange ')'
"""
tokens.pop(0) # Pop 'complement'
tokens.pop(0) # Pop '('
res = self._parse_nested_interval(tokens)
tokens.pop(0) # Pop ')'
res.switch_strand()
return res |
def parse_value(self, value):
"""
Convert value string to float for reporting
"""
value = value.strip()
# Skip missing sensors
if value == 'na':
return None
# Try just getting the float value
try:
return float(value)
except:
pass
# Next best guess is a hex value
try:
return float.fromhex(value)
except:
pass
# No luck, bail
return None | Convert value string to float for reporting | Below is the instruction that describes the task:
### Input:
Convert value string to float for reporting
### Response:
def parse_value(self, value):
"""
Convert value string to float for reporting
"""
value = value.strip()
# Skip missing sensors
if value == 'na':
return None
# Try just getting the float value
try:
return float(value)
except:
pass
# Next best guess is a hex value
try:
return float.fromhex(value)
except:
pass
# No luck, bail
return None |
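Since `self` is never touched, the method can be exercised unbound; a quick check of the fallback chain (plain `float`, then `float.fromhex`, then `None`), assuming `parse_value` is in scope:

```python
assert parse_value(None, ' na ') is None     # missing sensor
assert parse_value(None, '3.14') == 3.14     # plain float
assert parse_value(None, '0x1a') == 26.0     # hex via float.fromhex
assert parse_value(None, 'bogus') is None    # unparseable -> None
```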
def freemem(**kwargs):
'''
Return an int representing the amount of memory (in MB) that has not
been given to virtual machines on this node
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.freemem
'''
conn = __get_conn(**kwargs)
mem = _freemem(conn)
conn.close()
return mem | Return an int representing the amount of memory (in MB) that has not
been given to virtual machines on this node
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.freemem | Below is the instruction that describes the task:
### Input:
Return an int representing the amount of memory (in MB) that has not
been given to virtual machines on this node
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.freemem
### Response:
def freemem(**kwargs):
'''
Return an int representing the amount of memory (in MB) that has not
been given to virtual machines on this node
:param connection: libvirt connection URI, overriding defaults
.. versionadded:: 2019.2.0
:param username: username to connect with, overriding defaults
.. versionadded:: 2019.2.0
:param password: password to connect with, overriding defaults
.. versionadded:: 2019.2.0
CLI Example:
.. code-block:: bash
salt '*' virt.freemem
'''
conn = __get_conn(**kwargs)
mem = _freemem(conn)
conn.close()
return mem |
def _place_ticks_vertical(self):
"""Display the ticks for a vertical slider."""
for tick, label in zip(self.ticks, self.ticklabels):
y = self.convert_to_pixels(tick)
label.place_configure(y=y) | Display the ticks for a vertical slider. | Below is the instruction that describes the task:
### Input:
Display the ticks for a vertical slider.
### Response:
def _place_ticks_vertical(self):
"""Display the ticks for a vertical slider."""
for tick, label in zip(self.ticks, self.ticklabels):
y = self.convert_to_pixels(tick)
label.place_configure(y=y) |
def isUrl(urlString):
"""
Attempts to return whether a given URL string is valid by checking
for the presence of the URL scheme and netloc using the urlparse
module, and then using a regex.
From http://stackoverflow.com/questions/7160737/
"""
parsed = urlparse.urlparse(urlString)
urlparseValid = parsed.netloc != '' and parsed.scheme != ''
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)'
r'+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return regex.match(urlString) and urlparseValid | Attempts to return whether a given URL string is valid by checking
for the presence of the URL scheme and netloc using the urlparse
module, and then using a regex.
From http://stackoverflow.com/questions/7160737/ | Below is the instruction that describes the task:
### Input:
Attempts to return whether a given URL string is valid by checking
for the presence of the URL scheme and netloc using the urlparse
module, and then using a regex.
From http://stackoverflow.com/questions/7160737/
### Response:
def isUrl(urlString):
"""
Attempts to return whether a given URL string is valid by checking
for the presence of the URL scheme and netloc using the urlparse
module, and then using a regex.
From http://stackoverflow.com/questions/7160737/
"""
parsed = urlparse.urlparse(urlString)
urlparseValid = parsed.netloc != '' and parsed.scheme != ''
regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)'
r'+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
return regex.match(urlString) and urlparseValid |
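The record is Python 2 (`urlparse` as a top-level module); a hedged Python 3 rendering of the same check, with the match result coerced to `bool` so the function returns a proper boolean:

```python
import re
from urllib.parse import urlparse

URL_RE = re.compile(
    r'^(?:http|ftp)s?://'
    r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)'
    r'+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
    r'localhost|'
    r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
    r'(?::\d+)?'
    r'(?:/?|[/?]\S+)$', re.IGNORECASE)

def is_url(url_string):
    parsed = urlparse(url_string)
    return bool(URL_RE.match(url_string)) and bool(parsed.netloc and parsed.scheme)

assert is_url('https://example.com/path')
assert not is_url('not a url')
```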
def reindex(self, force=False, background=True):
"""Start jira re-indexing. Returns True if reindexing is in progress or not needed, or False.
If you call reindex() without any parameters it will perform a background reindex only if JIRA thinks it should do it.
:param force: reindex even if JIRA doesn't say this is needed, False by default.
:param background: reindex in background, slower but does not impact the users, defaults to True.
"""
# /secure/admin/IndexAdmin.jspa
# /secure/admin/jira/IndexProgress.jspa?taskId=1
if background:
indexingStrategy = 'background'
else:
indexingStrategy = 'stoptheworld'
url = self._options['server'] + '/secure/admin/jira/IndexReIndex.jspa'
r = self._session.get(url, headers=self._options['headers'])
if r.status_code == 503:
# logging.warning("JIRA returned 503, this could mean that a full reindex is in progress.")
return 503
if not r.text.find("To perform the re-index now, please go to the") and force is False:
return True
if r.text.find('All issues are being re-indexed'):
logging.warning("JIRA re-indexing is already running.")
return True # still reindexing is considered still a success
if r.text.find('To perform the re-index now, please go to the') or force:
r = self._session.post(url, headers=self._options['headers'],
params={"indexingStrategy": indexingStrategy, "reindex": "Re-Index"})
if r.text.find('All issues are being re-indexed') != -1:
return True
else:
logging.error("Failed to reindex jira, probably a bug.")
return False | Start jira re-indexing. Returns True if reindexing is in progress or not needed, or False.
If you call reindex() without any parameters it will perform a background reindex only if JIRA thinks it should do it.
:param force: reindex even if JIRA doesn't say this is needed, False by default.
:param background: reindex in background, slower but does not impact the users, defaults to True. | Below is the instruction that describes the task:
### Input:
Start jira re-indexing. Returns True if reindexing is in progress or not needed, or False.
If you call reindex() without any parameters it will perform a background reindex only if JIRA thinks it should do it.
:param force: reindex even if JIRA doesn't say this is needed, False by default.
:param background: reindex in background, slower but does not impact the users, defaults to True.
### Response:
def reindex(self, force=False, background=True):
"""Start jira re-indexing. Returns True if reindexing is in progress or not needed, or False.
If you call reindex() without any parameters it will perform a background reindex only if JIRA thinks it should do it.
:param force: reindex even if JIRA doesn't say this is needed, False by default.
:param background: reindex in background, slower but does not impact the users, defaults to True.
"""
# /secure/admin/IndexAdmin.jspa
# /secure/admin/jira/IndexProgress.jspa?taskId=1
if background:
indexingStrategy = 'background'
else:
indexingStrategy = 'stoptheworld'
url = self._options['server'] + '/secure/admin/jira/IndexReIndex.jspa'
r = self._session.get(url, headers=self._options['headers'])
if r.status_code == 503:
# logging.warning("JIRA returned 503, this could mean that a full reindex is in progress.")
return 503
if not r.text.find("To perform the re-index now, please go to the") and force is False:
return True
if r.text.find('All issues are being re-indexed'):
logging.warning("JIRA re-indexing is already running.")
return True # still reindexing is considered still a success
if r.text.find('To perform the re-index now, please go to the') or force:
r = self._session.post(url, headers=self._options['headers'],
params={"indexingStrategy": indexingStrategy, "reindex": "Re-Index"})
if r.text.find('All issues are being re-indexed') != -1:
return True
else:
logging.error("Failed to reindex jira, probably a bug.")
return False |
def plot_kurtosis(self, f_start=None, f_stop=None, if_id=0, **kwargs):
""" Plot kurtosis
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
kwargs: keyword args to be passed to matplotlib imshow()
"""
ax = plt.gca()
plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
# Using ascending frequency for all plots.
if self.header[b'foff'] < 0:
plot_data = plot_data[..., ::-1] # Reverse data
plot_f = plot_f[::-1]
try:
plot_kurtosis = scipy.stats.kurtosis(plot_data, axis=0, nan_policy='omit')
except:
plot_kurtosis = plot_data*0.0
plt.plot(plot_f, plot_kurtosis, **kwargs)
plt.ylabel("Kurtosis")
plt.xlabel("Frequency [MHz]")
plt.xlim(plot_f[0], plot_f[-1]) | Plot kurtosis
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
kwargs: keyword args to be passed to matplotlib imshow() | Below is the instruction that describes the task:
### Input:
Plot kurtosis
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
kwargs: keyword args to be passed to matplotlib imshow()
### Response:
def plot_kurtosis(self, f_start=None, f_stop=None, if_id=0, **kwargs):
""" Plot kurtosis
Args:
f_start (float): start frequency, in MHz
f_stop (float): stop frequency, in MHz
kwargs: keyword args to be passed to matplotlib imshow()
"""
ax = plt.gca()
plot_f, plot_data = self.grab_data(f_start, f_stop, if_id)
# Using ascending frequency for all plots.
if self.header[b'foff'] < 0:
plot_data = plot_data[..., ::-1] # Reverse data
plot_f = plot_f[::-1]
try:
plot_kurtosis = scipy.stats.kurtosis(plot_data, axis=0, nan_policy='omit')
except:
plot_kurtosis = plot_data*0.0
plt.plot(plot_f, plot_kurtosis, **kwargs)
plt.ylabel("Kurtosis")
plt.xlabel("Frequency [MHz]")
plt.xlim(plot_f[0], plot_f[-1]) |
def create_spot_requests(self,
price,
instance_type='default',
root_device_type='ebs',
size='default',
vol_type='gp2',
delete_on_termination=False,
timeout=None):
"""Request creation of one or more EC2 spot instances.
:param size:
:param vol_type:
:param delete_on_termination:
:param root_device_type: The type of the root device.
:type root_device_type: str
:param price: Max price to pay for spot instance per hour.
:type price: float
:param instance_type: A section name in amazon.json
:type instance_type: str
:param timeout: Seconds to keep the request open (cancelled if not fulfilled).
:type timeout: int
:return: List of requests created
:rtype: list
"""
name, size = self._get_default_name_size(instance_type, size)
if root_device_type == 'ebs':
self.images[instance_type]['block_device_map'] = \
self._configure_ebs_volume(vol_type, name, size, delete_on_termination)
valid_until = None
if timeout is not None:
valid_until = (datetime.datetime.now() + datetime.timedelta(seconds=timeout)).isoformat()
requests = self.ec2.request_spot_instances(price, valid_until=valid_until, **self.images[instance_type])
return [r.id for r in requests] | Request creation of one or more EC2 spot instances.
:param size:
:param vol_type:
:param delete_on_termination:
:param root_device_type: The type of the root device.
:type root_device_type: str
:param price: Max price to pay for spot instance per hour.
:type price: float
:param instance_type: A section name in amazon.json
:type instance_type: str
:param timeout: Seconds to keep the request open (cancelled if not fulfilled).
:type timeout: int
:return: List of requests created
:rtype: list | Below is the instruction that describes the task:
### Input:
Request creation of one or more EC2 spot instances.
:param size:
:param vol_type:
:param delete_on_termination:
:param root_device_type: The type of the root device.
:type root_device_type: str
:param price: Max price to pay for spot instance per hour.
:type price: float
:param instance_type: A section name in amazon.json
:type instance_type: str
:param timeout: Seconds to keep the request open (cancelled if not fulfilled).
:type timeout: int
:return: List of requests created
:rtype: list
### Response:
def create_spot_requests(self,
price,
instance_type='default',
root_device_type='ebs',
size='default',
vol_type='gp2',
delete_on_termination=False,
timeout=None):
"""Request creation of one or more EC2 spot instances.
:param size:
:param vol_type:
:param delete_on_termination:
:param root_device_type: The type of the root device.
:type root_device_type: str
:param price: Max price to pay for spot instance per hour.
:type price: float
:param instance_type: A section name in amazon.json
:type instance_type: str
:param timeout: Seconds to keep the request open (cancelled if not fulfilled).
:type timeout: int
:return: List of requests created
:rtype: list
"""
name, size = self._get_default_name_size(instance_type, size)
if root_device_type == 'ebs':
self.images[instance_type]['block_device_map'] = \
self._configure_ebs_volume(vol_type, name, size, delete_on_termination)
valid_until = None
if timeout is not None:
valid_until = (datetime.datetime.now() + datetime.timedelta(seconds=timeout)).isoformat()
requests = self.ec2.request_spot_instances(price, valid_until=valid_until, **self.images[instance_type])
return [r.id for r in requests] |
def update(self, other=None, **kwargs):
'''Takes the same arguments as the update method in the builtin dict
class. However, this version returns a new ImmutableDict instead of
modifying in-place.'''
copydict = ImmutableDict()
if other:
vallist = [(hash(key), (key, other[key])) for key in other]
else: vallist = []
if kwargs:
vallist += [(hash(key), (key, kwargs[key])) for key in kwargs]
copydict.tree = self.tree.multi_assoc(vallist)
copydict._length = iter_length(copydict.tree)
return copydict | Takes the same arguments as the update method in the builtin dict
class. However, this version returns a new ImmutableDict instead of
modifying in-place. | Below is the instruction that describes the task:
### Input:
Takes the same arguments as the update method in the builtin dict
class. However, this version returns a new ImmutableDict instead of
modifying in-place.
### Response:
def update(self, other=None, **kwargs):
'''Takes the same arguments as the update method in the builtin dict
class. However, this version returns a new ImmutableDict instead of
modifying in-place.'''
copydict = ImmutableDict()
if other:
vallist = [(hash(key), (key, other[key])) for key in other]
else: vallist = []
if kwargs:
vallist += [(hash(key), (key, kwargs[key])) for key in kwargs]
copydict.tree = self.tree.multi_assoc(vallist)
copydict._length = iter_length(copydict.tree)
return copydict |
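Because `update` returns a fresh `ImmutableDict`, the receiver is never mutated; a hedged usage sketch (lookup and `len` semantics assumed to mirror `dict`):

```python
d1 = ImmutableDict()
d2 = d1.update({'a': 1}, b=2)

assert len(d1) == 0                     # original untouched
assert d2['a'] == 1 and d2['b'] == 2    # new version holds both entries
```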
def netflowv9_defragment(plist, verb=1):
"""Process all NetflowV9/10 Packets to match IDs of the DataFlowsets
with the Headers
params:
- plist: the list of mixed NetflowV9/10 packets.
- verb: verbose print (0/1)
"""
if not isinstance(plist, (PacketList, list)):
plist = [plist]
# We need the whole packet to be dissected to access field def in
# NetflowFlowsetV9 or NetflowOptionsFlowsetV9/10
definitions = {}
definitions_opts = {}
ignored = set()
# Iterate through initial list
for pkt in plist:
_netflowv9_defragment_packet(pkt,
definitions,
definitions_opts,
ignored)
if conf.verb >= 1 and ignored:
warning("Ignored templateIDs (missing): %s" % list(ignored))
return plist | Process all NetflowV9/10 Packets to match IDs of the DataFlowsets
with the Headers
params:
- plist: the list of mixed NetflowV9/10 packets.
- verb: verbose print (0/1) | Below is the instruction that describes the task:
### Input:
Process all NetflowV9/10 Packets to match IDs of the DataFlowsets
with the Headers
params:
- plist: the list of mixed NetflowV9/10 packets.
- verb: verbose print (0/1)
### Response:
def netflowv9_defragment(plist, verb=1):
"""Process all NetflowV9/10 Packets to match IDs of the DataFlowsets
with the Headers
params:
- plist: the list of mixed NetflowV9/10 packets.
- verb: verbose print (0/1)
"""
if not isinstance(plist, (PacketList, list)):
plist = [plist]
# We need the whole packet to be dissected to access field def in
# NetflowFlowsetV9 or NetflowOptionsFlowsetV9/10
definitions = {}
definitions_opts = {}
ignored = set()
# Iterate through initial list
for pkt in plist:
_netflowv9_defragment_packet(pkt,
definitions,
definitions_opts,
ignored)
if conf.verb >= 1 and ignored:
warning("Ignored templateIDs (missing): %s" % list(ignored))
return plist |
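In Scapy this is typically run over a capture before inspecting data records, since a data flowset can only be decoded once its template has been seen; a sketch (the pcap path is hypothetical):

```python
from scapy.layers.netflow import netflowv9_defragment
from scapy.utils import rdpcap

pkts = rdpcap('netflow_v9.pcap')     # hypothetical capture file
pkts = netflowv9_defragment(pkts)    # bind templates to their data sets
pkts[0].show()
```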
def getch():
"""
get character. waiting for key
"""
try:
termios.tcsetattr(_fd, termios.TCSANOW, _new_settings)
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(_fd, termios.TCSADRAIN, _old_settings)
return ch | get character. waiting for key | Below is the instruction that describes the task:
### Input:
get character. waiting for key
### Response:
def getch():
"""
get character. waiting for key
"""
try:
termios.tcsetattr(_fd, termios.TCSANOW, _new_settings)
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(_fd, termios.TCSADRAIN, _old_settings)
return ch |
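The record relies on three module-level globals (`_fd`, `_old_settings`, `_new_settings`) that are not shown; a hedged sketch of the setup they imply, disabling canonical mode and echo so `read(1)` returns on a single keypress:

```python
import sys
import termios

_fd = sys.stdin.fileno()
_old_settings = termios.tcgetattr(_fd)
_new_settings = termios.tcgetattr(_fd)
# Index 3 is lflag; clear ICANON (line buffering) and ECHO.
_new_settings[3] = _new_settings[3] & ~(termios.ICANON | termios.ECHO)
```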
def score_samples(self, X, lengths=None):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
logprob : float
Log likelihood of ``X``.
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample in ``X``.
See Also
--------
score : Compute the log probability under the model.
decode : Find most likely state sequence corresponding to ``X``.
"""
check_is_fitted(self, "startprob_")
self._check()
X = check_array(X)
n_samples = X.shape[0]
logprob = 0
posteriors = np.zeros((n_samples, self.n_components))
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprobij, fwdlattice = self._do_forward_pass(framelogprob)
logprob += logprobij
bwdlattice = self._do_backward_pass(framelogprob)
posteriors[i:j] = self._compute_posteriors(fwdlattice, bwdlattice)
return logprob, posteriors | Compute the log probability under the model and compute posteriors.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
logprob : float
Log likelihood of ``X``.
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample in ``X``.
See Also
--------
score : Compute the log probability under the model.
decode : Find most likely state sequence corresponding to ``X``. | Below is the instruction that describes the task:
### Input:
Compute the log probability under the model and compute posteriors.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
logprob : float
Log likelihood of ``X``.
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample in ``X``.
See Also
--------
score : Compute the log probability under the model.
decode : Find most likely state sequence corresponding to ``X``.
### Response:
def score_samples(self, X, lengths=None):
"""Compute the log probability under the model and compute posteriors.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Feature matrix of individual samples.
lengths : array-like of integers, shape (n_sequences, ), optional
Lengths of the individual sequences in ``X``. The sum of
these should be ``n_samples``.
Returns
-------
logprob : float
Log likelihood of ``X``.
posteriors : array, shape (n_samples, n_components)
State-membership probabilities for each sample in ``X``.
See Also
--------
score : Compute the log probability under the model.
decode : Find most likely state sequence corresponding to ``X``.
"""
check_is_fitted(self, "startprob_")
self._check()
X = check_array(X)
n_samples = X.shape[0]
logprob = 0
posteriors = np.zeros((n_samples, self.n_components))
for i, j in iter_from_X_lengths(X, lengths):
framelogprob = self._compute_log_likelihood(X[i:j])
logprobij, fwdlattice = self._do_forward_pass(framelogprob)
logprob += logprobij
bwdlattice = self._do_backward_pass(framelogprob)
posteriors[i:j] = self._compute_posteriors(fwdlattice, bwdlattice)
return logprob, posteriors |
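This is the `hmmlearn` base-class implementation; in practice the method is reached through a concrete subclass. A sketch, assuming `hmmlearn` is installed:

```python
import numpy as np
from hmmlearn import hmm

X = np.random.default_rng(0).normal(size=(100, 2))   # toy observations
model = hmm.GaussianHMM(n_components=3, n_iter=10).fit(X)
logprob, posteriors = model.score_samples(X)
print(logprob, posteriors.shape)   # posteriors is (100, 3); rows sum to 1
```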
def wrap_rethink_errors(func_, *args, **kwargs):
"""
Wraps rethinkdb specific errors as builtin/Brain errors
:param func_: <function> to call
:param args: <tuple> positional arguments
:param kwargs: <dict> keyword arguments
:return: inherits from the called function
"""
try:
return func_(*args, **kwargs)
except WRAP_RETHINK_ERRORS as reql_err:
raise ValueError(str(reql_err)) | Wraps rethinkdb specific errors as builtin/Brain errors
:param func_: <function> to call
:param args: <tuple> positional arguments
:param kwargs: <dict> keyword arguments
:return: inherits from the called function | Below is the instruction that describes the task:
### Input:
Wraps rethinkdb specific errors as builtin/Brain errors
:param func_: <function> to call
:param args: <tuple> positional arguments
:param kwargs: <dict> keyword arguments
:return: inherits from the called function
### Response:
def wrap_rethink_errors(func_, *args, **kwargs):
"""
Wraps rethinkdb specific errors as builtin/Brain errors
:param func_: <function> to call
:param args: <tuple> positional arguments
:param kwargs: <dict> keyword arguments
:return: inherits from the called function
"""
try:
return func_(*args, **kwargs)
except WRAP_RETHINK_ERRORS as reql_err:
raise ValueError(str(reql_err)) |
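Because the wrapped callable is the first positional argument, the helper composes naturally with `functools.partial`; a hedged sketch with a hypothetical query function:

```python
from functools import partial

def risky_query(table_name):
    """Hypothetical function that may raise a rethinkdb ReqlError."""
    ...

safe_query = partial(wrap_rethink_errors, risky_query)
# Driver errors listed in WRAP_RETHINK_ERRORS now surface as ValueError:
# safe_query('jobs')
```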
def get_languages_from_model(app_label, model_label):
"""
Get the languages configured for the current model
:param model_label:
:param app_label:
:return:
"""
try:
mod_lan = TransModelLanguage.objects.filter(model='{} - {}'.format(app_label, model_label)).get()
languages = [lang.code for lang in mod_lan.languages.all()]
return languages
except TransModelLanguage.DoesNotExist:
return [] | Get the languages configured for the current model
:param model_label:
:param app_label:
:return: | Below is the instruction that describes the task:
### Input:
Get the languages configured for the current model
:param model_label:
:param app_label:
:return:
### Response:
def get_languages_from_model(app_label, model_label):
"""
Get the languages configured for the current model
:param model_label:
:param app_label:
:return:
"""
try:
mod_lan = TransModelLanguage.objects.filter(model='{} - {}'.format(app_label, model_label)).get()
languages = [lang.code for lang in mod_lan.languages.all()]
return languages
except TransModelLanguage.DoesNotExist:
return [] |
def getresponse(self):
"""Wait for and return a HTTP response.
The return value will be a :class:`HttpMessage`. When this method
returns only the response header has been read. The response body can
be read using :meth:`~gruvi.Stream.read` and similar methods on
the message :attr:`~HttpMessage.body`.
Note that if you use persistent connections (the default), it is
required that you read the entire body of each response. If you don't
then deadlocks may occur.
"""
if self._error:
raise compat.saved_exc(self._error)
elif self._transport is None:
raise HttpError('not connected')
message = self._queue.get(timeout=self._timeout)
if isinstance(message, Exception):
raise compat.saved_exc(message)
return message | Wait for and return a HTTP response.
The return value will be a :class:`HttpMessage`. When this method
returns only the response header has been read. The response body can
be read using :meth:`~gruvi.Stream.read` and similar methods on
the message :attr:`~HttpMessage.body`.
Note that if you use persistent connections (the default), it is
required that you read the entire body of each response. If you don't
then deadlocks may occur. | Below is the instruction that describes the task:
### Input:
Wait for and return a HTTP response.
The return value will be a :class:`HttpMessage`. When this method
returns only the response header has been read. The response body can
be read using :meth:`~gruvi.Stream.read` and similar methods on
the message :attr:`~HttpMessage.body`.
Note that if you use persistent connections (the default), it is
required that you read the entire body of each response. If you don't
then deadlocks may occur.
### Response:
def getresponse(self):
"""Wait for and return a HTTP response.
The return value will be a :class:`HttpMessage`. When this method
returns only the response header has been read. The response body can
be read using :meth:`~gruvi.Stream.read` and similar methods on
the message :attr:`~HttpMessage.body`.
Note that if you use persistent connections (the default), it is
required that you read the entire body of each response. If you don't
then deadlocks may occur.
"""
if self._error:
raise compat.saved_exc(self._error)
elif self._transport is None:
raise HttpError('not connected')
message = self._queue.get(timeout=self._timeout)
if isinstance(message, Exception):
raise compat.saved_exc(message)
return message |
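Editor's sketch (hypothetical): the names below -- `client`, connect(), request() -- are assumed from context and are not verified against the library's public API; only getresponse() is taken from the entry above.
# Hypothetical usage sketch of getresponse(); `client` is assumed.
client.connect(('example.com', 80))
client.request('GET', '/')
response = client.getresponse()   # returns once the response header is read
body = response.body.read()       # drain the body fully so a persistent
                                  # connection cannot deadlock (see note above)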
def InternalExchange(self, cmd, payload_in):
"""Sends and receives a message from the device."""
# make a copy because we destroy it below
self.logger.debug('payload: ' + str(list(payload_in)))
payload = bytearray()
payload[:] = payload_in
for _ in range(2):
self.InternalSend(cmd, payload)
ret_cmd, ret_payload = self.InternalRecv()
if ret_cmd == UsbHidTransport.U2FHID_ERROR:
if ret_payload == UsbHidTransport.ERR_CHANNEL_BUSY:
time.sleep(0.5)
continue
raise errors.HidError('Device error: %d' % int(ret_payload[0]))
elif ret_cmd != cmd:
raise errors.HidError('Command mismatch!')
return ret_payload
raise errors.HidError('Device Busy. Please retry') | Sends and receives a message from the device. | Below is the instruction that describes the task:
### Input:
Sends and receives a message from the device.
### Response:
def InternalExchange(self, cmd, payload_in):
"""Sends and receives a message from the device."""
# make a copy because we destroy it below
self.logger.debug('payload: ' + str(list(payload_in)))
payload = bytearray()
payload[:] = payload_in
for _ in range(2):
self.InternalSend(cmd, payload)
ret_cmd, ret_payload = self.InternalRecv()
if ret_cmd == UsbHidTransport.U2FHID_ERROR:
if ret_payload == UsbHidTransport.ERR_CHANNEL_BUSY:
time.sleep(0.5)
continue
raise errors.HidError('Device error: %d' % int(ret_payload[0]))
elif ret_cmd != cmd:
raise errors.HidError('Command mismatch!')
return ret_payload
raise errors.HidError('Device Busy. Please retry') |
def widgetValue( widget ):
"""
Returns the value for the inputted widget based on its type.
:param widget | <QWidget>
:return (<variant> value, <bool> success)
"""
for wtype in reversed(_widgetValueTypes):
if isinstance(widget, wtype[0]):
try:
return (wtype[1](widget), True)
except:
return (None, False)
return (None, False) | Returns the value for the inputted widget based on its type.
:param widget | <QWidget>
:return (<variant> value, <bool> success) | Below is the instruction that describes the task:
### Input:
Returns the value for the inputted widget based on its type.
:param widget | <QWidget>
:return (<variant> value, <bool> success)
### Response:
def widgetValue( widget ):
"""
Returns the value for the inputted widget based on its type.
:param widget | <QWidget>
:return (<variant> value, <bool> success)
"""
for wtype in reversed(_widgetValueTypes):
if isinstance(widget, wtype[0]):
try:
return (wtype[1](widget), True)
except:
return (None, False)
return (None, False) |
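The `_widgetValueTypes` registry itself is not shown in this entry. A minimal self-contained sketch of the same reversed-lookup pattern, with an invented registry, shows why later (more specific) registrations win:
# Self-contained sketch; the registry entries are invented for illustration.
_widgetValueTypes = [
    (object, str),             # generic fallback, registered first
    (int, lambda w: w * 2),    # more specific handler, registered later
]

def widgetValue(widget):
    # reversed(): handlers registered last are tried first
    for wtype in reversed(_widgetValueTypes):
        if isinstance(widget, wtype[0]):
            try:
                return (wtype[1](widget), True)
            except Exception:
                return (None, False)
    return (None, False)

print(widgetValue(3))      # (6, True)    -- matched the int handler
print(widgetValue('ab'))   # ('ab', True) -- fell through to the fallback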
def perform_command(self):
"""
Perform command and return the appropriate exit code.
:rtype: int
"""
if len(self.actual_arguments) < 1:
return self.print_help()
if self.has_option([u"-e", u"--examples"]):
return self.print_examples(False)
if self.has_option(u"--examples-all"):
return self.print_examples(True)
if self.has_option([u"--list-parameters"]):
return self.print_parameters()
parameter = self.has_option_with_value(u"--list-values")
if parameter is not None:
return self.print_values(parameter)
elif self.has_option(u"--list-values"):
return self.print_values(u"?")
# NOTE list() is needed for Python3, where keys() is not a list!
demo = self.has_option(list(self.DEMOS.keys()))
demo_parameters = u""
download_from_youtube = self.has_option([u"-y", u"--youtube"])
largest_audio = self.has_option(u"--largest-audio")
keep_audio = self.has_option(u"--keep-audio")
output_html = self.has_option(u"--output-html")
validate = not self.has_option(u"--skip-validator")
print_faster_rate = self.has_option(u"--faster-rate")
print_rates = self.has_option(u"--rate")
print_zero = self.has_option(u"--zero")
presets_word = self.has_option(u"--presets-word")
if demo:
validate = False
for key in self.DEMOS:
if self.has_option(key):
demo_parameters = self.DEMOS[key]
audio_file_path = demo_parameters[u"audio"]
text_file_path = demo_parameters[u"text"]
config_string = demo_parameters[u"config"]
sync_map_file_path = demo_parameters[u"syncmap"]
# TODO allow injecting rconf options directly from DEMOS options field
if key == u"--example-cewsubprocess":
self.rconf[RuntimeConfiguration.CEW_SUBPROCESS_ENABLED] = True
elif key == u"--example-ctw-espeak":
self.rconf[RuntimeConfiguration.TTS] = "custom"
self.rconf[RuntimeConfiguration.TTS_PATH] = self.CTW_ESPEAK
elif key == u"--example-ctw-speect":
self.rconf[RuntimeConfiguration.TTS] = "custom"
self.rconf[RuntimeConfiguration.TTS_PATH] = self.CTW_SPEECT
elif key == u"--example-festival":
self.rconf[RuntimeConfiguration.TTS] = "festival"
elif key == u"--example-mws":
self.rconf[RuntimeConfiguration.MFCC_WINDOW_LENGTH] = "1.500"
self.rconf[RuntimeConfiguration.MFCC_WINDOW_SHIFT] = "0.500"
elif key == u"--example-multilevel-tts":
self.rconf[RuntimeConfiguration.TTS_L1] = "festival"
self.rconf[RuntimeConfiguration.TTS_L2] = "festival"
self.rconf[RuntimeConfiguration.TTS_L3] = "espeak"
elif key == u"--example-words-festival-cache":
self.rconf[RuntimeConfiguration.TTS] = "festival"
self.rconf[RuntimeConfiguration.TTS_CACHE] = True
elif key == u"--example-faster-rate":
print_faster_rate = True
elif key == u"--example-no-zero":
print_zero = True
elif key == u"--example-py":
self.rconf[RuntimeConfiguration.C_EXTENSIONS] = False
elif key == u"--example-rate":
print_rates = True
elif key == u"--example-remove-nonspeech-rateaggressive":
print_rates = True
elif key == u"--example-youtube":
download_from_youtube = True
break
else:
if len(self.actual_arguments) < 4:
return self.print_help()
audio_file_path = self.actual_arguments[0]
text_file_path = self.actual_arguments[1]
config_string = self.actual_arguments[2]
sync_map_file_path = self.actual_arguments[3]
if presets_word:
self.print_info(u"Preset for word-level alignment")
self.rconf[RuntimeConfiguration.MFCC_MASK_NONSPEECH] = True
self.rconf[RuntimeConfiguration.MFCC_MASK_NONSPEECH_L3] = True
html_file_path = None
if output_html:
keep_audio = True
html_file_path = sync_map_file_path + u".html"
if download_from_youtube:
youtube_url = gf.safe_unicode(audio_file_path)
if (not download_from_youtube) and (not self.check_input_file(audio_file_path)):
return self.ERROR_EXIT_CODE
if not self.check_input_file(text_file_path):
return self.ERROR_EXIT_CODE
if not self.check_output_file(sync_map_file_path):
return self.ERROR_EXIT_CODE
if (html_file_path is not None) and (not self.check_output_file(html_file_path)):
return self.ERROR_EXIT_CODE
self.check_c_extensions()
if demo:
msg = []
msg.append(u"Running example task with arguments:")
if download_from_youtube:
msg.append(u" YouTube URL: %s" % youtube_url)
else:
msg.append(u" Audio file: %s" % audio_file_path)
msg.append(u" Text file: %s" % text_file_path)
msg.append(u" Config string: %s" % config_string)
msg.append(u" Sync map file: %s" % sync_map_file_path)
if len(demo_parameters[u"options"]) > 0:
msg.append(u" Options: %s" % demo_parameters[u"options"])
self.print_info(u"\n".join(msg))
if validate:
self.print_info(u"Validating config string (specify --skip-validator to bypass)...")
validator = Validator(logger=self.logger)
result = validator.check_configuration_string(config_string, is_job=False, external_name=True)
if not result.passed:
self.print_error(u"The given config string is not valid:")
self.print_generic(result.pretty_print())
return self.ERROR_EXIT_CODE
self.print_info(u"Validating config string... done")
if download_from_youtube:
try:
self.print_info(u"Downloading audio from '%s' ..." % youtube_url)
downloader = Downloader(logger=self.logger)
audio_file_path = downloader.audio_from_youtube(
youtube_url,
download=True,
output_file_path=None,
largest_audio=largest_audio
)
self.print_info(u"Downloading audio from '%s' ... done" % youtube_url)
except ImportError:
self.print_no_dependency_error()
return self.ERROR_EXIT_CODE
except Exception as exc:
self.print_error(u"An unexpected error occurred while downloading audio from YouTube:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
else:
audio_extension = gf.file_extension(audio_file_path)
if audio_extension.lower() not in AudioFile.FILE_EXTENSIONS:
self.print_warning(u"Your audio file path has extension '%s', which is uncommon for an audio file." % audio_extension)
self.print_warning(u"Attempting at executing your Task anyway.")
self.print_warning(u"If it fails, you might have swapped the first two arguments.")
self.print_warning(u"The audio file path should be the first argument, the text file path the second.")
try:
self.print_info(u"Creating task...")
task = Task(config_string, logger=self.logger)
task.audio_file_path_absolute = audio_file_path
task.text_file_path_absolute = text_file_path
task.sync_map_file_path_absolute = sync_map_file_path
self.print_info(u"Creating task... done")
except Exception as exc:
self.print_error(u"An unexpected error occurred while creating the task:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
try:
self.print_info(u"Executing task...")
executor = ExecuteTask(task=task, rconf=self.rconf, logger=self.logger)
executor.execute()
self.print_info(u"Executing task... done")
except Exception as exc:
self.print_error(u"An unexpected error occurred while executing the task:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
try:
self.print_info(u"Creating output sync map file...")
path = task.output_sync_map_file()
self.print_info(u"Creating output sync map file... done")
self.print_success(u"Created file '%s'" % path)
except Exception as exc:
self.print_error(u"An unexpected error occurred while writing the sync map file:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
if output_html:
try:
parameters = {}
parameters[gc.PPN_TASK_OS_FILE_FORMAT] = task.configuration["o_format"]
parameters[gc.PPN_TASK_OS_FILE_EAF_AUDIO_REF] = task.configuration["o_eaf_audio_ref"]
parameters[gc.PPN_TASK_OS_FILE_SMIL_AUDIO_REF] = task.configuration["o_smil_audio_ref"]
parameters[gc.PPN_TASK_OS_FILE_SMIL_PAGE_REF] = task.configuration["o_smil_page_ref"]
self.print_info(u"Creating output HTML file...")
task.sync_map.output_html_for_tuning(audio_file_path, html_file_path, parameters)
self.print_info(u"Creating output HTML file... done")
self.print_success(u"Created file '%s'" % html_file_path)
except Exception as exc:
self.print_error(u"An unexpected error occurred while writing the HTML file:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
if download_from_youtube:
if keep_audio:
self.print_info(u"Option --keep-audio set: keeping downloaded file '%s'" % audio_file_path)
else:
gf.delete_file(None, audio_file_path)
if print_zero:
zero_duration = [l for l in task.sync_map_leaves(SyncMapFragment.REGULAR) if l.begin == l.end]
if len(zero_duration) > 0:
self.print_warning(u"Fragments with zero duration:")
for fragment in zero_duration:
self.print_generic(u" %s" % (fragment.pretty_print))
if print_rates:
self.print_info(u"Fragments with rates:")
for fragment in task.sync_map_leaves(SyncMapFragment.REGULAR):
self.print_generic(u" %s\t%.3f" % (fragment.pretty_print, fragment.rate or 0.0))
if print_faster_rate:
max_rate = task.configuration["aba_rate_value"]
if max_rate is not None:
faster = [l for l in task.sync_map_leaves(SyncMapFragment.REGULAR) if l.rate >= max_rate + Decimal("0.001")]
if len(faster) > 0:
self.print_warning(u"Fragments with rate greater than %.3f:" % max_rate)
for fragment in faster:
self.print_generic(u" %s\t%.3f" % (fragment.pretty_print, fragment.rate or 0.0))
return self.NO_ERROR_EXIT_CODE | Perform command and return the appropriate exit code.
:rtype: int | Below is the instruction that describes the task:
### Input:
Perform command and return the appropriate exit code.
:rtype: int
### Response:
def perform_command(self):
"""
Perform command and return the appropriate exit code.
:rtype: int
"""
if len(self.actual_arguments) < 1:
return self.print_help()
if self.has_option([u"-e", u"--examples"]):
return self.print_examples(False)
if self.has_option(u"--examples-all"):
return self.print_examples(True)
if self.has_option([u"--list-parameters"]):
return self.print_parameters()
parameter = self.has_option_with_value(u"--list-values")
if parameter is not None:
return self.print_values(parameter)
elif self.has_option(u"--list-values"):
return self.print_values(u"?")
# NOTE list() is needed for Python3, where keys() is not a list!
demo = self.has_option(list(self.DEMOS.keys()))
demo_parameters = u""
download_from_youtube = self.has_option([u"-y", u"--youtube"])
largest_audio = self.has_option(u"--largest-audio")
keep_audio = self.has_option(u"--keep-audio")
output_html = self.has_option(u"--output-html")
validate = not self.has_option(u"--skip-validator")
print_faster_rate = self.has_option(u"--faster-rate")
print_rates = self.has_option(u"--rate")
print_zero = self.has_option(u"--zero")
presets_word = self.has_option(u"--presets-word")
if demo:
validate = False
for key in self.DEMOS:
if self.has_option(key):
demo_parameters = self.DEMOS[key]
audio_file_path = demo_parameters[u"audio"]
text_file_path = demo_parameters[u"text"]
config_string = demo_parameters[u"config"]
sync_map_file_path = demo_parameters[u"syncmap"]
# TODO allow injecting rconf options directly from DEMOS options field
if key == u"--example-cewsubprocess":
self.rconf[RuntimeConfiguration.CEW_SUBPROCESS_ENABLED] = True
elif key == u"--example-ctw-espeak":
self.rconf[RuntimeConfiguration.TTS] = "custom"
self.rconf[RuntimeConfiguration.TTS_PATH] = self.CTW_ESPEAK
elif key == u"--example-ctw-speect":
self.rconf[RuntimeConfiguration.TTS] = "custom"
self.rconf[RuntimeConfiguration.TTS_PATH] = self.CTW_SPEECT
elif key == u"--example-festival":
self.rconf[RuntimeConfiguration.TTS] = "festival"
elif key == u"--example-mws":
self.rconf[RuntimeConfiguration.MFCC_WINDOW_LENGTH] = "1.500"
self.rconf[RuntimeConfiguration.MFCC_WINDOW_SHIFT] = "0.500"
elif key == u"--example-multilevel-tts":
self.rconf[RuntimeConfiguration.TTS_L1] = "festival"
self.rconf[RuntimeConfiguration.TTS_L2] = "festival"
self.rconf[RuntimeConfiguration.TTS_L3] = "espeak"
elif key == u"--example-words-festival-cache":
self.rconf[RuntimeConfiguration.TTS] = "festival"
self.rconf[RuntimeConfiguration.TTS_CACHE] = True
elif key == u"--example-faster-rate":
print_faster_rate = True
elif key == u"--example-no-zero":
print_zero = True
elif key == u"--example-py":
self.rconf[RuntimeConfiguration.C_EXTENSIONS] = False
elif key == u"--example-rate":
print_rates = True
elif key == u"--example-remove-nonspeech-rateaggressive":
print_rates = True
elif key == u"--example-youtube":
download_from_youtube = True
break
else:
if len(self.actual_arguments) < 4:
return self.print_help()
audio_file_path = self.actual_arguments[0]
text_file_path = self.actual_arguments[1]
config_string = self.actual_arguments[2]
sync_map_file_path = self.actual_arguments[3]
if presets_word:
self.print_info(u"Preset for word-level alignment")
self.rconf[RuntimeConfiguration.MFCC_MASK_NONSPEECH] = True
self.rconf[RuntimeConfiguration.MFCC_MASK_NONSPEECH_L3] = True
html_file_path = None
if output_html:
keep_audio = True
html_file_path = sync_map_file_path + u".html"
if download_from_youtube:
youtube_url = gf.safe_unicode(audio_file_path)
if (not download_from_youtube) and (not self.check_input_file(audio_file_path)):
return self.ERROR_EXIT_CODE
if not self.check_input_file(text_file_path):
return self.ERROR_EXIT_CODE
if not self.check_output_file(sync_map_file_path):
return self.ERROR_EXIT_CODE
if (html_file_path is not None) and (not self.check_output_file(html_file_path)):
return self.ERROR_EXIT_CODE
self.check_c_extensions()
if demo:
msg = []
msg.append(u"Running example task with arguments:")
if download_from_youtube:
msg.append(u" YouTube URL: %s" % youtube_url)
else:
msg.append(u" Audio file: %s" % audio_file_path)
msg.append(u" Text file: %s" % text_file_path)
msg.append(u" Config string: %s" % config_string)
msg.append(u" Sync map file: %s" % sync_map_file_path)
if len(demo_parameters[u"options"]) > 0:
msg.append(u" Options: %s" % demo_parameters[u"options"])
self.print_info(u"\n".join(msg))
if validate:
self.print_info(u"Validating config string (specify --skip-validator to bypass)...")
validator = Validator(logger=self.logger)
result = validator.check_configuration_string(config_string, is_job=False, external_name=True)
if not result.passed:
self.print_error(u"The given config string is not valid:")
self.print_generic(result.pretty_print())
return self.ERROR_EXIT_CODE
self.print_info(u"Validating config string... done")
if download_from_youtube:
try:
self.print_info(u"Downloading audio from '%s' ..." % youtube_url)
downloader = Downloader(logger=self.logger)
audio_file_path = downloader.audio_from_youtube(
youtube_url,
download=True,
output_file_path=None,
largest_audio=largest_audio
)
self.print_info(u"Downloading audio from '%s' ... done" % youtube_url)
except ImportError:
self.print_no_dependency_error()
return self.ERROR_EXIT_CODE
except Exception as exc:
self.print_error(u"An unexpected error occurred while downloading audio from YouTube:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
else:
audio_extension = gf.file_extension(audio_file_path)
if audio_extension.lower() not in AudioFile.FILE_EXTENSIONS:
self.print_warning(u"Your audio file path has extension '%s', which is uncommon for an audio file." % audio_extension)
self.print_warning(u"Attempting at executing your Task anyway.")
self.print_warning(u"If it fails, you might have swapped the first two arguments.")
self.print_warning(u"The audio file path should be the first argument, the text file path the second.")
try:
self.print_info(u"Creating task...")
task = Task(config_string, logger=self.logger)
task.audio_file_path_absolute = audio_file_path
task.text_file_path_absolute = text_file_path
task.sync_map_file_path_absolute = sync_map_file_path
self.print_info(u"Creating task... done")
except Exception as exc:
self.print_error(u"An unexpected error occurred while creating the task:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
try:
self.print_info(u"Executing task...")
executor = ExecuteTask(task=task, rconf=self.rconf, logger=self.logger)
executor.execute()
self.print_info(u"Executing task... done")
except Exception as exc:
self.print_error(u"An unexpected error occurred while executing the task:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
try:
self.print_info(u"Creating output sync map file...")
path = task.output_sync_map_file()
self.print_info(u"Creating output sync map file... done")
self.print_success(u"Created file '%s'" % path)
except Exception as exc:
self.print_error(u"An unexpected error occurred while writing the sync map file:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
if output_html:
try:
parameters = {}
parameters[gc.PPN_TASK_OS_FILE_FORMAT] = task.configuration["o_format"]
parameters[gc.PPN_TASK_OS_FILE_EAF_AUDIO_REF] = task.configuration["o_eaf_audio_ref"]
parameters[gc.PPN_TASK_OS_FILE_SMIL_AUDIO_REF] = task.configuration["o_smil_audio_ref"]
parameters[gc.PPN_TASK_OS_FILE_SMIL_PAGE_REF] = task.configuration["o_smil_page_ref"]
self.print_info(u"Creating output HTML file...")
task.sync_map.output_html_for_tuning(audio_file_path, html_file_path, parameters)
self.print_info(u"Creating output HTML file... done")
self.print_success(u"Created file '%s'" % html_file_path)
except Exception as exc:
self.print_error(u"An unexpected error occurred while writing the HTML file:")
self.print_error(u"%s" % exc)
return self.ERROR_EXIT_CODE
if download_from_youtube:
if keep_audio:
self.print_info(u"Option --keep-audio set: keeping downloaded file '%s'" % audio_file_path)
else:
gf.delete_file(None, audio_file_path)
if print_zero:
zero_duration = [l for l in task.sync_map_leaves(SyncMapFragment.REGULAR) if l.begin == l.end]
if len(zero_duration) > 0:
self.print_warning(u"Fragments with zero duration:")
for fragment in zero_duration:
self.print_generic(u" %s" % (fragment.pretty_print))
if print_rates:
self.print_info(u"Fragments with rates:")
for fragment in task.sync_map_leaves(SyncMapFragment.REGULAR):
self.print_generic(u" %s\t%.3f" % (fragment.pretty_print, fragment.rate or 0.0))
if print_faster_rate:
max_rate = task.configuration["aba_rate_value"]
if max_rate is not None:
faster = [l for l in task.sync_map_leaves(SyncMapFragment.REGULAR) if l.rate >= max_rate + Decimal("0.001")]
if len(faster) > 0:
self.print_warning(u"Fragments with rate greater than %.3f:" % max_rate)
for fragment in faster:
self.print_generic(u" %s\t%.3f" % (fragment.pretty_print, fragment.rate or 0.0))
return self.NO_ERROR_EXIT_CODE |
def as_cnpj(numero):
"""Formata um número de CNPJ. Se o número não for um CNPJ válido apenas
retorna o argumento sem qualquer modificação.
"""
_num = digitos(numero)
if is_cnpj(_num):
return '{}.{}.{}/{}-{}'.format(
_num[:2], _num[2:5], _num[5:8], _num[8:12], _num[12:])
return numero | Formats a CNPJ number. If the number is not a valid CNPJ, simply
returns the argument without any modification. | Below is the instruction that describes the task:
### Input:
Formats a CNPJ number. If the number is not a valid CNPJ, simply
returns the argument without any modification.
### Response:
def as_cnpj(numero):
"""Formata um número de CNPJ. Se o número não for um CNPJ válido apenas
retorna o argumento sem qualquer modificação.
"""
_num = digitos(numero)
if is_cnpj(_num):
return '{}.{}.{}/{}-{}'.format(
_num[:2], _num[2:5], _num[5:8], _num[8:12], _num[12:])
return numero |
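A hedged usage sketch, assuming the unshown helpers behave as their names suggest (digitos() strips non-digits, is_cnpj() validates the check digits); 11444777000161 is a commonly used valid test CNPJ:
# Hedged sketch -- digitos()/is_cnpj() are assumed, not shown above.
print(as_cnpj('11444777000161'))   # -> '11.444.777/0001-61'
print(as_cnpj('12345'))            # -> '12345' (not a valid CNPJ, unchanged)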
def direction(self, direction):
"""
set the direction
"""
if not isinstance(direction, str):
raise TypeError("direction must be of type str")
accepted_values = ['i', 'x', 'y', 'z', 's', 'c']
if direction not in accepted_values:
raise ValueError("must be one of: {}".format(accepted_values))
self._direction = direction | set the direction | Below is the instruction that describes the task:
### Input:
set the direction
### Response:
def direction(self, direction):
"""
set the direction
"""
if not isinstance(direction, str):
raise TypeError("direction must be of type str")
accepted_values = ['i', 'x', 'y', 'z', 's', 'c']
if direction not in accepted_values:
raise ValueError("must be one of: {}".format(accepted_values))
self._direction = direction |
def recvSecurityList(self, data):
"""
Read security list packet sent from server to client
@param data: Stream that contains well formed packet
"""
securityList = []
while data.dataLen() > 0:
securityElement = UInt8()
data.readType(securityElement)
securityList.append(securityElement)
#select high security level
for s in securityList:
if s.value in [SecurityType.NONE, SecurityType.VNC] and s > self._securityLevel:
self._securityLevel = s
break
#send back security level chosen
self.send(self._securityLevel)
if self._securityLevel.value == SecurityType.VNC:
self.expect(16, self.recvVNCChallenge)
else:
self.expect(4, self.recvSecurityResult) | Read security list packet sent from server to client
@param data: Stream that contains well formed packet | Below is the instruction that describes the task:
### Input:
Read security list packet sent from server to client
@param data: Stream that contains well formed packet
### Response:
def recvSecurityList(self, data):
"""
Read security list packet sent from server to client
@param data: Stream that contains well formed packet
"""
securityList = []
while data.dataLen() > 0:
securityElement = UInt8()
data.readType(securityElement)
securityList.append(securityElement)
#select high security level
for s in securityList:
if s.value in [SecurityType.NONE, SecurityType.VNC] and s > self._securityLevel:
self._securityLevel = s
break
#send back security level choosen
self.send(self._securityLevel)
if self._securityLevel.value == SecurityType.VNC:
self.expect(16, self.recvVNCChallenge)
else:
self.expect(4, self.recvSecurityResult) |
def coerce_date_dict(date_dict):
"""
given a dictionary (presumed to be from request.GET) it returns a dict
that represents a date. It will return from year down to seconds until one
is not found. i.e. if year, month, and seconds are in the dictionary, only
year and month will be returned, the rest defaulting to their minimums. If none
of the parts are found return an empty dict.
"""
keys = ['year', 'month', 'day', 'hour', 'minute', 'second']
ret_val = {
'year': 1,
'month': 1,
'day': 1,
'hour': 0,
'minute': 0,
'second': 0}
modified = False
for key in keys:
try:
ret_val[key] = int(date_dict[key])
modified = True
except KeyError:
break
return modified and ret_val or {} | given a dictionary (presumed to be from request.GET) it returns a dict
that represents a date. It will return from year down to seconds until one
is not found. i.e. if year, month, and seconds are in the dictionary, only
year and month will be returned, the rest defaulting to their minimums. If none
of the parts are found return an empty dict. | Below is the instruction that describes the task:
### Input:
given a dictionary (presumed to be from request.GET) it returns a dict
that represents a date. It will return from year down to seconds until one
is not found. i.e. if year, month, and seconds are in the dictionary, only
year and month will be returned, the rest defaulting to their minimums. If none
of the parts are found return an empty dict.
### Response:
def coerce_date_dict(date_dict):
"""
given a dictionary (presumed to be from request.GET) it returns a dict
that represents a date. It will return from year down to seconds until one
is not found. i.e. if year, month, and seconds are in the dictionary, only
year and month will be returned, the rest defaulting to their minimums. If none
of the parts are found return an empty dict.
"""
keys = ['year', 'month', 'day', 'hour', 'minute', 'second']
ret_val = {
'year': 1,
'month': 1,
'day': 1,
'hour': 0,
'minute': 0,
'second': 0}
modified = False
for key in keys:
try:
ret_val[key] = int(date_dict[key])
modified = True
except KeyError:
break
return modified and ret_val or {} |
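The function is self-contained, so its short-circuit behaviour is easy to demonstrate; the scan stops at the first missing key, so a later key such as 'second' is ignored:
# Runnable against the function defined above.
print(coerce_date_dict({'year': '2018', 'month': '7', 'second': '30'}))
# -> {'year': 2018, 'month': 7, 'day': 1, 'hour': 0, 'minute': 0, 'second': 0}
#    'second' is skipped because the loop breaks at the missing 'day' key.
print(coerce_date_dict({'second': '30'}))
# -> {}  ('year' is missing, so nothing was modified)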
def _onDecorator(self, name, line, pos, absPosition):
"""Memorizes a function or a class decorator"""
# A class or a function must be on the top of the stack
d = Decorator(name, line, pos, absPosition)
if self.__lastDecorators is None:
self.__lastDecorators = [d]
else:
self.__lastDecorators.append(d) | Memorizes a function or a class decorator | Below is the instruction that describes the task:
### Input:
Memorizes a function or a class decorator
### Response:
def _onDecorator(self, name, line, pos, absPosition):
"""Memorizes a function or a class decorator"""
# A class or a function must be on the top of the stack
d = Decorator(name, line, pos, absPosition)
if self.__lastDecorators is None:
self.__lastDecorators = [d]
else:
self.__lastDecorators.append(d) |
def lsf_stable(filt):
"""
Tests whether the given filter is stable or not by using the Line Spectral
Frequencies (LSF) of the given filter. Needs NumPy.
Parameters
----------
filt :
A LTI filter as a LinearFilter object.
Returns
-------
A boolean that is true only when the LSF values from forward and backward
prediction filters alternate. Critical stability (both forward and backward
filters have the same LSF value) is seen as an instability, and returns
False.
See Also
--------
lsf :
Gets the Line Spectral Frequencies from a filter. Needs NumPy.
parcor_stable :
Tests filter stability with partial correlation coefficients (reflection
coefficients).
"""
lsf_data = lsf(ZFilter(filt.denpoly))
return all(a < b for a, b in blocks(lsf_data, size=2, hop=1)) | Tests whether the given filter is stable or not by using the Line Spectral
Frequencies (LSF) of the given filter. Needs NumPy.
Parameters
----------
filt :
A LTI filter as a LinearFilter object.
Returns
-------
A boolean that is true only when the LSF values from forward and backward
prediction filters alternate. Critical stability (both forward and backward
filters have the same LSF value) is seen as an instability, and returns
False.
See Also
--------
lsf :
Gets the Line Spectral Frequencies from a filter. Needs NumPy.
parcor_stable :
Tests filter stability with partial correlation coefficients (reflection
coefficients). | Below is the instruction that describes the task:
### Input:
Tests whether the given filter is stable or not by using the Line Spectral
Frequencies (LSF) of the given filter. Needs NumPy.
Parameters
----------
filt :
A LTI filter as a LinearFilter object.
Returns
-------
A boolean that is true only when the LSF values from forward and backward
prediction filters alternate. Critical stability (both forward and backward
filters have the same LSF value) is seen as an instability, and returns
False.
See Also
--------
lsf :
Gets the Line Spectral Frequencies from a filter. Needs NumPy.
parcor_stable :
Tests filter stability with partial correlation coefficients (reflection
coefficients).
### Response:
def lsf_stable(filt):
"""
Tests whether the given filter is stable or not by using the Line Spectral
Frequencies (LSF) of the given filter. Needs NumPy.
Parameters
----------
filt :
A LTI filter as a LinearFilter object.
Returns
-------
A boolean that is true only when the LSF values from forward and backward
prediction filters alternate. Critical stability (both forward and backward
filters have the same LSF value) is seen as an instability, and returns
False.
See Also
--------
lsf :
Gets the Line Spectral Frequencies from a filter. Needs NumPy.
parcor_stable :
Tests filter stability with partial correlation coefficients (reflection
coefficients).
"""
lsf_data = lsf(ZFilter(filt.denpoly))
return all(a < b for a, b in blocks(lsf_data, size=2, hop=1)) |
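With size=2 and hop=1, blocks() yields consecutive pairs, so the final line is a strict-ascending check. A self-contained sketch of that test, independent of the audio library:
# Sketch of the alternation test: consecutive pairs must all be increasing,
# i.e. the LSF sequence must be strictly sorted.
def alternates(seq):
    return all(a < b for a, b in zip(seq, seq[1:]))

print(alternates([0.1, 0.4, 0.9, 1.3]))  # True  -> stable
print(alternates([0.1, 0.9, 0.4]))       # False -> unstable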
def get_admins(self, account_id, params={}):
"""
Return a list of the admins in the account.
https://canvas.instructure.com/doc/api/admins.html#method.admins.index
"""
url = ADMINS_API.format(account_id)
admins = []
for data in self._get_paged_resource(url, params=params):
admins.append(CanvasAdmin(data=data))
return admins | Return a list of the admins in the account.
https://canvas.instructure.com/doc/api/admins.html#method.admins.index | Below is the instruction that describes the task:
### Input:
Return a list of the admins in the account.
https://canvas.instructure.com/doc/api/admins.html#method.admins.index
### Response:
def get_admins(self, account_id, params={}):
"""
Return a list of the admins in the account.
https://canvas.instructure.com/doc/api/admins.html#method.admins.index
"""
url = ADMINS_API.format(account_id)
admins = []
for data in self._get_paged_resource(url, params=params):
admins.append(CanvasAdmin(data=data))
return admins |
def mi(mi, iq=None, pl=None):
# pylint: disable=redefined-outer-name
"""
This function is a wrapper for
:meth:`~pywbem.WBEMConnection.ModifyInstance`.
Modify the property values of an instance.
Parameters:
mi (:class:`~pywbem.CIMInstance`):
Modified instance, also indicating its instance path.
The properties defined in this object specify the new property
values for the instance to be modified. Missing properties
(relative to the class declaration) and properties provided with
a value of `None` will be set to NULL.
iq (:class:`py:bool`):
IncludeQualifiers flag: Modify instance qualifiers as specified in
the instance.
`None` will cause the server default of `True` to be used.
Deprecated in :term:`DSP0200`: Clients cannot rely on qualifiers to
be modified by this operation.
pl (:term:`string` or :term:`py:iterable` of :term:`string`):
PropertyList: Names of properties to be modified. An empty iterable
indicates to modify no properties. If `None`, all properties exposed
by the instance will be modified.
"""
CONN.ModifyInstance(mi,
IncludeQualifiers=iq,
PropertyList=pl) | This function is a wrapper for
:meth:`~pywbem.WBEMConnection.ModifyInstance`.
Modify the property values of an instance.
Parameters:
mi (:class:`~pywbem.CIMInstance`):
Modified instance, also indicating its instance path.
The properties defined in this object specify the new property
values for the instance to be modified. Missing properties
(relative to the class declaration) and properties provided with
a value of `None` will be set to NULL.
iq (:class:`py:bool`):
IncludeQualifiers flag: Modify instance qualifiers as specified in
the instance.
`None` will cause the server default of `True` to be used.
Deprecated in :term:`DSP0200`: Clients cannot rely on qualifiers to
be modified by this operation.
pl (:term:`string` or :term:`py:iterable` of :term:`string`):
PropertyList: Names of properties to be modified. An empty iterable
indicates to modify no properties. If `None`, all properties exposed
by the instance will be modified. | Below is the instruction that describes the task:
### Input:
This function is a wrapper for
:meth:`~pywbem.WBEMConnection.ModifyInstance`.
Modify the property values of an instance.
Parameters:
mi (:class:`~pywbem.CIMInstance`):
Modified instance, also indicating its instance path.
The properties defined in this object specify the new property
values for the instance to be modified. Missing properties
(relative to the class declaration) and properties provided with
a value of `None` will be set to NULL.
iq (:class:`py:bool`):
IncludeQualifiers flag: Modify instance qualifiers as specified in
the instance.
`None` will cause the server default of `True` to be used.
Deprecated in :term:`DSP0200`: Clients cannot rely on qualifiers to
be modified by this operation.
pl (:term:`string` or :term:`py:iterable` of :term:`string`):
PropertyList: Names of properties to be modified. An empty iterable
indicates to modify no properties. If `None`, all properties exposed
by the instance will be modified.
### Response:
def mi(mi, iq=None, pl=None):
# pylint: disable=redefined-outer-name
"""
This function is a wrapper for
:meth:`~pywbem.WBEMConnection.ModifyInstance`.
Modify the property values of an instance.
Parameters:
mi (:class:`~pywbem.CIMInstance`):
Modified instance, also indicating its instance path.
The properties defined in this object specify the new property
values for the instance to be modified. Missing properties
(relative to the class declaration) and properties provided with
a value of `None` will be set to NULL.
iq (:class:`py:bool`):
IncludeQualifiers flag: Modify instance qualifiers as specified in
the instance.
`None` will cause the server default of `True` to be used.
Deprecated in :term:`DSP0200`: Clients cannot rely on qualifiers to
be modified by this operation.
pl (:term:`string` or :term:`py:iterable` of :term:`string`):
PropertyList: Names of properties to be modified. An empty iterable
indicates to modify no properties. If `None`, all properties exposed
by the instance will be modified.
"""
CONN.ModifyInstance(mi,
IncludeQualifiers=iq,
PropertyList=pl) |
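A hedged usage sketch (kept as comments): the instance path and property name are invented for illustration, and only GetInstance on the same CONN object is assumed:
# inst = CONN.GetInstance(some_instance_path)   # fetch, then edit locally
# inst['ElementName'] = 'new-name'              # hypothetical property
# mi(inst, pl=['ElementName'])                  # push back only that property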
def user_organization_membership_create(self, user_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/organization_memberships#create-membership"
api_path = "/api/v2/users/{user_id}/organization_memberships.json"
api_path = api_path.format(user_id=user_id)
return self.call(api_path, method="POST", data=data, **kwargs) | https://developer.zendesk.com/rest_api/docs/core/organization_memberships#create-membership | Below is the the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/core/organization_memberships#create-membership
### Response:
def user_organization_membership_create(self, user_id, data, **kwargs):
"https://developer.zendesk.com/rest_api/docs/core/organization_memberships#create-membership"
api_path = "/api/v2/users/{user_id}/organization_memberships.json"
api_path = api_path.format(user_id=user_id)
return self.call(api_path, method="POST", data=data, **kwargs) |
def parse_instancepath(parser, event, node):
#pylint: disable=unused-argument
"""Parse the CIM/XML INSTANCEPATH element and return an
instancename
<!ELEMENT INSTANCEPATH (NAMESPACEPATH, INSTANCENAME)>
"""
(next_event, next_node) = six.next(parser)
if not _is_start(next_event, next_node, 'NAMESPACEPATH'):
raise ParseError('Expecting NAMESPACEPATH')
host, namespacepath = parse_namespacepath(parser, next_event, next_node)
(next_event, next_node) = six.next(parser)
if not _is_start(next_event, next_node, 'INSTANCENAME'):
print(next_event, next_node)
raise ParseError('Expecting INSTANCENAME')
instancename = parse_instancename(parser, next_event, next_node)
instancename.host = host
instancename.namespace = namespacepath
return instancename | Parse the CIM/XML INSTANCEPATH element and return an
instancename
<!ELEMENT INSTANCEPATH (NAMESPACEPATH, INSTANCENAME)> | Below is the instruction that describes the task:
### Input:
Parse the CIM/XML INSTANCEPATH element and return an
instancename
<!ELEMENT INSTANCEPATH (NAMESPACEPATH, INSTANCENAME)>
### Response:
def parse_instancepath(parser, event, node):
#pylint: disable=unused-argument
"""Parse the CIM/XML INSTANCEPATH element and return an
instancename
<!ELEMENT INSTANCEPATH (NAMESPACEPATH, INSTANCENAME)>
"""
(next_event, next_node) = six.next(parser)
if not _is_start(next_event, next_node, 'NAMESPACEPATH'):
raise ParseError('Expecting NAMESPACEPATH')
host, namespacepath = parse_namespacepath(parser, next_event, next_node)
(next_event, next_node) = six.next(parser)
if not _is_start(next_event, next_node, 'INSTANCENAME'):
print(next_event, next_node)
raise ParseError('Expecting INSTANCENAME')
instancename = parse_instancename(parser, next_event, next_node)
instancename.host = host
instancename.namespace = namespacepath
return instancename |
def geo_distance(a, b):
"""Distance between two geo points in km. (p.x = long, p.y = lat)"""
a_y = radians(a.y)
b_y = radians(b.y)
delta_x = radians(a.x - b.x)
cos_x = (sin(a_y) * sin(b_y) +
cos(a_y) * cos(b_y) * cos(delta_x))
return acos(cos_x) * earth_radius_km | Distance between two geo points in km. (p.x = long, p.y = lat) | Below is the instruction that describes the task:
### Input:
Distance between two geo points in km. (p.x = long, p.y = lat)
### Response:
def geo_distance(a, b):
"""Distance between two geo points in km. (p.x = long, p.y = lat)"""
a_y = radians(a.y)
b_y = radians(b.y)
delta_x = radians(a.x - b.x)
cos_x = (sin(a_y) * sin(b_y) +
cos(a_y) * cos(b_y) * cos(delta_x))
return acos(cos_x) * earth_radius_km |
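A self-contained sketch of the same spherical-law-of-cosines computation; the Point type and earth_radius_km constant are not shown in the entry, so both are assumed here:
from collections import namedtuple
from math import radians, sin, cos, acos

Point = namedtuple('Point', 'x y')  # x = longitude, y = latitude, as above
earth_radius_km = 6371.0            # assumed mean Earth radius

def geo_distance(a, b):
    a_y, b_y = radians(a.y), radians(b.y)
    delta_x = radians(a.x - b.x)
    cos_x = sin(a_y) * sin(b_y) + cos(a_y) * cos(b_y) * cos(delta_x)
    return acos(cos_x) * earth_radius_km

london, paris = Point(-0.1276, 51.5072), Point(2.3522, 48.8566)
print(round(geo_distance(london, paris)))  # ~344 (km)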
def _poll(self) -> None:
"""Check the status of the wrapped running subprocess.
Note:
This should only be called on currently-running tasks.
"""
if self._subprocess is None:
raise SublemonLifetimeError(
'Attempted to poll a non-active subprocess')
elif self._subprocess.returncode is not None:
self._exit_code = self._subprocess.returncode
self._done_running_evt.set()
self._server._running_set.remove(self)
self._server._sem.release() | Check the status of the wrapped running subprocess.
Note:
This should only be called on currently-running tasks. | Below is the the instruction that describes the task:
### Input:
Check the status of the wrapped running subprocess.
Note:
This should only be called on currently-running tasks. | Below is the instruction that describes the task:
### Response:
def _poll(self) -> None:
"""Check the status of the wrapped running subprocess.
Note:
This should only be called on currently-running tasks.
"""
if self._subprocess is None:
raise SublemonLifetimeError(
'Attempted to poll a non-active subprocess')
elif self._subprocess.returncode is not None:
self._exit_code = self._subprocess.returncode
self._done_running_evt.set()
self._server._running_set.remove(self)
self._server._sem.release() |
def get_templates(self):
'''list templates in the builder bundle library. If a name is provided,
look it up
'''
base = 'https://singularityhub.github.io/builders'
base = self._get_and_update_setting('SREGISTRY_BUILDER_REPO', base)
base = "%s/configs.json" %base
return self._get(base) | list templates in the builder bundle library. If a name is provided,
look it up | Below is the instruction that describes the task:
### Input:
list templates in the builder bundle library. If a name is provided,
look it up
### Response:
def get_templates(self):
'''list templates in the builder bundle library. If a name is provided,
look it up
'''
base = 'https://singularityhub.github.io/builders'
base = self._get_and_update_setting('SREGISTRY_BUILDER_REPO', base)
base = "%s/configs.json" %base
return self._get(base) |
def multiplicity(self):
"""
Returns the multiplicity of a defect site within the structure (needed for concentration analysis)
"""
sga = SpacegroupAnalyzer(self.bulk_structure)
periodic_struc = sga.get_symmetrized_structure()
poss_deflist = sorted(
periodic_struc.get_sites_in_sphere(self.site.coords, 2, include_index=True), key=lambda x: x[1])
defindex = poss_deflist[0][2]
equivalent_sites = periodic_struc.find_equivalent_sites(self.bulk_structure[defindex])
return len(equivalent_sites) | Returns the multiplicity of a defect site within the structure (needed for concentration analysis) | Below is the instruction that describes the task:
### Input:
Returns the multiplicity of a defect site within the structure (needed for concentration analysis)
### Response:
def multiplicity(self):
"""
Returns the multiplicity of a defect site within the structure (needed for concentration analysis)
"""
sga = SpacegroupAnalyzer(self.bulk_structure)
periodic_struc = sga.get_symmetrized_structure()
poss_deflist = sorted(
periodic_struc.get_sites_in_sphere(self.site.coords, 2, include_index=True), key=lambda x: x[1])
defindex = poss_deflist[0][2]
equivalent_sites = periodic_struc.find_equivalent_sites(self.bulk_structure[defindex])
return len(equivalent_sites) |
def next_offsets(self):
# type: (Descriptor) -> Offsets
"""Retrieve the next offsets
:param Descriptor self: this
:rtype: Offsets
:return: upload offsets
"""
resume_bytes = self._resume()
with self._meta_lock:
if self._chunk_num >= self._total_chunks:
return None, resume_bytes
if self._offset + self._chunk_size > self._ase.size:
num_bytes = self._ase.size - self._offset
else:
num_bytes = self._chunk_size
chunk_num = self._chunk_num
range_start = self._offset
range_end = self._offset + num_bytes - 1
self._offset += num_bytes
self._chunk_num += 1
if self._ase.is_encrypted and self._offset >= self._ase.size:
pad = True
else:
pad = False
return Offsets(
chunk_num=chunk_num,
num_bytes=num_bytes,
range_start=range_start,
range_end=range_end,
pad=pad,
), resume_bytes | Retrieve the next offsets
:param Descriptor self: this
:rtype: Offsets
:return: upload offsets | Below is the instruction that describes the task:
### Input:
Retrieve the next offsets
:param Descriptor self: this
:rtype: Offsets
:return: upload offsets
### Response:
def next_offsets(self):
# type: (Descriptor) -> Offsets
"""Retrieve the next offsets
:param Descriptor self: this
:rtype: Offsets
:return: upload offsets
"""
resume_bytes = self._resume()
with self._meta_lock:
if self._chunk_num >= self._total_chunks:
return None, resume_bytes
if self._offset + self._chunk_size > self._ase.size:
num_bytes = self._ase.size - self._offset
else:
num_bytes = self._chunk_size
chunk_num = self._chunk_num
range_start = self._offset
range_end = self._offset + num_bytes - 1
self._offset += num_bytes
self._chunk_num += 1
if self._ase.is_encrypted and self._offset >= self._ase.size:
pad = True
else:
pad = False
return Offsets(
chunk_num=chunk_num,
num_bytes=num_bytes,
range_start=range_start,
range_end=range_end,
pad=pad,
), resume_bytes |
def fetchmany(self, *args, **kwargs):
"""
Analogous to :any:`sqlite3.Cursor.fetchmany`.
Works only in single cursor mode.
"""
if not self.single_cursor_mode:
raise S3MError("Calling Connection.fetchmany() while not in single cursor mode")
return self._cursor.fetchmany(*args, **kwargs) | Analogous to :any:`sqlite3.Cursor.fetchmany`.
Works only in single cursor mode. | Below is the instruction that describes the task:
### Input:
Analogous to :any:`sqlite3.Cursor.fetchmany`.
Works only in single cursor mode.
### Response:
def fetchmany(self, *args, **kwargs):
"""
Analogous to :any:`sqlite3.Cursor.fetchmany`.
Works only in single cursor mode.
"""
if not self.single_cursor_mode:
raise S3MError("Calling Connection.fetchmany() while not in single cursor mode")
return self._cursor.fetchmany(*args, **kwargs) |
def compare_adjs(self, word1, word2):
"""
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as adjectives
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
"""
return self._plequal(word1, word2, self.plural_adj) | compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as adjectives
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise | Below is the instruction that describes the task:
### Input:
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as adjectives
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
### Response:
def compare_adjs(self, word1, word2):
"""
compare word1 and word2 for equality regardless of plurality
word1 and word2 are to be treated as adjectives
return values:
eq - the strings are equal
p:s - word1 is the plural of word2
s:p - word2 is the plural of word1
p:p - word1 and word2 are two different plural forms of the one word
False - otherwise
"""
return self._plequal(word1, word2, self.plural_adj) |
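This method matches the public API of the inflect package's engine; assuming that is the source, a short demonstration:
import inflect  # assumption: the entry above is inflect's engine method

p = inflect.engine()
print(p.compare_adjs('this', 'these'))  # 's:p' -- 'these' is the plural of 'this'
print(p.compare_adjs('red', 'red'))     # 'eq'
print(p.compare_adjs('red', 'blue'))    # False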
def coerce(self, value):
"""Convert text values into integer values.
Args:
value (str or int): The value to coerce.
Raises:
TypeError: If the value is not an int or string.
ValueError: If the value is not int or an acceptable value.
Returns:
int: The integer value represented.
"""
if isinstance(value, int) or isinstance(value, compat.long):
return value
return int(value) | Convert text values into integer values.
Args:
value (str or int): The value to coerce.
Raises:
TypeError: If the value is not an int or string.
ValueError: If the value is not int or an acceptable value.
Returns:
int: The integer value represented. | Below is the instruction that describes the task:
### Input:
Convert text values into integer values.
Args:
value (str or int): The value to coerce.
Raises:
TypeError: If the value is not an int or string.
ValueError: If the value is not int or an acceptable value.
Returns:
int: The integer value represented.
### Response:
def coerce(self, value):
"""Convert text values into integer values.
Args:
value (str or int): The value to coerce.
Raises:
TypeError: If the value is not an int or string.
ValueError: If the value is not int or an acceptable value.
Returns:
int: The integer value represented.
"""
if isinstance(value, int) or isinstance(value, compat.long):
return value
return int(value) |
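A hedged sketch (kept as comments) of the coercion rules; `opt` is a hypothetical instance of the option class above:
# opt.coerce(7)      # -> 7 (ints, and longs on Python 2, pass straight through)
# opt.coerce('42')   # -> 42
# opt.coerce('4.2')  # raises ValueError: invalid literal for int() with base 10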
def save(self, path):
"""
Writes file to a particular location
This won't work for cloud environments like Google's App Engine, so use with caution
and ensure you catch exceptions so you can provide informed feedback.
prestans does not mask File IO exceptions so your handler can respond better.
"""
file_handle = open(path, 'wb')
file_handle.write(self._file_contents)
file_handle.close() | Writes file to a particular location
This won't work for cloud environments like Google's App Engine, so use with caution
and ensure you catch exceptions so you can provide informed feedback.
prestans does not mask File IO exceptions so your handler can respond better. | Below is the instruction that describes the task:
### Input:
Writes file to a particular location
This won't work for cloud environments like Google's App Engine, so use with caution
and ensure you catch exceptions so you can provide informed feedback.
prestans does not mask File IO exceptions so your handler can respond better.
### Response:
def save(self, path):
"""
Writes file to a particular location
This won't work for cloud environments like Google's App Engine, so use with caution
and ensure you catch exceptions so you can provide informed feedback.
prestans does not mask File IO exceptions so your handler can respond better.
"""
file_handle = open(path, 'wb')
file_handle.write(self._file_contents)
file_handle.close() |
def wait_for(self, predicate, timeout=None):
"""Like :meth:`wait` but additionally for *predicate* to be true.
The *predicate* argument must be a callable that takes no arguments.
Its result is interpreted as a boolean value.
"""
if not is_locked(self._lock):
raise RuntimeError('lock is not locked')
hub = get_hub()
try:
with switch_back(timeout, lock=thread_lock(self._lock)) as switcher:
handle = add_callback(self, switcher, predicate)
# See the comment in Lock.acquire() why it is OK to release the
# lock here before calling hub.switch().
# Also if this is a reentrant lock make sure it is fully released.
state = release_save(self._lock)
hub.switch()
except BaseException as e:
with self._lock:
remove_callback(self, handle)
if e is switcher.timeout:
return False
raise
finally:
acquire_restore(self._lock, state)
return True | Like :meth:`wait` but additionally waits for *predicate* to be true.
The *predicate* argument must be a callable that takes no arguments.
Its result is interpreted as a boolean value. | Below is the instruction that describes the task:
### Input:
Like :meth:`wait` but additionally waits for *predicate* to be true.
The *predicate* argument must be a callable that takes no arguments.
Its result is interpreted as a boolean value.
### Response:
def wait_for(self, predicate, timeout=None):
"""Like :meth:`wait` but additionally for *predicate* to be true.
The *predicate* argument must be a callable that takes no arguments.
Its result is interpreted as a boolean value.
"""
if not is_locked(self._lock):
raise RuntimeError('lock is not locked')
hub = get_hub()
try:
with switch_back(timeout, lock=thread_lock(self._lock)) as switcher:
handle = add_callback(self, switcher, predicate)
# See the comment in Lock.acquire() why it is OK to release the
# lock here before calling hub.switch().
# Also if this is a reentrant lock make sure it is fully released.
state = release_save(self._lock)
hub.switch()
except BaseException as e:
with self._lock:
remove_callback(self, handle)
if e is switcher.timeout:
return False
raise
finally:
acquire_restore(self._lock, state)
return True |
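The semantics mirror the standard library's threading.Condition.wait_for, so a self-contained stdlib sketch (not this library's API) illustrates the same wait-with-predicate pattern:
import threading

cond = threading.Condition()
items = []

def consumer():
    with cond:
        # Block until the predicate holds, or give up after 5 seconds.
        if cond.wait_for(lambda: items, timeout=5.0):
            print('got', items.pop())
        else:
            print('timed out')

t = threading.Thread(target=consumer)
t.start()
with cond:
    items.append(42)
    cond.notify()
t.join()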
def _parse_extra(self, fp):
""" Parse and store the config comments and create maps for dot notion lookup """
comment = ''
section = ''
fp.seek(0)
for line in fp:
line = line.rstrip()
if not line:
if comment:
comment += '\n'
continue
if line.startswith('#'): # Comment
comment += line + '\n'
continue
if line.startswith('['): # Section
section = line.strip('[]')
self._add_dot_key(section)
if comment:
self._comments[section] = comment.rstrip()
elif CONFIG_KEY_RE.match(line): # Config
key = line.split('=', 1)[0].strip()
self._add_dot_key(section, key)
if comment:
self._comments[(section, key)] = comment.rstrip()
comment = ''
if comment:
self._comments[self.LAST_COMMENT_KEY] = comment | Parse and store the config comments and create maps for dot notation lookup | Below is the instruction that describes the task:
### Input:
Parse and store the config comments and create maps for dot notation lookup
### Response:
def _parse_extra(self, fp):
""" Parse and store the config comments and create maps for dot notion lookup """
comment = ''
section = ''
fp.seek(0)
for line in fp:
line = line.rstrip()
if not line:
if comment:
comment += '\n'
continue
if line.startswith('#'): # Comment
comment += line + '\n'
continue
if line.startswith('['): # Section
section = line.strip('[]')
self._add_dot_key(section)
if comment:
self._comments[section] = comment.rstrip()
elif CONFIG_KEY_RE.match(line): # Config
key = line.split('=', 1)[0].strip()
self._add_dot_key(section, key)
if comment:
self._comments[(section, key)] = comment.rstrip()
comment = ''
if comment:
self._comments[self.LAST_COMMENT_KEY] = comment |
def session(self):
"""
This is what you should use to make requests. It will authenticate for you.
:return: requests.sessions.Session
"""
if not self._session:
self._session = requests.Session()
self._session.headers.update(dict(Authorization='Bearer {0}'.format(self.token)))
return self._session | This is what you should use to make requests. It will authenticate for you.
:return: requests.sessions.Session | Below is the instruction that describes the task:
### Input:
This is what you should use to make requests. It will authenticate for you.
:return: requests.sessions.Session
### Response:
def session(self):
"""
This is what you should use to make requests. It will authenticate for you.
:return: requests.sessions.Session
"""
if not self._session:
self._session = requests.Session()
self._session.headers.update(dict(Authorization='Bearer {0}'.format(self.token)))
return self._session |
def notify_listeners(self, msg_type, params):
"""Send a message to all the observers."""
for c in self.listeners:
c.notify(msg_type, params) | Send a message to all the observers. | Below is the instruction that describes the task:
### Input:
Send a message to all the observers.
### Response:
def notify_listeners(self, msg_type, params):
"""Send a message to all the observers."""
for c in self.listeners:
c.notify(msg_type, params) |
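A minimal self-contained sketch of the same observer fan-out; Bus and Printer are invented names:
class Bus:
    def __init__(self):
        self.listeners = []
    def notify_listeners(self, msg_type, params):
        for c in self.listeners:
            c.notify(msg_type, params)

class Printer:
    def notify(self, msg_type, params):
        print(msg_type, params)

bus = Bus()
bus.listeners.append(Printer())
bus.notify_listeners('status', {'ok': True})   # prints: status {'ok': True}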
def net_fluxes(sources, sinks, msm, for_committors=None):
"""
Computes the transition path theory net flux matrix.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
msm : msmbuilder.MarkovStateModel
MSM fit to data.
for_committors : np.ndarray, optional
The forward committors associated with `sources`, `sinks`, and `tprob`.
If not provided, is calculated from scratch. If provided, `sources`
and `sinks` are ignored.
Returns
-------
net_flux : np.ndarray
The net flux matrix
See Also
--------
fluxes
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016.
"""
flux_matrix = fluxes(sources, sinks, msm, for_committors=for_committors)
net_flux = flux_matrix - flux_matrix.T
net_flux[np.where(net_flux < 0)] = 0.0
return net_flux | Computes the transition path theory net flux matrix.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
msm : msmbuilder.MarkovStateModel
MSM fit to data.
for_committors : np.ndarray, optional
The forward committors associated with `sources`, `sinks`, and `tprob`.
If not provided, is calculated from scratch. If provided, `sources`
and `sinks` are ignored.
Returns
-------
net_flux : np.ndarray
The net flux matrix
See Also
--------
fluxes
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016. | Below is the instruction that describes the task:
### Input:
Computes the transition path theory net flux matrix.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
msm : msmbuilder.MarkovStateModel
MSM fit to data.
for_committors : np.ndarray, optional
The forward committors associated with `sources`, `sinks`, and `tprob`.
If not provided, is calculated from scratch. If provided, `sources`
and `sinks` are ignored.
Returns
-------
net_flux : np.ndarray
The net flux matrix
See Also
--------
fluxes
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016.
### Response:
def net_fluxes(sources, sinks, msm, for_committors=None):
"""
Computes the transition path theory net flux matrix.
Parameters
----------
sources : array_like, int
The set of unfolded/reactant states.
sinks : array_like, int
The set of folded/product states.
msm : msmbuilder.MarkovStateModel
MSM fit to data.
for_committors : np.ndarray, optional
The forward committors associated with `sources`, `sinks`, and `tprob`.
If not provided, is calculated from scratch. If provided, `sources`
and `sinks` are ignored.
Returns
-------
net_flux : np.ndarray
The net flux matrix
See Also
--------
fluxes
References
----------
.. [1] Weinan, E. and Vanden-Eijnden, E. Towards a theory of
transition paths. J. Stat. Phys. 123, 503-523 (2006).
.. [2] Metzner, P., Schutte, C. & Vanden-Eijnden, E.
Transition path theory for Markov jump processes.
Multiscale Model. Simul. 7, 1192-1219 (2009).
.. [3] Berezhkovskii, A., Hummer, G. & Szabo, A. Reactive
flux and folding pathways in network models of
coarse-grained protein dynamics. J. Chem. Phys.
130, 205102 (2009).
.. [4] Noe, Frank, et al. "Constructing the equilibrium ensemble of folding
pathways from short off-equilibrium simulations." PNAS 106.45 (2009):
19011-19016.
"""
flux_matrix = fluxes(sources, sinks, msm, for_committors=for_committors)
net_flux = flux_matrix - flux_matrix.T
net_flux[np.where(net_flux < 0)] = 0.0
return net_flux |
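A minimal usage sketch for the record above, assuming a fitted msmbuilder MarkovStateModel; `discrete_trajectories` is a hypothetical list of integer-labeled state sequences:
from msmbuilder.msm import MarkovStateModel

msm = MarkovStateModel(lag_time=10)
msm.fit(discrete_trajectories)  # discrete_trajectories is assumed to exist
net_flux = net_fluxes(sources=[0], sinks=[5], msm=msm)
# net_flux is (n_states, n_states); only the positive direction of each net flux is kept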
def vocab_account_type(instance):
"""Ensure a user-account objects' 'account-type' property is from the
account-type-ov vocabulary.
"""
for key, obj in instance['objects'].items():
if 'type' in obj and obj['type'] == 'user-account':
try:
acct_type = obj['account_type']
except KeyError:
continue
if acct_type not in enums.ACCOUNT_TYPE_OV:
yield JSONError("Object '%s' is a User Account Object "
"with an 'account_type' of '%s', which is not a "
"value in the account-type-ov vocabulary."
% (key, acct_type), instance['id'], 'account-type') | Ensure a user-account object's 'account-type' property is from the
account-type-ov vocabulary. | Below is the instruction that describes the task:
### Input:
Ensure a user-account object's 'account-type' property is from the
account-type-ov vocabulary.
### Response:
def vocab_account_type(instance):
"""Ensure a user-account objects' 'account-type' property is from the
account-type-ov vocabulary.
"""
for key, obj in instance['objects'].items():
if 'type' in obj and obj['type'] == 'user-account':
try:
acct_type = obj['account_type']
except KeyError:
continue
if acct_type not in enums.ACCOUNT_TYPE_OV:
yield JSONError("Object '%s' is a User Account Object "
"with an 'account_type' of '%s', which is not a "
"value in the account-type-ov vocabulary."
% (key, acct_type), instance['id'], 'account-type') |
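An illustrative call, assuming the enums module and JSONError class from the same validator package are importable; the bundle below is a hypothetical minimal instance:
instance = {
    'id': 'bundle--0001',
    'objects': {'0': {'type': 'user-account', 'account_type': 'not-in-vocab'}},
}
for error in vocab_account_type(instance):
    print(error)  # yields one JSONError because the account_type is outside account-type-ov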
def _build_bst_from_sorted_values(sorted_values):
"""Recursively build a perfect BST from odd number of sorted values.
:param sorted_values: Odd number of sorted values.
:type sorted_values: [int | float]
:return: Root node of the BST.
:rtype: binarytree.Node
"""
if len(sorted_values) == 0:
return None
mid_index = len(sorted_values) // 2
root = Node(sorted_values[mid_index])
root.left = _build_bst_from_sorted_values(sorted_values[:mid_index])
root.right = _build_bst_from_sorted_values(sorted_values[mid_index + 1:])
return root | Recursively build a perfect BST from odd number of sorted values.
:param sorted_values: Odd number of sorted values.
:type sorted_values: [int | float]
:return: Root node of the BST.
:rtype: binarytree.Node | Below is the instruction that describes the task:
### Input:
Recursively build a perfect BST from odd number of sorted values.
:param sorted_values: Odd number of sorted values.
:type sorted_values: [int | float]
:return: Root node of the BST.
:rtype: binarytree.Node
### Response:
def _build_bst_from_sorted_values(sorted_values):
"""Recursively build a perfect BST from odd number of sorted values.
:param sorted_values: Odd number of sorted values.
:type sorted_values: [int | float]
:return: Root node of the BST.
:rtype: binarytree.Node
"""
if len(sorted_values) == 0:
return None
mid_index = len(sorted_values) // 2
root = Node(sorted_values[mid_index])
root.left = _build_bst_from_sorted_values(sorted_values[:mid_index])
root.right = _build_bst_from_sorted_values(sorted_values[mid_index + 1:])
return root |
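A quick sketch of the recursion on seven sorted values (Node and its value attribute come from the binarytree version this record targets, assumed here):
root = _build_bst_from_sorted_values([1, 2, 3, 4, 5, 6, 7])
print(root.value)        # 4 -- the median becomes the root
print(root.left.value)   # 2, with children 1 and 3
print(root.right.value)  # 6, with children 5 and 7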
def _make_tuple(x):
"""TF has an obnoxious habit of being lenient with single vs tuple."""
if isinstance(x, prettytensor.PrettyTensor):
if x.is_sequence():
return tuple(x.sequence)
else:
return (x.tensor,)
elif isinstance(x, tuple):
return x
elif (isinstance(x, collections.Sequence) and
not isinstance(x, six.string_types)):
return tuple(x)
else:
return (x,) | TF has an obnoxious habit of being lenient with single vs tuple. | Below is the instruction that describes the task:
### Input:
TF has an obnoxious habit of being lenient with single vs tuple.
### Response:
def _make_tuple(x):
"""TF has an obnoxious habit of being lenient with single vs tuple."""
if isinstance(x, prettytensor.PrettyTensor):
if x.is_sequence():
return tuple(x.sequence)
else:
return (x.tensor,)
elif isinstance(x, tuple):
return x
elif (isinstance(x, collections.Sequence) and
not isinstance(x, six.string_types)):
return tuple(x)
else:
return (x,) |
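A few behavior checks for the normalizer above (the PrettyTensor branch needs a real prettytensor object and is not exercised here):
print(_make_tuple((1, 2)))  # (1, 2) -- tuples pass through
print(_make_tuple([1, 2]))  # (1, 2) -- other sequences are converted
print(_make_tuple('ab'))    # ('ab',) -- strings are deliberately not treated as sequences
print(_make_tuple(7))       # (7,)   -- scalars are wrapped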
def two_phase_dP_acceleration(m, D, xi, xo, alpha_i, alpha_o, rho_li, rho_gi,
rho_lo=None, rho_go=None):
r'''This function handles calculation of two-phase liquid-gas pressure drop
due to acceleration for flow inside channels. This is a discrete
calculation for a segment with a known difference in quality (and ideally
known inlet and outlet pressures so density dependence can be included).
.. math::
\Delta P_{acc} = G^2\left\{\left[\frac{(1-x_o)^2}{\rho_{l,o}
(1-\alpha_o)} + \frac{x_o^2}{\rho_{g,o}\alpha_o} \right]
- \left[\frac{(1-x_i)^2}{\rho_{l,i}(1-\alpha_i)}
+ \frac{x_i^2}{\rho_{g,i}\alpha_i} \right]\right\}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
D : float
Diameter of pipe, [m]
xi : float
Quality of fluid at inlet, [-]
xo : float
Quality of fluid at outlet, [-]
alpha_i : float
Void fraction at inlet (area of gas / total area of channel), [-]
alpha_o : float
Void fraction at outlet (area of gas / total area of channel), [-]
rho_li : float
Liquid phase density at inlet, [kg/m^3]
rho_gi : float
Gas phase density at inlet, [kg/m^3]
rho_lo : float, optional
Liquid phase density at outlet, [kg/m^3]
rho_go : float, optional
Gas phase density at outlet, [kg/m^3]
Returns
-------
dP : float
Acceleration component of pressure drop for two-phase flow, [Pa]
Notes
-----
The use of different gas and liquid phase densities at the inlet and outlet
    is optional; the outlet densities will be assumed to be those of
    the inlet if they are not specified.
    There is a continuous variant of this method which can be integrated over,
    at the expense of speed. The differential form of this is as follows
([1]_, [3]_):
.. math::
- \left(\frac{d P}{dz}\right)_{acc} = G^2 \frac{d}{dz} \left[\frac{
(1-x)^2}{\rho_l(1-\alpha)} + \frac{x^2}{\rho_g\alpha}\right]
Examples
--------
>>> two_phase_dP_acceleration(m=1, D=0.1, xi=0.372, xo=0.557, rho_li=827.1,
... rho_gi=3.919, alpha_i=0.992, alpha_o=0.996)
706.8560377214725
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Awad, M. M., and Y. S. Muzychka. "Effective Property Models for
Homogeneous Two-Phase Flows." Experimental Thermal and Fluid Science 33,
no. 1 (October 1, 2008): 106-13.
doi:10.1016/j.expthermflusci.2008.07.006.
.. [3] Kim, Sung-Min, and Issam Mudawar. "Review of Databases and
Predictive Methods for Pressure Drop in Adiabatic, Condensing and
Boiling Mini/Micro-Channel Flows." International Journal of Heat and
Mass Transfer 77 (October 2014): 74-97.
doi:10.1016/j.ijheatmasstransfer.2014.04.035.
'''
G = 4*m/(pi*D*D)
if rho_lo is None:
rho_lo = rho_li
if rho_go is None:
rho_go = rho_gi
in_term = (1.-xi)**2/(rho_li*(1.-alpha_i)) + xi*xi/(rho_gi*alpha_i)
out_term = (1.-xo)**2/(rho_lo*(1.-alpha_o)) + xo*xo/(rho_go*alpha_o)
return G*G*(out_term - in_term) | r'''This function handles calculation of two-phase liquid-gas pressure drop
due to acceleration for flow inside channels. This is a discrete
calculation for a segment with a known difference in quality (and ideally
known inlet and outlet pressures so density dependence can be included).
.. math::
\Delta P_{acc} = G^2\left\{\left[\frac{(1-x_o)^2}{\rho_{l,o}
(1-\alpha_o)} + \frac{x_o^2}{\rho_{g,o}\alpha_o} \right]
- \left[\frac{(1-x_i)^2}{\rho_{l,i}(1-\alpha_i)}
+ \frac{x_i^2}{\rho_{g,i}\alpha_i} \right]\right\}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
D : float
Diameter of pipe, [m]
xi : float
Quality of fluid at inlet, [-]
xo : float
Quality of fluid at outlet, [-]
alpha_i : float
Void fraction at inlet (area of gas / total area of channel), [-]
alpha_o : float
Void fraction at outlet (area of gas / total area of channel), [-]
rho_li : float
Liquid phase density at inlet, [kg/m^3]
rho_gi : float
Gas phase density at inlet, [kg/m^3]
rho_lo : float, optional
Liquid phase density at outlet, [kg/m^3]
rho_go : float, optional
Gas phase density at outlet, [kg/m^3]
Returns
-------
dP : float
Acceleration component of pressure drop for two-phase flow, [Pa]
Notes
-----
The use of different gas and liquid phase densities at the inlet and outlet
is optional; the outlet densities will be assumed to be those of
the inlet if they are not specified.
There is a continuous variant of this method which can be integrated over,
at the expense of speed. The differential form of this is as follows
([1]_, [3]_):
.. math::
- \left(\frac{d P}{dz}\right)_{acc} = G^2 \frac{d}{dz} \left[\frac{
(1-x)^2}{\rho_l(1-\alpha)} + \frac{x^2}{\rho_g\alpha}\right]
Examples
--------
>>> two_phase_dP_acceleration(m=1, D=0.1, xi=0.372, xo=0.557, rho_li=827.1,
... rho_gi=3.919, alpha_i=0.992, alpha_o=0.996)
706.8560377214725
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Awad, M. M., and Y. S. Muzychka. "Effective Property Models for
Homogeneous Two-Phase Flows." Experimental Thermal and Fluid Science 33,
no. 1 (October 1, 2008): 106-13.
doi:10.1016/j.expthermflusci.2008.07.006.
.. [3] Kim, Sung-Min, and Issam Mudawar. "Review of Databases and
Predictive Methods for Pressure Drop in Adiabatic, Condensing and
Boiling Mini/Micro-Channel Flows." International Journal of Heat and
Mass Transfer 77 (October 2014): 74-97.
doi:10.1016/j.ijheatmasstransfer.2014.04.035. | Below is the instruction that describes the task:
### Input:
r'''This function handles calculation of two-phase liquid-gas pressure drop
due to acceleration for flow inside channels. This is a discrete
calculation for a segment with a known difference in quality (and ideally
known inlet and outlet pressures so density dependence can be included).
.. math::
\Delta P_{acc} = G^2\left\{\left[\frac{(1-x_o)^2}{\rho_{l,o}
(1-\alpha_o)} + \frac{x_o^2}{\rho_{g,o}\alpha_o} \right]
- \left[\frac{(1-x_i)^2}{\rho_{l,i}(1-\alpha_i)}
+ \frac{x_i^2}{\rho_{g,i}\alpha_i} \right]\right\}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
D : float
Diameter of pipe, [m]
xi : float
Quality of fluid at inlet, [-]
xo : float
Quality of fluid at outlet, [-]
alpha_i : float
Void fraction at inlet (area of gas / total area of channel), [-]
alpha_o : float
Void fraction at outlet (area of gas / total area of channel), [-]
rho_li : float
Liquid phase density at inlet, [kg/m^3]
rho_gi : float
Gas phase density at inlet, [kg/m^3]
rho_lo : float, optional
Liquid phase density at outlet, [kg/m^3]
rho_go : float, optional
Gas phase density at outlet, [kg/m^3]
Returns
-------
dP : float
Acceleration component of pressure drop for two-phase flow, [Pa]
Notes
-----
The use of different gas and liquid phase densities at the inlet and outlet
is optional; the outlet densities will be assumed to be those of
the inlet if they are not specified.
There is a continuous variant of this method which can be integrated over,
at the expense of speed. The differential form of this is as follows
([1]_, [3]_):
.. math::
- \left(\frac{d P}{dz}\right)_{acc} = G^2 \frac{d}{dz} \left[\frac{
(1-x)^2}{\rho_l(1-\alpha)} + \frac{x^2}{\rho_g\alpha}\right]
Examples
--------
>>> two_phase_dP_acceleration(m=1, D=0.1, xi=0.372, xo=0.557, rho_li=827.1,
... rho_gi=3.919, alpha_i=0.992, alpha_o=0.996)
706.8560377214725
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Awad, M. M., and Y. S. Muzychka. "Effective Property Models for
Homogeneous Two-Phase Flows." Experimental Thermal and Fluid Science 33,
no. 1 (October 1, 2008): 106-13.
doi:10.1016/j.expthermflusci.2008.07.006.
.. [3] Kim, Sung-Min, and Issam Mudawar. "Review of Databases and
Predictive Methods for Pressure Drop in Adiabatic, Condensing and
Boiling Mini/Micro-Channel Flows." International Journal of Heat and
Mass Transfer 77 (October 2014): 74-97.
doi:10.1016/j.ijheatmasstransfer.2014.04.035.
### Response:
def two_phase_dP_acceleration(m, D, xi, xo, alpha_i, alpha_o, rho_li, rho_gi,
rho_lo=None, rho_go=None):
r'''This function handles calculation of two-phase liquid-gas pressure drop
due to acceleration for flow inside channels. This is a discrete
calculation for a segment with a known difference in quality (and ideally
known inlet and outlet pressures so density dependence can be included).
.. math::
\Delta P_{acc} = G^2\left\{\left[\frac{(1-x_o)^2}{\rho_{l,o}
(1-\alpha_o)} + \frac{x_o^2}{\rho_{g,o}\alpha_o} \right]
- \left[\frac{(1-x_i)^2}{\rho_{l,i}(1-\alpha_i)}
+ \frac{x_i^2}{\rho_{g,i}\alpha_i} \right]\right\}
Parameters
----------
m : float
Mass flow rate of fluid, [kg/s]
D : float
Diameter of pipe, [m]
xi : float
Quality of fluid at inlet, [-]
xo : float
Quality of fluid at outlet, [-]
alpha_i : float
Void fraction at inlet (area of gas / total area of channel), [-]
alpha_o : float
Void fraction at outlet (area of gas / total area of channel), [-]
rho_li : float
Liquid phase density at inlet, [kg/m^3]
rho_gi : float
Gas phase density at inlet, [kg/m^3]
rho_lo : float, optional
Liquid phase density at outlet, [kg/m^3]
rho_go : float, optional
Gas phase density at outlet, [kg/m^3]
Returns
-------
dP : float
Acceleration component of pressure drop for two-phase flow, [Pa]
Notes
-----
The use of different gas and liquid phase densities at the inlet and outlet
    is optional; the outlet densities will be assumed to be those of
    the inlet if they are not specified.
    There is a continuous variant of this method which can be integrated over,
    at the expense of speed. The differential form of this is as follows
([1]_, [3]_):
.. math::
- \left(\frac{d P}{dz}\right)_{acc} = G^2 \frac{d}{dz} \left[\frac{
(1-x)^2}{\rho_l(1-\alpha)} + \frac{x^2}{\rho_g\alpha}\right]
Examples
--------
>>> two_phase_dP_acceleration(m=1, D=0.1, xi=0.372, xo=0.557, rho_li=827.1,
... rho_gi=3.919, alpha_i=0.992, alpha_o=0.996)
706.8560377214725
References
----------
.. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat
Transfer, 3E. New York: McGraw-Hill, 1998.
.. [2] Awad, M. M., and Y. S. Muzychka. "Effective Property Models for
Homogeneous Two-Phase Flows." Experimental Thermal and Fluid Science 33,
no. 1 (October 1, 2008): 106-13.
doi:10.1016/j.expthermflusci.2008.07.006.
.. [3] Kim, Sung-Min, and Issam Mudawar. "Review of Databases and
Predictive Methods for Pressure Drop in Adiabatic, Condensing and
Boiling Mini/Micro-Channel Flows." International Journal of Heat and
Mass Transfer 77 (October 2014): 74-97.
doi:10.1016/j.ijheatmasstransfer.2014.04.035.
'''
G = 4*m/(pi*D*D)
if rho_lo is None:
rho_lo = rho_li
if rho_go is None:
rho_go = rho_gi
in_term = (1.-xi)**2/(rho_li*(1.-alpha_i)) + xi*xi/(rho_gi*alpha_i)
out_term = (1.-xo)**2/(rho_lo*(1.-alpha_o)) + xo*xo/(rho_go*alpha_o)
return G*G*(out_term - in_term) |
def disconnectMsToNet(Facility_presence=0, UserUser_presence=0,
SsVersionIndicator_presence=0):
"""Disconnect Section 9.3.7.2"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0x25) # 00100101
c = Cause()
packet = a / b / c
    if Facility_presence == 1:
d = FacilityHdr(ieiF=0x1C, eightBitF=0x0)
packet = packet / d
    if UserUser_presence == 1:
e = UserUserHdr(ieiUU=0x7E, eightBitUU=0x0)
packet = packet / e
    if SsVersionIndicator_presence == 1:
f = SsVersionIndicatorHdr(ieiSVI=0x7F, eightBitSVI=0x0)
packet = packet / f
return packet | Disconnect Section 9.3.7.2 | Below is the instruction that describes the task:
### Input:
Disconnect Section 9.3.7.2
### Response:
def disconnectMsToNet(Facility_presence=0, UserUser_presence=0,
SsVersionIndicator_presence=0):
"""Disconnect Section 9.3.7.2"""
a = TpPd(pd=0x3)
b = MessageType(mesType=0x25) # 00100101
c = Cause()
packet = a / b / c
    if Facility_presence == 1:
d = FacilityHdr(ieiF=0x1C, eightBitF=0x0)
packet = packet / d
    if UserUser_presence == 1:
e = UserUserHdr(ieiUU=0x7E, eightBitUU=0x0)
packet = packet / e
    if SsVersionIndicator_presence == 1:
f = SsVersionIndicatorHdr(ieiSVI=0x7F, eightBitSVI=0x0)
packet = packet / f
return packet |
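A hedged usage sketch, assuming the TpPd/MessageType/Cause layers and the Scapy-style '/' composition come from the same GSM module as this record:
pkt = disconnectMsToNet(Facility_presence=1)  # Disconnect with the optional Facility IE appended
pkt.show()  # layer-by-layer dump, Scapy style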
def write_bel_namespace(self, file: TextIO, use_names: bool = False) -> None:
"""Write as a BEL namespace file."""
if not self.is_populated():
self.populate()
if use_names and not self.has_names:
raise ValueError
values = (
self._get_namespace_name_to_encoding(desc='writing names')
if use_names else
self._get_namespace_identifier_to_encoding(desc='writing identifiers')
)
write_namespace(
namespace_name=self._get_namespace_name(),
namespace_keyword=self._get_namespace_keyword(),
namespace_query_url=self.identifiers_url,
values=values,
file=file,
) | Write as a BEL namespace file. | Below is the instruction that describes the task:
### Input:
Write as a BEL namespace file.
### Response:
def write_bel_namespace(self, file: TextIO, use_names: bool = False) -> None:
"""Write as a BEL namespace file."""
if not self.is_populated():
self.populate()
if use_names and not self.has_names:
raise ValueError
values = (
self._get_namespace_name_to_encoding(desc='writing names')
if use_names else
self._get_namespace_identifier_to_encoding(desc='writing identifiers')
)
write_namespace(
namespace_name=self._get_namespace_name(),
namespace_keyword=self._get_namespace_keyword(),
namespace_query_url=self.identifiers_url,
values=values,
file=file,
) |
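A usage sketch, assuming `manager` is an already-populated instance of the class this method belongs to:
with open('names.belns', 'w') as file:
    manager.write_bel_namespace(file, use_names=True)  # raises ValueError if the source has no names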
def _get_packages():
# type: () -> List[Package]
"""Convert `pkg_resources.working_set` into a list of `Package` objects.
:return: list
"""
return [Package(pkg_obj=pkg) for pkg in sorted(pkg_resources.working_set,
key=lambda x: str(x).lower())] | Convert `pkg_resources.working_set` into a list of `Package` objects.
:return: list | Below is the instruction that describes the task:
### Input:
Convert `pkg_resources.working_set` into a list of `Package` objects.
:return: list
### Response:
def _get_packages():
# type: () -> List[Package]
"""Convert `pkg_resources.working_set` into a list of `Package` objects.
:return: list
"""
return [Package(pkg_obj=pkg) for pkg in sorted(pkg_resources.working_set,
key=lambda x: str(x).lower())] |
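Listing what pkg_resources currently sees, in case-insensitive order (Package is the wrapper class defined alongside this helper, assumed importable):
for package in _get_packages():
    print(package)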
def Down(self, n = 1, dl = 0):
"""下方向键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.down_key, n) | Press the down arrow key n times | Below is the instruction that describes the task:
### Input:
Press the down arrow key n times
### Response:
def Down(self, n = 1, dl = 0):
"""下方向键n次
"""
self.Delay(dl)
self.keyboard.tap_key(self.keyboard.down_key, n) |
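A usage sketch, assuming `controller` is an instance of the automation class defining Down():
controller.Down(n=3, dl=0.5)  # wait 0.5, then press the down arrow three times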
def get_indicators_metadata(self, indicators):
"""
    Provide metadata associated with a list of indicators, including value, indicatorType, noteCount, sightings,
lastSeen, enclaveIds, and tags. The metadata is determined based on the enclaves the user making the request has
READ access to.
:param indicators: a list of |Indicator| objects to query. Values are required, types are optional. Types
might be required to distinguish in a case where one indicator value has been associated with multiple types
based on different contexts.
:return: A list of |Indicator| objects. The following attributes of the objects will be returned:
correlation_count, last_seen, sightings, notes, tags, enclave_ids. All other attributes of the Indicator
objects will have Null values.
"""
data = [{
'value': i.value,
'indicatorType': i.type
} for i in indicators]
resp = self._client.post("indicators/metadata", data=json.dumps(data))
return [Indicator.from_dict(x) for x in resp.json()] | Provide metadata associated with a list of indicators, including value, indicatorType, noteCount, sightings,
lastSeen, enclaveIds, and tags. The metadata is determined based on the enclaves the user making the request has
READ access to.
:param indicators: a list of |Indicator| objects to query. Values are required, types are optional. Types
might be required to distinguish in a case where one indicator value has been associated with multiple types
based on different contexts.
:return: A list of |Indicator| objects. The following attributes of the objects will be returned:
correlation_count, last_seen, sightings, notes, tags, enclave_ids. All other attributes of the Indicator
objects will have Null values. | Below is the instruction that describes the task:
### Input:
Provide metadata associated with a list of indicators, including value, indicatorType, noteCount, sightings,
lastSeen, enclaveIds, and tags. The metadata is determined based on the enclaves the user making the request has
READ access to.
:param indicators: a list of |Indicator| objects to query. Values are required, types are optional. Types
might be required to distinguish in a case where one indicator value has been associated with multiple types
based on different contexts.
:return: A list of |Indicator| objects. The following attributes of the objects will be returned:
correlation_count, last_seen, sightings, notes, tags, enclave_ids. All other attributes of the Indicator
objects will have Null values.
### Response:
def get_indicators_metadata(self, indicators):
"""
    Provide metadata associated with a list of indicators, including value, indicatorType, noteCount, sightings,
lastSeen, enclaveIds, and tags. The metadata is determined based on the enclaves the user making the request has
READ access to.
:param indicators: a list of |Indicator| objects to query. Values are required, types are optional. Types
might be required to distinguish in a case where one indicator value has been associated with multiple types
based on different contexts.
:return: A list of |Indicator| objects. The following attributes of the objects will be returned:
correlation_count, last_seen, sightings, notes, tags, enclave_ids. All other attributes of the Indicator
objects will have Null values.
"""
data = [{
'value': i.value,
'indicatorType': i.type
} for i in indicators]
resp = self._client.post("indicators/metadata", data=json.dumps(data))
return [Indicator.from_dict(x) for x in resp.json()] |
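A hedged example, assuming `ts` is an authenticated client instance and that Indicator accepts value/type keyword arguments (constructor details are assumptions, not confirmed by the record):
indicators = [Indicator(value='1.2.3.4', type='IP'),
              Indicator(value='bad.example.com', type='URL')]
for ind in ts.get_indicators_metadata(indicators):
    print(ind.value, ind.sightings, ind.tags)  # only the metadata attributes are populated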
def get_allowed_reset_keys_values(self):
"""Get the allowed values for resetting the system.
:returns: A set with the allowed values.
"""
reset_keys_action = self._get_reset_keys_action_element()
if not reset_keys_action.allowed_values:
LOG.warning('Could not figure out the allowed values for the '
'reset keys in secure boot %s', self.path)
return set(mappings.SECUREBOOT_RESET_KEYS_MAP_REV)
return set([mappings.SECUREBOOT_RESET_KEYS_MAP[v] for v in
set(mappings.SECUREBOOT_RESET_KEYS_MAP).
intersection(reset_keys_action.allowed_values)]) | Get the allowed values for resetting the system.
:returns: A set with the allowed values. | Below is the instruction that describes the task:
### Input:
Get the allowed values for resetting the system.
:returns: A set with the allowed values.
### Response:
def get_allowed_reset_keys_values(self):
"""Get the allowed values for resetting the system.
:returns: A set with the allowed values.
"""
reset_keys_action = self._get_reset_keys_action_element()
if not reset_keys_action.allowed_values:
LOG.warning('Could not figure out the allowed values for the '
'reset keys in secure boot %s', self.path)
return set(mappings.SECUREBOOT_RESET_KEYS_MAP_REV)
return set([mappings.SECUREBOOT_RESET_KEYS_MAP[v] for v in
set(mappings.SECUREBOOT_RESET_KEYS_MAP).
intersection(reset_keys_action.allowed_values)]) |
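A usage sketch, assuming `secure_boot` is an instance of the Redfish-style resource class this method belongs to:
values = secure_boot.get_allowed_reset_keys_values()
print(sorted(values))  # mapped values, or the full fallback set when the BMC omits them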
def viewzen_corr(data, view_zen):
"""Apply atmospheric correction on the given *data* using the
specified satellite zenith angles (*view_zen*). Both input data
are given as 2-dimensional Numpy (masked) arrays, and they should
have equal shapes.
The *data* array will be changed in place and has to be copied before.
"""
def ratio(value, v_null, v_ref):
return (value - v_null) / (v_ref - v_null)
def tau0(t):
T_0 = 210.0
T_REF = 320.0
TAU_REF = 9.85
return (1 + TAU_REF)**ratio(t, T_0, T_REF) - 1
def tau(t):
T_0 = 170.0
T_REF = 295.0
TAU_REF = 1.0
M = 4
return TAU_REF * ratio(t, T_0, T_REF)**M
def delta(z):
Z_0 = 0.0
Z_REF = 70.0
DELTA_REF = 6.2
return (1 + DELTA_REF)**ratio(z, Z_0, Z_REF) - 1
y0, x0 = np.ma.where(view_zen == 0)
data[y0, x0] += tau0(data[y0, x0])
y, x = np.ma.where((view_zen > 0) & (view_zen < 90) & (~data.mask))
data[y, x] += tau(data[y, x]) * delta(view_zen[y, x])
return data | Apply atmospheric correction on the given *data* using the
specified satellite zenith angles (*view_zen*). Both input data
are given as 2-dimensional Numpy (masked) arrays, and they should
have equal shapes.
The *data* array will be changed in place and has to be copied beforehand. | Below is the instruction that describes the task:
### Input:
Apply atmospheric correction on the given *data* using the
specified satellite zenith angles (*view_zen*). Both input data
are given as 2-dimensional Numpy (masked) arrays, and they should
have equal shapes.
The *data* array will be changed in place and has to be copied beforehand.
### Response:
def viewzen_corr(data, view_zen):
"""Apply atmospheric correction on the given *data* using the
specified satellite zenith angles (*view_zen*). Both input data
are given as 2-dimensional Numpy (masked) arrays, and they should
have equal shapes.
    The *data* array will be changed in place and has to be copied beforehand.
"""
def ratio(value, v_null, v_ref):
return (value - v_null) / (v_ref - v_null)
def tau0(t):
T_0 = 210.0
T_REF = 320.0
TAU_REF = 9.85
return (1 + TAU_REF)**ratio(t, T_0, T_REF) - 1
def tau(t):
T_0 = 170.0
T_REF = 295.0
TAU_REF = 1.0
M = 4
return TAU_REF * ratio(t, T_0, T_REF)**M
def delta(z):
Z_0 = 0.0
Z_REF = 70.0
DELTA_REF = 6.2
return (1 + DELTA_REF)**ratio(z, Z_0, Z_REF) - 1
y0, x0 = np.ma.where(view_zen == 0)
data[y0, x0] += tau0(data[y0, x0])
y, x = np.ma.where((view_zen > 0) & (view_zen < 90) & (~data.mask))
data[y, x] += tau(data[y, x]) * delta(view_zen[y, x])
return data |
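A worked call on a tiny masked array; note the explicit copy, since viewzen_corr mutates its input in place:
import numpy as np

data = np.ma.array(np.full((2, 2), 250.0), mask=np.zeros((2, 2), bool))
view_zen = np.ma.array([[0.0, 30.0], [60.0, 85.0]])
corrected = viewzen_corr(data.copy(), view_zen)  # nadir pixel gets tau0, slanted pixels tau*delta
print(corrected)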
def cli_login(self, username='', password=''):
"""Generates CLI prompts to complete the login process
:param username: optionally provide username
:type username: :class:`str`
:param password: optionally provide password
:type password: :class:`str`
:return: logon result, see `CMsgClientLogonResponse.eresult <https://github.com/ValvePython/steam/blob/513c68ca081dc9409df932ad86c66100164380a6/protobufs/steammessages_clientserver.proto#L95-L118>`_
:rtype: :class:`.EResult`
Example console output after calling :meth:`cli_login`
.. code:: python
In [5]: client.cli_login()
Steam username: myusername
Password:
Steam is down. Keep retrying? [y/n]: y
Invalid password for 'myusername'. Enter password:
Enter email code: 123
Incorrect code. Enter email code: K6VKF
Out[5]: <EResult.OK: 1>
"""
if not username:
username = _cli_input("Username: ")
if not password:
password = getpass()
auth_code = two_factor_code = None
prompt_for_unavailable = True
result = self.login(username, password)
while result in (EResult.AccountLogonDenied, EResult.InvalidLoginAuthCode,
EResult.AccountLoginDeniedNeedTwoFactor, EResult.TwoFactorCodeMismatch,
EResult.TryAnotherCM, EResult.ServiceUnavailable,
EResult.InvalidPassword,
):
self.sleep(0.1)
if result == EResult.InvalidPassword:
password = getpass("Invalid password for %s. Enter password: " % repr(username))
elif result in (EResult.AccountLogonDenied, EResult.InvalidLoginAuthCode):
prompt = ("Enter email code: " if result == EResult.AccountLogonDenied else
"Incorrect code. Enter email code: ")
auth_code, two_factor_code = _cli_input(prompt), None
elif result in (EResult.AccountLoginDeniedNeedTwoFactor, EResult.TwoFactorCodeMismatch):
prompt = ("Enter 2FA code: " if result == EResult.AccountLoginDeniedNeedTwoFactor else
"Incorrect code. Enter 2FA code: ")
auth_code, two_factor_code = None, _cli_input(prompt)
elif result in (EResult.TryAnotherCM, EResult.ServiceUnavailable):
if prompt_for_unavailable and result == EResult.ServiceUnavailable:
while True:
answer = _cli_input("Steam is down. Keep retrying? [y/n]: ").lower()
if answer in 'yn': break
prompt_for_unavailable = False
if answer == 'n': break
self.reconnect(maxdelay=15) # implements reconnect throttling
result = self.login(username, password, None, auth_code, two_factor_code)
return result | Generates CLI prompts to complete the login process
:param username: optionally provide username
:type username: :class:`str`
:param password: optionally provide password
:type password: :class:`str`
:return: logon result, see `CMsgClientLogonResponse.eresult <https://github.com/ValvePython/steam/blob/513c68ca081dc9409df932ad86c66100164380a6/protobufs/steammessages_clientserver.proto#L95-L118>`_
:rtype: :class:`.EResult`
Example console output after calling :meth:`cli_login`
.. code:: python
In [5]: client.cli_login()
Steam username: myusername
Password:
Steam is down. Keep retrying? [y/n]: y
Invalid password for 'myusername'. Enter password:
Enter email code: 123
Incorrect code. Enter email code: K6VKF
Out[5]: <EResult.OK: 1> | Below is the instruction that describes the task:
### Input:
Generates CLI prompts to complete the login process
:param username: optionally provide username
:type username: :class:`str`
:param password: optionally provide password
:type password: :class:`str`
:return: logon result, see `CMsgClientLogonResponse.eresult <https://github.com/ValvePython/steam/blob/513c68ca081dc9409df932ad86c66100164380a6/protobufs/steammessages_clientserver.proto#L95-L118>`_
:rtype: :class:`.EResult`
Example console output after calling :meth:`cli_login`
.. code:: python
In [5]: client.cli_login()
Steam username: myusername
Password:
Steam is down. Keep retrying? [y/n]: y
Invalid password for 'myusername'. Enter password:
Enter email code: 123
Incorrect code. Enter email code: K6VKF
Out[5]: <EResult.OK: 1>
### Response:
def cli_login(self, username='', password=''):
"""Generates CLI prompts to complete the login process
:param username: optionally provide username
:type username: :class:`str`
:param password: optionally provide password
:type password: :class:`str`
:return: logon result, see `CMsgClientLogonResponse.eresult <https://github.com/ValvePython/steam/blob/513c68ca081dc9409df932ad86c66100164380a6/protobufs/steammessages_clientserver.proto#L95-L118>`_
:rtype: :class:`.EResult`
Example console output after calling :meth:`cli_login`
.. code:: python
In [5]: client.cli_login()
Steam username: myusername
Password:
Steam is down. Keep retrying? [y/n]: y
Invalid password for 'myusername'. Enter password:
Enter email code: 123
Incorrect code. Enter email code: K6VKF
Out[5]: <EResult.OK: 1>
"""
if not username:
username = _cli_input("Username: ")
if not password:
password = getpass()
auth_code = two_factor_code = None
prompt_for_unavailable = True
result = self.login(username, password)
while result in (EResult.AccountLogonDenied, EResult.InvalidLoginAuthCode,
EResult.AccountLoginDeniedNeedTwoFactor, EResult.TwoFactorCodeMismatch,
EResult.TryAnotherCM, EResult.ServiceUnavailable,
EResult.InvalidPassword,
):
self.sleep(0.1)
if result == EResult.InvalidPassword:
password = getpass("Invalid password for %s. Enter password: " % repr(username))
elif result in (EResult.AccountLogonDenied, EResult.InvalidLoginAuthCode):
prompt = ("Enter email code: " if result == EResult.AccountLogonDenied else
"Incorrect code. Enter email code: ")
auth_code, two_factor_code = _cli_input(prompt), None
elif result in (EResult.AccountLoginDeniedNeedTwoFactor, EResult.TwoFactorCodeMismatch):
prompt = ("Enter 2FA code: " if result == EResult.AccountLoginDeniedNeedTwoFactor else
"Incorrect code. Enter 2FA code: ")
auth_code, two_factor_code = None, _cli_input(prompt)
elif result in (EResult.TryAnotherCM, EResult.ServiceUnavailable):
if prompt_for_unavailable and result == EResult.ServiceUnavailable:
while True:
answer = _cli_input("Steam is down. Keep retrying? [y/n]: ").lower()
if answer in 'yn': break
prompt_for_unavailable = False
if answer == 'n': break
self.reconnect(maxdelay=15) # implements reconnect throttling
result = self.login(username, password, None, auth_code, two_factor_code)
return result |
def from_dataframe(cls, name, df, indices, primary_key=None):
"""Infer table metadata from a DataFrame"""
# ordered list (column_name, column_type) pairs
column_types = []
# which columns have nullable values
nullable = set()
# tag cached database by dataframe's number of rows and columns
for column_name in df.columns:
values = df[column_name]
if values.isnull().any():
nullable.add(column_name)
column_db_type = db_type(values.dtype)
column_types.append((column_name.replace(" ", "_"), column_db_type))
def make_rows():
return list(tuple(row) for row in df.values)
return cls(
name=name,
column_types=column_types,
make_rows=make_rows,
indices=indices,
nullable=nullable,
primary_key=primary_key) | Infer table metadata from a DataFrame | Below is the instruction that describes the task:
### Input:
Infer table metadata from a DataFrame
### Response:
def from_dataframe(cls, name, df, indices, primary_key=None):
"""Infer table metadata from a DataFrame"""
# ordered list (column_name, column_type) pairs
column_types = []
# which columns have nullable values
nullable = set()
# tag cached database by dataframe's number of rows and columns
for column_name in df.columns:
values = df[column_name]
if values.isnull().any():
nullable.add(column_name)
column_db_type = db_type(values.dtype)
column_types.append((column_name.replace(" ", "_"), column_db_type))
def make_rows():
return list(tuple(row) for row in df.values)
return cls(
name=name,
column_types=column_types,
make_rows=make_rows,
indices=indices,
nullable=nullable,
primary_key=primary_key) |
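A hedged example; `TableMetadata` is a hypothetical name for the class carrying this classmethod, and the nullable 'score' column exercises the null-detection branch:
import pandas as pd

df = pd.DataFrame({'gene': ['TP53', 'EGFR'], 'score': [0.9, None]})
table = TableMetadata.from_dataframe(name='genes', df=df, indices=['gene'])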
def Rz_to_coshucosv(R,z,delta=1.,oblate=False):
"""
NAME:
Rz_to_coshucosv
PURPOSE:
calculate prolate confocal cosh(u) and cos(v) coordinates from R,z, and delta
INPUT:
R - radius
z - height
delta= focus
oblate= (False) if True, compute oblate confocal coordinates instead of prolate
OUTPUT:
(cosh(u),cos(v))
HISTORY:
2012-11-27 - Written - Bovy (IAS)
2017-10-11 - Added oblate coordinates - Bovy (UofT)
"""
if oblate:
d12= (R+delta)**2.+z**2.
d22= (R-delta)**2.+z**2.
else:
d12= (z+delta)**2.+R**2.
d22= (z-delta)**2.+R**2.
coshu= 0.5/delta*(sc.sqrt(d12)+sc.sqrt(d22))
cosv= 0.5/delta*(sc.sqrt(d12)-sc.sqrt(d22))
if oblate: # cosv is currently really sinv
cosv= sc.sqrt(1.-cosv**2.)
return (coshu,cosv) | NAME:
Rz_to_coshucosv
PURPOSE:
calculate prolate confocal cosh(u) and cos(v) coordinates from R,z, and delta
INPUT:
R - radius
z - height
delta= focus
oblate= (False) if True, compute oblate confocal coordinates instead of prolate
OUTPUT:
(cosh(u),cos(v))
HISTORY:
2012-11-27 - Written - Bovy (IAS)
2017-10-11 - Added oblate coordinates - Bovy (UofT) | Below is the instruction that describes the task:
### Input:
NAME:
Rz_to_coshucosv
PURPOSE:
calculate prolate confocal cosh(u) and cos(v) coordinates from R,z, and delta
INPUT:
R - radius
z - height
delta= focus
oblate= (False) if True, compute oblate confocal coordinates instead of prolate
OUTPUT:
(cosh(u),cos(v))
HISTORY:
2012-11-27 - Written - Bovy (IAS)
2017-10-11 - Added oblate coordinates - Bovy (UofT)
### Response:
def Rz_to_coshucosv(R,z,delta=1.,oblate=False):
"""
NAME:
Rz_to_coshucosv
PURPOSE:
calculate prolate confocal cosh(u) and cos(v) coordinates from R,z, and delta
INPUT:
R - radius
z - height
delta= focus
oblate= (False) if True, compute oblate confocal coordinates instead of prolate
OUTPUT:
(cosh(u),cos(v))
HISTORY:
2012-11-27 - Written - Bovy (IAS)
2017-10-11 - Added oblate coordinates - Bovy (UofT)
"""
if oblate:
d12= (R+delta)**2.+z**2.
d22= (R-delta)**2.+z**2.
else:
d12= (z+delta)**2.+R**2.
d22= (z-delta)**2.+R**2.
coshu= 0.5/delta*(sc.sqrt(d12)+sc.sqrt(d22))
cosv= 0.5/delta*(sc.sqrt(d12)-sc.sqrt(d22))
if oblate: # cosv is currently really sinv
cosv= sc.sqrt(1.-cosv**2.)
return (coshu,cosv) |
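A worked call in both coordinate flavors (pure math, so the numbers can be checked by hand against the d12/d22 definitions above):
coshu, cosv = Rz_to_coshucosv(R=1.2, z=0.3, delta=0.5)                    # prolate
coshu_o, cosv_o = Rz_to_coshucosv(R=1.2, z=0.3, delta=0.5, oblate=True)   # oblate: cos(v) recovered from sin(v)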
def execute(self, table_name=None, table_mode='create', use_cache=True, priority='interactive',
allow_large_results=False, dialect=None, billing_tier=None):
""" Initiate the query, blocking until complete and then return the results.
Args:
table_name: the result table name as a string or TableName; if None (the default), then a
temporary table will be used.
table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
      priority: one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled
          to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much
          as three hours but are not rate-limited.
      allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
          slower and requires a table_name to be specified (default False).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
The QueryResultsTable for the query.
Raises:
Exception if query could not be executed.
"""
job = self.execute_async(table_name=table_name, table_mode=table_mode, use_cache=use_cache,
priority=priority, allow_large_results=allow_large_results,
dialect=dialect, billing_tier=billing_tier)
self._results = job.wait()
return self._results | Initiate the query, blocking until complete and then return the results.
Args:
table_name: the result table name as a string or TableName; if None (the default), then a
temporary table will be used.
table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled
to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much
as three hours but are not rate-limited.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a table_name to be specified (default False).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
The QueryResultsTable for the query.
Raises:
Exception if query could not be executed. | Below is the instruction that describes the task:
### Input:
Initiate the query, blocking until complete and then return the results.
Args:
table_name: the result table name as a string or TableName; if None (the default), then a
temporary table will be used.
table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
priority: one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled
to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much
as three hours but are not rate-limited.
allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
slower and requires a table_name to be specified (default False).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
The QueryResultsTable for the query.
Raises:
Exception if query could not be executed.
### Response:
def execute(self, table_name=None, table_mode='create', use_cache=True, priority='interactive',
allow_large_results=False, dialect=None, billing_tier=None):
""" Initiate the query, blocking until complete and then return the results.
Args:
table_name: the result table name as a string or TableName; if None (the default), then a
temporary table will be used.
table_mode: one of 'create', 'overwrite' or 'append'. If 'create' (the default), the request
will fail if the table exists.
use_cache: whether to use past query results or ignore cache. Has no effect if destination is
specified (default True).
      priority: one of 'batch' or 'interactive' (default). 'interactive' jobs should be scheduled
          to run quickly but are subject to rate limits; 'batch' jobs could be delayed by as much
          as three hours but are not rate-limited.
      allow_large_results: whether to allow large results; i.e. compressed data over 100MB. This is
          slower and requires a table_name to be specified (default False).
dialect : {'legacy', 'standard'}, default 'legacy'
'legacy' : Use BigQuery's legacy SQL dialect.
'standard' : Use BigQuery's standard SQL (beta), which is
compliant with the SQL 2011 standard.
billing_tier: Limits the billing tier for this job. Queries that have resource
usage beyond this tier will fail (without incurring a charge). If unspecified, this
will be set to your project default. This can also be used to override your
project-wide default billing tier on a per-query basis.
Returns:
The QueryResultsTable for the query.
Raises:
Exception if query could not be executed.
"""
job = self.execute_async(table_name=table_name, table_mode=table_mode, use_cache=use_cache,
priority=priority, allow_large_results=allow_large_results,
dialect=dialect, billing_tier=billing_tier)
self._results = job.wait()
return self._results |
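A usage sketch, assuming `query` is an instance of the Query class this method belongs to (a Datalab-style BigQuery wrapper):
results = query.execute(table_name='mydataset.mytable',
                        table_mode='overwrite',
                        dialect='standard')
for row in results:
    print(row)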
def xml_entities_to_utf8(text, skip=('lt', 'gt', 'amp')):
"""Translate HTML or XML character references to UTF-8.
Removes HTML or XML character references and entities from a text string
and replaces them with their UTF-8 representation, if possible.
:param text: The HTML (or XML) source text.
:type text: string
:param skip: list of entity names to skip when transforming.
:type skip: iterable
:return: The plain text, as a Unicode string, if necessary.
@author: Based on http://effbot.org/zone/re-sub.htm#unescape-html
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16)).encode("utf-8")
else:
return unichr(int(text[2:-1])).encode("utf-8")
except ValueError:
pass
else:
# named entity
if text[1:-1] not in skip:
try:
text = unichr(
html_entities.name2codepoint[text[1:-1]]) \
.encode("utf-8")
except KeyError:
pass
return text # leave as is
    return re.sub(r"&#?\w+;", fixup, text)
Removes HTML or XML character references and entities from a text string
and replaces them with their UTF-8 representation, if possible.
:param text: The HTML (or XML) source text.
:type text: string
:param skip: list of entity names to skip when transforming.
:type skip: iterable
:return: The plain text, as a Unicode string, if necessary.
@author: Based on http://effbot.org/zone/re-sub.htm#unescape-html | Below is the instruction that describes the task:
### Input:
Translate HTML or XML character references to UTF-8.
Removes HTML or XML character references and entities from a text string
and replaces them with their UTF-8 representation, if possible.
:param text: The HTML (or XML) source text.
:type text: string
:param skip: list of entity names to skip when transforming.
:type skip: iterable
:return: The plain text, as a Unicode string, if necessary.
@author: Based on http://effbot.org/zone/re-sub.htm#unescape-html
### Response:
def xml_entities_to_utf8(text, skip=('lt', 'gt', 'amp')):
"""Translate HTML or XML character references to UTF-8.
Removes HTML or XML character references and entities from a text string
and replaces them with their UTF-8 representation, if possible.
:param text: The HTML (or XML) source text.
:type text: string
:param skip: list of entity names to skip when transforming.
:type skip: iterable
:return: The plain text, as a Unicode string, if necessary.
@author: Based on http://effbot.org/zone/re-sub.htm#unescape-html
"""
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16)).encode("utf-8")
else:
return unichr(int(text[2:-1])).encode("utf-8")
except ValueError:
pass
else:
# named entity
if text[1:-1] not in skip:
try:
text = unichr(
html_entities.name2codepoint[text[1:-1]]) \
.encode("utf-8")
except KeyError:
pass
return text # leave as is
    return re.sub(r"&#?\w+;", fixup, text)
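A couple of behavior checks (this is Python 2-era code: unichr and .encode("utf-8") produce byte strings):
print(xml_entities_to_utf8("a &lt; b &amp; &copy;"))  # lt/amp are skipped by default; &copy; decodes to UTF-8
print(xml_entities_to_utf8("&#8220;quoted&#8221;"))   # numeric references decode to curly quotes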
def getwinsize(self):
"""This returns the terminal window size of the child tty. The return
value is a tuple of (rows, cols). """
        TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912)
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(self.fileno(), TIOCGWINSZ, s)
return struct.unpack('HHHH', x)[0:2] | This returns the terminal window size of the child tty. The return
value is a tuple of (rows, cols). | Below is the instruction that describes the task:
### Input:
This returns the terminal window size of the child tty. The return
value is a tuple of (rows, cols).
### Response:
def getwinsize(self):
"""This returns the terminal window size of the child tty. The return
value is a tuple of (rows, cols). """
        TIOCGWINSZ = getattr(termios, 'TIOCGWINSZ', 1074295912)
s = struct.pack('HHHH', 0, 0, 0, 0)
x = fcntl.ioctl(self.fileno(), TIOCGWINSZ, s)
return struct.unpack('HHHH', x)[0:2] |
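A usage sketch, assuming `child` is a spawned pexpect-style session exposing fileno():
rows, cols = child.getwinsize()
print(rows, cols)  # e.g. 24 80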
def create(
cls,
path,
template_engine=None,
output_filename=None,
output_ext=None,
view_name=None
):
"""Create the relevant subclass of StatikView based on the given path variable and
parameters."""
# if it's a complex view
if isinstance(path, dict):
return StatikViewComplexPath(
path,
template_engine,
output_filename=output_filename,
output_ext=output_ext,
view_name=view_name
)
elif isinstance(path, basestring):
return StatikViewSimplePath(
path,
output_filename=output_filename,
output_ext=output_ext,
view_name=view_name
)
else:
raise ValueError(
"Unrecognised structure for \"path\" configuration in view: %s" % view_name
) | Create the relevant subclass of StatikView based on the given path variable and
parameters. | Below is the instruction that describes the task:
### Input:
Create the relevant subclass of StatikView based on the given path variable and
parameters.
### Response:
def create(
cls,
path,
template_engine=None,
output_filename=None,
output_ext=None,
view_name=None
):
"""Create the relevant subclass of StatikView based on the given path variable and
parameters."""
# if it's a complex view
if isinstance(path, dict):
return StatikViewComplexPath(
path,
template_engine,
output_filename=output_filename,
output_ext=output_ext,
view_name=view_name
)
elif isinstance(path, basestring):
return StatikViewSimplePath(
path,
output_filename=output_filename,
output_ext=output_ext,
view_name=view_name
)
else:
raise ValueError(
"Unrecognised structure for \"path\" configuration in view: %s" % view_name
) |
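A hedged sketch of the dispatch: a string path yields the simple variant, a dict the complex one (the dict keys follow Statik's view configuration schema, assumed here, and template_engine is assumed configured):
simple_view = StatikView.create('about.html', view_name='about')
complex_view = StatikView.create({'for-each': 'posts'}, template_engine,
                                 view_name='posts')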
def Hughmark(m, x, alpha, D, L, Cpl, kl, mu_b=None, mu_w=None):
r'''Calculates the two-phase non-boiling laminar heat transfer coefficient
of a liquid and gas flowing inside a tube of any inclination, as in [1]_
and reviewed in [2]_.
.. math::
\frac{h_{TP} D}{k_l} = 1.75(1-\alpha)^{-0.5}\left(\frac{m_l C_{p,l}}
{(1-\alpha)k_l L}\right)^{1/3}\left(\frac{\mu_b}{\mu_w}\right)^{0.14}
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific tube interval []
alpha : float
Void fraction in the tube, []
D : float
Diameter of the tube [m]
L : float
Length of the tube, [m]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
kl : float
Thermal conductivity of liquid [W/m/K]
mu_b : float
Viscosity of liquid at bulk conditions (average of inlet/outlet
temperature) [Pa*s]
mu_w : float, optional
Viscosity of liquid at wall temperature [Pa*s]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
This model is based on a laminar entry length correlation - for a
sufficiently long tube, this will predict unrealistically low heat transfer
coefficients.
If the viscosity at the wall temperature is not given, the liquid viscosity
correction is not applied.
Developed for horizontal pipes in laminar slug flow. Data consisted of the
systems air-water, air-SAE 10 oil, gas-oil, air-diethylene glycol, and
air-aqueous glycerine.
Examples
--------
>>> Hughmark(m=1, x=.9, alpha=.9, D=.3, L=.5, Cpl=2300, kl=0.6, mu_b=1E-3,
... mu_w=1.2E-3)
212.7411636127175
References
----------
.. [1] Hughmark, G. A. "Holdup and Heat Transfer in Horizontal Slug Gas-
Liquid Flow." Chemical Engineering Science 20, no. 12 (December 1,
1965): 1007-10. doi:10.1016/0009-2509(65)80101-4.
.. [2] Dongwoo Kim, Venkata K. Ryali, Afshin J. Ghajar, Ronald L.
Dougherty. "Comparison of 20 Two-Phase Heat Transfer Correlations with
Seven Sets of Experimental Data, Including Flow Pattern and Tube
Inclination Effects." Heat Transfer Engineering 20, no. 1 (February 1,
1999): 15-40. doi:10.1080/014576399271691.
'''
ml = m*(1-x)
RL = 1-alpha
Nu_TP = 1.75*(RL)**-0.5*(ml*Cpl/RL/kl/L)**(1/3.)
if mu_b and mu_w:
Nu_TP *= (mu_b/mu_w)**0.14
return Nu_TP*kl/D | r'''Calculates the two-phase non-boiling laminar heat transfer coefficient
of a liquid and gas flowing inside a tube of any inclination, as in [1]_
and reviewed in [2]_.
.. math::
\frac{h_{TP} D}{k_l} = 1.75(1-\alpha)^{-0.5}\left(\frac{m_l C_{p,l}}
{(1-\alpha)k_l L}\right)^{1/3}\left(\frac{\mu_b}{\mu_w}\right)^{0.14}
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific tube interval []
alpha : float
Void fraction in the tube, []
D : float
Diameter of the tube [m]
L : float
Length of the tube, [m]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
kl : float
Thermal conductivity of liquid [W/m/K]
mu_b : float
Viscosity of liquid at bulk conditions (average of inlet/outlet
temperature) [Pa*s]
mu_w : float, optional
Viscosity of liquid at wall temperature [Pa*s]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
This model is based on a laminar entry length correlation - for a
sufficiently long tube, this will predict unrealistically low heat transfer
coefficients.
If the viscosity at the wall temperature is not given, the liquid viscosity
correction is not applied.
Developed for horizontal pipes in laminar slug flow. Data consisted of the
systems air-water, air-SAE 10 oil, gas-oil, air-diethylene glycol, and
air-aqueous glycerine.
Examples
--------
>>> Hughmark(m=1, x=.9, alpha=.9, D=.3, L=.5, Cpl=2300, kl=0.6, mu_b=1E-3,
... mu_w=1.2E-3)
212.7411636127175
References
----------
.. [1] Hughmark, G. A. "Holdup and Heat Transfer in Horizontal Slug Gas-
Liquid Flow." Chemical Engineering Science 20, no. 12 (December 1,
1965): 1007-10. doi:10.1016/0009-2509(65)80101-4.
.. [2] Dongwoo Kim, Venkata K. Ryali, Afshin J. Ghajar, Ronald L.
Dougherty. "Comparison of 20 Two-Phase Heat Transfer Correlations with
Seven Sets of Experimental Data, Including Flow Pattern and Tube
Inclination Effects." Heat Transfer Engineering 20, no. 1 (February 1,
1999): 15-40. doi:10.1080/014576399271691. | Below is the instruction that describes the task:
### Input:
r'''Calculates the two-phase non-boiling laminar heat transfer coefficient
of a liquid and gas flowing inside a tube of any inclination, as in [1]_
and reviewed in [2]_.
.. math::
\frac{h_{TP} D}{k_l} = 1.75(1-\alpha)^{-0.5}\left(\frac{m_l C_{p,l}}
{(1-\alpha)k_l L}\right)^{1/3}\left(\frac{\mu_b}{\mu_w}\right)^{0.14}
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific tube interval []
alpha : float
Void fraction in the tube, []
D : float
Diameter of the tube [m]
L : float
Length of the tube, [m]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
kl : float
Thermal conductivity of liquid [W/m/K]
mu_b : float
Viscosity of liquid at bulk conditions (average of inlet/outlet
temperature) [Pa*s]
mu_w : float, optional
Viscosity of liquid at wall temperature [Pa*s]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
This model is based on a laminar entry length correlation - for a
sufficiently long tube, this will predict unrealistically low heat transfer
coefficients.
If the viscosity at the wall temperature is not given, the liquid viscosity
correction is not applied.
Developed for horizontal pipes in laminar slug flow. Data consisted of the
systems air-water, air-SAE 10 oil, gas-oil, air-diethylene glycol, and
air-aqueous glycerine.
Examples
--------
>>> Hughmark(m=1, x=.9, alpha=.9, D=.3, L=.5, Cpl=2300, kl=0.6, mu_b=1E-3,
... mu_w=1.2E-3)
212.7411636127175
References
----------
.. [1] Hughmark, G. A. "Holdup and Heat Transfer in Horizontal Slug Gas-
Liquid Flow." Chemical Engineering Science 20, no. 12 (December 1,
1965): 1007-10. doi:10.1016/0009-2509(65)80101-4.
.. [2] Dongwoo Kim, Venkata K. Ryali, Afshin J. Ghajar, Ronald L.
Dougherty. "Comparison of 20 Two-Phase Heat Transfer Correlations with
Seven Sets of Experimental Data, Including Flow Pattern and Tube
Inclination Effects." Heat Transfer Engineering 20, no. 1 (February 1,
1999): 15-40. doi:10.1080/014576399271691.
### Response:
def Hughmark(m, x, alpha, D, L, Cpl, kl, mu_b=None, mu_w=None):
r'''Calculates the two-phase non-boiling laminar heat transfer coefficient
of a liquid and gas flowing inside a tube of any inclination, as in [1]_
and reviewed in [2]_.
.. math::
\frac{h_{TP} D}{k_l} = 1.75(1-\alpha)^{-0.5}\left(\frac{m_l C_{p,l}}
{(1-\alpha)k_l L}\right)^{1/3}\left(\frac{\mu_b}{\mu_w}\right)^{0.14}
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific tube interval []
alpha : float
Void fraction in the tube, []
D : float
Diameter of the tube [m]
L : float
Length of the tube, [m]
Cpl : float
Constant-pressure heat capacity of liquid [J/kg/K]
kl : float
Thermal conductivity of liquid [W/m/K]
mu_b : float
Viscosity of liquid at bulk conditions (average of inlet/outlet
temperature) [Pa*s]
mu_w : float, optional
Viscosity of liquid at wall temperature [Pa*s]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
This model is based on a laminar entry length correlation - for a
sufficiently long tube, this will predict unrealistically low heat transfer
coefficients.
If the viscosity at the wall temperature is not given, the liquid viscosity
correction is not applied.
Developed for horizontal pipes in laminar slug flow. Data consisted of the
systems air-water, air-SAE 10 oil, gas-oil, air-diethylene glycol, and
air-aqueous glycerine.
Examples
--------
>>> Hughmark(m=1, x=.9, alpha=.9, D=.3, L=.5, Cpl=2300, kl=0.6, mu_b=1E-3,
... mu_w=1.2E-3)
212.7411636127175
References
----------
.. [1] Hughmark, G. A. "Holdup and Heat Transfer in Horizontal Slug Gas-
Liquid Flow." Chemical Engineering Science 20, no. 12 (December 1,
1965): 1007-10. doi:10.1016/0009-2509(65)80101-4.
.. [2] Dongwoo Kim, Venkata K. Ryali, Afshin J. Ghajar, Ronald L.
Dougherty. "Comparison of 20 Two-Phase Heat Transfer Correlations with
Seven Sets of Experimental Data, Including Flow Pattern and Tube
Inclination Effects." Heat Transfer Engineering 20, no. 1 (February 1,
1999): 15-40. doi:10.1080/014576399271691.
'''
ml = m*(1-x)
RL = 1-alpha
Nu_TP = 1.75*(RL)**-0.5*(ml*Cpl/RL/kl/L)**(1/3.)
if mu_b and mu_w:
Nu_TP *= (mu_b/mu_w)**0.14
return Nu_TP*kl/D |
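A quick sanity check on the correlation above, simply re-running the values from its own Examples section; this assumes the Hughmark function defined above is in scope.
# Re-running the docstring example; the expected value is taken from it.
h = Hughmark(m=1, x=.9, alpha=.9, D=.3, L=.5, Cpl=2300, kl=0.6,
             mu_b=1E-3, mu_w=1.2E-3)
print(round(h, 7))  # -> 212.7411636, matching the documented result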
def initialize_path(self, path_num=None):
""" inits producer for next path, i.e. sets current state to initial state"""
for p in self.producers:
p.initialize_path(path_num)
# self.state = copy(self.initial_state)
# self.state.path = path_num
self.random.seed(hash(self.seed) + hash(path_num)) | inits producer for next path, i.e. sets current state to initial state | Below is the the instruction that describes the task:
### Input:
inits producer for next path, i.e. sets current state to initial state
### Response:
def initialize_path(self, path_num=None):
""" inits producer for next path, i.e. sets current state to initial state"""
for p in self.producers:
p.initialize_path(path_num)
# self.state = copy(self.initial_state)
# self.state.path = path_num
self.random.seed(hash(self.seed) + hash(path_num)) |
def cross_validate(data=None, folds=5, repeat=1, metrics=None,
reporters=None, model_def=None, **kwargs):
"""Shortcut to cross-validate a single configuration.
ModelDefinition variables are passed in as keyword args, along
with the cross-validation parameters.
"""
md_kwargs = {}
if model_def is None:
for arg in ModelDefinition.params:
if arg in kwargs:
md_kwargs[arg] = kwargs.pop(arg)
model_def = ModelDefinition(**md_kwargs)
if metrics is None:
metrics = []
if reporters is None:
reporters = []
metrics = [MetricReporter(metric) for metric in metrics]
results = modeling.cross_validate(model_def, data, folds, repeat=repeat, **kwargs)
for r in reporters + metrics:
r.process_results(results)
return CVResult(results, reporters, metrics) | Shortcut to cross-validate a single configuration.
ModelDefinition variables are passed in as keyword args, along
with the cross-validation parameters. | Below is the the instruction that describes the task:
### Input:
Shortcut to cross-validate a single configuration.
ModelDefinition variables are passed in as keyword args, along
with the cross-validation parameters.
### Response:
def cross_validate(data=None, folds=5, repeat=1, metrics=None,
reporters=None, model_def=None, **kwargs):
"""Shortcut to cross-validate a single configuration.
ModelDefinition variables are passed in as keyword args, along
with the cross-validation parameters.
"""
md_kwargs = {}
if model_def is None:
for arg in ModelDefinition.params:
if arg in kwargs:
md_kwargs[arg] = kwargs.pop(arg)
model_def = ModelDefinition(**md_kwargs)
if metrics is None:
metrics = []
if reporters is None:
reporters = []
metrics = [MetricReporter(metric) for metric in metrics]
results = modeling.cross_validate(model_def, data, folds, repeat=repeat, **kwargs)
for r in reporters + metrics:
r.process_results(results)
return CVResult(results, reporters, metrics) |
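A hypothetical call sketch for cross_validate; my_data, the 'accuracy' metric name, and the n_estimators keyword are illustrative placeholders, not values taken from this dataset.
# ModelDefinition keyword args (e.g. n_estimators, if it appears in
# ModelDefinition.params) are peeled out of **kwargs automatically.
result = cross_validate(data=my_data, folds=5, repeat=2,
                        metrics=['accuracy'], n_estimators=100)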
def do_page_has_content(parser, token):
"""
Conditional tag that only renders its nodes if the page
has content for a particular content type. By default the
current page is used.
Syntax::
{% page_has_content <content_type> [<page var name>] %}
...
{% end_page_has_content %}
Example use::
{% page_has_content 'header-image' %}
<img src="{{ MEDIA_URL }}{% imageplaceholder 'header-image' %}">
{% end_page_has_content %}
"""
nodelist = parser.parse(('end_page_has_content',))
parser.delete_first_token()
args = token.split_contents()
try:
content_type = unescape_string_literal(args[1])
except IndexError:
raise template.TemplateSyntaxError(
"%r tag requires the argument content_type" % args[0]
)
if len(args) > 2:
page = args[2]
else:
page = None
return PageHasContentNode(page, content_type, nodelist) | Conditional tag that only renders its nodes if the page
has content for a particular content type. By default the
current page is used.
Syntax::
{% page_has_content <content_type> [<page var name>] %}
...
{% end_page_has_content %}
Example use::
{% page_has_content 'header-image' %}
<img src="{{ MEDIA_URL }}{% imageplaceholder 'header-image' %}">
{% end_page_has_content %} | Below is the the instruction that describes the task:
### Input:
Conditional tag that only renders its nodes if the page
has content for a particular content type. By default the
current page is used.
Syntax::
{% page_has_content <content_type> [<page var name>] %}
...
{% end_page_has_content %}
Example use::
{% page_has_content 'header-image' %}
<img src="{{ MEDIA_URL }}{% imageplaceholder 'header-image' %}">
{% end_page_has_content %}
### Response:
def do_page_has_content(parser, token):
"""
Conditional tag that only renders its nodes if the page
has content for a particular content type. By default the
current page is used.
Syntax::
{% page_has_content <content_type> [<page var name>] %}
...
{% end_page_has_content %}
Example use::
{% page_has_content 'header-image' %}
<img src="{{ MEDIA_URL }}{% imageplaceholder 'header-image' %}">
{% end_page_has_content %}
"""
nodelist = parser.parse(('end_page_has_content',))
parser.delete_first_token()
args = token.split_contents()
try:
content_type = unescape_string_literal(args[1])
except IndexError:
raise template.TemplateSyntaxError(
"%r tag requires the argument content_type" % args[0]
)
if len(args) > 2:
page = args[2]
else:
page = None
return PageHasContentNode(page, content_type, nodelist) |
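A sketch of how a tag like this is conventionally registered, assuming the module defines a template library the way Django tag modules usually do.
from django import template

register = template.Library()  # hypothetical; normally at module top
register.tag('page_has_content', do_page_has_content)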
def list_pkgs(versions_as_list=False, **kwargs):
'''
List the packages currently installed as a dict::
{'<package_name>': '<version>'}
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
'''
versions_as_list = salt.utils.data.is_true(versions_as_list)
# not yet implemented or not applicable
if any([salt.utils.data.is_true(kwargs.get(x))
for x in ('removed', 'purge_desired')]):
return {}
if 'pkg.list_pkgs' in __context__:
if versions_as_list:
return __context__['pkg.list_pkgs']
else:
ret = copy.deepcopy(__context__['pkg.list_pkgs'])
__salt__['pkg_resource.stringify'](ret)
return ret
ret = {}
cmd = 'pkg_info -q -a'
out = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace')
for line in out.splitlines():
try:
pkgname, pkgver, flavor = __PKG_RE.match(line).groups()
except AttributeError:
continue
pkgname += '--{0}'.format(flavor) if flavor else ''
__salt__['pkg_resource.add_pkg'](ret, pkgname, pkgver)
__salt__['pkg_resource.sort_pkglist'](ret)
__context__['pkg.list_pkgs'] = copy.deepcopy(ret)
if not versions_as_list:
__salt__['pkg_resource.stringify'](ret)
return ret | List the packages currently installed as a dict::
{'<package_name>': '<version>'}
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs | Below is the the instruction that describes the task:
### Input:
List the packages currently installed as a dict::
{'<package_name>': '<version>'}
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
### Response:
def list_pkgs(versions_as_list=False, **kwargs):
'''
List the packages currently installed as a dict::
{'<package_name>': '<version>'}
CLI Example:
.. code-block:: bash
salt '*' pkg.list_pkgs
'''
versions_as_list = salt.utils.data.is_true(versions_as_list)
# not yet implemented or not applicable
if any([salt.utils.data.is_true(kwargs.get(x))
for x in ('removed', 'purge_desired')]):
return {}
if 'pkg.list_pkgs' in __context__:
if versions_as_list:
return __context__['pkg.list_pkgs']
else:
ret = copy.deepcopy(__context__['pkg.list_pkgs'])
__salt__['pkg_resource.stringify'](ret)
return ret
ret = {}
cmd = 'pkg_info -q -a'
out = __salt__['cmd.run_stdout'](cmd, output_loglevel='trace')
for line in out.splitlines():
try:
pkgname, pkgver, flavor = __PKG_RE.match(line).groups()
except AttributeError:
continue
pkgname += '--{0}'.format(flavor) if flavor else ''
__salt__['pkg_resource.add_pkg'](ret, pkgname, pkgver)
__salt__['pkg_resource.sort_pkglist'](ret)
__context__['pkg.list_pkgs'] = copy.deepcopy(ret)
if not versions_as_list:
__salt__['pkg_resource.stringify'](ret)
return ret |
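An illustrative shape of the returned mapping, with made-up package names and versions; note how a non-empty flavor is folded into the key with a '--' separator.
# Hypothetical results, not from a real system:
# {'curl': ['7.68.0'], 'vim--no_x11': ['8.2']}
pkgs = list_pkgs(versions_as_list=True)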
def check_bounds_variables(self, dataset):
'''
Checks the grid boundary variables.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended variables to describe grid boundaries')
bounds_map = {
'lat_bounds': {
'units': 'degrees_north',
'comment': 'latitude values at the north and south bounds of each pixel.'
},
'lon_bounds': {
'units': 'degrees_east',
'comment': 'longitude values at the west and east bounds of each pixel.'
},
'z_bounds': {
'comment': 'z bounds for each z value',
},
'time_bounds': {
'comment': 'time bounds for each time value'
}
}
bounds_variables = [v.bounds for v in dataset.get_variables_by_attributes(bounds=lambda x: x is not None)]
for variable in bounds_variables:
ncvar = dataset.variables.get(variable, {})
recommended_ctx.assert_true(ncvar != {}, 'a variable {} should exist as indicated by a bounds attribute'.format(variable))
if ncvar == {}:
continue
units = getattr(ncvar, 'units', '')
if variable in bounds_map and 'units' in bounds_map[variable]:
recommended_ctx.assert_true(
units == bounds_map[variable]['units'],
'variable {} should have units {}'.format(variable, bounds_map[variable]['units'])
)
else:
recommended_ctx.assert_true(
units != '',
'variable {} should have a units attribute that is not empty'.format(variable)
)
comment = getattr(ncvar, 'comment', '')
recommended_ctx.assert_true(
comment != '',
'variable {} should have a comment and not be empty'.format(variable)
)
return recommended_ctx.to_result() | Checks the grid boundary variables.
:param netCDF4.Dataset dataset: An open netCDF dataset | Below is the the instruction that describes the task:
### Input:
Checks the grid boundary variables.
:param netCDF4.Dataset dataset: An open netCDF dataset
### Response:
def check_bounds_variables(self, dataset):
'''
Checks the grid boundary variables.
:param netCDF4.Dataset dataset: An open netCDF dataset
'''
recommended_ctx = TestCtx(BaseCheck.MEDIUM, 'Recommended variables to describe grid boundaries')
bounds_map = {
'lat_bounds': {
'units': 'degrees_north',
'comment': 'latitude values at the north and south bounds of each pixel.'
},
'lon_bounds': {
'units': 'degrees_east',
'comment': 'longitude values at the west and east bounds of each pixel.'
},
'z_bounds': {
'comment': 'z bounds for each z value',
},
'time_bounds': {
'comment': 'time bounds for each time value'
}
}
bounds_variables = [v.bounds for v in dataset.get_variables_by_attributes(bounds=lambda x: x is not None)]
for variable in bounds_variables:
ncvar = dataset.variables.get(variable, {})
recommended_ctx.assert_true(ncvar != {}, 'a variable {} should exist as indicated by a bounds attribute'.format(variable))
if ncvar == {}:
continue
units = getattr(ncvar, 'units', '')
if variable in bounds_map and 'units' in bounds_map[variable]:
recommended_ctx.assert_true(
units == bounds_map[variable]['units'],
'variable {} should have units {}'.format(variable, bounds_map[variable]['units'])
)
else:
recommended_ctx.assert_true(
units != '',
'variable {} should have a units attribute that is not empty'.format(variable)
)
comment = getattr(ncvar, 'comment', '')
recommended_ctx.assert_true(
comment != '',
'variable {} should have a comment and not be empty'.format(variable)
)
return recommended_ctx.to_result() |
def CopyToStatTimeTuple(self):
"""Copies the date time value to a stat timestamp tuple.
Returns:
tuple[int, int]: a POSIX timestamp in seconds and the remainder in
100 nano seconds or (None, None) on error.
"""
normalized_timestamp = self._GetNormalizedTimestamp()
if normalized_timestamp is None:
return None, None
if self._precision in (
definitions.PRECISION_1_NANOSECOND,
definitions.PRECISION_100_NANOSECONDS,
definitions.PRECISION_1_MICROSECOND,
definitions.PRECISION_1_MILLISECOND,
definitions.PRECISION_100_MILLISECONDS):
remainder = int((normalized_timestamp % 1) * self._100NS_PER_SECOND)
return int(normalized_timestamp), remainder
return int(normalized_timestamp), None | Copies the date time value to a stat timestamp tuple.
Returns:
tuple[int, int]: a POSIX timestamp in seconds and the remainder in
100 nano seconds or (None, None) on error. | Below is the the instruction that describes the task:
### Input:
Copies the date time value to a stat timestamp tuple.
Returns:
tuple[int, int]: a POSIX timestamp in seconds and the remainder in
100 nano seconds or (None, None) on error.
### Response:
def CopyToStatTimeTuple(self):
"""Copies the date time value to a stat timestamp tuple.
Returns:
tuple[int, int]: a POSIX timestamp in seconds and the remainder in
100 nano seconds or (None, None) on error.
"""
normalized_timestamp = self._GetNormalizedTimestamp()
if normalized_timestamp is None:
return None, None
if self._precision in (
definitions.PRECISION_1_NANOSECOND,
definitions.PRECISION_100_NANOSECONDS,
definitions.PRECISION_1_MICROSECOND,
definitions.PRECISION_1_MILLISECOND,
definitions.PRECISION_100_MILLISECONDS):
remainder = int((normalized_timestamp % 1) * self._100NS_PER_SECOND)
return int(normalized_timestamp), remainder
return int(normalized_timestamp), None |
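The remainder arithmetic above can be checked standalone, assuming _100NS_PER_SECOND is 10**7 (the number of 100 ns intervals per second).
ts = 10.25  # a normalized POSIX timestamp (exactly representable)
seconds, remainder = int(ts), int((ts % 1) * 10**7)
print(seconds, remainder)  # -> 10 2500000, i.e. 10 s + 2500000 * 100 ns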
def flat_git_tree_to_nested(flat_tree, prefix=''):
'''
Given an array in format:
[
["100644", "blob", "ab3ce...", "748", ".gitignore" ],
["100644", "blob", "ab3ce...", "748", "path/to/thing" ],
...
]
Outputs in a nested format:
{
"path": "/",
"type": "directory",
"children": [
{
"type": "blob",
"size": 748,
"sha": "ab3ce...",
"mode": "100644",
},
...
],
...
}
'''
root = _make_empty_dir_dict(prefix if prefix else '/')
# Filter all descendents of this prefix
descendent_files = [
info for info in flat_tree
if os.path.dirname(info[PATH]).startswith(prefix)
]
# Figure out strictly leaf nodes of this tree (can be immediately added as
# children)
children_files = [
info for info in descendent_files
if os.path.dirname(info[PATH]) == prefix
]
# Figure out all descendent directories
descendent_dirs = set(
os.path.dirname(info[PATH]) for info in descendent_files
if os.path.dirname(info[PATH]).startswith(prefix)
and not os.path.dirname(info[PATH]) == prefix
)
# Figure out the direct children directories
children_dirs = set(
dir_path for dir_path in descendent_dirs
if os.path.dirname(dir_path) == prefix
)
# Recurse into children dirs, constructing file trees for each of them,
# then appending those
for dir_path in children_dirs:
info = flat_git_tree_to_nested(descendent_files, prefix=dir_path)
root['children'].append(info)
# Append direct children files
for info in children_files:
root['children'].append(_make_child(info))
return root | Given an array in format:
[
["100644", "blob", "ab3ce...", "748", ".gitignore" ],
["100644", "blob", "ab3ce...", "748", "path/to/thing" ],
...
]
Outputs in a nested format:
{
"path": "/",
"type": "directory",
"children": [
{
"type": "blob",
"size": 748,
"sha": "ab3ce...",
"mode": "100644",
},
...
],
...
} | Below is the the instruction that describes the task:
### Input:
Given an array in format:
[
["100644", "blob", "ab3ce...", "748", ".gitignore" ],
["100644", "blob", "ab3ce...", "748", "path/to/thing" ],
...
]
Outputs in a nested format:
{
"path": "/",
"type": "directory",
"children": [
{
"type": "blob",
"size": 748,
"sha": "ab3ce...",
"mode": "100644",
},
...
],
...
}
### Response:
def flat_git_tree_to_nested(flat_tree, prefix=''):
'''
Given an array in format:
[
["100644", "blob", "ab3ce...", "748", ".gitignore" ],
["100644", "blob", "ab3ce...", "748", "path/to/thing" ],
...
]
Outputs in a nested format:
{
"path": "/",
"type": "directory",
"children": [
{
"type": "blob",
"size": 748,
"sha": "ab3ce...",
"mode": "100644",
},
...
],
...
}
'''
root = _make_empty_dir_dict(prefix if prefix else '/')
# Filter all descendents of this prefix
descendent_files = [
info for info in flat_tree
if os.path.dirname(info[PATH]).startswith(prefix)
]
# Figure out strictly leaf nodes of this tree (can be immediately added as
# children)
children_files = [
info for info in descendent_files
if os.path.dirname(info[PATH]) == prefix
]
# Figure out all descendent directories
descendent_dirs = set(
os.path.dirname(info[PATH]) for info in descendent_files
if os.path.dirname(info[PATH]).startswith(prefix)
and not os.path.dirname(info[PATH]) == prefix
)
# Figure out the direct children directories
children_dirs = set(
dir_path for dir_path in descendent_dirs
if os.path.dirname(dir_path) == prefix
)
# Recurse into children dirs, constructing file trees for each of them,
# then appending those
for dir_path in children_dirs:
info = flat_git_tree_to_nested(descendent_files, prefix=dir_path)
root['children'].append(info)
# Append direct children files
for info in children_files:
root['children'].append(_make_child(info))
return root |
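A hedged usage sketch with a tiny two-file tree; the SHAs are truncated placeholders and PATH is assumed to index the path column of each row, as in the docstring's layout.
flat = [
    ["100644", "blob", "ab3ce", "748", ".gitignore"],
    ["100644", "blob", "dd4f1", "120", "src/main.py"],
]
tree = flat_git_tree_to_nested(flat)
# tree['children'] holds the .gitignore entry plus a nested dict for "src"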
def resize(self, width, height):
"""
PyQt-specific resize callback.
"""
if not self.fbo:
return
# pyqt reports sizes in actual buffer size
self.width = width // self.widget.devicePixelRatio()
self.height = height // self.widget.devicePixelRatio()
self.buffer_width = width
self.buffer_height = height
super().resize(width, height) | PyQt-specific resize callback.
### Input:
PyQt-specific resize callback.
### Response:
def resize(self, width, height):
"""
PyQt-specific resize callback.
"""
if not self.fbo:
return
# pyqt reports sizes in actual buffer size
self.width = width // self.widget.devicePixelRatio()
self.height = height // self.widget.devicePixelRatio()
self.buffer_width = width
self.buffer_height = height
super().resize(width, height) |
def get_grouped_issues(self, keyfunc=None, sortby=None):
"""
Retrieves the issues in the collection grouped into buckets according
to the key generated by the keyfunc.
:param keyfunc:
a function that will be used to generate the key that identifies
the group that an issue will be assigned to. This function receives
a single tidypy.Issue argument and must return a string. If not
specified, the filename of the issue will be used.
:type keyfunc: func
:param sortby: the properties to sort the issues by
:type sortby: list(str)
:rtype: OrderedDict
"""
if not keyfunc:
keyfunc = default_group
if not sortby:
sortby = self.DEFAULT_SORT
self._ensure_cleaned_issues()
return self._group_issues(self._cleaned_issues, keyfunc, sortby) | Retrieves the issues in the collection grouped into buckets according
to the key generated by the keyfunc.
:param keyfunc:
a function that will be used to generate the key that identifies
the group that an issue will be assigned to. This function receives
a single tidypy.Issue argument and must return a string. If not
specified, the filename of the issue will be used.
:type keyfunc: func
:param sortby: the properties to sort the issues by
:type sortby: list(str)
:rtype: OrderedDict | Below is the the instruction that describes the task:
### Input:
Retrieves the issues in the collection grouped into buckets according
to the key generated by the keyfunc.
:param keyfunc:
a function that will be used to generate the key that identifies
the group that an issue will be assigned to. This function receives
a single tidypy.Issue argument and must return a string. If not
specified, the filename of the issue will be used.
:type keyfunc: func
:param sortby: the properties to sort the issues by
:type sortby: list(str)
:rtype: OrderedDict
### Response:
def get_grouped_issues(self, keyfunc=None, sortby=None):
"""
Retrieves the issues in the collection grouped into buckets according
to the key generated by the keyfunc.
:param keyfunc:
a function that will be used to generate the key that identifies
the group that an issue will be assigned to. This function receives
a single tidypy.Issue argument and must return a string. If not
specified, the filename of the issue will be used.
:type keyfunc: func
:param sortby: the properties to sort the issues by
:type sortby: list(str)
:rtype: OrderedDict
"""
if not keyfunc:
keyfunc = default_group
if not sortby:
sortby = self.DEFAULT_SORT
self._ensure_cleaned_issues()
return self._group_issues(self._cleaned_issues, keyfunc, sortby) |
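A hypothetical iteration over the result, assuming collection is an instance of the class above; with no keyfunc the issues group by filename.
grouped = collection.get_grouped_issues()
for filename, issues in grouped.items():
    print(filename, len(issues))  # e.g. "pkg/module.py 3" (illustrative)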
def load_json(path: str, encoding: str = "utf-8") -> HistogramBase:
"""Load histogram from a JSON file."""
with open(path, "r", encoding=encoding) as f:
text = f.read()
return parse_json(text) | Load histogram from a JSON file. | Below is the the instruction that describes the task:
### Input:
Load histogram from a JSON file.
### Response:
def load_json(path: str, encoding: str = "utf-8") -> HistogramBase:
"""Load histogram from a JSON file."""
with open(path, "r", encoding=encoding) as f:
text = f.read()
return parse_json(text) |
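Minimal usage with a placeholder path, assuming the file was written by the matching JSON-saving routine.
hist = load_json('histogram.json')  # hypothetical file
print(type(hist))                   # some HistogramBase subclass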
def add(self, indent, line):
"""Appends the given text line with prefixed spaces in accordance with
the given number of indentation levels.
"""
if isinstance(line, str):
list.append(self, indent*4*' ' + line)
else:
for subline in line:
list.append(self, indent*4*' ' + subline) | Appends the given text line with prefixed spaces in accordance with
the given number of indentation levels. | Below is the the instruction that describes the task:
### Input:
Appends the given text line with prefixed spaces in accordance with
the given number of indentation levels.
### Response:
def add(self, indent, line):
"""Appends the given text line with prefixed spaces in accordance with
the given number of indentation levels.
"""
if isinstance(line, str):
list.append(self, indent*4*' ' + line)
else:
for subline in line:
list.append(self, indent*4*' ' + subline) |
def memoize(func):
""" Cache decorator for functions inside model classes """
def model(cls, energy, *args, **kwargs):
try:
memoize = cls._memoize
cache = cls._cache
queue = cls._queue
except AttributeError:
memoize = False
if memoize:
# Allow for dicts or tables with energy column, Quantity array or
# Quantity scalar
try:
with warnings.catch_warnings():
warnings.simplefilter(
"ignore",
getattr(np, "VisibleDeprecationWarning", None),
)
energy = u.Quantity(energy["energy"])
except (TypeError, ValueError, IndexError):
pass
try:
# tostring is 10 times faster than str(array).encode()
bstr = energy.value.tostring()
except AttributeError:
# scalar Quantity
bstr = str(energy.value).encode()
data = [hashlib.sha256(bstr).hexdigest()]
data.append(energy.unit.to_string())
data.append(str(kwargs.get("distance", 0)))
if args:
data.append(str(args))
if hasattr(cls, "particle_distribution"):
models = [cls, cls.particle_distribution]
else:
models = [cls]
for model in models:
if hasattr(model, "param_names"):
for par in model.param_names:
data.append(str(getattr(model, par)))
token = "".join(data)
digest = hashlib.sha256(token.encode()).hexdigest()
if digest in cache:
return cache[digest]
result = func(cls, energy, *args, **kwargs)
if memoize:
# remove first item in queue and remove from cache
if len(queue) > 16:
key = queue.pop(0)
cache.pop(key, None)
# save last result to cache
queue.append(digest)
cache[digest] = result
return result
model.__name__ = func.__name__
model.__doc__ = func.__doc__
return model | Cache decorator for functions inside model classes | Below is the the instruction that describes the task:
### Input:
Cache decorator for functions inside model classes
### Response:
def memoize(func):
""" Cache decorator for functions inside model classes """
def model(cls, energy, *args, **kwargs):
try:
memoize = cls._memoize
cache = cls._cache
queue = cls._queue
except AttributeError:
memoize = False
if memoize:
# Allow for dicts or tables with energy column, Quantity array or
# Quantity scalar
try:
with warnings.catch_warnings():
warnings.simplefilter(
"ignore",
getattr(np, "VisibleDeprecationWarning", None),
)
energy = u.Quantity(energy["energy"])
except (TypeError, ValueError, IndexError):
pass
try:
# tostring is 10 times faster than str(array).encode()
bstr = energy.value.tostring()
except AttributeError:
# scalar Quantity
bstr = str(energy.value).encode()
data = [hashlib.sha256(bstr).hexdigest()]
data.append(energy.unit.to_string())
data.append(str(kwargs.get("distance", 0)))
if args:
data.append(str(args))
if hasattr(cls, "particle_distribution"):
models = [cls, cls.particle_distribution]
else:
models = [cls]
for model in models:
if hasattr(model, "param_names"):
for par in model.param_names:
data.append(str(getattr(model, par)))
token = "".join(data)
digest = hashlib.sha256(token.encode()).hexdigest()
if digest in cache:
return cache[digest]
result = func(cls, energy, *args, **kwargs)
if memoize:
# remove first item in queue and remove from cache
if len(queue) > 16:
key = queue.pop(0)
cache.pop(key, None)
# save last result to cache
queue.append(digest)
cache[digest] = result
return result
model.__name__ = func.__name__
model.__doc__ = func.__doc__
return model |
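A toy sketch of a class that satisfies the conventions the decorator reads (_memoize, _cache, _queue and an optional param_names); an astropy Quantity is used because the hashing path expects a .value/.unit pair. All names here are illustrative.
import astropy.units as u

class ToyModel:
    _memoize = True
    _cache = {}
    _queue = []
    param_names = ()

    @memoize
    def __call__(self, energy):
        return energy * 2  # stand-in for an expensive model evaluation

m = ToyModel()
e = [1, 10] * u.TeV
m(e)  # computed and stored in _cache
m(e)  # identical inputs: served from _cache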
def get_metadata(self, handle):
"""
Returns the associated metadata info for the given handle, the metadata
file must exist (``handle + '.metadata'``).
Args:
handle (str): Path to the template to get the metadata from
Returns:
dict: Metadata for the given handle
"""
handle = os.path.expanduser(os.path.expandvars(handle))
with open(self._prefixed('%s.metadata' % handle)) as f:
return json.load(f) | Returns the associated metadata info for the given handle, the metadata
file must exist (``handle + '.metadata'``).
Args:
handle (str): Path to the template to get the metadata from
Returns:
dict: Metadata for the given handle | Below is the the instruction that describes the task:
### Input:
Returns the associated metadata info for the given handle, the metadata
file must exist (``handle + '.metadata'``).
Args:
handle (str): Path to the template to get the metadata from
Returns:
dict: Metadata for the given handle
### Response:
def get_metadata(self, handle):
"""
Returns the associated metadata info for the given handle, the metadata
file must exist (``handle + '.metadata'``).
Args:
handle (str): Path to the template to get the metadata from
Returns:
dict: Metadata for the given handle
"""
handle = os.path.expanduser(os.path.expandvars(handle))
with open(self._prefixed('%s.metadata' % handle)) as f:
return json.load(f) |
def simplify_soc_marker(self, text, prev_text):
"""Simplify start of cell marker when previous line is blank"""
if self.cell_marker_start:
return text
if self.is_code() and text and text[0] == self.comment + ' + {}':
if not prev_text or not prev_text[-1].strip():
text[0] = self.comment + ' +'
return text | Simplify start of cell marker when previous line is blank | Below is the the instruction that describes the task:
### Input:
Simplify start of cell marker when previous line is blank
### Response:
def simplify_soc_marker(self, text, prev_text):
"""Simplify start of cell marker when previous line is blank"""
if self.cell_marker_start:
return text
if self.is_code() and text and text[0] == self.comment + ' + {}':
if not prev_text or not prev_text[-1].strip():
text[0] = self.comment + ' +'
return text |
def set(self, indexes, values=None):
"""
Given indexes will set a sub-set of the Series to the values provided. This method will direct to the below
methods based on what types are passed in for the indexes. If the indexes contains values not in the Series
then new rows or columns will be added.
:param indexes: indexes value, list of indexes values, or a list of booleans.
:param values: value or list of values to set. If a list then must be the same length as the indexes parameter.
:return: nothing
"""
if isinstance(indexes, (list, blist)):
self.set_rows(indexes, values)
else:
self.set_cell(indexes, values) | Given indexes will set a sub-set of the Series to the values provided. This method will direct to the below
methods based on what types are passed in for the indexes. If the indexes contains values not in the Series
then new rows or columns will be added.
:param indexes: a single index value, a list of index values, or a list of booleans.
:param values: value or list of values to set. If a list then must be the same length as the indexes parameter.
:return: nothing | Below is the the instruction that describes the task:
### Input:
Given indexes will set a sub-set of the Series to the values provided. This method will direct to the below
methods based on what types are passed in for the indexes. If the indexes contains values not in the Series
then new rows or columns will be added.
:param indexes: a single index value, a list of index values, or a list of booleans.
:param values: value or list of values to set. If a list then must be the same length as the indexes parameter.
:return: nothing
### Response:
def set(self, indexes, values=None):
"""
Given indexes will set a sub-set of the Series to the values provided. This method will direct to the below
methods based on what types are passed in for the indexes. If the indexes contains values not in the Series
then new rows or columns will be added.
:param indexes: a single index value, a list of index values, or a list of booleans.
:param values: value or list of values to set. If a list then must be the same length as the indexes parameter.
:return: nothing
"""
if isinstance(indexes, (list, blist)):
self.set_rows(indexes, values)
else:
self.set_cell(indexes, values) |
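A dispatch sketch, assuming s is an instance of the series class above.
s.set([2, 4, 6], [20, 40, 60])  # list of indexes -> set_rows
s.set([True, False, True], 0)   # boolean list also routes to set_rows
s.set(9, 99)                    # scalar index -> set_cell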
def create_check(self, label=None, name=None, check_type=None,
disabled=False, metadata=None, details=None,
monitoring_zones_poll=None, timeout=None, period=None,
target_alias=None, target_hostname=None, target_receiver=None,
test_only=False, include_debug=False):
"""
Creates a check on this entity with the specified attributes. The
'details' parameter should be a dict with the keys as the option name,
and the value as the desired setting.
"""
return self._check_manager.create_check(label=label, name=name,
check_type=check_type, disabled=disabled, metadata=metadata,
details=details, monitoring_zones_poll=monitoring_zones_poll,
timeout=timeout, period=period, target_alias=target_alias,
target_hostname=target_hostname,
target_receiver=target_receiver, test_only=test_only,
include_debug=include_debug) | Creates a check on this entity with the specified attributes. The
'details' parameter should be a dict with the keys as the option name,
and the value as the desired setting. | Below is the the instruction that describes the task:
### Input:
Creates a check on this entity with the specified attributes. The
'details' parameter should be a dict with the keys as the option name,
and the value as the desired setting.
### Response:
def create_check(self, label=None, name=None, check_type=None,
disabled=False, metadata=None, details=None,
monitoring_zones_poll=None, timeout=None, period=None,
target_alias=None, target_hostname=None, target_receiver=None,
test_only=False, include_debug=False):
"""
Creates a check on this entity with the specified attributes. The
'details' parameter should be a dict with the keys as the option name,
and the value as the desired setting.
"""
return self._check_manager.create_check(label=label, name=name,
check_type=check_type, disabled=disabled, metadata=metadata,
details=details, monitoring_zones_poll=monitoring_zones_poll,
timeout=timeout, period=period, target_alias=target_alias,
target_hostname=target_hostname,
target_receiver=target_receiver, test_only=test_only,
include_debug=include_debug) |
def execute(self, string, max_tacts=None):
"""Execute algorithm (if max_times = None, there can be forever loop)."""
self.init_tape(string)
counter = 0
while True:
self.execute_once()
if self.state == self.TERM_STATE:
break
counter += 1
if max_tacts is not None and counter >= max_tacts:
raise TimeoutError("algorithm hasn't been stopped")
return self.get_tape() | Execute algorithm (if max_tacts is None, the loop may never terminate).
### Input:
Execute algorithm (if max_tacts is None, the loop may never terminate).
### Response:
def execute(self, string, max_tacts=None):
"""Execute algorithm (if max_times = None, there can be forever loop)."""
self.init_tape(string)
counter = 0
while True:
self.execute_once()
if self.state == self.TERM_STATE:
break
counter += 1
if max_tacts is not None and counter >= max_tacts:
raise TimeoutError("algorithm hasn't been stopped")
return self.get_tape() |
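A hedged driver sketch, assuming machine is an instance of the algorithm class above.
try:
    tape = machine.execute('1011', max_tacts=10000)
except TimeoutError:
    tape = None  # never reached TERM_STATE within the tact budget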
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
pj = os.path.join
app(self.input_file.path) # Path to the input file
app(self.output_file.path) # Path to the output file
app(pj(self.workdir, self.prefix.idata)) # Prefix for input data
app(pj(self.workdir, self.prefix.odata)) # Prefix for output data
app(pj(self.workdir, self.prefix.tdata)) # Prefix for temporary data
# Paths to the pseudopotential files.
# Note that here the pseudos **must** be sorted according to znucl.
# Here we reorder the pseudos if the order is wrong.
ord_pseudos = []
znucl = [specie.number for specie in
self.input.structure.types_of_specie]
for z in znucl:
for p in self.pseudos:
if p.Z == z:
ord_pseudos.append(p)
break
else:
raise ValueError("Cannot find pseudo with znucl %s in pseudos:\n%s" % (z, self.pseudos))
for pseudo in ord_pseudos:
app(pseudo.path)
return "\n".join(lines) | String with the list of files and prefixes needed to execute ABINIT. | Below is the the instruction that describes the task:
### Input:
String with the list of files and prefixes needed to execute ABINIT.
### Response:
def filesfile_string(self):
"""String with the list of files and prefixes needed to execute ABINIT."""
lines = []
app = lines.append
pj = os.path.join
app(self.input_file.path) # Path to the input file
app(self.output_file.path) # Path to the output file
app(pj(self.workdir, self.prefix.idata)) # Prefix for input data
app(pj(self.workdir, self.prefix.odata)) # Prefix for output data
app(pj(self.workdir, self.prefix.tdata)) # Prefix for temporary data
# Paths to the pseudopotential files.
# Note that here the pseudos **must** be sorted according to znucl.
# Here we reorder the pseudos if the order is wrong.
ord_pseudos = []
znucl = [specie.number for specie in
self.input.structure.types_of_specie]
for z in znucl:
for p in self.pseudos:
if p.Z == z:
ord_pseudos.append(p)
break
else:
raise ValueError("Cannot find pseudo with znucl %s in pseudos:\n%s" % (z, self.pseudos))
for pseudo in ord_pseudos:
app(pseudo.path)
return "\n".join(lines) |
def list(self, group=None, host_filter=None, **kwargs):
"""Return a list of hosts.
=====API DOCS=====
Retrieve a list of hosts.
:param group: Primary key or name of the group whose hosts will be listed.
:type group: str
:param all_pages: Flag that if set, collect all pages of content from the API when returning results.
:type all_pages: bool
:param page: The page to show. Ignored if all_pages is set.
:type page: int
:param query: Contains 2-tuples used as query parameters to filter resulting resource objects.
:type query: list
:param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.
:returns: A JSON object containing details of all resource objects returned by Tower backend.
:rtype: dict
=====API DOCS=====
"""
if group:
kwargs['query'] = kwargs.get('query', ()) + (('groups__in', group),)
if host_filter:
kwargs['query'] = kwargs.get('query', ()) + (('host_filter', host_filter),)
return super(Resource, self).list(**kwargs) | Return a list of hosts.
=====API DOCS=====
Retrieve a list of hosts.
:param group: Primary key or name of the group whose hosts will be listed.
:type group: str
:param all_pages: Flag that if set, collect all pages of content from the API when returning results.
:type all_pages: bool
:param page: The page to show. Ignored if all_pages is set.
:type page: int
:param query: Contains 2-tuples used as query parameters to filter resulting resource objects.
:type query: list
:param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.
:returns: A JSON object containing details of all resource objects returned by Tower backend.
:rtype: dict
=====API DOCS===== | Below is the the instruction that describes the task:
### Input:
Return a list of hosts.
=====API DOCS=====
Retrieve a list of hosts.
:param group: Primary key or name of the group whose hosts will be listed.
:type group: str
:param all_pages: Flag that if set, collect all pages of content from the API when returning results.
:type all_pages: bool
:param page: The page to show. Ignored if all_pages is set.
:type page: int
:param query: Contains 2-tuples used as query parameters to filter resulting resource objects.
:type query: list
:param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.
:returns: A JSON object containing details of all resource objects returned by Tower backend.
:rtype: dict
=====API DOCS=====
### Response:
def list(self, group=None, host_filter=None, **kwargs):
"""Return a list of hosts.
=====API DOCS=====
Retrieve a list of hosts.
:param group: Primary key or name of the group whose hosts will be listed.
:type group: str
:param all_pages: Flag that if set, collect all pages of content from the API when returning results.
:type all_pages: bool
:param page: The page to show. Ignored if all_pages is set.
:type page: int
:param query: Contains 2-tuples used as query parameters to filter resulting resource objects.
:type query: list
:param `**kwargs`: Keyword arguments list of available fields used for searching resource objects.
:returns: A JSON object containing details of all resource objects returned by Tower backend.
:rtype: dict
=====API DOCS=====
"""
if group:
kwargs['query'] = kwargs.get('query', ()) + (('groups__in', group),)
if host_filter:
kwargs['query'] = kwargs.get('query', ()) + (('host_filter', host_filter),)
return super(Resource, self).list(**kwargs) |
def ldap_server_host_retries(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
retries = ET.SubElement(host, "retries")
retries.text = kwargs.pop('retries')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def ldap_server_host_retries(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
ldap_server = ET.SubElement(config, "ldap-server", xmlns="urn:brocade.com:mgmt:brocade-aaa")
host = ET.SubElement(ldap_server, "host")
hostname_key = ET.SubElement(host, "hostname")
hostname_key.text = kwargs.pop('hostname')
use_vrf_key = ET.SubElement(host, "use-vrf")
use_vrf_key.text = kwargs.pop('use_vrf')
retries = ET.SubElement(host, "retries")
retries.text = kwargs.pop('retries')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def unset_variable(section, value):
"""
Unset a variable in an environment file for the given section.
The value is given is the variable name, e.g.:
s3conf unset test ENV_VAR_NAME
"""
if not value:
value = section
section = None
try:
logger.debug('Running env command')
settings = config.Settings(section=section)
conf = s3conf.S3Conf(settings=settings)
env_vars = conf.get_envfile()
env_vars.unset(value)
except exceptions.EnvfilePathNotDefinedError:
raise exceptions.EnvfilePathNotDefinedUsageError() | Unset a variable in an environment file for the given section.
The value given is the variable name, e.g.:
s3conf unset test ENV_VAR_NAME | Below is the the instruction that describes the task:
### Input:
Unset a variable in an environment file for the given section.
The value is given is the variable name, e.g.:
s3conf unset test ENV_VAR_NAME
### Response:
def unset_variable(section, value):
"""
Unset a variable in an environment file for the given section.
The value is given is the variable name, e.g.:
s3conf unset test ENV_VAR_NAME
"""
if not value:
value = section
section = None
try:
logger.debug('Running env command')
settings = config.Settings(section=section)
conf = s3conf.S3Conf(settings=settings)
env_vars = conf.get_envfile()
env_vars.unset(value)
except exceptions.EnvfilePathNotDefinedError:
raise exceptions.EnvfilePathNotDefinedUsageError() |
def is_import_exception(mod):
"""Check module name to see if import has been whitelisted.
Import based rules should not run on any whitelisted module
"""
return (mod in IMPORT_EXCEPTIONS or
any(mod.startswith(m + '.') for m in IMPORT_EXCEPTIONS)) | Check module name to see if import has been whitelisted.
Import based rules should not run on any whitelisted module | Below is the the instruction that describes the task:
### Input:
Check module name to see if import has been whitelisted.
Import based rules should not run on any whitelisted module
### Response:
def is_import_exception(mod):
"""Check module name to see if import has been whitelisted.
Import based rules should not run on any whitelisted module
"""
return (mod in IMPORT_EXCEPTIONS or
any(mod.startswith(m + '.') for m in IMPORT_EXCEPTIONS)) |
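A behavior sketch; the whitelist is illustrative, and the dotted-prefix check means real submodules match while mere name prefixes do not.
# Assuming IMPORT_EXCEPTIONS = ['six'] at module scope (illustrative):
# is_import_exception('six')              -> True  (exact match)
# is_import_exception('six.moves.urllib') -> True  (starts with 'six.')
# is_import_exception('sixty')            -> False (no 'six.' prefix)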
def _parse(self):
"""
Loop through all child elements and execute any available parse methods for them
"""
# Is this a shorthand template?
if self._element.tag == 'template':
return self._parse_template(self._element)
# Is this a shorthand redirect?
if self._element.tag == 'redirect':
return self._parse_redirect(self._element)
for child in self._element:
method_name = '_parse_' + child.tag
if hasattr(self, method_name):
parse = getattr(self, method_name)
parse(child) | Loop through all child elements and execute any available parse methods for them | Below is the the instruction that describes the task:
### Input:
Loop through all child elements and execute any available parse methods for them
### Response:
def _parse(self):
"""
Loop through all child elements and execute any available parse methods for them
"""
# Is this a shorthand template?
if self._element.tag == 'template':
return self._parse_template(self._element)
# Is this a shorthand redirect?
if self._element.tag == 'redirect':
return self._parse_redirect(self._element)
for child in self._element:
method_name = '_parse_' + child.tag
if hasattr(self, method_name):
parse = getattr(self, method_name)
parse(child) |
def items(self):
"""Get an iter of VenvDirs and VenvFiles within the directory."""
contents = self.paths
contents = (
BinFile(path.path) if path.is_file else BinDir(path.path)
for path in contents
)
return contents | Get an iter of BinDirs and BinFiles within the directory.
### Input:
Get an iter of BinDirs and BinFiles within the directory.
### Response:
def items(self):
"""Get an iter of VenvDirs and VenvFiles within the directory."""
contents = self.paths
contents = (
BinFile(path.path) if path.is_file else BinDir(path.path)
for path in contents
)
return contents |
def essays(self):
"""Copy essays from the source profile to the destination profile."""
for essay_name in self.dest_user.profile.essays.essay_names:
setattr(self.dest_user.profile.essays, essay_name,
getattr(self.source_profile.essays, essay_name)) | Copy essays from the source profile to the destination profile. | Below is the the instruction that describes the task:
### Input:
Copy essays from the source profile to the destination profile.
### Response:
def essays(self):
"""Copy essays from the source profile to the destination profile."""
for essay_name in self.dest_user.profile.essays.essay_names:
setattr(self.dest_user.profile.essays, essay_name,
getattr(self.source_profile.essays, essay_name)) |
def _num_integral(self, r, c):
"""
numerical integral (1-e^{-c*x^2})/x dx [0..r]
:param r: radius
:param c: 1/(2*sigma^2)
:return:
"""
out = integrate.quad(lambda x: (1-np.exp(-c*x**2))/x, 0, r)
return out[0] | numerical integral (1-e^{-c*x^2})/x dx [0..r]
:param r: radius
:param c: 1/(2*sigma^2)
:return: | Below is the the instruction that describes the task:
### Input:
numerical integral (1-e^{-c*x^2})/x dx [0..r]
:param r: radius
:param c: 1/(2*sigma^2)
:return:
### Response:
def _num_integral(self, r, c):
"""
numerical integral (1-e^{-c*x^2})/x dx [0..r]
:param r: radius
:param c: 1/(2*sigma^2)
:return:
"""
out = integrate.quad(lambda x: (1-np.exp(-c*x**2))/x, 0, r)
return out[0] |
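The quadrature can be checked standalone; the integrand stays finite near 0 because (1 - e^(-c*x^2))/x behaves like c*x as x -> 0. The values of c and r below are illustrative.
import numpy as np
from scipy import integrate

c, r = 0.5, 2.0  # c plays the role of 1/(2*sigma^2)
val, err = integrate.quad(lambda x: (1 - np.exp(-c * x**2)) / x, 0, r)
print(val, err)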
def export(self, exporter=None, force_stroke=False):
"""
Export this SWF using the specified exporter.
When no exporter is passed in, the default exporter used
is swf.export.SVGExporter.
Exporters should extend the swf.export.BaseExporter class.
@param exporter : the exporter to use
@param force_stroke : set to true to force strokes on fills,
useful for some edge cases.
"""
exporter = SVGExporter() if exporter is None else exporter
if self._data is None:
raise Exception("This SWF was not loaded! (no data)")
if len(self.tags) == 0:
raise Exception("This SWF doesn't contain any tags!")
return exporter.export(self, force_stroke) | Export this SWF using the specified exporter.
When no exporter is passed in, the default exporter used
is swf.export.SVGExporter.
Exporters should extend the swf.export.BaseExporter class.
@param exporter : the exporter to use
@param force_stroke : set to true to force strokes on fills,
useful for some edge cases. | Below is the the instruction that describes the task:
### Input:
Export this SWF using the specified exporter.
When no exporter is passed in, the default exporter used
is swf.export.SVGExporter.
Exporters should extend the swf.export.BaseExporter class.
@param exporter : the exporter to use
@param force_stroke : set to true to force strokes on fills,
useful for some edge cases.
### Response:
def export(self, exporter=None, force_stroke=False):
"""
Export this SWF using the specified exporter.
When no exporter is passed in, the default exporter used
is swf.export.SVGExporter.
Exporters should extend the swf.export.BaseExporter class.
@param exporter : the exporter to use
@param force_stroke : set to true to force strokes on fills,
useful for some edge cases.
"""
exporter = SVGExporter() if exporter is None else exporter
if self._data is None:
raise Exception("This SWF was not loaded! (no data)")
if len(self.tags) == 0:
raise Exception("This SWF doesn't contain any tags!")
return exporter.export(self, force_stroke) |