code | docstring | text
---|---|---|
def exists(self, using=None, **kwargs):
"""
Returns ``True`` if the index already exists in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.exists`` unchanged.
"""
return self._get_connection(using).indices.exists(index=self._name, **kwargs) | Returns ``True`` if the index already exists in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.exists`` unchanged. | Below is the instruction that describes the task:
### Input:
Returns ``True`` if the index already exists in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.exists`` unchanged.
### Response:
def exists(self, using=None, **kwargs):
"""
Returns ``True`` if the index already exists in elasticsearch.
Any additional keyword arguments will be passed to
``Elasticsearch.indices.exists`` unchanged.
"""
return self._get_connection(using).indices.exists(index=self._name, **kwargs) |
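A minimal usage sketch for the method above, assuming the elasticsearch-dsl ``Index`` wrapper and a reachable cluster; the host URL and index name are illustrative.

from elasticsearch_dsl import Index, connections

connections.create_connection(hosts=["http://localhost:9200"])  # illustrative host

blog = Index("blog")
if not blog.exists():  # delegates to Elasticsearch.indices.exists
    blog.create()      # create the index only when it is missing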
def select_columns(self, column_names):
"""
Selects all columns where the name of the column or the type of column
is included in the column_names. An exception is raised if duplicate columns
are selected i.e. sf.select_columns(['a','a']), or non-existent columns
are selected.
Throws an exception for all other input types.
Parameters
----------
column_names: list[str or type]
The list of column names or a list of types.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``column_names`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = turicreate.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns]
"""
if not _is_non_string_iterable(column_names):
raise TypeError("column_names must be an iterable")
if not (all([isinstance(x, six.string_types) or isinstance(x, type) or isinstance(x, bytes)
for x in column_names])):
raise TypeError("Invalid key type: must be str, unicode, bytes or type")
requested_str_columns = [s for s in column_names if isinstance(s, six.string_types)]
# Make sure there are no duplicate keys
from collections import Counter
column_names_counter = Counter(column_names)
if (len(column_names)) != len(column_names_counter):
for key in column_names_counter:
if column_names_counter[key] > 1:
raise ValueError("There are duplicate keys in key list: '" + key + "'")
colnames_and_types = list(zip(self.column_names(), self.column_types()))
# Ok. we want the string columns to be in the ordering defined by the
# argument. And then all the type selection columns.
selected_columns = requested_str_columns
typelist = [s for s in column_names if isinstance(s, type)]
# next the type selection columns
# loop through all the columns, adding all columns with types in
# typelist. But don't add a column if it has already been added.
for i in colnames_and_types:
if i[1] in typelist and i[0] not in selected_columns:
selected_columns += [i[0]]
selected_columns = selected_columns
with cython_context():
return SFrame(data=[], _proxy=self.__proxy__.select_columns(selected_columns)) | Selects all columns where the name of the column or the type of column
is included in the column_names. An exception is raised if duplicate columns
are selected i.e. sf.select_columns(['a','a']), or non-existent columns
are selected.
Throws an exception for all other input types.
Parameters
----------
column_names: list[str or type]
The list of column names or a list of types.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``column_names`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = turicreate.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns] | Below is the instruction that describes the task:
### Input:
Selects all columns where the name of the column or the type of column
is included in the column_names. An exception is raised if duplicate columns
are selected i.e. sf.select_columns(['a','a']), or non-existent columns
are selected.
Throws an exception for all other input types.
Parameters
----------
column_names: list[str or type]
The list of column names or a list of types.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``column_names`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = turicreate.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns]
### Response:
def select_columns(self, column_names):
"""
Selects all columns where the name of the column or the type of column
is included in the column_names. An exception is raised if duplicate columns
are selected i.e. sf.select_columns(['a','a']), or non-existent columns
are selected.
Throws an exception for all other input types.
Parameters
----------
column_names: list[str or type]
The list of column names or a list of types.
Returns
-------
out : SFrame
A new SFrame that is made up of the columns referred to in
``column_names`` from the current SFrame.
See Also
--------
select_column
Examples
--------
>>> sf = turicreate.SFrame({'user_id': [1,2,3],
... 'user_name': ['alice', 'bob', 'charlie'],
... 'zipcode': [98101, 98102, 98103]
... })
>>> # This line is equivalent to `sf2 = sf[['user_id', 'zipcode']]`
>>> sf2 = sf.select_columns(['user_id', 'zipcode'])
>>> sf2
+---------+---------+
| user_id | zipcode |
+---------+---------+
| 1 | 98101 |
| 2 | 98102 |
| 3 | 98103 |
+---------+---------+
[3 rows x 2 columns]
"""
if not _is_non_string_iterable(column_names):
raise TypeError("column_names must be an iterable")
if not (all([isinstance(x, six.string_types) or isinstance(x, type) or isinstance(x, bytes)
for x in column_names])):
raise TypeError("Invalid key type: must be str, unicode, bytes or type")
requested_str_columns = [s for s in column_names if isinstance(s, six.string_types)]
# Make sure there are no duplicate keys
from collections import Counter
column_names_counter = Counter(column_names)
if (len(column_names)) != len(column_names_counter):
for key in column_names_counter:
if column_names_counter[key] > 1:
raise ValueError("There are duplicate keys in key list: '" + key + "'")
colnames_and_types = list(zip(self.column_names(), self.column_types()))
# Ok. we want the string columns to be in the ordering defined by the
# argument. And then all the type selection columns.
selected_columns = requested_str_columns
typelist = [s for s in column_names if isinstance(s, type)]
# next the type selection columns
# loop through all the columns, adding all columns with types in
# typelist. But don't add a column if it has already been added.
for i in colnames_and_types:
if i[1] in typelist and i[0] not in selected_columns:
selected_columns += [i[0]]
selected_columns = selected_columns
with cython_context():
return SFrame(data=[], _proxy=self.__proxy__.select_columns(selected_columns)) |
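A short sketch of mixing name- and type-based selection, following the docstring above; the SFrame contents are illustrative.

import turicreate as tc

sf = tc.SFrame({'user_id': [1, 2, 3],
                'user_name': ['alice', 'bob', 'charlie'],
                'rating': [4.5, 3.0, 5.0]})

# Requested names come first, then every remaining column whose type matches.
sub = sf.select_columns(['user_id', float])  # -> columns: user_id, rating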
def eta_bar(msg, max_value):
"""Display an adaptive ETA / countdown bar with a message.
Parameters
----------
msg: str
Message to prefix countdown bar line with
max_value: max_value
The max number of progress bar steps/updates
"""
widgets = [
"{msg}:".format(msg=msg),
progressbar.Bar(), ' ', progressbar.AdaptiveETA(),
]
return progressbar.ProgressBar(widgets=widgets, max_value=max_value) | Display an adaptive ETA / countdown bar with a message.
Parameters
----------
msg: str
Message to prefix countdown bar line with
max_value: max_value
The max number of progress bar steps/updates | Below is the instruction that describes the task:
### Input:
Display an adaptive ETA / countdown bar with a message.
Parameters
----------
msg: str
Message to prefix countdown bar line with
max_value: max_value
The max number of progress bar steps/updates
### Response:
def eta_bar(msg, max_value):
"""Display an adaptive ETA / countdown bar with a message.
Parameters
----------
msg: str
Message to prefix countdown bar line with
max_value: max_value
The max number of progress bar steps/updates
"""
widgets = [
"{msg}:".format(msg=msg),
progressbar.Bar(), ' ', progressbar.AdaptiveETA(),
]
return progressbar.ProgressBar(widgets=widgets, max_value=max_value) |
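A usage sketch, assuming the progressbar2 package that provides the widgets above; ``process_item`` is a placeholder for the real per-step work.

bar = eta_bar("Processing", max_value=100)
for i in bar(range(100)):   # a ProgressBar instance can wrap an iterable directly
    process_item(i)         # placeholder for the per-step work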
def StatEntryFromStat(stat,
pathspec,
ext_attrs = True):
"""Build a stat entry object from a given stat object.
Args:
stat: A `Stat` object.
pathspec: A `PathSpec` from which `stat` was obtained.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
"""
result = rdf_client_fs.StatEntry(pathspec=pathspec)
for attr in _STAT_ATTRS:
value = getattr(stat.GetRaw(), attr, None)
if value is None:
continue
# TODO(hanuszczak): Why are we doing this?
value = int(value)
if value < 0:
value &= 0xFFFFFFFF
setattr(result, attr, value)
result.st_flags_linux = stat.GetLinuxFlags()
result.st_flags_osx = stat.GetOsxFlags()
if ext_attrs:
# TODO(hanuszczak): Can we somehow incorporate extended attribute getter to
# the `Stat` class? That would make the code a lot prettier but would force
# `utils` to depend on `xattrs`.
result.ext_attrs = list(GetExtAttrs(stat.GetPath()))
return result | Build a stat entry object from a given stat object.
Args:
stat: A `Stat` object.
pathspec: A `PathSpec` from which `stat` was obtained.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object. | Below is the instruction that describes the task:
### Input:
Build a stat entry object from a given stat object.
Args:
stat: A `Stat` object.
pathspec: A `PathSpec` from which `stat` was obtained.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
### Response:
def StatEntryFromStat(stat,
pathspec,
ext_attrs = True):
"""Build a stat entry object from a given stat object.
Args:
stat: A `Stat` object.
pathspec: A `PathSpec` from which `stat` was obtained.
ext_attrs: Whether to include extended file attributes in the result.
Returns:
`StatEntry` object.
"""
result = rdf_client_fs.StatEntry(pathspec=pathspec)
for attr in _STAT_ATTRS:
value = getattr(stat.GetRaw(), attr, None)
if value is None:
continue
# TODO(hanuszczak): Why are we doing this?
value = int(value)
if value < 0:
value &= 0xFFFFFFFF
setattr(result, attr, value)
result.st_flags_linux = stat.GetLinuxFlags()
result.st_flags_osx = stat.GetOsxFlags()
if ext_attrs:
# TODO(hanuszczak): Can we somehow incorporate extended attribute getter to
# the `Stat` class? That would make the code a lot prettier but would force
# `utils` to depend on `xattrs`.
result.ext_attrs = list(GetExtAttrs(stat.GetPath()))
return result |
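On the TODO above: the masking is most likely there because some platforms report unsigned 32-bit stat fields as negative Python integers, and ``& 0xFFFFFFFF`` recovers the unsigned value. A one-line illustration:

>>> -2 & 0xFFFFFFFF   # two's-complement bit pattern read back as unsigned 32-bit
4294967294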
def getRootIntfPort(port: LPort):
"""
:return: most top port which contains this port
"""
while True:
if isinstance(port.parent, LNode):
return port
else:
port = port.parent | :return: most top port which contains this port | Below is the instruction that describes the task:
### Input:
:return: most top port which contains this port
### Response:
def getRootIntfPort(port: LPort):
"""
:return: most top port which contains this port
"""
while True:
if isinstance(port.parent, LNode):
return port
else:
port = port.parent |
def update(self, path, node):
'''Update the dict with a new color using a 'path' through the dict. You can either pass an existing path e.g.
'Scaffold.mutations' to override a color or part of the hierarchy or you can add a new leaf node or dict.'''
assert(type(path) == type(self.name))
assert(type(node) == type(self.name) or type(node) == type(predefined))
d = self.color_scheme
tokens = path.split('.')
for t in tokens[:-1]:
d = d.get(t)
if d == None:
raise Exception("Path '%s' not found.")
d[tokens[-1]] = node | Update the dict with a new color using a 'path' through the dict. You can either pass an existing path e.g.
'Scaffold.mutations' to override a color or part of the hierarchy or you can add a new leaf node or dict. | Below is the instruction that describes the task:
### Input:
Update the dict with a new color using a 'path' through the dict. You can either pass an existing path e.g.
'Scaffold.mutations' to override a color or part of the hierarchy or you can add a new leaf node or dict.
### Response:
def update(self, path, node):
'''Update the dict with a new color using a 'path' through the dict. You can either pass an existing path e.g.
'Scaffold.mutations' to override a color or part of the hierarchy or you can add a new leaf node or dict.'''
assert(type(path) == type(self.name))
assert(type(node) == type(self.name) or type(node) == type(predefined))
d = self.color_scheme
tokens = path.split('.')
for t in tokens[:-1]:
d = d.get(t)
if d == None:
raise Exception("Path '%s' not found.")
d[tokens[-1]] = node |
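A usage sketch, assuming ``scheme`` is an instance of the surrounding class and that ``predefined`` in the assertion is a dict (so dict nodes are accepted); the colour values are illustrative.

scheme.update('Scaffold.mutations', '#FF0000')               # override an existing leaf
scheme.update('Scaffold.insertions', {'minor': '#00FF00'})   # attach a new sub-dict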
def write_secret(path, **kwargs):
'''
Set secret at the path in vault. The vault policy used must allow this.
CLI Example:
.. code-block:: bash
salt '*' vault.write_secret "secret/my/secret" user="foo" password="bar"
'''
log.debug('Writing vault secrets for %s at %s', __grains__['id'], path)
data = dict([(x, y) for x, y in kwargs.items() if not x.startswith('__')])
try:
url = 'v1/{0}'.format(path)
response = __utils__['vault.make_request']('POST', url, json=data)
if response.status_code == 200:
return response.json()['data']
elif response.status_code != 204:
response.raise_for_status()
return True
except Exception as err:
log.error('Failed to write secret! %s: %s', type(err).__name__, err)
return False | Set secret at the path in vault. The vault policy used must allow this.
CLI Example:
.. code-block:: bash
salt '*' vault.write_secret "secret/my/secret" user="foo" password="bar" | Below is the instruction that describes the task:
### Input:
Set secret at the path in vault. The vault policy used must allow this.
CLI Example:
.. code-block:: bash
salt '*' vault.write_secret "secret/my/secret" user="foo" password="bar"
### Response:
def write_secret(path, **kwargs):
'''
Set secret at the path in vault. The vault policy used must allow this.
CLI Example:
.. code-block:: bash
salt '*' vault.write_secret "secret/my/secret" user="foo" password="bar"
'''
log.debug('Writing vault secrets for %s at %s', __grains__['id'], path)
data = dict([(x, y) for x, y in kwargs.items() if not x.startswith('__')])
try:
url = 'v1/{0}'.format(path)
response = __utils__['vault.make_request']('POST', url, json=data)
if response.status_code == 200:
return response.json()['data']
elif response.status_code != 204:
response.raise_for_status()
return True
except Exception as err:
log.error('Failed to write secret! %s: %s', type(err).__name__, err)
return False |
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = (np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64')
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype='int64'))
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, allow_dups=True) | Take items along any axis. | Below is the instruction that describes the task:
### Input:
Take items along any axis.
### Response:
def take(self, indexer, axis=1, verify=True, convert=True):
"""
Take items along any axis.
"""
self._consolidate_inplace()
indexer = (np.arange(indexer.start, indexer.stop, indexer.step,
dtype='int64')
if isinstance(indexer, slice)
else np.asanyarray(indexer, dtype='int64'))
n = self.shape[axis]
if convert:
indexer = maybe_convert_indices(indexer, n)
if verify:
if ((indexer == -1) | (indexer >= n)).any():
raise Exception('Indices must be nonzero and less than '
'the axis length')
new_labels = self.axes[axis].take(indexer)
return self.reindex_indexer(new_axis=new_labels, indexer=indexer,
axis=axis, allow_dups=True) |
def provisions(ctx, provision, clear_existing, overwrite, list_provisions):
"""Install default provisioning data"""
install_provisions(ctx, provision, clear_existing, overwrite, list_provisions) | Install default provisioning data | Below is the instruction that describes the task:
### Input:
Install default provisioning data
### Response:
def provisions(ctx, provision, clear_existing, overwrite, list_provisions):
"""Install default provisioning data"""
install_provisions(ctx, provision, clear_existing, overwrite, list_provisions) |
def modelshift_weaksec(koi):
"""
Max secondary depth based on model-shift secondary test from Jeff Coughlin
secondary metric: mod_depth_sec_dv * (1 + 3*mod_fred_dv / mod_sig_sec_dv)
"""
num = KOIDATA.ix[ku.koiname(koi), 'koi_tce_plnt_num']
if np.isnan(num):
num = 1
kid = KOIDATA.ix[ku.koiname(koi), 'kepid']
tce = '{:09.0f}-{:02.0f}'.format(kid,num)
#return largest depth between DV detrending and alternate detrending
try:
r = ROBOVETDATA.ix[tce]
except KeyError:
raise NoWeakSecondaryError(koi)
depth_dv = r['mod_depth_sec_dv'] * (1 + 3*r['mod_fred_dv'] / r['mod_sig_sec_dv'])
depth_alt = r['mod_depth_sec_alt'] * (1 + 3*r['mod_fred_alt'] / r['mod_sig_sec_alt'])
logging.debug(r[['mod_depth_sec_dv','mod_fred_dv','mod_sig_sec_dv']])
logging.debug(r[['mod_depth_sec_alt','mod_fred_alt','mod_sig_sec_alt']])
if np.isnan(depth_dv) and np.isnan(depth_alt):
#return weaksec_vv2(koi)
raise NoWeakSecondaryError(koi)
elif np.isnan(depth_dv):
return depth_alt
elif np.isnan(depth_alt):
return depth_dv
else:
return max(depth_dv, depth_alt) | Max secondary depth based on model-shift secondary test from Jeff Coughlin
secondary metric: mod_depth_sec_dv * (1 + 3*mod_fred_dv / mod_sig_sec_dv) | Below is the instruction that describes the task:
### Input:
Max secondary depth based on model-shift secondary test from Jeff Coughlin
secondary metric: mod_depth_sec_dv * (1 + 3*mod_fred_dv / mod_sig_sec_dv)
### Response:
def modelshift_weaksec(koi):
"""
Max secondary depth based on model-shift secondary test from Jeff Coughlin
secondary metric: mod_depth_sec_dv * (1 + 3*mod_fred_dv / mod_sig_sec_dv)
"""
num = KOIDATA.ix[ku.koiname(koi), 'koi_tce_plnt_num']
if np.isnan(num):
num = 1
kid = KOIDATA.ix[ku.koiname(koi), 'kepid']
tce = '{:09.0f}-{:02.0f}'.format(kid,num)
#return largest depth between DV detrending and alternate detrending
try:
r = ROBOVETDATA.ix[tce]
except KeyError:
raise NoWeakSecondaryError(koi)
depth_dv = r['mod_depth_sec_dv'] * (1 + 3*r['mod_fred_dv'] / r['mod_sig_sec_dv'])
depth_alt = r['mod_depth_sec_alt'] * (1 + 3*r['mod_fred_alt'] / r['mod_sig_sec_alt'])
logging.debug(r[['mod_depth_sec_dv','mod_fred_dv','mod_sig_sec_dv']])
logging.debug(r[['mod_depth_sec_alt','mod_fred_alt','mod_sig_sec_alt']])
if np.isnan(depth_dv) and np.isnan(depth_alt):
#return weaksec_vv2(koi)
raise NoWeakSecondaryError(koi)
elif np.isnan(depth_dv):
return depth_alt
elif np.isnan(depth_alt):
return depth_dv
else:
return max(depth_dv, depth_alt) |
def request(
self,
method_name: str,
*args: Any,
trim_log_values: bool = False,
validate_against_schema: bool = True,
id_generator: Optional[Iterator] = None,
**kwargs: Any
) -> Response:
"""
Send a request by passing the method and arguments.
>>> client.request("cat", name="Yoko")
<Response[1]
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
id_generator: Iterable of values to use as the "id" part of the request.
"""
return self.send(
Request(method_name, id_generator=id_generator, *args, **kwargs),
trim_log_values=trim_log_values,
validate_against_schema=validate_against_schema,
) | Send a request by passing the method and arguments.
>>> client.request("cat", name="Yoko")
<Response[1]
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
id_generator: Iterable of values to use as the "id" part of the request. | Below is the instruction that describes the task:
### Input:
Send a request by passing the method and arguments.
>>> client.request("cat", name="Yoko")
<Response[1]
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
id_generator: Iterable of values to use as the "id" part of the request.
### Response:
def request(
self,
method_name: str,
*args: Any,
trim_log_values: bool = False,
validate_against_schema: bool = True,
id_generator: Optional[Iterator] = None,
**kwargs: Any
) -> Response:
"""
Send a request by passing the method and arguments.
>>> client.request("cat", name="Yoko")
<Response[1]
Args:
method_name: The remote procedure's method name.
args: Positional arguments passed to the remote procedure.
kwargs: Keyword arguments passed to the remote procedure.
trim_log_values: Abbreviate the log entries of requests and responses.
validate_against_schema: Validate response against the JSON-RPC schema.
id_generator: Iterable of values to use as the "id" part of the request.
"""
return self.send(
Request(method_name, id_generator=id_generator, *args, **kwargs),
trim_log_values=trim_log_values,
validate_against_schema=validate_against_schema,
) |
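A usage sketch with positional arguments and an explicit id generator, assuming a jsonrpcclient-style HTTP client; the import path, endpoint and method name are illustrative assumptions.

import itertools
from jsonrpcclient.clients.http_client import HTTPClient  # assumed client class

client = HTTPClient("http://localhost:5000")               # illustrative endpoint
response = client.request("add", 2, 3, id_generator=itertools.count(start=10))
print(response.data.result)                                # parsed JSON-RPC result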
def percentile_ranks(self, affinities, allele=None, alleles=None, throw=True):
"""
Return percentile ranks for the given ic50 affinities and alleles.
The 'allele' and 'alleles' arguments are as in the `predict` method.
Specify one of these.
Parameters
----------
affinities : sequence of float
nM affinities
allele : string
alleles : sequence of string
throw : boolean
If True, a ValueError will be raised in the case of unsupported
alleles. If False, a warning will be logged and NaN will be returned
for those percentile ranks.
Returns
-------
numpy.array of float
"""
if allele is not None:
try:
transform = self.allele_to_percent_rank_transform[allele]
return transform.transform(affinities)
except KeyError:
msg = "Allele %s has no percentile rank information" % allele
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
# Return NaNs
return numpy.ones(len(affinities)) * numpy.nan
if alleles is None:
raise ValueError("Specify allele or alleles")
df = pandas.DataFrame({"affinity": affinities})
df["allele"] = alleles
df["result"] = numpy.nan
for (allele, sub_df) in df.groupby("allele"):
df.loc[sub_df.index, "result"] = self.percentile_ranks(
sub_df.affinity, allele=allele, throw=throw)
return df.result.values | Return percentile ranks for the given ic50 affinities and alleles.
The 'allele' and 'alleles' arguments are as in the `predict` method.
Specify one of these.
Parameters
----------
affinities : sequence of float
nM affinities
allele : string
alleles : sequence of string
throw : boolean
If True, a ValueError will be raised in the case of unsupported
alleles. If False, a warning will be logged and NaN will be returned
for those percentile ranks.
Returns
-------
numpy.array of float | Below is the instruction that describes the task:
### Input:
Return percentile ranks for the given ic50 affinities and alleles.
The 'allele' and 'alleles' arguments are as in the `predict` method.
Specify one of these.
Parameters
----------
affinities : sequence of float
nM affinities
allele : string
alleles : sequence of string
throw : boolean
If True, a ValueError will be raised in the case of unsupported
alleles. If False, a warning will be logged and NaN will be returned
for those percentile ranks.
Returns
-------
numpy.array of float
### Response:
def percentile_ranks(self, affinities, allele=None, alleles=None, throw=True):
"""
Return percentile ranks for the given ic50 affinities and alleles.
The 'allele' and 'alleles' arguments are as in the `predict` method.
Specify one of these.
Parameters
----------
affinities : sequence of float
nM affinities
allele : string
alleles : sequence of string
throw : boolean
If True, a ValueError will be raised in the case of unsupported
alleles. If False, a warning will be logged and NaN will be returned
for those percentile ranks.
Returns
-------
numpy.array of float
"""
if allele is not None:
try:
transform = self.allele_to_percent_rank_transform[allele]
return transform.transform(affinities)
except KeyError:
msg = "Allele %s has no percentile rank information" % allele
if throw:
raise ValueError(msg)
else:
warnings.warn(msg)
# Return NaNs
return numpy.ones(len(affinities)) * numpy.nan
if alleles is None:
raise ValueError("Specify allele or alleles")
df = pandas.DataFrame({"affinity": affinities})
df["allele"] = alleles
df["result"] = numpy.nan
for (allele, sub_df) in df.groupby("allele"):
df.loc[sub_df.index, "result"] = self.percentile_ranks(
sub_df.affinity, allele=allele, throw=throw)
return df.result.values |
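A sketch of the two calling conventions, assuming ``predictor`` is an instance of the surrounding class with percentile-rank data loaded; allele names are illustrative.

# One allele shared by every affinity:
ranks = predictor.percentile_ranks([50.0, 500.0], allele="HLA-A*02:01")

# Per-affinity alleles; with throw=False an unsupported allele yields NaN instead of raising:
ranks = predictor.percentile_ranks(
    [50.0, 500.0, 5000.0],
    alleles=["HLA-A*02:01", "HLA-A*02:01", "HLA-B*99:99"],
    throw=False)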
def on_core_metadata_event(self, event):
"""Implementation of the core metadata-carrying Event proto callback.
Args:
event: An Event proto that contains core metadata about the debugged
Session::Run() in its log_message.message field, as a JSON string.
See the doc string of debug_data.DebugDumpDir.core_metadata for details.
"""
core_metadata = json.loads(event.log_message.message)
input_names = ','.join(core_metadata['input_names'])
output_names = ','.join(core_metadata['output_names'])
target_nodes = ','.join(core_metadata['target_nodes'])
self._run_key = RunKey(input_names, output_names, target_nodes)
if not self._graph_defs:
self._graph_defs_arrive_first = False
else:
for device_name in self._graph_defs:
self._add_graph_def(device_name, self._graph_defs[device_name])
self._outgoing_channel.put(_comm_metadata(self._run_key, event.wall_time))
# Wait for acknowledgement from client. Blocks until an item is got.
logger.info('on_core_metadata_event() waiting for client ack (meta)...')
self._incoming_channel.get()
logger.info('on_core_metadata_event() client ack received (meta).') | Implementation of the core metadata-carrying Event proto callback.
Args:
event: An Event proto that contains core metadata about the debugged
Session::Run() in its log_message.message field, as a JSON string.
See the doc string of debug_data.DebugDumpDir.core_metadata for details. | Below is the instruction that describes the task:
### Input:
Implementation of the core metadata-carrying Event proto callback.
Args:
event: An Event proto that contains core metadata about the debugged
Session::Run() in its log_message.message field, as a JSON string.
See the doc string of debug_data.DebugDumpDir.core_metadata for details.
### Response:
def on_core_metadata_event(self, event):
"""Implementation of the core metadata-carrying Event proto callback.
Args:
event: An Event proto that contains core metadata about the debugged
Session::Run() in its log_message.message field, as a JSON string.
See the doc string of debug_data.DebugDumpDir.core_metadata for details.
"""
core_metadata = json.loads(event.log_message.message)
input_names = ','.join(core_metadata['input_names'])
output_names = ','.join(core_metadata['output_names'])
target_nodes = ','.join(core_metadata['target_nodes'])
self._run_key = RunKey(input_names, output_names, target_nodes)
if not self._graph_defs:
self._graph_defs_arrive_first = False
else:
for device_name in self._graph_defs:
self._add_graph_def(device_name, self._graph_defs[device_name])
self._outgoing_channel.put(_comm_metadata(self._run_key, event.wall_time))
# Wait for acknowledgement from client. Blocks until an item is got.
logger.info('on_core_metadata_event() waiting for client ack (meta)...')
self._incoming_channel.get()
logger.info('on_core_metadata_event() client ack received (meta).') |
def graphs(self):
"""Sorry for the black magic. The result is an object whose attributes
are all the graphs found in graphs.py initialized with this instance as
only argument."""
result = Dummy()
for graph in graphs.__all__:
cls = getattr(graphs, graph)
setattr(result, cls.short_name, cls(self))
return result | Sorry for the black magic. The result is an object whose attributes
are all the graphs found in graphs.py initialized with this instance as
only argument. | Below is the instruction that describes the task:
### Input:
Sorry for the black magic. The result is an object whose attributes
are all the graphs found in graphs.py initialized with this instance as
only argument.
### Response:
def graphs(self):
"""Sorry for the black magic. The result is an object whose attributes
are all the graphs found in graphs.py initialized with this instance as
only argument."""
result = Dummy()
for graph in graphs.__all__:
cls = getattr(graphs, graph)
setattr(result, cls.short_name, cls(self))
return result |
def get_media_metadata(self, item_id):
"""Get metadata for a media item.
Args:
item_id (str): The item for which metadata is required.
Returns:
~collections.OrderedDict: The item's metadata, or `None`
See also:
The Sonos `getMediaMetadata API
<http://musicpartners.sonos.com/node/83>`_
"""
response = self.soap_client.call(
'getMediaMetadata',
[('id', item_id)])
return response.get('getMediaMetadataResult', None) | Get metadata for a media item.
Args:
item_id (str): The item for which metadata is required.
Returns:
~collections.OrderedDict: The item's metadata, or `None`
See also:
The Sonos `getMediaMetadata API
<http://musicpartners.sonos.com/node/83>`_ | Below is the instruction that describes the task:
### Input:
Get metadata for a media item.
Args:
item_id (str): The item for which metadata is required.
Returns:
~collections.OrderedDict: The item's metadata, or `None`
See also:
The Sonos `getMediaMetadata API
<http://musicpartners.sonos.com/node/83>`_
### Response:
def get_media_metadata(self, item_id):
"""Get metadata for a media item.
Args:
item_id (str): The item for which metadata is required.
Returns:
~collections.OrderedDict: The item's metadata, or `None`
See also:
The Sonos `getMediaMetadata API
<http://musicpartners.sonos.com/node/83>`_
"""
response = self.soap_client.call(
'getMediaMetadata',
[('id', item_id)])
return response.get('getMediaMetadataResult', None) |
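A usage sketch, assuming ``service`` is an instance of the class above; the item id and the metadata key are illustrative.

meta = service.get_media_metadata("track:12345")
if meta is not None:
    print(meta.get("title"))   # metadata is returned as an OrderedDict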
def find_spec(self, fullname, path, target=None):
'''finds the appropriate properties (spec) of a module, and sets
its loader.'''
if not path:
path = [os.getcwd()]
if "." in fullname:
name = fullname.split(".")[-1]
else:
name = fullname
for entry in path:
if os.path.isdir(os.path.join(entry, name)):
# this module has child modules
filename = os.path.join(entry, name, "__init__.py")
submodule_locations = [os.path.join(entry, name)]
else:
filename = os.path.join(entry, name + ".py")
submodule_locations = None
if not os.path.exists(filename):
continue
return spec_from_file_location(fullname, filename,
loader=MyLoader(filename),
submodule_search_locations=submodule_locations)
return None | finds the appropriate properties (spec) of a module, and sets
its loader. | Below is the instruction that describes the task:
### Input:
finds the appropriate properties (spec) of a module, and sets
its loader.
### Response:
def find_spec(self, fullname, path, target=None):
'''finds the appropriate properties (spec) of a module, and sets
its loader.'''
if not path:
path = [os.getcwd()]
if "." in fullname:
name = fullname.split(".")[-1]
else:
name = fullname
for entry in path:
if os.path.isdir(os.path.join(entry, name)):
# this module has child modules
filename = os.path.join(entry, name, "__init__.py")
submodule_locations = [os.path.join(entry, name)]
else:
filename = os.path.join(entry, name + ".py")
submodule_locations = None
if not os.path.exists(filename):
continue
return spec_from_file_location(fullname, filename,
loader=MyLoader(filename),
submodule_search_locations=submodule_locations)
return None |
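A sketch of wiring the finder into the import machinery; ``MyFinder`` is an assumed name for the class that defines ``find_spec`` above, and the imported module name is illustrative.

import sys

sys.meta_path.insert(0, MyFinder())  # consult our finder before the default ones
import mypackage.mymodule            # now located via find_spec() and loaded by MyLoader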
def load(*args, **kwargs):
"""Load an numpy.ndarray from a file stream.
This works exactly like the usual `json.load()` function,
but it uses our custom deserializer.
"""
kwargs.update(dict(object_hook=json_numpy_obj_hook))
return _json.load(*args, **kwargs) | Load an numpy.ndarray from a file stream.
This works exactly like the usual `json.load()` function,
but it uses our custom deserializer. | Below is the instruction that describes the task:
### Input:
Load an numpy.ndarray from a file stream.
This works exactly like the usual `json.load()` function,
but it uses our custom deserializer.
### Response:
def load(*args, **kwargs):
"""Load an numpy.ndarray from a file stream.
This works exactly like the usual `json.load()` function,
but it uses our custom deserializer.
"""
kwargs.update(dict(object_hook=json_numpy_obj_hook))
return _json.load(*args, **kwargs) |
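A usage sketch, assuming the file was written by a matching encoder that emits the JSON convention ``json_numpy_obj_hook`` understands; the filename is illustrative.

with open("arrays.json") as fh:
    data = load(fh)   # ndarray-encoded values come back as numpy arrays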
def format_t_into_dhms_format(timestamp):
""" Convert an amount of second into day, hour, min and sec
:param timestamp: seconds
:type timestamp: int
:return: 'Ad Bh Cm Ds'
:rtype: str
>>> format_t_into_dhms_format(456189)
'5d 6h 43m 9s'
>>> format_t_into_dhms_format(3600)
'0d 1h 0m 0s'
"""
mins, timestamp = divmod(timestamp, 60)
hour, mins = divmod(mins, 60)
day, hour = divmod(hour, 24)
return '%sd %sh %sm %ss' % (day, hour, mins, timestamp) | Convert an amount of second into day, hour, min and sec
:param timestamp: seconds
:type timestamp: int
:return: 'Ad Bh Cm Ds'
:rtype: str
>>> format_t_into_dhms_format(456189)
'5d 6h 43m 9s'
>>> format_t_into_dhms_format(3600)
'0d 1h 0m 0s' | Below is the instruction that describes the task:
### Input:
Convert an amount of second into day, hour, min and sec
:param timestamp: seconds
:type timestamp: int
:return: 'Ad Bh Cm Ds'
:rtype: str
>>> format_t_into_dhms_format(456189)
'5d 6h 43m 9s'
>>> format_t_into_dhms_format(3600)
'0d 1h 0m 0s'
### Response:
def format_t_into_dhms_format(timestamp):
""" Convert an amount of second into day, hour, min and sec
:param timestamp: seconds
:type timestamp: int
:return: 'Ad Bh Cm Ds'
:rtype: str
>>> format_t_into_dhms_format(456189)
'5d 6h 43m 9s'
>>> format_t_into_dhms_format(3600)
'0d 1h 0m 0s'
"""
mins, timestamp = divmod(timestamp, 60)
hour, mins = divmod(mins, 60)
day, hour = divmod(hour, 24)
return '%sd %sh %sm %ss' % (day, hour, mins, timestamp) |
def reverse_index(self):
"""Move the cursor up one line in the same column. If the cursor
is at the first line, create a new line at the top.
"""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == top:
# TODO: mark only the lines within margins?
self.dirty.update(range(self.lines))
for y in range(bottom, top, -1):
self.buffer[y] = self.buffer[y - 1]
self.buffer.pop(top, None)
else:
self.cursor_up() | Move the cursor up one line in the same column. If the cursor
is at the first line, create a new line at the top. | Below is the instruction that describes the task:
### Input:
Move the cursor up one line in the same column. If the cursor
is at the first line, create a new line at the top.
### Response:
def reverse_index(self):
"""Move the cursor up one line in the same column. If the cursor
is at the first line, create a new line at the top.
"""
top, bottom = self.margins or Margins(0, self.lines - 1)
if self.cursor.y == top:
# TODO: mark only the lines within margins?
self.dirty.update(range(self.lines))
for y in range(bottom, top, -1):
self.buffer[y] = self.buffer[y - 1]
self.buffer.pop(top, None)
else:
self.cursor_up() |
def _rr_new(self, rr_version, rr_name, rr_symlink_target, rr_relocated_child,
rr_relocated, rr_relocated_parent, file_mode):
# type: (str, bytes, bytes, bool, bool, bool, int) -> None
'''
Internal method to add Rock Ridge to a Directory Record.
Parameters:
rr_version - A string containing the version of Rock Ridge to use for
this record.
rr_name - The Rock Ridge name to associate with this directory record.
rr_symlink_target - The target for the symlink, if this is a symlink
record (otherwise, None).
rr_relocated_child - True if this is a directory record for a rock
ridge relocated child.
rr_relocated - True if this is a directory record for a relocated
entry.
rr_relocated_parent - True if this is a directory record for a rock
ridge relocated parent.
file_mode - The Unix file mode for this Rock Ridge entry.
Returns:
Nothing.
'''
if self.parent is None:
raise pycdlibexception.PyCdlibInternalError('Invalid call to create new Rock Ridge on root directory')
self.rock_ridge = rockridge.RockRidge()
is_first_dir_record_of_root = self.file_ident == b'\x00' and self.parent.is_root
bytes_to_skip = 0
if self.xa_record is not None:
bytes_to_skip = XARecord.length()
self.dr_len = self.rock_ridge.new(is_first_dir_record_of_root, rr_name,
file_mode, rr_symlink_target,
rr_version, rr_relocated_child,
rr_relocated, rr_relocated_parent,
bytes_to_skip, self.dr_len)
# For files, we are done
if not self.isdir:
return
# If this is a directory, we have to manipulate the file links
# appropriately.
if self.parent.is_root:
if self.file_ident == b'\x00' or self.file_ident == b'\x01':
# For the dot and dotdot children of the root, add one
# directly to their Rock Ridge links.
self.rock_ridge.add_to_file_links()
else:
# For all other children of the root, make sure to add one
# to each of the dot and dotdot entries.
if len(self.parent.children) < 2:
raise pycdlibexception.PyCdlibInvalidISO('Expected at least 2 children of the root directory record, saw %d' % (len(self.parent.children)))
if self.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot child of directory has no Rock Ridge; ISO is corrupt')
self.parent.children[0].rock_ridge.add_to_file_links()
if self.parent.children[1].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot-dot child of directory has no Rock Ridge; ISO is corrupt')
self.parent.children[1].rock_ridge.add_to_file_links()
else:
if self.parent.rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Parent of the entry did not have Rock Ridge, ISO is corrupt')
if self.file_ident == b'\x00':
# If we are adding the dot directory, increment the parent
# file links and our file links.
self.parent.rock_ridge.add_to_file_links()
self.rock_ridge.add_to_file_links()
elif self.file_ident == b'\x01':
# If we are adding the dotdot directory, copy the file links
# from the dot directory of the grandparent.
if self.parent.parent is None:
raise pycdlibexception.PyCdlibInternalError('Grandparent of the entry did not exist; this cannot be')
if not self.parent.children:
raise pycdlibexception.PyCdlibInvalidISO('Grandparent of the entry did not have a dot entry; ISO is corrupt')
if self.parent.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Grandparent dotdot entry did not have Rock Ridge; ISO is corrupt')
self.rock_ridge.copy_file_links(self.parent.parent.children[0].rock_ridge)
else:
# For all other entries, increment the parents file links
# and the parents dot file links.
self.parent.rock_ridge.add_to_file_links()
if not self.parent.children:
raise pycdlibexception.PyCdlibInvalidISO('Parent of the entry did not have a dot entry; ISO is corrupt')
if self.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot child of the parent did not have a dot entry; ISO is corrupt')
self.parent.children[0].rock_ridge.add_to_file_links() | Internal method to add Rock Ridge to a Directory Record.
Parameters:
rr_version - A string containing the version of Rock Ridge to use for
this record.
rr_name - The Rock Ridge name to associate with this directory record.
rr_symlink_target - The target for the symlink, if this is a symlink
record (otherwise, None).
rr_relocated_child - True if this is a directory record for a rock
ridge relocated child.
rr_relocated - True if this is a directory record for a relocated
entry.
rr_relocated_parent - True if this is a directory record for a rock
ridge relocated parent.
file_mode - The Unix file mode for this Rock Ridge entry.
Returns:
Nothing. | Below is the instruction that describes the task:
### Input:
Internal method to add Rock Ridge to a Directory Record.
Parameters:
rr_version - A string containing the version of Rock Ridge to use for
this record.
rr_name - The Rock Ridge name to associate with this directory record.
rr_symlink_target - The target for the symlink, if this is a symlink
record (otherwise, None).
rr_relocated_child - True if this is a directory record for a rock
ridge relocated child.
rr_relocated - True if this is a directory record for a relocated
entry.
rr_relocated_parent - True if this is a directory record for a rock
ridge relocated parent.
file_mode - The Unix file mode for this Rock Ridge entry.
Returns:
Nothing.
### Response:
def _rr_new(self, rr_version, rr_name, rr_symlink_target, rr_relocated_child,
rr_relocated, rr_relocated_parent, file_mode):
# type: (str, bytes, bytes, bool, bool, bool, int) -> None
'''
Internal method to add Rock Ridge to a Directory Record.
Parameters:
rr_version - A string containing the version of Rock Ridge to use for
this record.
rr_name - The Rock Ridge name to associate with this directory record.
rr_symlink_target - The target for the symlink, if this is a symlink
record (otherwise, None).
rr_relocated_child - True if this is a directory record for a rock
ridge relocated child.
rr_relocated - True if this is a directory record for a relocated
entry.
rr_relocated_parent - True if this is a directory record for a rock
ridge relocated parent.
file_mode - The Unix file mode for this Rock Ridge entry.
Returns:
Nothing.
'''
if self.parent is None:
raise pycdlibexception.PyCdlibInternalError('Invalid call to create new Rock Ridge on root directory')
self.rock_ridge = rockridge.RockRidge()
is_first_dir_record_of_root = self.file_ident == b'\x00' and self.parent.is_root
bytes_to_skip = 0
if self.xa_record is not None:
bytes_to_skip = XARecord.length()
self.dr_len = self.rock_ridge.new(is_first_dir_record_of_root, rr_name,
file_mode, rr_symlink_target,
rr_version, rr_relocated_child,
rr_relocated, rr_relocated_parent,
bytes_to_skip, self.dr_len)
# For files, we are done
if not self.isdir:
return
# If this is a directory, we have to manipulate the file links
# appropriately.
if self.parent.is_root:
if self.file_ident == b'\x00' or self.file_ident == b'\x01':
# For the dot and dotdot children of the root, add one
# directly to their Rock Ridge links.
self.rock_ridge.add_to_file_links()
else:
# For all other children of the root, make sure to add one
# to each of the dot and dotdot entries.
if len(self.parent.children) < 2:
raise pycdlibexception.PyCdlibInvalidISO('Expected at least 2 children of the root directory record, saw %d' % (len(self.parent.children)))
if self.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot child of directory has no Rock Ridge; ISO is corrupt')
self.parent.children[0].rock_ridge.add_to_file_links()
if self.parent.children[1].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot-dot child of directory has no Rock Ridge; ISO is corrupt')
self.parent.children[1].rock_ridge.add_to_file_links()
else:
if self.parent.rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Parent of the entry did not have Rock Ridge, ISO is corrupt')
if self.file_ident == b'\x00':
# If we are adding the dot directory, increment the parent
# file links and our file links.
self.parent.rock_ridge.add_to_file_links()
self.rock_ridge.add_to_file_links()
elif self.file_ident == b'\x01':
# If we are adding the dotdot directory, copy the file links
# from the dot directory of the grandparent.
if self.parent.parent is None:
raise pycdlibexception.PyCdlibInternalError('Grandparent of the entry did not exist; this cannot be')
if not self.parent.children:
raise pycdlibexception.PyCdlibInvalidISO('Grandparent of the entry did not have a dot entry; ISO is corrupt')
if self.parent.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Grandparent dotdot entry did not have Rock Ridge; ISO is corrupt')
self.rock_ridge.copy_file_links(self.parent.parent.children[0].rock_ridge)
else:
# For all other entries, increment the parents file links
# and the parents dot file links.
self.parent.rock_ridge.add_to_file_links()
if not self.parent.children:
raise pycdlibexception.PyCdlibInvalidISO('Parent of the entry did not have a dot entry; ISO is corrupt')
if self.parent.children[0].rock_ridge is None:
raise pycdlibexception.PyCdlibInvalidISO('Dot child of the parent did not have a dot entry; ISO is corrupt')
self.parent.children[0].rock_ridge.add_to_file_links() |
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_lldp_pdu_transmitted(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, "output")
lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
local_interface_name_key.text = kwargs.pop('local_interface_name')
remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
remote_interface_name_key.text = kwargs.pop('remote_interface_name')
lldp_pdu_transmitted = ET.SubElement(lldp_neighbor_detail, "lldp-pdu-transmitted")
lldp_pdu_transmitted.text = kwargs.pop('lldp_pdu_transmitted')
callback = kwargs.pop('callback', self._callback)
return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def get_lldp_neighbor_detail_output_lldp_neighbor_detail_lldp_pdu_transmitted(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_lldp_neighbor_detail = ET.Element("get_lldp_neighbor_detail")
config = get_lldp_neighbor_detail
output = ET.SubElement(get_lldp_neighbor_detail, "output")
lldp_neighbor_detail = ET.SubElement(output, "lldp-neighbor-detail")
local_interface_name_key = ET.SubElement(lldp_neighbor_detail, "local-interface-name")
local_interface_name_key.text = kwargs.pop('local_interface_name')
remote_interface_name_key = ET.SubElement(lldp_neighbor_detail, "remote-interface-name")
remote_interface_name_key.text = kwargs.pop('remote_interface_name')
lldp_pdu_transmitted = ET.SubElement(lldp_neighbor_detail, "lldp-pdu-transmitted")
lldp_pdu_transmitted.text = kwargs.pop('lldp_pdu_transmitted')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def get_soap_locals(obj, Hpos, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0):
"""Get the RBF basis SOAP output for the given positions in a finite system.
Args:
obj(ase.Atoms): Atomic structure for which the SOAP output is
calculated.
Hpos: Positions at which to calculate SOAP
alp: Alphas
bet: Betas
rCut: Radial cutoff.
nMax: Maximum number of radial basis functions
Lmax: Maximum spherical harmonics degree
crossOver:
all_atomtypes: Can be used to specify the atomic elements for which to
calculate the output. If given the output is calculated only for the
given species and is ordered by atomic number.
eta: The gaussian smearing width.
Returns:
np.ndarray: SOAP output for the given positions.
"""
rCutHard = rCut + 5
assert Lmax <= 9, "l cannot exceed 9. Lmax={}".format(Lmax)
assert Lmax >= 0, "l cannot be negative. Lmax={}".format(Lmax)
assert rCutHard < 17.0001, "hard radius cutoff cannot be larger than 17 Angs. rCut={}".format(rCutHard)
assert rCutHard > 1.999, "hard radius cutoff cannot be lower than 2 Angs. rCut={}".format(rCutHard)
assert nMax >= 2, "number of basis functions cannot be lower than 2. nMax={}".format(nMax)
assert nMax <= 13, "number of basis functions cannot exceed 13. nMax={}".format(nMax)
assert eta >= 0.0001, "Eta cannot be zero or negative. nMax={}".format(eta)
# get clusgeo internal format for c-code
Apos, typeNs, py_Ntypes, atomtype_lst, totalAN = _format_ase2clusgeo(obj, all_atomtypes)
Hpos = np.array(Hpos)
py_Hsize = Hpos.shape[0]
# flatten arrays
Hpos = Hpos.flatten()
alp = alp.flatten()
bet = bet.flatten()
# convert int to c_int
lMax = c_int(Lmax)
Hsize = c_int(py_Hsize)
Ntypes = c_int(py_Ntypes)
totalAN = c_int(totalAN)
rCutHard = c_double(rCutHard)
Nsize = c_int(nMax)
c_eta = c_double(eta)
#convert int array to c_int array
typeNs = (c_int * len(typeNs))(*typeNs)
# convert to c_double arrays
# alphas
alphas = (c_double * len(alp))(*alp.tolist())
# betas
betas = (c_double * len(bet))(*bet.tolist())
#Apos
axyz = (c_double * len(Apos))(*Apos.tolist())
#Hpos
hxyz = (c_double * len(Hpos))(*Hpos.tolist())
### START SOAP###
#path_to_so = os.path.dirname(os.path.abspath(__file__))
_PATH_TO_SOAPLITE_SO = os.path.dirname(os.path.abspath(__file__))
_SOAPLITE_SOFILES = glob.glob( "".join([ _PATH_TO_SOAPLITE_SO, "/../lib/libsoap*.*so"]) ) ## NOT SURE ABOUT THIS
if py_Ntypes == 1 or (not crossOver):
substring = "lib/libsoapPySig."
libsoap = CDLL(next((s for s in _SOAPLITE_SOFILES if substring in s), None))
libsoap.soap.argtypes = [POINTER (c_double),POINTER (c_double), POINTER (c_double),POINTER (c_double), POINTER (c_double), POINTER (c_int),c_double,c_int,c_int,c_int,c_int,c_int,c_double]
libsoap.soap.restype = POINTER (c_double)
c = (c_double*(int((nMax*(nMax+1))/2)*(Lmax+1)*py_Ntypes*py_Hsize))()
libsoap.soap( c, axyz, hxyz, alphas, betas, typeNs, rCutHard, totalAN, Ntypes, Nsize, lMax, Hsize,c_eta)
else:
substring = "lib/libsoapGTO."
libsoapGTO = CDLL(next((s for s in _SOAPLITE_SOFILES if substring in s), None))
libsoapGTO.soap.argtypes = [POINTER (c_double),POINTER (c_double), POINTER (c_double),POINTER (c_double), POINTER (c_double), POINTER (c_int),c_double,c_int,c_int,c_int,c_int,c_int,c_double]
libsoapGTO.soap.restype = POINTER (c_double)
c = (c_double*(int((nMax*(nMax+1))/2)*(Lmax+1)*int((py_Ntypes*(py_Ntypes +1))/2)*py_Hsize))()
libsoapGTO.soap( c, axyz, hxyz, alphas, betas, typeNs, rCutHard, totalAN, Ntypes, Nsize, lMax, Hsize,c_eta)
# return c;
if crossOver:
crosTypes = int((py_Ntypes*(py_Ntypes+1))/2)
shape = (py_Hsize, int((nMax*(nMax+1))/2)*(Lmax+1)*crosTypes)
else:
shape = (py_Hsize, int((nMax*(nMax+1))/2)*(Lmax+1)*py_Ntypes)
a = np.ctypeslib.as_array(c)
a = a.reshape(shape)
return a | Get the RBF basis SOAP output for the given positions in a finite system.
Args:
obj(ase.Atoms): Atomic structure for which the SOAP output is
calculated.
Hpos: Positions at which to calculate SOAP
alp: Alphas
bet: Betas
rCut: Radial cutoff.
nMax: Maximum number of radial basis functions
Lmax: Maximum spherical harmonics degree
crossOver:
all_atomtypes: Can be used to specify the atomic elements for which to
calculate the output. If given the output is calculated only for the
given species and is ordered by atomic number.
eta: The gaussian smearing width.
Returns:
np.ndarray: SOAP output for the given positions. | Below is the instruction that describes the task:
### Input:
Get the RBF basis SOAP output for the given positions in a finite system.
Args:
obj(ase.Atoms): Atomic structure for which the SOAP output is
calculated.
Hpos: Positions at which to calculate SOAP
alp: Alphas
bet: Betas
rCut: Radial cutoff.
nMax: Maximum number of radial basis functions
Lmax: Maximum spherical harmonics degree
crossOver:
all_atomtypes: Can be used to specify the atomic elements for which to
calculate the output. If given the output is calculated only for the
given species and is ordered by atomic number.
eta: The gaussian smearing width.
Returns:
np.ndarray: SOAP output for the given positions.
### Response:
def get_soap_locals(obj, Hpos, alp, bet, rCut=5.0, nMax=5, Lmax=5, crossOver=True, all_atomtypes=None, eta=1.0):
"""Get the RBF basis SOAP output for the given positions in a finite system.
Args:
obj(ase.Atoms): Atomic structure for which the SOAP output is
calculated.
Hpos: Positions at which to calculate SOAP
alp: Alphas
bet: Betas
rCut: Radial cutoff.
nMax: Maximum number of radial basis functions
Lmax: Maximum spherical harmonics degree
crossOver:
all_atomtypes: Can be used to specify the atomic elements for which to
calculate the output. If given the output is calculated only for the
given species and is ordered by atomic number.
eta: The gaussian smearing width.
Returns:
np.ndarray: SOAP output for the given positions.
"""
rCutHard = rCut + 5
assert Lmax <= 9, "l cannot exceed 9. Lmax={}".format(Lmax)
assert Lmax >= 0, "l cannot be negative. Lmax={}".format(Lmax)
assert rCutHard < 17.0001, "hard radius cutoff cannot be larger than 17 Angs. rCut={}".format(rCutHard)
assert rCutHard > 1.999, "hard radius cutoff cannot be lower than 2 Angs. rCut={}".format(rCutHard)
assert nMax >= 2, "number of basis functions cannot be lower than 2. nMax={}".format(nMax)
assert nMax <= 13, "number of basis functions cannot exceed 13. nMax={}".format(nMax)
assert eta >= 0.0001, "Eta cannot be zero or negative. nMax={}".format(eta)
# get clusgeo internal format for c-code
Apos, typeNs, py_Ntypes, atomtype_lst, totalAN = _format_ase2clusgeo(obj, all_atomtypes)
Hpos = np.array(Hpos)
py_Hsize = Hpos.shape[0]
# flatten arrays
Hpos = Hpos.flatten()
alp = alp.flatten()
bet = bet.flatten()
# convert int to c_int
lMax = c_int(Lmax)
Hsize = c_int(py_Hsize)
Ntypes = c_int(py_Ntypes)
totalAN = c_int(totalAN)
rCutHard = c_double(rCutHard)
Nsize = c_int(nMax)
c_eta = c_double(eta)
#convert int array to c_int array
typeNs = (c_int * len(typeNs))(*typeNs)
# convert to c_double arrays
# alphas
alphas = (c_double * len(alp))(*alp.tolist())
# betas
betas = (c_double * len(bet))(*bet.tolist())
#Apos
axyz = (c_double * len(Apos))(*Apos.tolist())
#Hpos
hxyz = (c_double * len(Hpos))(*Hpos.tolist())
### START SOAP###
#path_to_so = os.path.dirname(os.path.abspath(__file__))
_PATH_TO_SOAPLITE_SO = os.path.dirname(os.path.abspath(__file__))
_SOAPLITE_SOFILES = glob.glob( "".join([ _PATH_TO_SOAPLITE_SO, "/../lib/libsoap*.*so"]) ) ## NOT SURE ABOUT THIS
if py_Ntypes == 1 or (not crossOver):
substring = "lib/libsoapPySig."
libsoap = CDLL(next((s for s in _SOAPLITE_SOFILES if substring in s), None))
libsoap.soap.argtypes = [POINTER (c_double),POINTER (c_double), POINTER (c_double),POINTER (c_double), POINTER (c_double), POINTER (c_int),c_double,c_int,c_int,c_int,c_int,c_int,c_double]
libsoap.soap.restype = POINTER (c_double)
c = (c_double*(int((nMax*(nMax+1))/2)*(Lmax+1)*py_Ntypes*py_Hsize))()
libsoap.soap( c, axyz, hxyz, alphas, betas, typeNs, rCutHard, totalAN, Ntypes, Nsize, lMax, Hsize,c_eta)
else:
substring = "lib/libsoapGTO."
libsoapGTO = CDLL(next((s for s in _SOAPLITE_SOFILES if substring in s), None))
libsoapGTO.soap.argtypes = [POINTER (c_double),POINTER (c_double), POINTER (c_double),POINTER (c_double), POINTER (c_double), POINTER (c_int),c_double,c_int,c_int,c_int,c_int,c_int,c_double]
libsoapGTO.soap.restype = POINTER (c_double)
c = (c_double*(int((nMax*(nMax+1))/2)*(Lmax+1)*int((py_Ntypes*(py_Ntypes +1))/2)*py_Hsize))()
libsoapGTO.soap( c, axyz, hxyz, alphas, betas, typeNs, rCutHard, totalAN, Ntypes, Nsize, lMax, Hsize,c_eta)
# return c;
if crossOver:
crosTypes = int((py_Ntypes*(py_Ntypes+1))/2)
shape = (py_Hsize, int((nMax*(nMax+1))/2)*(Lmax+1)*crosTypes)
else:
shape = (py_Hsize, int((nMax*(nMax+1))/2)*(Lmax+1)*py_Ntypes)
a = np.ctypeslib.as_array(c)
a = a.reshape(shape)
return a |
def on_create_view(self):
""" Trigger the click
"""
d = self.declaration
changed = not d.condition
if changed:
d.condition = True
view = self.get_view()
if changed:
self.ready.set_result(True)
return view | Trigger the click | Below is the instruction that describes the task:
### Input:
Trigger the click
### Response:
def on_create_view(self):
""" Trigger the click
"""
d = self.declaration
changed = not d.condition
if changed:
d.condition = True
view = self.get_view()
if changed:
self.ready.set_result(True)
return view |
def finalize_options(self):
"""Finalizes the command's options.
Args:
self (CoverageCommand): the ``CoverageCommand`` instance
Returns:
``None``
"""
self.cwd = os.path.abspath(os.path.dirname(__file__))
self.test_dir = os.path.join(self.cwd, 'tests') | Finalizes the command's options.
Args:
self (CoverageCommand): the ``CoverageCommand`` instance
Returns:
``None`` | Below is the the instruction that describes the task:
### Input:
Finalizes the command's options.
Args:
self (CoverageCommand): the ``CoverageCommand`` instance
Returns:
``None``
### Response:
def finalize_options(self):
"""Finalizes the command's options.
Args:
self (CoverageCommand): the ``CoverageCommand`` instance
Returns:
``None``
"""
self.cwd = os.path.abspath(os.path.dirname(__file__))
self.test_dir = os.path.join(self.cwd, 'tests') |
def writable(self):
"""True if the Slot is writable."""
return bool(lib.EnvSlotWritableP(self._env, self._cls, self._name)) | True if the Slot is writable. | Below is the the instruction that describes the task:
### Input:
True if the Slot is writable.
### Response:
def writable(self):
"""True if the Slot is writable."""
return bool(lib.EnvSlotWritableP(self._env, self._cls, self._name)) |
def get_adaptive_threshold(threshold_method, image, threshold,
mask = None,
adaptive_window_size = 10,
**kwargs):
"""Given a global threshold, compute a threshold per pixel
Break the image into blocks, computing the threshold per block.
Afterwards, constrain the block threshold to .7 T < t < 1.5 T.
Block sizes must be at least 50x50. Images > 500 x 500 get 10x10
blocks.
"""
# for the X and Y direction, find the # of blocks, given the
# size constraints
image_size = np.array(image.shape[:2],dtype=int)
nblocks = image_size // adaptive_window_size
#
# Use a floating point block size to apportion the roundoff
# roughly equally to each block
#
increment = ( np.array(image_size,dtype=float) /
np.array(nblocks,dtype=float))
#
# Put the answer here
#
thresh_out = np.zeros(image_size, image.dtype)
#
# Loop once per block, computing the "global" threshold within the
# block.
#
block_threshold = np.zeros([nblocks[0],nblocks[1]])
for i in range(nblocks[0]):
i0 = int(i*increment[0])
i1 = int((i+1)*increment[0])
for j in range(nblocks[1]):
j0 = int(j*increment[1])
j1 = int((j+1)*increment[1])
block = image[i0:i1,j0:j1]
block_mask = None if mask is None else mask[i0:i1,j0:j1]
block_threshold[i,j] = get_global_threshold(
threshold_method,
block, mask = block_mask,
**kwargs)
#
# Use a cubic spline to blend the thresholds across the image to avoid image artifacts
#
spline_order = min(3, np.min(nblocks) - 1)
xStart = int(increment[0] / 2)
xEnd = int((nblocks[0] - 0.5) * increment[0])
yStart = int(increment[1] / 2)
yEnd = int((nblocks[1] - 0.5) * increment[1])
xtStart = .5
xtEnd = image.shape[0] - .5
ytStart = .5
ytEnd = image.shape[1] - .5
block_x_coords = np.linspace(xStart,xEnd, nblocks[0])
block_y_coords = np.linspace(yStart,yEnd, nblocks[1])
adaptive_interpolation = scipy.interpolate.RectBivariateSpline(
block_x_coords, block_y_coords, block_threshold,
bbox = (xtStart, xtEnd, ytStart, ytEnd),
kx = spline_order, ky = spline_order)
thresh_out_x_coords = np.linspace(.5, int(nblocks[0] * increment[0]) - .5, thresh_out.shape[0])
thresh_out_y_coords = np.linspace(.5, int(nblocks[1] * increment[1]) - .5 , thresh_out.shape[1])
thresh_out = adaptive_interpolation(thresh_out_x_coords, thresh_out_y_coords)
return thresh_out | Given a global threshold, compute a threshold per pixel
Break the image into blocks, computing the threshold per block.
Afterwards, constrain the block threshold to .7 T < t < 1.5 T.
Block sizes must be at least 50x50. Images > 500 x 500 get 10x10
blocks. | Below is the the instruction that describes the task:
### Input:
Given a global threshold, compute a threshold per pixel
Break the image into blocks, computing the threshold per block.
Afterwards, constrain the block threshold to .7 T < t < 1.5 T.
Block sizes must be at least 50x50. Images > 500 x 500 get 10x10
blocks.
### Response:
def get_adaptive_threshold(threshold_method, image, threshold,
mask = None,
adaptive_window_size = 10,
**kwargs):
"""Given a global threshold, compute a threshold per pixel
Break the image into blocks, computing the threshold per block.
Afterwards, constrain the block threshold to .7 T < t < 1.5 T.
Block sizes must be at least 50x50. Images > 500 x 500 get 10x10
blocks.
"""
# for the X and Y direction, find the # of blocks, given the
# size constraints
image_size = np.array(image.shape[:2],dtype=int)
nblocks = image_size // adaptive_window_size
#
# Use a floating point block size to apportion the roundoff
# roughly equally to each block
#
increment = ( np.array(image_size,dtype=float) /
np.array(nblocks,dtype=float))
#
# Put the answer here
#
thresh_out = np.zeros(image_size, image.dtype)
#
# Loop once per block, computing the "global" threshold within the
# block.
#
block_threshold = np.zeros([nblocks[0],nblocks[1]])
for i in range(nblocks[0]):
i0 = int(i*increment[0])
i1 = int((i+1)*increment[0])
for j in range(nblocks[1]):
j0 = int(j*increment[1])
j1 = int((j+1)*increment[1])
block = image[i0:i1,j0:j1]
block_mask = None if mask is None else mask[i0:i1,j0:j1]
block_threshold[i,j] = get_global_threshold(
threshold_method,
block, mask = block_mask,
**kwargs)
#
# Use a cubic spline to blend the thresholds across the image to avoid image artifacts
#
spline_order = min(3, np.min(nblocks) - 1)
xStart = int(increment[0] / 2)
xEnd = int((nblocks[0] - 0.5) * increment[0])
yStart = int(increment[1] / 2)
yEnd = int((nblocks[1] - 0.5) * increment[1])
xtStart = .5
xtEnd = image.shape[0] - .5
ytStart = .5
ytEnd = image.shape[1] - .5
block_x_coords = np.linspace(xStart,xEnd, nblocks[0])
block_y_coords = np.linspace(yStart,yEnd, nblocks[1])
adaptive_interpolation = scipy.interpolate.RectBivariateSpline(
block_x_coords, block_y_coords, block_threshold,
bbox = (xtStart, xtEnd, ytStart, ytEnd),
kx = spline_order, ky = spline_order)
thresh_out_x_coords = np.linspace(.5, int(nblocks[0] * increment[0]) - .5, thresh_out.shape[0])
thresh_out_y_coords = np.linspace(.5, int(nblocks[1] * increment[1]) - .5 , thresh_out.shape[1])
thresh_out = adaptive_interpolation(thresh_out_x_coords, thresh_out_y_coords)
return thresh_out |
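As a quick illustration of how the block grid in get_adaptive_threshold is apportioned, here is a standalone sketch that repeats only the first few lines of the function with made-up image dimensions; every number is illustrative.
import numpy as np

image_size = np.array([500, 600])                      # hypothetical image shape
adaptive_window_size = 50
nblocks = image_size // adaptive_window_size           # -> array([10, 12])
increment = image_size.astype(float) / nblocks         # -> array([50., 50.])
print(nblocks, increment)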
def get_config(name, expand=False):
"""
Returns the config value that corresponds to *name*, which must have the format
``<section>[.<option>]``. When an option is given and *expand* is *True*, variables are expanded
in the returned value.
"""
cfg = Config.instance()
only_section = "." not in name
# when only the section is given, print all keys
if only_section:
return "\n".join(cfg.keys(name))
else:
section, option = name.split(".", 1)
func = cfg.get_expanded if expand else cfg.get
return func(section, option) | Returns the config value that corresponds to *name*, which must have the format
``<section>[.<option>]``. When an option is given and *expand* is *True*, variables are expanded
in the returned value. | Below is the the instruction that describes the task:
### Input:
Returns the config value that corresponds to *name*, which must have the format
``<section>[.<option>]``. When an option is given and *expand* is *True*, variables are expanded
in the returned value.
### Response:
def get_config(name, expand=False):
"""
Returns the config value that corresponds to *name*, which must have the format
``<section>[.<option>]``. When an option is given and *expand* is *True*, variables are expanded
in the returned value.
"""
cfg = Config.instance()
only_section = "." not in name
# when only the section is given, print all keys
if only_section:
return "\n".join(cfg.keys(name))
else:
section, option = name.split(".", 1)
func = cfg.get_expanded if expand else cfg.get
return func(section, option) |
def set_dry_run(xml_root, value=True):
"""Sets dry-run so records are not updated, only log file is produced."""
value_str = str(value).lower()
assert value_str in ("true", "false")
if xml_root.tag == "testsuites":
_set_property(xml_root, "polarion-dry-run", value_str)
elif xml_root.tag in ("testcases", "requirements"):
_set_property(xml_root, "dry-run", value_str)
else:
raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG) | Sets dry-run so records are not updated, only log file is produced. | Below is the the instruction that describes the task:
### Input:
Sets dry-run so records are not updated, only log file is produced.
### Response:
def set_dry_run(xml_root, value=True):
"""Sets dry-run so records are not updated, only log file is produced."""
value_str = str(value).lower()
assert value_str in ("true", "false")
if xml_root.tag == "testsuites":
_set_property(xml_root, "polarion-dry-run", value_str)
elif xml_root.tag in ("testcases", "requirements"):
_set_property(xml_root, "dry-run", value_str)
else:
raise Dump2PolarionException(_NOT_EXPECTED_FORMAT_MSG) |
def unpack_bits( byte ):
"""Expand a bitfield into a 64-bit int (8 bool bytes)."""
longbits = byte & (0x00000000000000ff)
longbits = (longbits | (longbits<<28)) & (0x0000000f0000000f)
longbits = (longbits | (longbits<<14)) & (0x0003000300030003)
longbits = (longbits | (longbits<<7)) & (0x0101010101010101)
return longbits | Expand a bitfield into a 64-bit int (8 bool bytes). | Below is the the instruction that describes the task:
### Input:
Expand a bitfield into a 64-bit int (8 bool bytes).
### Response:
def unpack_bits( byte ):
"""Expand a bitfield into a 64-bit int (8 bool bytes)."""
longbits = byte & (0x00000000000000ff)
longbits = (longbits | (longbits<<28)) & (0x0000000f0000000f)
longbits = (longbits | (longbits<<14)) & (0x0003000300030003)
longbits = (longbits | (longbits<<7)) & (0x0101010101010101)
return longbits |
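A small self-check of the bit-spreading trick in unpack_bits; the input byte is arbitrary, and the expected list simply reads the input bits from least to most significant (byte i of the result carries bit i of the input).
value = unpack_bits(0b10110001)
bools = [(value >> (8 * i)) & 1 for i in range(8)]
print(bools)   # expected: [1, 0, 0, 0, 1, 1, 0, 1]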
def _create_resource_group(self, region, resource_group_name):
"""
Create resource group if it does not exist.
"""
resource_group_config = {'location': region}
try:
self.resource.resource_groups.create_or_update(
resource_group_name, resource_group_config
)
except Exception as error:
raise AzureCloudException(
'Unable to create resource group: {0}.'.format(error)
) | Create resource group if it does not exist. | Below is the the instruction that describes the task:
### Input:
Create resource group if it does not exist.
### Response:
def _create_resource_group(self, region, resource_group_name):
"""
Create resource group if it does not exist.
"""
resource_group_config = {'location': region}
try:
self.resource.resource_groups.create_or_update(
resource_group_name, resource_group_config
)
except Exception as error:
raise AzureCloudException(
'Unable to create resource group: {0}.'.format(error)
) |
def vnic_attached_to_network(nicspec, network, logger):
"""
Attach vNIC to Network.
:param nicspec: <vim.vm.device.VirtualDeviceSpec>
:param network: <vim network obj>
:return: updated 'nicspec'
"""
if nicspec:
if network_is_portgroup(network):
return VNicService.vnic_attach_to_network_distributed(nicspec, network,
logger=logger)
elif network_is_standard(network):
return VNicService.vnic_attach_to_network_standard(nicspec, network,
logger=logger)
return None | Attach vNIC to Network.
:param nicspec: <vim.vm.device.VirtualDeviceSpec>
:param network: <vim network obj>
:return: updated 'nicspec' | Below is the the instruction that describes the task:
### Input:
Attach vNIC to Network.
:param nicspec: <vim.vm.device.VirtualDeviceSpec>
:param network: <vim network obj>
:return: updated 'nicspec'
### Response:
def vnic_attached_to_network(nicspec, network, logger):
"""
Attach vNIC to Network.
:param nicspec: <vim.vm.device.VirtualDeviceSpec>
:param network: <vim network obj>
:return: updated 'nicspec'
"""
if nicspec:
if network_is_portgroup(network):
return VNicService.vnic_attach_to_network_distributed(nicspec, network,
logger=logger)
elif network_is_standard(network):
return VNicService.vnic_attach_to_network_standard(nicspec, network,
logger=logger)
return None |
def generate(ast_tree: ast.Tree, model_name: str):
"""
:param ast_tree: AST to generate from
:param model_name: class to generate
:return: XML string for the model
"""
component_ref = ast.ComponentRef.from_string(model_name)
ast_tree_new = copy.deepcopy(ast_tree)
ast_walker = TreeWalker()
flat_tree = flatten(ast_tree_new, component_ref)
gen = XmlGenerator()
ast_walker.walk(gen, flat_tree)
return etree.tostring(gen.xml[flat_tree], pretty_print=True).decode('utf-8') | :param ast_tree: AST to generate from
:param model_name: class to generate
:return: XML string for the model
### Input:
:param ast_tree: AST to generate from
:param model_name: class to generate
:return: XML string for the model
### Response:
def generate(ast_tree: ast.Tree, model_name: str):
"""
:param ast_tree: AST to generate from
:param model_name: class to generate
:return: XML string for the model
"""
component_ref = ast.ComponentRef.from_string(model_name)
ast_tree_new = copy.deepcopy(ast_tree)
ast_walker = TreeWalker()
flat_tree = flatten(ast_tree_new, component_ref)
gen = XmlGenerator()
ast_walker.walk(gen, flat_tree)
return etree.tostring(gen.xml[flat_tree], pretty_print=True).decode('utf-8') |
def inject_config(self, config, from_args):
"""
:param config:
:type config: list
:param from_args:
:type from_args: dict
"""
# First get required values from labelStore
runtime = self._get_runtime()
whitelist = self._get_whitelist()
#Run introspection on the libraries to retrieve list of libraries to link
found_libraries = self._run_introspection(runtime, whitelist, verbose=True)
container_path_set=set()
for library in found_libraries:
#disallow duplicate library targets
cpath = self.__get_container_path(library)
if cpath in container_path_set:
continue
container_path_set.add(cpath)
config.append('--volume={0}:{1}'.format(library, cpath))
config.extend(['-e', 'LD_LIBRARY_PATH={0}'.format(_container_lib_location)])
config.extend(['-e', 'LIBGL_DRIVERS_PATH={0}'.format(_container_lib_location)]) | :param config:
:type config: list
:param from_args:
:type from_args: dict | Below is the the instruction that describes the task:
### Input:
:param config:
:type config: list
:param from_args:
:type from_args: dict
### Response:
def inject_config(self, config, from_args):
"""
:param config:
:type config: list
:param from_args:
:type from_args: dict
"""
# First get required values from labelStore
runtime = self._get_runtime()
whitelist = self._get_whitelist()
#Run introspection on the libraries to retrieve list of libraries to link
found_libraries = self._run_introspection(runtime, whitelist, verbose=True)
container_path_set=set()
for library in found_libraries:
#disallow duplicate library targets
cpath = self.__get_container_path(library)
if cpath in container_path_set:
continue
container_path_set.add(cpath)
config.append('--volume={0}:{1}'.format(library, cpath))
config.extend(['-e', 'LD_LIBRARY_PATH={0}'.format(_container_lib_location)])
config.extend(['-e', 'LIBGL_DRIVERS_PATH={0}'.format(_container_lib_location)]) |
def init_logger(cls, log_level):
"""Initialize logger settings."""
logger = logging.getLogger("AutoMLBoard")
handler = logging.StreamHandler()
formatter = logging.Formatter("[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s")
handler.setFormatter(formatter)
logger.setLevel(log_level)
logger.addHandler(handler)
return logger | Initialize logger settings. | Below is the the instruction that describes the task:
### Input:
Initialize logger settings.
### Response:
def init_logger(cls, log_level):
"""Initialize logger settings."""
logger = logging.getLogger("AutoMLBoard")
handler = logging.StreamHandler()
formatter = logging.Formatter("[%(levelname)s %(asctime)s] "
"%(filename)s: %(lineno)d "
"%(message)s")
handler.setFormatter(formatter)
logger.setLevel(log_level)
logger.addHandler(handler)
return logger |
def get_squeezenet(version, pretrained=False, ctx=cpu(),
root=os.path.join(base.data_dir(), 'models'), **kwargs):
r"""SqueezeNet model from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
and <0.5MB model size" <https://arxiv.org/abs/1602.07360>`_ paper.
SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Parameters
----------
version : str
Version of squeezenet. Options are '1.0', '1.1'.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
net = SqueezeNet(version, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_parameters(get_model_file('squeezenet%s'%version, root=root), ctx=ctx)
return net | r"""SqueezeNet model from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
and <0.5MB model size" <https://arxiv.org/abs/1602.07360>`_ paper.
SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Parameters
----------
version : str
Version of squeezenet. Options are '1.0', '1.1'.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters. | Below is the the instruction that describes the task:
### Input:
r"""SqueezeNet model from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
and <0.5MB model size" <https://arxiv.org/abs/1602.07360>`_ paper.
SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Parameters
----------
version : str
Version of squeezenet. Options are '1.0', '1.1'.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
### Response:
def get_squeezenet(version, pretrained=False, ctx=cpu(),
root=os.path.join(base.data_dir(), 'models'), **kwargs):
r"""SqueezeNet model from the `"SqueezeNet: AlexNet-level accuracy with 50x fewer parameters
and <0.5MB model size" <https://arxiv.org/abs/1602.07360>`_ paper.
SqueezeNet 1.1 model from the `official SqueezeNet repo
<https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_.
SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters
than SqueezeNet 1.0, without sacrificing accuracy.
Parameters
----------
version : str
Version of squeezenet. Options are '1.0', '1.1'.
pretrained : bool, default False
Whether to load the pretrained weights for model.
ctx : Context, default CPU
The context in which to load the pretrained weights.
root : str, default $MXNET_HOME/models
Location for keeping the model parameters.
"""
net = SqueezeNet(version, **kwargs)
if pretrained:
from ..model_store import get_model_file
net.load_parameters(get_model_file('squeezenet%s'%version, root=root), ctx=ctx)
return net |
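A hedged usage sketch for get_squeezenet: it assumes MXNet/Gluon is installed, builds SqueezeNet 1.1 without downloading pretrained weights, and runs a dummy forward pass; the 224x224 input and 1000-class output are the usual ImageNet defaults, not something this snippet guarantees.
import mxnet as mx

net = get_squeezenet('1.1')                    # randomly initialized, CPU context
net.initialize()
out = net(mx.nd.zeros((1, 3, 224, 224)))
print(out.shape)                               # roughly (1, 1000) with the default class count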
def search(self, regex, flags=0):
"""
Run a regex test against a dict value (only a substring has to
match).
>>> Query().f1.search(r'^\w+$')
:param regex: The regular expression to use for matching
"""
return self._generate_test(
lambda value: re.search(regex, value, flags),
('search', self._path, regex)
) | Run a regex test against a dict value (only a substring has to
match).
>>> Query().f1.search(r'^\w+$')
:param regex: The regular expression to use for matching | Below is the the instruction that describes the task:
### Input:
Run a regex test against a dict value (only a substring has to
match).
>>> Query().f1.search(r'^\w+$')
:param regex: The regular expression to use for matching
### Response:
def search(self, regex, flags=0):
"""
Run a regex test against a dict value (only a substring has to
match).
>>> Query().f1.search(r'^\w+$')
:param regex: The regular expression to use for matching
"""
return self._generate_test(
lambda value: re.search(regex, value, flags),
('search', self._path, regex)
) |
def getStates(self, Corpnum, reciptNumList, UserID=None):
""" 전송내역 요약정보 확인
args
CorpNum : 팝빌회원 사업자번호
reciptNumList : 문자전송 접수번호 배열
UserID : 팝빌회원 아이디
return
전송정보 as list
raise
PopbillException
"""
if reciptNumList == None or len(reciptNumList) < 1:
raise PopbillException(-99999999, "접수번호가 입력되지 않았습니다.")
postData = self._stringtify(reciptNumList)
return self._httppost('/Message/States', postData, Corpnum, UserID) | Check summary information of sent messages
args
CorpNum : Popbill member's business registration number
reciptNumList : array of SMS receipt numbers
UserID : Popbill member ID
return
transmission information as list
raise
PopbillException | Below is the the instruction that describes the task:
### Input:
전송내역 요약정보 확인
args
CorpNum : 팝빌회원 사업자번호
reciptNumList : 문자전송 접수번호 배열
UserID : 팝빌회원 아이디
return
전송정보 as list
raise
PopbillException
### Response:
def getStates(self, Corpnum, reciptNumList, UserID=None):
""" 전송내역 요약정보 확인
args
CorpNum : 팝빌회원 사업자번호
reciptNumList : 문자전송 접수번호 배열
UserID : 팝빌회원 아이디
return
전송정보 as list
raise
PopbillException
"""
if reciptNumList == None or len(reciptNumList) < 1:
raise PopbillException(-99999999, "접수번호가 입력되지 않았습니다.")
postData = self._stringtify(reciptNumList)
return self._httppost('/Message/States', postData, Corpnum, UserID) |
def create(cls, service=None, endpoint=None, data=None, *args, **kwargs):
"""
Create an integration within the scope of an service.
Make sure that they should reasonably be able to query with an
service or endpoint that knows about an service.
"""
cls.validate(data)
if service is None and endpoint is None:
raise InvalidArguments(service, endpoint)
if endpoint is None:
sid = service['id'] if isinstance(service, Entity) else service
endpoint = 'services/{0}/integrations'.format(sid)
# otherwise endpoint should contain the service path too
return getattr(Entity, 'create').__func__(cls, endpoint=endpoint,
data=data, *args, **kwargs) | Create an integration within the scope of a service.
Make sure that they should reasonably be able to query with a
service or endpoint that knows about a service.
### Input:
Create an integration within the scope of a service.
Make sure that they should reasonably be able to query with a
service or endpoint that knows about a service.
### Response:
def create(cls, service=None, endpoint=None, data=None, *args, **kwargs):
"""
Create an integration within the scope of a service.
Make sure that they should reasonably be able to query with a
service or endpoint that knows about a service.
"""
cls.validate(data)
if service is None and endpoint is None:
raise InvalidArguments(service, endpoint)
if endpoint is None:
sid = service['id'] if isinstance(service, Entity) else service
endpoint = 'services/{0}/integrations'.format(sid)
# otherwise endpoint should contain the service path too
return getattr(Entity, 'create').__func__(cls, endpoint=endpoint,
data=data, *args, **kwargs) |
def dump(self, dest_dir=None, to_local=1, from_local=0, archive=0, dump_fn=None, name=None, site=None, use_sudo=0, cleanup=1):
"""
Exports the target database to a single transportable file on the localhost,
appropriate for loading using load().
"""
r = self.local_renderer
site = site or self.genv.SITE
r = self.database_renderer(name=name, site=site)
# Load optional site-specific command, if given.
try:
r.env.dump_command = self.genv.sites[site]['postgresql_dump_command']
except KeyError:
pass
use_sudo = int(use_sudo)
from_local = int(from_local)
to_local = int(to_local)
dump_fn = dump_fn or r.env.dump_fn_template
# Render the snapshot filename.
r.env.dump_fn = self.get_default_db_fn(
fn_template=dump_fn,
dest_dir=dest_dir,
name=name,
site=site,
)
# Dump the database to a snapshot file.
#if not os.path.isfile(os.path.abspath(r.env.dump_fn))):
r.pc('Dumping database snapshot.')
if from_local:
r.local(r.env.dump_command)
elif use_sudo:
r.sudo(r.env.dump_command)
else:
r.run(r.env.dump_command)
# Download the database dump file on the remote host to localhost.
if not from_local and to_local:
r.pc('Downloading database snapshot to localhost.')
r.local('rsync -rvz --progress --recursive --no-p --no-g '
'--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" {user}@{host_string}:{dump_fn} {dump_fn}')
# Delete the snapshot file on the remote system.
if int(cleanup):
r.pc('Deleting database snapshot on remote host.')
r.sudo('rm {dump_fn}')
# Move the database snapshot to an archive directory.
if to_local and int(archive):
r.pc('Archiving database snapshot.')
db_fn = r.render_fn(r.env.dump_fn)
r.env.archive_fn = '%s/%s' % (env.db_dump_archive_dir, os.path.split(db_fn)[-1])
r.local('mv %s %s' % (db_fn, env.archive_fn))
return r.env.dump_fn | Exports the target database to a single transportable file on the localhost,
appropriate for loading using load(). | Below is the the instruction that describes the task:
### Input:
Exports the target database to a single transportable file on the localhost,
appropriate for loading using load().
### Response:
def dump(self, dest_dir=None, to_local=1, from_local=0, archive=0, dump_fn=None, name=None, site=None, use_sudo=0, cleanup=1):
"""
Exports the target database to a single transportable file on the localhost,
appropriate for loading using load().
"""
r = self.local_renderer
site = site or self.genv.SITE
r = self.database_renderer(name=name, site=site)
# Load optional site-specific command, if given.
try:
r.env.dump_command = self.genv.sites[site]['postgresql_dump_command']
except KeyError:
pass
use_sudo = int(use_sudo)
from_local = int(from_local)
to_local = int(to_local)
dump_fn = dump_fn or r.env.dump_fn_template
# Render the snapshot filename.
r.env.dump_fn = self.get_default_db_fn(
fn_template=dump_fn,
dest_dir=dest_dir,
name=name,
site=site,
)
# Dump the database to a snapshot file.
#if not os.path.isfile(os.path.abspath(r.env.dump_fn))):
r.pc('Dumping database snapshot.')
if from_local:
r.local(r.env.dump_command)
elif use_sudo:
r.sudo(r.env.dump_command)
else:
r.run(r.env.dump_command)
# Download the database dump file on the remote host to localhost.
if not from_local and to_local:
r.pc('Downloading database snapshot to localhost.')
r.local('rsync -rvz --progress --recursive --no-p --no-g '
'--rsh "ssh -o StrictHostKeyChecking=no -i {key_filename}" {user}@{host_string}:{dump_fn} {dump_fn}')
# Delete the snapshot file on the remote system.
if int(cleanup):
r.pc('Deleting database snapshot on remote host.')
r.sudo('rm {dump_fn}')
# Move the database snapshot to an archive directory.
if to_local and int(archive):
r.pc('Archiving database snapshot.')
db_fn = r.render_fn(r.env.dump_fn)
r.env.archive_fn = '%s/%s' % (env.db_dump_archive_dir, os.path.split(db_fn)[-1])
r.local('mv %s %s' % (db_fn, env.archive_fn))
return r.env.dump_fn |
def webex_teams_webhook_events():
"""Processes incoming requests to the '/events' URI."""
if request.method == 'GET':
return ("""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Webex Teams Bot served via Flask</title>
</head>
<body>
<p>
<strong>Your Flask web server is up and running!</strong>
</p>
<p>
Here is a nice Cat Fact for you:
</p>
<blockquote>{}</blockquote>
</body>
</html>
""".format(get_catfact()))
elif request.method == 'POST':
"""Respond to inbound webhook JSON HTTP POST from Webex Teams."""
# Get the POST data sent from Webex Teams
json_data = request.json
print("\n")
print("WEBHOOK POST RECEIVED:")
print(json_data)
print("\n")
# Create a Webhook object from the JSON data
webhook_obj = Webhook(json_data)
# Get the room details
room = api.rooms.get(webhook_obj.data.roomId)
# Get the message details
message = api.messages.get(webhook_obj.data.id)
# Get the sender's details
person = api.people.get(message.personId)
print("NEW MESSAGE IN ROOM '{}'".format(room.title))
print("FROM '{}'".format(person.displayName))
print("MESSAGE '{}'\n".format(message.text))
# This is a VERY IMPORTANT loop prevention control step.
# If you respond to all messages... You will respond to the messages
# that the bot posts and thereby create a loop condition.
me = api.people.me()
if message.personId == me.id:
# Message was sent by me (bot); do not respond.
return 'OK'
else:
# Message was sent by someone else; parse message and respond.
if "/CAT" in message.text:
print("FOUND '/CAT'")
# Get a cat fact
cat_fact = get_catfact()
print("SENDING CAT FACT '{}'".format(cat_fact))
# Post the fact to the room where the request was received
api.messages.create(room.id, text=cat_fact)
return 'OK' | Processes incoming requests to the '/events' URI. | Below is the the instruction that describes the task:
### Input:
Processes incoming requests to the '/events' URI.
### Response:
def webex_teams_webhook_events():
"""Processes incoming requests to the '/events' URI."""
if request.method == 'GET':
return ("""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>Webex Teams Bot served via Flask</title>
</head>
<body>
<p>
<strong>Your Flask web server is up and running!</strong>
</p>
<p>
Here is a nice Cat Fact for you:
</p>
<blockquote>{}</blockquote>
</body>
</html>
""".format(get_catfact()))
elif request.method == 'POST':
"""Respond to inbound webhook JSON HTTP POST from Webex Teams."""
# Get the POST data sent from Webex Teams
json_data = request.json
print("\n")
print("WEBHOOK POST RECEIVED:")
print(json_data)
print("\n")
# Create a Webhook object from the JSON data
webhook_obj = Webhook(json_data)
# Get the room details
room = api.rooms.get(webhook_obj.data.roomId)
# Get the message details
message = api.messages.get(webhook_obj.data.id)
# Get the sender's details
person = api.people.get(message.personId)
print("NEW MESSAGE IN ROOM '{}'".format(room.title))
print("FROM '{}'".format(person.displayName))
print("MESSAGE '{}'\n".format(message.text))
# This is a VERY IMPORTANT loop prevention control step.
# If you respond to all messages... You will respond to the messages
# that the bot posts and thereby create a loop condition.
me = api.people.me()
if message.personId == me.id:
# Message was sent by me (bot); do not respond.
return 'OK'
else:
# Message was sent by someone else; parse message and respond.
if "/CAT" in message.text:
print("FOUND '/CAT'")
# Get a cat fact
cat_fact = get_catfact()
print("SENDING CAT FACT '{}'".format(cat_fact))
# Post the fact to the room where the request was received
api.messages.create(room.id, text=cat_fact)
return 'OK' |
def update_image_member(self, img_id, status):
"""
Updates the image whose ID is given with the status specified. This
must be called by the user whose project_id is in the members for the
image. If called by the owner of the image, an InvalidImageMember
exception will be raised.
Valid values for 'status' include:
pending
accepted
rejected
Any other value will result in an InvalidImageMemberStatus exception
being raised.
"""
if status not in ("pending", "accepted", "rejected"):
raise exc.InvalidImageMemberStatus("The status value must be one "
"of 'accepted', 'rejected', or 'pending'. Received: '%s'" %
status)
api = self.api
project_id = api.identity.tenant_id
uri = "/%s/%s/members/%s" % (self.uri_base, img_id, project_id)
body = {"status": status}
try:
resp, resp_body = self.api.method_put(uri, body=body)
except exc.NotFound as e:
raise exc.InvalidImageMember("The update member request could not "
"be completed. No member request for that image was found.") | Updates the image whose ID is given with the status specified. This
must be called by the user whose project_id is in the members for the
image. If called by the owner of the image, an InvalidImageMember
exception will be raised.
Valid values for 'status' include:
pending
accepted
rejected
Any other value will result in an InvalidImageMemberStatus exception
being raised. | Below is the the instruction that describes the task:
### Input:
Updates the image whose ID is given with the status specified. This
must be called by the user whose project_id is in the members for the
image. If called by the owner of the image, an InvalidImageMember
exception will be raised.
Valid values for 'status' include:
pending
accepted
rejected
Any other value will result in an InvalidImageMemberStatus exception
being raised.
### Response:
def update_image_member(self, img_id, status):
"""
Updates the image whose ID is given with the status specified. This
must be called by the user whose project_id is in the members for the
image. If called by the owner of the image, an InvalidImageMember
exception will be raised.
Valid values for 'status' include:
pending
accepted
rejected
Any other value will result in an InvalidImageMemberStatus exception
being raised.
"""
if status not in ("pending", "accepted", "rejected"):
raise exc.InvalidImageMemberStatus("The status value must be one "
"of 'accepted', 'rejected', or 'pending'. Received: '%s'" %
status)
api = self.api
project_id = api.identity.tenant_id
uri = "/%s/%s/members/%s" % (self.uri_base, img_id, project_id)
body = {"status": status}
try:
resp, resp_body = self.api.method_put(uri, body=body)
except exc.NotFound as e:
raise exc.InvalidImageMember("The update member request could not "
"be completed. No member request for that image was found.") |
def reverse_transform(self, col):
"""Converts data back into original format.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
"""
output = pd.DataFrame()
new_name = '?' + self.col_name
col.loc[col[new_name] == 0, self.col_name] = np.nan
output[self.col_name] = col[self.col_name]
return output | Converts data back into original format.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame | Below is the the instruction that describes the task:
### Input:
Converts data back into original format.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
### Response:
def reverse_transform(self, col):
"""Converts data back into original format.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.DataFrame
"""
output = pd.DataFrame()
new_name = '?' + self.col_name
col.loc[col[new_name] == 0, self.col_name] = np.nan
output[self.col_name] = col[self.col_name]
return output |
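A tiny illustration of the masking rule in reverse_transform; the transformer instance is stubbed with only the attribute the method reads, and all values are invented.
import numpy as np
import pandas as pd
from types import SimpleNamespace

stub = SimpleNamespace(col_name='age')         # minimal stand-in for self
col = pd.DataFrame({'age': [23.0, 0.0, 31.0], '?age': [1, 0, 1]})
print(reverse_transform(stub, col))            # the row with ?age == 0 comes back as NaN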
def solve_filter(expr, vars):
"""Filter values on the LHS by evaluating RHS with each value.
Returns any LHS values for which RHS evaluates to a true value.
"""
lhs_values, _ = __solve_for_repeated(expr.lhs, vars)
def lazy_filter():
for lhs_value in repeated.getvalues(lhs_values):
if solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)).value:
yield lhs_value
return Result(repeated.lazy(lazy_filter), ()) | Filter values on the LHS by evaluating RHS with each value.
Returns any LHS values for which RHS evaluates to a true value. | Below is the the instruction that describes the task:
### Input:
Filter values on the LHS by evaluating RHS with each value.
Returns any LHS values for which RHS evaluates to a true value.
### Response:
def solve_filter(expr, vars):
"""Filter values on the LHS by evaluating RHS with each value.
Returns any LHS values for which RHS evaluates to a true value.
"""
lhs_values, _ = __solve_for_repeated(expr.lhs, vars)
def lazy_filter():
for lhs_value in repeated.getvalues(lhs_values):
if solve(expr.rhs, __nest_scope(expr.lhs, vars, lhs_value)).value:
yield lhs_value
return Result(repeated.lazy(lazy_filter), ()) |
def logarithmic_profile(wind_speed, wind_speed_height, hub_height,
roughness_length, obstacle_height=0.0):
r"""
Calculates the wind speed at hub height using a logarithmic wind profile.
The logarithmic height equation is used. There is the possibility of
including the height of the surrounding obstacles in the calculation. This
function is carried out when the parameter `wind_speed_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'logarithmic'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length.
obstacle_height : float
Height of obstacles in the surrounding area of the wind turbine. Set
`obstacle_height` to zero for wide spread obstacles. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot
\frac{\ln\left(\frac{h_{hub}-d}{z_{0}}\right)}{\ln\left(
\frac{h_{data}-d}{z_{0}}\right)}
with:
v: wind speed, h: height, :math:`z_{0}`: roughness length,
d: boundary layer offset (estimated by d = 0.7 * `obstacle_height`)
For d = 0 it results in the following equation [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot\frac{\ln\left(\frac{h_{hub}}
{z_{0}}\right)}{\ln\left(\frac{h_{data}}{z_{0}}\right)}
:math:`h_{data}` is the height at which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 278
.. [2] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, p. 129
.. [3] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 515
"""
if 0.7 * obstacle_height > wind_speed_height:
raise ValueError("To take an obstacle height of {0} m ".format(
obstacle_height) + "into consideration, wind " +
"speed data of a greater height is needed.")
# Return np.array if wind_speed is np.array
if (isinstance(wind_speed, np.ndarray) and
isinstance(roughness_length, pd.Series)):
roughness_length = np.array(roughness_length)
return (wind_speed * np.log((hub_height - 0.7 * obstacle_height) /
roughness_length) /
np.log((wind_speed_height - 0.7 * obstacle_height) /
roughness_length)) | r"""
Calculates the wind speed at hub height using a logarithmic wind profile.
The logarithmic height equation is used. There is the possibility of
including the height of the surrounding obstacles in the calculation. This
function is carried out when the parameter `wind_speed_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'logarithmic'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length.
obstacle_height : float
Height of obstacles in the surrounding area of the wind turbine. Set
`obstacle_height` to zero for wide spread obstacles. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot
\frac{\ln\left(\frac{h_{hub}-d}{z_{0}}\right)}{\ln\left(
\frac{h_{data}-d}{z_{0}}\right)}
with:
v: wind speed, h: height, :math:`z_{0}`: roughness length,
d: boundary layer offset (estimated by d = 0.7 * `obstacle_height`)
For d = 0 it results in the following equation [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot\frac{\ln\left(\frac{h_{hub}}
{z_{0}}\right)}{\ln\left(\frac{h_{data}}{z_{0}}\right)}
:math:`h_{data}` is the height at which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 278
.. [2] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, p. 129
.. [3] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 515 | Below is the the instruction that describes the task:
### Input:
r"""
Calculates the wind speed at hub height using a logarithmic wind profile.
The logarithmic height equation is used. There is the possibility of
including the height of the surrounding obstacles in the calculation. This
function is carried out when the parameter `wind_speed_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'logarithmic'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length.
obstacle_height : float
Height of obstacles in the surrounding area of the wind turbine. Set
`obstacle_height` to zero for wide spread obstacles. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot
\frac{\ln\left(\frac{h_{hub}-d}{z_{0}}\right)}{\ln\left(
\frac{h_{data}-d}{z_{0}}\right)}
with:
v: wind speed, h: height, :math:`z_{0}`: roughness length,
d: boundary layer offset (estimated by d = 0.7 * `obstacle_height`)
For d = 0 it results in the following equation [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot\frac{\ln\left(\frac{h_{hub}}
{z_{0}}\right)}{\ln\left(\frac{h_{data}}{z_{0}}\right)}
:math:`h_{data}` is the height at which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 278
.. [2] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, p. 129
.. [3] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 515
### Response:
def logarithmic_profile(wind_speed, wind_speed_height, hub_height,
roughness_length, obstacle_height=0.0):
r"""
Calculates the wind speed at hub height using a logarithmic wind profile.
The logarithmic height equation is used. There is the possibility of
including the height of the surrounding obstacles in the calculation. This
function is carried out when the parameter `wind_speed_model` of an
instance of the :class:`~.modelchain.ModelChain` class is 'logarithmic'.
Parameters
----------
wind_speed : pandas.Series or numpy.array
Wind speed time series.
wind_speed_height : float
Height for which the parameter `wind_speed` applies.
hub_height : float
Hub height of wind turbine.
roughness_length : pandas.Series or numpy.array or float
Roughness length.
obstacle_height : float
Height of obstacles in the surrounding area of the wind turbine. Set
`obstacle_height` to zero for wide spread obstacles. Default: 0.
Returns
-------
pandas.Series or numpy.array
Wind speed at hub height. Data type depends on type of `wind_speed`.
Notes
-----
The following equation is used [1]_, [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot
\frac{\ln\left(\frac{h_{hub}-d}{z_{0}}\right)}{\ln\left(
\frac{h_{data}-d}{z_{0}}\right)}
with:
v: wind speed, h: height, :math:`z_{0}`: roughness length,
d: boundary layer offset (estimated by d = 0.7 * `obstacle_height`)
For d = 0 it results in the following equation [2]_, [3]_:
.. math:: v_{wind,hub}=v_{wind,data}\cdot\frac{\ln\left(\frac{h_{hub}}
{z_{0}}\right)}{\ln\left(\frac{h_{data}}{z_{0}}\right)}
:math:`h_{data}` is the height at which the wind speed
:math:`v_{wind,data}` is measured and :math:`v_{wind,hub}` is the wind
speed at hub height :math:`h_{hub}` of the wind turbine.
Parameters `wind_speed_height`, `roughness_length`, `hub_height` and
`obstacle_height` have to be of the same unit.
References
----------
.. [1] Quaschning V.: "Regenerative Energiesysteme". München, Hanser
Verlag, 2011, p. 278
.. [2] Gasch, R., Twele, J.: "Windkraftanlagen". 6. Auflage, Wiesbaden,
Vieweg + Teubner, 2010, p. 129
.. [3] Hau, E.: "Windkraftanlagen - Grundlagen, Technik, Einsatz,
Wirtschaftlichkeit". 4. Auflage, Springer-Verlag, 2008, p. 515
"""
if 0.7 * obstacle_height > wind_speed_height:
raise ValueError("To take an obstacle height of {0} m ".format(
obstacle_height) + "into consideration, wind " +
"speed data of a greater height is needed.")
# Return np.array if wind_speed is np.array
if (isinstance(wind_speed, np.ndarray) and
isinstance(roughness_length, pd.Series)):
roughness_length = np.array(roughness_length)
return (wind_speed * np.log((hub_height - 0.7 * obstacle_height) /
roughness_length) /
np.log((wind_speed_height - 0.7 * obstacle_height) /
roughness_length)) |
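A short numeric check of logarithmic_profile; all inputs below are made-up illustration values.
import numpy as np

v_hub = logarithmic_profile(wind_speed=np.array([5.0]),
                            wind_speed_height=10, hub_height=100,
                            roughness_length=0.15)
print(v_hub)   # ln(100/0.15) / ln(10/0.15) is about 1.55, so roughly 7.7 m/s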
def all_tables(self) -> List[str]:
"""
List of all known tables
:return:
"""
return sorted([k for k in self.__dict__.keys()
if k not in _I2B2Tables._funcs and not k.startswith("_")]) | List of all known tables
:return: | Below is the the instruction that describes the task:
### Input:
List of all known tables
:return:
### Response:
def all_tables(self) -> List[str]:
"""
List of all known tables
:return:
"""
return sorted([k for k in self.__dict__.keys()
if k not in _I2B2Tables._funcs and not k.startswith("_")]) |
def _elbv2_load_balancer(self, lookup):
"""
Args:
lookup: the friendly name of the V2 elb to look up
Returns:
A dict with the load balancer description
Raises:
botocore.exceptions.ClientError: no such load-balancer
"""
client = EFAwsResolver.__CLIENTS['elbv2']
elbs = client.describe_load_balancers(Names=[lookup])
# getting the first one, since we requested only one lb
elb = elbs['LoadBalancers'][0]
return elb | Args:
lookup: the friendly name of the V2 elb to look up
Returns:
A dict with the load balancer description
Raises:
botocore.exceptions.ClientError: no such load-balancer | Below is the the instruction that describes the task:
### Input:
Args:
lookup: the friendly name of the V2 elb to look up
Returns:
A dict with the load balancer description
Raises:
botocore.exceptions.ClientError: no such load-balancer
### Response:
def _elbv2_load_balancer(self, lookup):
"""
Args:
lookup: the friendly name of the V2 elb to look up
Returns:
A dict with the load balancer description
Raises:
botocore.exceptions.ClientError: no such load-balancer
"""
client = EFAwsResolver.__CLIENTS['elbv2']
elbs = client.describe_load_balancers(Names=[lookup])
# getting the first one, since we requested only one lb
elb = elbs['LoadBalancers'][0]
return elb |
def AddClient(self, client):
"""Adds a client to the index.
Args:
client: A VFSGRRClient record to add or update.
"""
client_id, keywords = self.AnalyzeClient(client)
self.AddKeywordsForName(client_id, keywords) | Adds a client to the index.
Args:
client: A VFSGRRClient record to add or update. | Below is the the instruction that describes the task:
### Input:
Adds a client to the index.
Args:
client: A VFSGRRClient record to add or update.
### Response:
def AddClient(self, client):
"""Adds a client to the index.
Args:
client: A VFSGRRClient record to add or update.
"""
client_id, keywords = self.AnalyzeClient(client)
self.AddKeywordsForName(client_id, keywords) |
def data_to_string(self):
"""Returns a UTF8 string with the QR Code's data"""
# FIX-ME: if we don't add the BOM_UTF8 char, QtQR doesn't decode
# correctly; but if we add it, mobile apps don't.-
# Apparently is a zbar bug.
if self.data_type == 'text':
return BOM_UTF8 + self.__class__.data_encode[self.data_type](self.data).encode('utf-8')
else:
return self.__class__.data_encode[self.data_type](self.data).encode('utf-8') | Returns a UTF8 string with the QR Code's data | Below is the the instruction that describes the task:
### Input:
Returns a UTF8 string with the QR Code's data
### Response:
def data_to_string(self):
"""Returns a UTF8 string with the QR Code's data"""
# FIX-ME: if we don't add the BOM_UTF8 char, QtQR doesn't decode
# correctly; but if we add it, mobile apps don't.-
# Apparently is a zbar bug.
if self.data_type == 'text':
return BOM_UTF8 + self.__class__.data_encode[self.data_type](self.data).encode('utf-8')
else:
return self.__class__.data_encode[self.data_type](self.data).encode('utf-8') |
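For reference, the BOM handling in data_to_string just prepends the three-byte UTF-8 byte-order mark; a standalone illustration with an arbitrary payload:
from codecs import BOM_UTF8

payload = u'hola'.encode('utf-8')
print(BOM_UTF8 + payload)   # the BOM bytes EF BB BF followed by 'hola'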
def restore(self, backup=None, delete_backup=False):
"""Restore the snapshot to the associated storage resource.
:param backup: name of the backup snapshot
:param delete_backup: Whether to delete the backup snap after a
successful restore.
"""
resp = self._cli.action(self.resource_class, self.get_id(),
'restore', copyName=backup)
resp.raise_if_err()
backup = resp.first_content['backup']
backup_snap = UnitySnap(_id=backup['id'], cli=self._cli)
if delete_backup:
log.info("Deleting the backup snap {} as the restoration "
"succeeded.".format(backup['id']))
backup_snap.delete()
return backup_snap | Restore the snapshot to the associated storage resource.
:param backup: name of the backup snapshot
:param delete_backup: Whether to delete the backup snap after a
successful restore. | Below is the the instruction that describes the task:
### Input:
Restore the snapshot to the associated storage resource.
:param backup: name of the backup snapshot
:param delete_backup: Whether to delete the backup snap after a
successful restore.
### Response:
def restore(self, backup=None, delete_backup=False):
"""Restore the snapshot to the associated storage resource.
:param backup: name of the backup snapshot
:param delete_backup: Whether to delete the backup snap after a
successful restore.
"""
resp = self._cli.action(self.resource_class, self.get_id(),
'restore', copyName=backup)
resp.raise_if_err()
backup = resp.first_content['backup']
backup_snap = UnitySnap(_id=backup['id'], cli=self._cli)
if delete_backup:
log.info("Deleting the backup snap {} as the restoration "
"succeeded.".format(backup['id']))
backup_snap.delete()
return backup_snap |
def get_user_from_social_auth(tpa_provider, tpa_username):
"""
Find the LMS user from the LMS model `UserSocialAuth`.
Arguments:
tpa_provider (third_party_auth.provider): third party auth provider object
tpa_username (str): Username returned by the third party auth
"""
user_social_auth = UserSocialAuth.objects.select_related('user').filter(
user__username=tpa_username, provider=tpa_provider.backend_name
).first()
return user_social_auth.user if user_social_auth else None | Find the LMS user from the LMS model `UserSocialAuth`.
Arguments:
tpa_provider (third_party_auth.provider): third party auth provider object
tpa_username (str): Username returned by the third party auth | Below is the the instruction that describes the task:
### Input:
Find the LMS user from the LMS model `UserSocialAuth`.
Arguments:
tpa_provider (third_party_auth.provider): third party auth provider object
tpa_username (str): Username returned by the third party auth
### Response:
def get_user_from_social_auth(tpa_provider, tpa_username):
"""
Find the LMS user from the LMS model `UserSocialAuth`.
Arguments:
tpa_provider (third_party_auth.provider): third party auth provider object
tpa_username (str): Username returned by the third party auth
"""
user_social_auth = UserSocialAuth.objects.select_related('user').filter(
user__username=tpa_username, provider=tpa_provider.backend_name
).first()
return user_social_auth.user if user_social_auth else None |
def clear_xcom_data(self, session=None):
"""
Clears all XCom data from the database for the task instance
"""
session.query(XCom).filter(
XCom.dag_id == self.dag_id,
XCom.task_id == self.task_id,
XCom.execution_date == self.execution_date
).delete()
session.commit() | Clears all XCom data from the database for the task instance | Below is the the instruction that describes the task:
### Input:
Clears all XCom data from the database for the task instance
### Response:
def clear_xcom_data(self, session=None):
"""
Clears all XCom data from the database for the task instance
"""
session.query(XCom).filter(
XCom.dag_id == self.dag_id,
XCom.task_id == self.task_id,
XCom.execution_date == self.execution_date
).delete()
session.commit() |
def lookup_prefix(self, prefix, timestamp=timestamp_now):
"""
Returns lookup data of a Prefix
Args:
prefix (string): Prefix of an Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the Prefix
Raises:
KeyError: No matching Prefix found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code shows how to obtain the information for the prefix "DH" from the countryfile.com
database (default database).
>>> from pyhamtools import LookupLib
>>> myLookupLib = LookupLib()
>>> print myLookupLib.lookup_prefix("DH")
{
'adif': 230,
'country': u'Fed. Rep. of Germany',
'longitude': 10.0,
'cqz': 14,
'ituz': 28,
'latitude': 51.0,
'continent': u'EU'
}
Note:
This method is available for
- clublogxml
- countryfile
- redis
"""
prefix = prefix.strip().upper()
if self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
return self._check_data_for_date(prefix, timestamp, self._prefixes, self._prefixes_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_prefix_", "_prefix_index_", self._redis_prefix, prefix)
return self._check_data_for_date(prefix, timestamp, data_dict, index)
# no matching case
raise KeyError | Returns lookup data of a Prefix
Args:
prefix (string): Prefix of an Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the Prefix
Raises:
KeyError: No matching Prefix found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code shows how to obtain the information for the prefix "DH" from the countryfile.com
database (default database).
>>> from pyhamtools import LookupLib
>>> myLookupLib = LookupLib()
>>> print myLookupLib.lookup_prefix("DH")
{
'adif': 230,
'country': u'Fed. Rep. of Germany',
'longitude': 10.0,
'cqz': 14,
'ituz': 28,
'latitude': 51.0,
'continent': u'EU'
}
Note:
This method is available for
- clublogxml
- countryfile
- redis | Below is the the instruction that describes the task:
### Input:
Returns lookup data of a Prefix
Args:
prefix (string): Prefix of an Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the Prefix
Raises:
KeyError: No matching Prefix found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code shows how to obtain the information for the prefix "DH" from the countryfile.com
database (default database).
>>> from pyhamtools import LookupLib
>>> myLookupLib = LookupLib()
>>> print myLookupLib.lookup_prefix("DH")
{
'adif': 230,
'country': u'Fed. Rep. of Germany',
'longitude': 10.0,
'cqz': 14,
'ituz': 28,
'latitude': 51.0,
'continent': u'EU'
}
Note:
This method is available for
- clublogxml
- countryfile
- redis
### Response:
def lookup_prefix(self, prefix, timestamp=timestamp_now):
"""
Returns lookup data of a Prefix
Args:
prefix (string): Prefix of a Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
dict: Dictionary containing the country specific data of the Prefix
Raises:
KeyError: No matching Prefix found
APIKeyMissingError: API Key for Clublog missing or incorrect
Example:
The following code shows how to obtain the information for the prefix "DH" from the countryfile.com
database (default database).
>>> from pyhamtools import LookupLib
>>> myLookupLib = LookupLib()
>>> print myLookupLib.lookup_prefix("DH")
{
'adif': 230,
'country': u'Fed. Rep. of Germany',
'longitude': 10.0,
'cqz': 14,
'ituz': 28,
'latitude': 51.0,
'continent': u'EU'
}
Note:
This method is available for
- clublogxml
- countryfile
- redis
"""
prefix = prefix.strip().upper()
if self._lookuptype == "clublogxml" or self._lookuptype == "countryfile":
return self._check_data_for_date(prefix, timestamp, self._prefixes, self._prefixes_index)
elif self._lookuptype == "redis":
data_dict, index = self._get_dicts_from_redis("_prefix_", "_prefix_index_", self._redis_prefix, prefix)
return self._check_data_for_date(prefix, timestamp, data_dict, index)
# no matching case
raise KeyError |
def parameters(self):
"""Return a list where each element contains the parameters for a task.
"""
parameters = []
for task in self.tasks:
parameters.extend(task.parameters)
return parameters | Return a list where each element contains the parameters for a task. | Below is the the instruction that describes the task:
### Input:
Return a list where each element contains the parameters for a task.
### Response:
def parameters(self):
"""Return a list where each element contains the parameters for a task.
"""
parameters = []
for task in self.tasks:
parameters.extend(task.parameters)
return parameters |
def from_isodatetime(value, strict=False):
"""Convert an ISO formatted datetime into a Date object.
:param value: The ISO formatted datetime.
:param strict: If value is ``None``, then if strict is ``True`` it returns
the Date object of today, otherwise it returns ``None``.
(Default: ``False``)
:returns: The Date object or ``None``.
"""
if value or strict:
return arrow.get(value).datetime | Convert an ISO formatted datetime into a Date object.
:param value: The ISO formatted datetime.
:param strict: If value is ``None``, then if strict is ``True`` it returns
the Date object of today, otherwise it returns ``None``.
(Default: ``False``)
:returns: The Date object or ``None``. | Below is the the instruction that describes the task:
### Input:
Convert an ISO formatted datetime into a Date object.
:param value: The ISO formatted datetime.
:param strict: If value is ``None``, then if strict is ``True`` it returns
the Date object of today, otherwise it returns ``None``.
(Default: ``False``)
:returns: The Date object or ``None``.
### Response:
def from_isodatetime(value, strict=False):
"""Convert an ISO formatted datetime into a Date object.
:param value: The ISO formatted datetime.
:param strict: If value is ``None``, then if strict is ``True`` it returns
the Date object of today, otherwise it returns ``None``.
(Default: ``False``)
:returns: The Date object or ``None``.
"""
if value or strict:
return arrow.get(value).datetime |
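
A minimal usage sketch of the ISO-parsing helper above, assuming the `arrow` package is installed; the strict branch is left out because its exact behaviour depends on the arrow version:

import arrow

def from_isodatetime(value, strict=False):
    # A falsy value with strict=False falls through and returns None.
    if value or strict:
        return arrow.get(value).datetime

print(from_isodatetime('2021-03-01T12:00:00+00:00'))  # 2021-03-01 12:00:00+00:00
print(from_isodatetime(None))                         # None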
def import_class(class_path):
"""imports and returns given class string.
:param class_path: Class path as string
:type class_path: str
:returns: Class that has given path
:rtype: class
:Example:
>>> import_class('collections.OrderedDict').__name__
'OrderedDict'
"""
try:
from django.utils.importlib import import_module
module_name = '.'.join(class_path.split(".")[:-1])
mod = import_module(module_name)
return getattr(mod, class_path.split(".")[-1])
except Exception as detail:
raise ImportError(detail) | imports and returns given class string.
:param class_path: Class path as string
:type class_path: str
:returns: Class that has given path
:rtype: class
:Example:
>>> import_class('collections.OrderedDict').__name__
'OrderedDict' | Below is the the instruction that describes the task:
### Input:
imports and returns given class string.
:param class_path: Class path as string
:type class_path: str
:returns: Class that has given path
:rtype: class
:Example:
>>> import_class('collections.OrderedDict').__name__
'OrderedDict'
### Response:
def import_class(class_path):
"""imports and returns given class string.
:param class_path: Class path as string
:type class_path: str
:returns: Class that has given path
:rtype: class
:Example:
>>> import_class('collections.OrderedDict').__name__
'OrderedDict'
"""
try:
from django.utils.importlib import import_module
module_name = '.'.join(class_path.split(".")[:-1])
mod = import_module(module_name)
return getattr(mod, class_path.split(".")[-1])
except Exception as detail:
raise ImportError(detail) |
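
The row above relies on Python 2 era Django (`django.utils.importlib`). A rough standard-library equivalent, shown here only as a sketch, does the same split-and-getattr dance with `importlib`:

from importlib import import_module

def import_class(class_path):
    # Split "pkg.module.ClassName" into a module path and an attribute name.
    module_name, _, class_name = class_path.rpartition('.')
    try:
        return getattr(import_module(module_name), class_name)
    except (ImportError, AttributeError) as detail:
        raise ImportError(detail)

print(import_class('collections.OrderedDict').__name__)  # OrderedDict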
def set_children(self, children):
"""Set children of the span block."""
if isinstance(children, tuple):
self._children = list(children)
else:
self._children = [children]
return self | Set children of the span block. | Below is the the instruction that describes the task:
### Input:
Set children of the span block.
### Response:
def set_children(self, children):
"""Set children of the span block."""
if isinstance(children, tuple):
self._children = list(children)
else:
self._children = [children]
return self |
def write_error(self, text):
"""Simulate stderr"""
self.flush()
self.write(text, flush=True, error=True)
if get_debug_level():
STDERR.write(text) | Simulate stderr | Below is the the instruction that describes the task:
### Input:
Simulate stderr
### Response:
def write_error(self, text):
"""Simulate stderr"""
self.flush()
self.write(text, flush=True, error=True)
if get_debug_level():
STDERR.write(text) |
def _notifyDone(self):
'''
Allow any other editatoms waiting on me to complete to resume
'''
if self.notified:
return
self.doneevent.set()
for buid in self.mybldgbuids:
del self.allbldgbuids[buid]
self.notified = True | Allow any other editatoms waiting on me to complete to resume | Below is the the instruction that describes the task:
### Input:
Allow any other editatoms waiting on me to complete to resume
### Response:
def _notifyDone(self):
'''
Allow any other editatoms waiting on me to complete to resume
'''
if self.notified:
return
self.doneevent.set()
for buid in self.mybldgbuids:
del self.allbldgbuids[buid]
self.notified = True |
def get_proxy(self, input_):
"""Gets a proxy.
arg: input (osid.proxy.ProxyCondition): a proxy condition
return: (osid.proxy.Proxy) - a proxy
raise: NullArgument - ``input`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``input`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
if input_._http_request is not None:
authentication = Authentication()
authentication.set_django_user(input_._http_request.user)
else:
authentication = None
effective_agent_id = input_._effective_agent_id
# Also need to deal with effective dates and Local
return rules.Proxy(authentication=authentication,
effective_agent_id=effective_agent_id) | Gets a proxy.
arg: input (osid.proxy.ProxyCondition): a proxy condition
return: (osid.proxy.Proxy) - a proxy
raise: NullArgument - ``input`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``input`` is not of this service
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets a proxy.
arg: input (osid.proxy.ProxyCondition): a proxy condition
return: (osid.proxy.Proxy) - a proxy
raise: NullArgument - ``input`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``input`` is not of this service
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_proxy(self, input_):
"""Gets a proxy.
arg: input (osid.proxy.ProxyCondition): a proxy condition
return: (osid.proxy.Proxy) - a proxy
raise: NullArgument - ``input`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
raise: Unsupported - ``input`` is not of this service
*compliance: mandatory -- This method must be implemented.*
"""
if input_._http_request is not None:
authentication = Authentication()
authentication.set_django_user(input_._http_request.user)
else:
authentication = None
effective_agent_id = input_._effective_agent_id
# Also need to deal with effective dates and Local
return rules.Proxy(authentication=authentication,
effective_agent_id=effective_agent_id) |
def __get_conn(self, flag_force_new=False, filename=None):
"""Returns connection to database. Tries to return existing connection, unless flag_force_new
Args:
flag_force_new:
filename:
Returns: sqlite3.Connection object
**Note** this is a private method because you can get a connection to any file, so it has to
be used in the right moment
"""
flag_open_new = flag_force_new or not self._conn_is_open()
if flag_open_new:
if filename is None:
filename = self.filename
# funny that __get_conn() calls _get_conn() but that's it
conn = self._get_conn(filename)
self._conn = conn
else:
conn = self._conn
return conn | Returns connection to database. Tries to return existing connection, unless flag_force_new
Args:
flag_force_new:
filename:
Returns: sqlite3.Connection object
**Note** this is a private method because you can get a connection to any file, so it has to
be used in the right moment | Below is the the instruction that describes the task:
### Input:
Returns connection to database. Tries to return existing connection, unless flag_force_new
Args:
flag_force_new:
filename:
Returns: sqlite3.Connection object
**Note** this is a private method because you can get a connection to any file, so it has to
be used in the right moment
### Response:
def __get_conn(self, flag_force_new=False, filename=None):
"""Returns connection to database. Tries to return existing connection, unless flag_force_new
Args:
flag_force_new:
filename:
Returns: sqlite3.Connection object
**Note** this is a private method because you can get a connection to any file, so it has to
be used in the right moment
"""
flag_open_new = flag_force_new or not self._conn_is_open()
if flag_open_new:
if filename is None:
filename = self.filename
# funny that __get_conn() calls _get_conn() but that's it
conn = self._get_conn(filename)
self._conn = conn
else:
conn = self._conn
return conn |
def percept(self, agent):
"By default, agent perceives things within a default radius."
return [self.thing_percept(thing, agent)
for thing in self.things_near(agent.location)] | By default, agent perceives things within a default radius. | Below is the the instruction that describes the task:
### Input:
By default, agent perceives things within a default radius.
### Response:
def percept(self, agent):
"By default, agent perceives things within a default radius."
return [self.thing_percept(thing, agent)
for thing in self.things_near(agent.location)] |
def remove_objects(self, bucket_name, objects_iter):
"""
Removes multiple objects from a bucket.
:param bucket_name: Bucket from which to remove objects
:param objects_iter: A list, tuple or iterator that provides
objects names to delete.
:return: An iterator of MultiDeleteError instances for each
object that had a delete error.
"""
is_valid_bucket_name(bucket_name)
if isinstance(objects_iter, basestring):
raise TypeError(
'objects_iter cannot be `str` or `bytes` instance. It must be '
'a list, tuple or iterator of object names'
)
# turn list like objects into an iterator.
objects_iter = itertools.chain(objects_iter)
obj_batch = []
exit_loop = False
while not exit_loop:
try:
object_name = next(objects_iter)
is_non_empty_string(object_name)
except StopIteration:
exit_loop = True
if not exit_loop:
obj_batch.append(object_name)
# if we have 1000 items in the batch, or we have to exit
# the loop, we have to make a request to delete objects.
if len(obj_batch) == 1000 or (exit_loop and len(obj_batch) > 0):
# send request and parse response
errs_result = self._process_remove_objects_batch(
bucket_name, obj_batch
)
# return the delete errors.
for err_result in errs_result:
yield err_result
# clear batch for next set of items
obj_batch = [] | Removes multiple objects from a bucket.
:param bucket_name: Bucket from which to remove objects
:param objects_iter: A list, tuple or iterator that provides
objects names to delete.
:return: An iterator of MultiDeleteError instances for each
object that had a delete error. | Below is the the instruction that describes the task:
### Input:
Removes multiple objects from a bucket.
:param bucket_name: Bucket from which to remove objects
:param objects_iter: A list, tuple or iterator that provides
objects names to delete.
:return: An iterator of MultiDeleteError instances for each
object that had a delete error.
### Response:
def remove_objects(self, bucket_name, objects_iter):
"""
Removes multiple objects from a bucket.
:param bucket_name: Bucket from which to remove objects
:param objects_iter: A list, tuple or iterator that provides
objects names to delete.
:return: An iterator of MultiDeleteError instances for each
object that had a delete error.
"""
is_valid_bucket_name(bucket_name)
if isinstance(objects_iter, basestring):
raise TypeError(
'objects_iter cannot be `str` or `bytes` instance. It must be '
'a list, tuple or iterator of object names'
)
# turn list like objects into an iterator.
objects_iter = itertools.chain(objects_iter)
obj_batch = []
exit_loop = False
while not exit_loop:
try:
object_name = next(objects_iter)
is_non_empty_string(object_name)
except StopIteration:
exit_loop = True
if not exit_loop:
obj_batch.append(object_name)
# if we have 1000 items in the batch, or we have to exit
# the loop, we have to make a request to delete objects.
if len(obj_batch) == 1000 or (exit_loop and len(obj_batch) > 0):
# send request and parse response
errs_result = self._process_remove_objects_batch(
bucket_name, obj_batch
)
# return the delete errors.
for err_result in errs_result:
yield err_result
# clear batch for next set of items
obj_batch = [] |
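
The core of the method above is a generic batching pattern: drain an iterator into chunks of at most 1000 names and flush each chunk. A standalone sketch of just that pattern, with no Minio connection involved:

def batched(names_iter, batch_size=1000):
    # Yield lists of at most batch_size items from any iterable.
    batch = []
    for name in names_iter:
        batch.append(name)
        if len(batch) == batch_size:
            yield batch
            batch = []
    if batch:
        yield batch

for chunk in batched('obj-%d' % i for i in range(2500)):
    print(len(chunk))  # 1000, 1000, 500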
def add_suggestions(self, *suggestions, **kwargs):
"""
Add suggestion terms to the AutoCompleter engine. Each suggestion has a score and string.
If kwargs['increment'] is true and the terms are already in the server's dictionary, we increment their scores
"""
pipe = self.redis.pipeline()
for sug in suggestions:
args = [AutoCompleter.SUGADD_COMMAND, self.key, sug.string, sug.score]
if kwargs.get('increment'):
args.append(AutoCompleter.INCR)
if sug.payload:
args.append('PAYLOAD')
args.append(sug.payload)
pipe.execute_command(*args)
return pipe.execute()[-1] | Add suggestion terms to the AutoCompleter engine. Each suggestion has a score and string.
If kwargs['increment'] is true and the terms are already in the server's dictionary, we increment their scores | Below is the the instruction that describes the task:
### Input:
Add suggestion terms to the AutoCompleter engine. Each suggestion has a score and string.
If kwargs['increment'] is true and the terms are already in the server's dictionary, we increment their scores
### Response:
def add_suggestions(self, *suggestions, **kwargs):
"""
Add suggestion terms to the AutoCompleter engine. Each suggestion has a score and string.
If kwargs['increment'] is true and the terms are already in the server's dictionary, we increment their scores
"""
pipe = self.redis.pipeline()
for sug in suggestions:
args = [AutoCompleter.SUGADD_COMMAND, self.key, sug.string, sug.score]
if kwargs.get('increment'):
args.append(AutoCompleter.INCR)
if sug.payload:
args.append('PAYLOAD')
args.append(sug.payload)
pipe.execute_command(*args)
return pipe.execute()[-1] |
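
For illustration, this is how the FT.SUGADD argument list is assembled for a single suggestion before it is queued on the pipeline; the `Suggestion` tuple and command name mirror the row above, and no Redis connection is needed:

from collections import namedtuple

Suggestion = namedtuple('Suggestion', 'string score payload')
SUGADD_COMMAND = 'FT.SUGADD'

def build_sugadd_args(key, sug, increment=False):
    args = [SUGADD_COMMAND, key, sug.string, sug.score]
    if increment:
        args.append('INCR')
    if sug.payload:
        args.extend(['PAYLOAD', sug.payload])
    return args

print(build_sugadd_args('ac', Suggestion('hello world', 1.0, None), increment=True))
# ['FT.SUGADD', 'ac', 'hello world', 1.0, 'INCR']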
def _set_mip_policy(self, v, load=False):
"""
Setter method for mip_policy, mapped from YANG variable /protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mip_policy (mip-policy-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_mip_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mip_policy() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'default': {'value': 1}, u'explicit': {'value': 2}},), is_leaf=True, yang_name="mip-policy", rest_name="mip-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Set MIP policy', u'cli-full-no': None, u'callpoint': u'setDot1agMipPolicy'}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='mip-policy-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mip_policy must be of a type compatible with mip-policy-type""",
'defined-type': "brocade-dot1ag:mip-policy-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'default': {'value': 1}, u'explicit': {'value': 2}},), is_leaf=True, yang_name="mip-policy", rest_name="mip-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Set MIP policy', u'cli-full-no': None, u'callpoint': u'setDot1agMipPolicy'}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='mip-policy-type', is_config=True)""",
})
self.__mip_policy = t
if hasattr(self, '_set'):
self._set() | Setter method for mip_policy, mapped from YANG variable /protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mip_policy (mip-policy-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_mip_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mip_policy() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for mip_policy, mapped from YANG variable /protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mip_policy (mip-policy-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_mip_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mip_policy() directly.
### Response:
def _set_mip_policy(self, v, load=False):
"""
Setter method for mip_policy, mapped from YANG variable /protocol/cfm/domain_name/ma_name/cfm_ma_sub_commands/mip_policy (mip-policy-type)
If this variable is read-only (config: false) in the
source YANG file, then _set_mip_policy is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_mip_policy() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'default': {'value': 1}, u'explicit': {'value': 2}},), is_leaf=True, yang_name="mip-policy", rest_name="mip-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Set MIP policy', u'cli-full-no': None, u'callpoint': u'setDot1agMipPolicy'}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='mip-policy-type', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """mip_policy must be of a type compatible with mip-policy-type""",
'defined-type': "brocade-dot1ag:mip-policy-type",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'default': {'value': 1}, u'explicit': {'value': 2}},), is_leaf=True, yang_name="mip-policy", rest_name="mip-policy", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'Set MIP policy', u'cli-full-no': None, u'callpoint': u'setDot1agMipPolicy'}}, namespace='urn:brocade.com:mgmt:brocade-dot1ag', defining_module='brocade-dot1ag', yang_type='mip-policy-type', is_config=True)""",
})
self.__mip_policy = t
if hasattr(self, '_set'):
self._set() |
def generate_data(self, plot_data):
"""
Generate data to be used by this layer
Parameters
----------
plot_data : dataframe
ggplot object data
"""
# Each layer that does not have data gets a copy of
# the ggplot.data. If it has data it is replaced
# by a copy so that we do not alter the user's data
if self.data is None:
self.data = plot_data.copy()
elif hasattr(self.data, '__call__'):
self.data = self.data(plot_data)
if not isinstance(self.data, pd.DataFrame):
raise PlotnineError(
"Data function must return a dataframe")
else:
self.data = self.data.copy() | Generate data to be used by this layer
Parameters
----------
plot_data : dataframe
ggplot object data | Below is the the instruction that describes the task:
### Input:
Generate data to be used by this layer
Parameters
----------
plot_data : dataframe
ggplot object data
### Response:
def generate_data(self, plot_data):
"""
Generate data to be used by this layer
Parameters
----------
plot_data : dataframe
ggplot object data
"""
# Each layer that does not have data gets a copy of
# the ggplot.data. If it has data it is replaced
# by a copy so that we do not alter the user's data
if self.data is None:
self.data = plot_data.copy()
elif hasattr(self.data, '__call__'):
self.data = self.data(plot_data)
if not isinstance(self.data, pd.DataFrame):
raise PlotnineError(
"Data function must return a dataframe")
else:
self.data = self.data.copy() |
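
The branching above (no data: copy the plot data; callable: call it and validate; otherwise: copy the user's frame) can be exercised in isolation. A small sketch, assuming pandas:

import pandas as pd

def resolve_layer_data(layer_data, plot_data):
    if layer_data is None:
        return plot_data.copy()
    if callable(layer_data):
        result = layer_data(plot_data)
        if not isinstance(result, pd.DataFrame):
            raise TypeError("Data function must return a dataframe")
        return result
    return layer_data.copy()

plot_data = pd.DataFrame({'x': [1, 2, 3]})
print(resolve_layer_data(None, plot_data).shape)                   # (3, 1)
print(resolve_layer_data(lambda df: df.head(2), plot_data).shape)  # (2, 1)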
def calc_fft_with_PyCUDA(Signal):
"""
Calculates the FFT of the passed signal by using
the scikit-cuda library which relies on PyCUDA
Parameters
----------
Signal : ndarray
Signal to be transformed into Fourier space
Returns
-------
Signalfft : ndarray
Array containing the signal's FFT
"""
print("starting fft")
Signal = Signal.astype(_np.float32)
Signal_gpu = _gpuarray.to_gpu(Signal)
Signalfft_gpu = _gpuarray.empty(len(Signal)//2+1,_np.complex64)
plan = _Plan(Signal.shape,_np.float32,_np.complex64)
_fft(Signal_gpu, Signalfft_gpu, plan)
Signalfft = Signalfft_gpu.get() #only N//2+1 long (half spectrum)
Signalfft = _np.hstack((Signalfft,_np.conj(_np.flipud(Signalfft[1:len(Signal)//2]))))
print("fft done")
return Signalfft | Calculates the FFT of the passed signal by using
the scikit-cuda library which relies on PyCUDA
Parameters
----------
Signal : ndarray
Signal to be transformed into Fourier space
Returns
-------
Signalfft : ndarray
Array containing the signal's FFT | Below is the the instruction that describes the task:
### Input:
Calculates the FFT of the passed signal by using
the scikit-cuda library which relies on PyCUDA
Parameters
----------
Signal : ndarray
Signal to be transformed into Fourier space
Returns
-------
Signalfft : ndarray
Array containing the signal's FFT
### Response:
def calc_fft_with_PyCUDA(Signal):
"""
Calculates the FFT of the passed signal by using
the scikit-cuda library which relies on PyCUDA
Parameters
----------
Signal : ndarray
Signal to be transformed into Fourier space
Returns
-------
Signalfft : ndarray
Array containing the signal's FFT
"""
print("starting fft")
Signal = Signal.astype(_np.float32)
Signal_gpu = _gpuarray.to_gpu(Signal)
Signalfft_gpu = _gpuarray.empty(len(Signal)//2+1,_np.complex64)
plan = _Plan(Signal.shape,_np.float32,_np.complex64)
_fft(Signal_gpu, Signalfft_gpu, plan)
Signalfft = Signalfft_gpu.get() #only N//2+1 long (half spectrum)
Signalfft = _np.hstack((Signalfft,_np.conj(_np.flipud(Signalfft[1:len(Signal)//2]))))
print("fft done")
return Signalfft |
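
The hstack at the end rebuilds the full spectrum from the half spectrum using the conjugate symmetry of real-input FFTs. The same reconstruction can be checked with NumPy alone, no GPU required:

import numpy as np

N = 8
signal = np.random.rand(N).astype(np.float32)
half = np.fft.rfft(signal)  # length N//2 + 1, like the GPU result
full = np.hstack((half, np.conj(np.flipud(half[1:N // 2]))))
print(np.allclose(full, np.fft.fft(signal), atol=1e-4))  # True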
def geocode(
self,
query,
bbox=None,
mapview=None,
exactly_one=True,
maxresults=None,
pageinformation=None,
language=None,
additional_data=False,
timeout=DEFAULT_SENTINEL
):
"""
Return a location point by address.
This implementation supports only a subset of all available parameters.
A list of all parameters of the pure REST API is available here:
https://developer.here.com/documentation/geocoder/topics/resource-geocode.html
:param str query: The address or query you wish to geocode.
For a structured query, provide a dictionary whose keys
are one of: `city`, `county`, `district`, `country`, `state`,
`street`, `housenumber`, or `postalcode`.
:param bbox: A type of spatial filter, limits the search for any other attributes
in the request. Specified by two coordinate (lat/lon)
pairs -- corners of the box. `The bbox search is currently similar
to mapview but it is not extended` (cited from the REST API docs).
Relevant global results are also returned.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:type bbox: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param mapview: The app's viewport, given as two coordinate pairs, specified
by two lat/lon pairs -- corners of the bounding box,
respectively. Matches from within the set map view plus an extended area
are ranked highest. Relevant global results are also returned.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:type mapview: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int maxresults: Defines the maximum number of items in the
response structure. If not provided and there are multiple results
the HERE API will return 10 results by default. This will be reset
to one if ``exactly_one`` is True.
:param int pageinformation: A key which identifies the page to be returned
when the response is separated into multiple pages. Only useful when
``maxresults`` is also provided.
:param str language: Affects the language of the response,
must be a RFC 4647 language code, e.g. 'en-US'.
:param str additional_data: A string with key-value pairs as described on
https://developer.here.com/documentation/geocoder/topics/resource-params-additional.html.
These will be added as one query parameter to the URL.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
if isinstance(query, dict):
params = {
key: val
for key, val
in query.items()
if key in self.structured_query_params
}
params['app_id'] = self.app_id
params['app_code'] = self.app_code
else:
params = {
'searchtext': self.format_string % query,
'app_id': self.app_id,
'app_code': self.app_code
}
if bbox:
params['bbox'] = self._format_bounding_box(
bbox, "%(lat2)s,%(lon1)s;%(lat1)s,%(lon2)s")
if mapview:
params['mapview'] = self._format_bounding_box(
mapview, "%(lat2)s,%(lon1)s;%(lat1)s,%(lon2)s")
if pageinformation:
params['pageinformation'] = pageinformation
if maxresults:
params['maxresults'] = maxresults
if exactly_one:
params['maxresults'] = 1
if language:
params['language'] = language
if additional_data:
params['additionaldata'] = additional_data
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout),
exactly_one
) | Return a location point by address.
This implementation supports only a subset of all available parameters.
A list of all parameters of the pure REST API is available here:
https://developer.here.com/documentation/geocoder/topics/resource-geocode.html
:param str query: The address or query you wish to geocode.
For a structured query, provide a dictionary whose keys
are one of: `city`, `county`, `district`, `country`, `state`,
`street`, `housenumber`, or `postalcode`.
:param bbox: A type of spatial filter, limits the search for any other attributes
in the request. Specified by two coordinate (lat/lon)
pairs -- corners of the box. `The bbox search is currently similar
to mapview but it is not extended` (cited from the REST API docs).
Relevant global results are also returned.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:type bbox: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param mapview: The app's viewport, given as two coordinate pairs, specified
by two lat/lon pairs -- corners of the bounding box,
respectively. Matches from within the set map view plus an extended area
are ranked highest. Relevant global results are also returned.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:type mapview: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int maxresults: Defines the maximum number of items in the
response structure. If not provided and there are multiple results
the HERE API will return 10 results by default. This will be reset
to one if ``exactly_one`` is True.
:param int pageinformation: A key which identifies the page to be returned
when the response is separated into multiple pages. Only useful when
``maxresults`` is also provided.
:param str language: Affects the language of the response,
must be a RFC 4647 language code, e.g. 'en-US'.
:param str additional_data: A string with key-value pairs as described on
https://developer.here.com/documentation/geocoder/topics/resource-params-additional.html.
These will be added as one query parameter to the URL.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``. | Below is the the instruction that describes the task:
### Input:
Return a location point by address.
This implementation supports only a subset of all available parameters.
A list of all parameters of the pure REST API is available here:
https://developer.here.com/documentation/geocoder/topics/resource-geocode.html
:param str query: The address or query you wish to geocode.
For a structured query, provide a dictionary whose keys
are one of: `city`, `county`, `district`, `country`, `state`,
`street`, `housenumber`, or `postalcode`.
:param bbox: A type of spatial filter, limits the search for any other attributes
in the request. Specified by two coordinate (lat/lon)
pairs -- corners of the box. `The bbox search is currently similar
to mapview but it is not extended` (cited from the REST API docs).
Relevant global results are also returned.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:type bbox: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param mapview: The app's viewport, given as two coordinate pairs, specified
by two lat/lon pairs -- corners of the bounding box,
respectively. Matches from within the set map view plus an extended area
are ranked highest. Relevant global results are also returned.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:type mapview: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int maxresults: Defines the maximum number of items in the
response structure. If not provided and there are multiple results
the HERE API will return 10 results by default. This will be reset
to one if ``exactly_one`` is True.
:param int pageinformation: A key which identifies the page to be returned
when the response is separated into multiple pages. Only useful when
``maxresults`` is also provided.
:param str language: Affects the language of the response,
must be a RFC 4647 language code, e.g. 'en-US'.
:param str additional_data: A string with key-value pairs as described on
https://developer.here.com/documentation/geocoder/topics/resource-params-additional.html.
These will be added as one query parameter to the URL.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
### Response:
def geocode(
self,
query,
bbox=None,
mapview=None,
exactly_one=True,
maxresults=None,
pageinformation=None,
language=None,
additional_data=False,
timeout=DEFAULT_SENTINEL
):
"""
Return a location point by address.
This implementation supports only a subset of all available parameters.
A list of all parameters of the pure REST API is available here:
https://developer.here.com/documentation/geocoder/topics/resource-geocode.html
:param str query: The address or query you wish to geocode.
For a structured query, provide a dictionary whose keys
are one of: `city`, `county`, `district`, `country`, `state`,
`street`, `housenumber`, or `postalcode`.
:param bbox: A type of spatial filter, limits the search for any other attributes
in the request. Specified by two coordinate (lat/lon)
pairs -- corners of the box. `The bbox search is currently similar
to mapview but it is not extended` (cited from the REST API docs).
Relevant global results are also returned.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:type bbox: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param mapview: The app's viewport, given as two coordinate pairs, specified
by two lat/lon pairs -- corners of the bounding box,
respectively. Matches from within the set map view plus an extended area
are ranked highest. Relevant global results are also returned.
Example: ``[Point(22, 180), Point(-22, -180)]``.
:type mapview: list or tuple of 2 items of :class:`geopy.point.Point` or
``(latitude, longitude)`` or ``"%(latitude)s, %(longitude)s"``.
:param bool exactly_one: Return one result or a list of results, if
available.
:param int maxresults: Defines the maximum number of items in the
response structure. If not provided and there are multiple results
the HERE API will return 10 results by default. This will be reset
to one if ``exactly_one`` is True.
:param int pageinformation: A key which identifies the page to be returned
when the response is separated into multiple pages. Only useful when
``maxresults`` is also provided.
:param str language: Affects the language of the response,
must be a RFC 4647 language code, e.g. 'en-US'.
:param str additional_data: A string with key-value pairs as described on
https://developer.here.com/documentation/geocoder/topics/resource-params-additional.html.
These will be added as one query parameter to the URL.
:param int timeout: Time, in seconds, to wait for the geocoding service
to respond before raising a :class:`geopy.exc.GeocoderTimedOut`
exception. Set this only if you wish to override, on this call
only, the value set during the geocoder's initialization.
:rtype: ``None``, :class:`geopy.location.Location` or a list of them, if
``exactly_one=False``.
"""
if isinstance(query, dict):
params = {
key: val
for key, val
in query.items()
if key in self.structured_query_params
}
params['app_id'] = self.app_id
params['app_code'] = self.app_code
else:
params = {
'searchtext': self.format_string % query,
'app_id': self.app_id,
'app_code': self.app_code
}
if bbox:
params['bbox'] = self._format_bounding_box(
bbox, "%(lat2)s,%(lon1)s;%(lat1)s,%(lon2)s")
if mapview:
params['mapview'] = self._format_bounding_box(
mapview, "%(lat2)s,%(lon1)s;%(lat1)s,%(lon2)s")
if pageinformation:
params['pageinformation'] = pageinformation
if maxresults:
params['maxresults'] = maxresults
if exactly_one:
params['maxresults'] = 1
if language:
params['language'] = language
if additional_data:
params['additionaldata'] = additional_data
url = "?".join((self.api, urlencode(params)))
logger.debug("%s.geocode: %s", self.__class__.__name__, url)
return self._parse_json(
self._call_geocoder(url, timeout=timeout),
exactly_one
) |
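
A rough sketch of how the structured-query branch assembles request parameters; the credential values and the endpoint are placeholders, and no HTTP request is made:

from urllib.parse import urlencode

structured_query_params = {'city', 'county', 'district', 'country', 'state',
                           'street', 'housenumber', 'postalcode'}

query = {'city': 'Berlin', 'street': 'Invalidenstr', 'housenumber': '116',
         'note': 'dropped, not a structured field'}

params = {key: val for key, val in query.items() if key in structured_query_params}
params['app_id'] = '<app-id>'
params['app_code'] = '<app-code>'
print('<geocoder-endpoint>?' + urlencode(params))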
def _fromJSON(cls, jsonobject):
"""Generates a new instance of :class:`maspy.core.MzmlScan` from a
decoded JSON object (as generated by
:func:`maspy.core.MzmlScan._reprJSON()`).
:param jsonobject: decoded JSON object
:returns: a new instance of :class:`MzmlScan`
"""
scanWindowList = _mzmlListAttribToTuple(jsonobject[0])
params = [tuple(param) for param in jsonobject[1]]
return cls(scanWindowList, params) | Generates a new instance of :class:`maspy.core.MzmlScan` from a
decoded JSON object (as generated by
:func:`maspy.core.MzmlScan._reprJSON()`).
:param jsonobject: decoded JSON object
:returns: a new instance of :class:`MzmlScan` | Below is the the instruction that describes the task:
### Input:
Generates a new instance of :class:`maspy.core.MzmlScan` from a
decoded JSON object (as generated by
:func:`maspy.core.MzmlScan._reprJSON()`).
:param jsonobject: decoded JSON object
:returns: a new instance of :class:`MzmlScan`
### Response:
def _fromJSON(cls, jsonobject):
"""Generates a new instance of :class:`maspy.core.MzmlScan` from a
decoded JSON object (as generated by
:func:`maspy.core.MzmlScan._reprJSON()`).
:param jsonobject: decoded JSON object
:returns: a new instance of :class:`MzmlScan`
"""
scanWindowList = _mzmlListAttribToTuple(jsonobject[0])
params = [tuple(param) for param in jsonobject[1]]
return cls(scanWindowList, params) |
def clip_to_seconds(m: Union[int, pd.Series]) -> Union[int, pd.Series]:
"""Clips UTC datetime in nanoseconds to seconds."""
return m // pd.Timedelta(1, unit='s').value | Clips UTC datetime in nanoseconds to seconds. | Below is the the instruction that describes the task:
### Input:
Clips UTC datetime in nanoseconds to seconds.
### Response:
def clip_to_seconds(m: Union[int, pd.Series]) -> Union[int, pd.Series]:
"""Clips UTC datetime in nanoseconds to seconds."""
return m // pd.Timedelta(1, unit='s').value |
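
Worked example: `pd.Timedelta(1, unit='s').value` is 1000000000, so the floor division simply drops the sub-second part of a nanosecond timestamp:

import pandas as pd

ns = pd.Timestamp('2021-01-01 00:00:01.750', tz='UTC').value  # ns since epoch
print(pd.Timedelta(1, unit='s').value)        # 1000000000
print(ns // pd.Timedelta(1, unit='s').value)  # 1609459201 (whole seconds)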
def context(self, size, placeholder=None, scope=None):
"""Returns this word in context, {size} words to the left, the current word, and {size} words to the right"""
return self.leftcontext(size, placeholder,scope) + [self] + self.rightcontext(size, placeholder,scope) | Returns this word in context, {size} words to the left, the current word, and {size} words to the right | Below is the the instruction that describes the task:
### Input:
Returns this word in context, {size} words to the left, the current word, and {size} words to the right
### Response:
def context(self, size, placeholder=None, scope=None):
"""Returns this word in context, {size} words to the left, the current word, and {size} words to the right"""
return self.leftcontext(size, placeholder,scope) + [self] + self.rightcontext(size, placeholder,scope) |
def _render_image(self, spec, container_args, alt_text=None):
""" Render an image specification into an <img> tag """
try:
path, image_args, title = image.parse_image_spec(spec)
except Exception as err: # pylint: disable=broad-except
logger.exception("Got error on spec %s: %s", spec, err)
return ('<span class="error">Couldn\'t parse image spec: ' +
'<code>{}</code> {}</span>'.format(flask.escape(spec),
flask.escape(str(err))))
composite_args = {**container_args, **image_args}
try:
img = image.get_image(path, self._search_path)
except Exception as err: # pylint: disable=broad-except
logger.exception("Got error on image %s: %s", path, err)
return ('<span class="error">Error loading image {}: {}</span>'.format(
flask.escape(spec), flask.escape(str(err))))
return img.get_img_tag(title, alt_text, **composite_args) | Render an image specification into an <img> tag | Below is the the instruction that describes the task:
### Input:
Render an image specification into an <img> tag
### Response:
def _render_image(self, spec, container_args, alt_text=None):
""" Render an image specification into an <img> tag """
try:
path, image_args, title = image.parse_image_spec(spec)
except Exception as err: # pylint: disable=broad-except
logger.exception("Got error on spec %s: %s", spec, err)
return ('<span class="error">Couldn\'t parse image spec: ' +
'<code>{}</code> {}</span>'.format(flask.escape(spec),
flask.escape(str(err))))
composite_args = {**container_args, **image_args}
try:
img = image.get_image(path, self._search_path)
except Exception as err: # pylint: disable=broad-except
logger.exception("Got error on image %s: %s", path, err)
return ('<span class="error">Error loading image {}: {}</span>'.format(
flask.escape(spec), flask.escape(str(err))))
return img.get_img_tag(title, alt_text, **composite_args) |
def _guess_concat(data):
"""
Guess concat function from given data
"""
return {
type(u''): u''.join,
type(b''): concat_bytes,
}.get(type(data), list) | Guess concat function from given data | Below is the the instruction that describes the task:
### Input:
Guess concat function from given data
### Response:
def _guess_concat(data):
"""
Guess concat function from given data
"""
return {
type(u''): u''.join,
type(b''): concat_bytes,
}.get(type(data), list) |
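
A standalone sketch of the type dispatch above; `concat_bytes` here is only a stand-in for the library's byte-joining helper:

def concat_bytes(parts):
    return b''.join(parts)

def guess_concat(data):
    return {
        type(u''): u''.join,
        type(b''): concat_bytes,
    }.get(type(data), list)

print(guess_concat('seed')(['a', 'b']))      # 'ab'
print(guess_concat(b'seed')([b'a', b'b']))   # b'ab'
print(guess_concat(42)(iter([1, 2, 3])))     # [1, 2, 3]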
def logs(self):
"""Find the log directory and return all the logs sorted."""
if not self.parent.loaded: self.parent.load()
logs = self.parent.p.logs_dir.flat_directories
logs.sort(key=lambda x: x.mod_time)
return logs | Find the log directory and return all the logs sorted. | Below is the the instruction that describes the task:
### Input:
Find the log directory and return all the logs sorted.
### Response:
def logs(self):
"""Find the log directory and return all the logs sorted."""
if not self.parent.loaded: self.parent.load()
logs = self.parent.p.logs_dir.flat_directories
logs.sort(key=lambda x: x.mod_time)
return logs |
def get_grade_systems(self):
"""Gets the grade system list resulting from the search.
return: (osid.grading.GradeSystemList) - the grade system list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.GradeSystemList(self._results, runtime=self._runtime) | Gets the grade system list resulting from the search.
return: (osid.grading.GradeSystemList) - the grade system list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.* | Below is the the instruction that describes the task:
### Input:
Gets the grade system list resulting from the search.
return: (osid.grading.GradeSystemList) - the grade system list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.*
### Response:
def get_grade_systems(self):
"""Gets the grade system list resulting from the search.
return: (osid.grading.GradeSystemList) - the grade system list
raise: IllegalState - list already retrieved
*compliance: mandatory -- This method must be implemented.*
"""
if self.retrieved:
raise errors.IllegalState('List has already been retrieved.')
self.retrieved = True
return objects.GradeSystemList(self._results, runtime=self._runtime) |
def canonical_request(self, method, path, content, timestamp):
"""Return the canonical request string."""
request = collections.OrderedDict([
('Method', method.upper()),
('Hashed Path', path),
('X-Ops-Content-Hash', content),
('X-Ops-Timestamp', timestamp),
('X-Ops-UserId', self.user_id),
])
return '\n'.join(['%s:%s' % (key, value)
for key, value in request.items()]) | Return the canonical request string. | Below is the the instruction that describes the task:
### Input:
Return the canonical request string.
### Response:
def canonical_request(self, method, path, content, timestamp):
"""Return the canonical request string."""
request = collections.OrderedDict([
('Method', method.upper()),
('Hashed Path', path),
('X-Ops-Content-Hash', content),
('X-Ops-Timestamp', timestamp),
('X-Ops-UserId', self.user_id),
])
return '\n'.join(['%s:%s' % (key, value)
for key, value in request.items()]) |
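
The same canonical string can be produced outside the class; a sketch with placeholder values where the real caller would pass hashed path and body digests:

import collections

def canonical_request(user_id, method, path_hash, content_hash, timestamp):
    request = collections.OrderedDict([
        ('Method', method.upper()),
        ('Hashed Path', path_hash),
        ('X-Ops-Content-Hash', content_hash),
        ('X-Ops-Timestamp', timestamp),
        ('X-Ops-UserId', user_id),
    ])
    return '\n'.join('%s:%s' % (key, value) for key, value in request.items())

print(canonical_request('admin', 'get', '<path-hash>', '<body-hash>',
                        '2014-01-01T00:00:00Z'))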
def feature_extraction(self, algorithms):
"""Get a list of features.
Every algorithm has to return the features as a list."""
assert type(algorithms) is list
features = []
for algorithm in algorithms:
new_features = algorithm(self)
assert len(new_features) == algorithm.get_dimension(), \
"Expected %i features from algorithm %s, got %i features" % \
(algorithm.get_dimension(), str(algorithm), len(new_features))
features += new_features
return features | Get a list of features.
Every algorithm has to return the features as a list. | Below is the the instruction that describes the task:
### Input:
Get a list of features.
Every algorithm has to return the features as a list.
### Response:
def feature_extraction(self, algorithms):
"""Get a list of features.
Every algorithm has to return the features as a list."""
assert type(algorithms) is list
features = []
for algorithm in algorithms:
new_features = algorithm(self)
assert len(new_features) == algorithm.get_dimension(), \
"Expected %i features from algorithm %s, got %i features" % \
(algorithm.get_dimension(), str(algorithm), len(new_features))
features += new_features
return features |
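
The contract is: each algorithm is callable on the object and declares its output width via `get_dimension()`. A toy sketch with a stand-in recording object (the class names here are invented for illustration):

class BoundingBoxSize:
    """Toy feature algorithm: returns [width, height]."""
    def get_dimension(self):
        return 2
    def __call__(self, rec):
        return [rec['width'], rec['height']]

class StrokeCount:
    """Toy feature algorithm: returns [number of strokes]."""
    def get_dimension(self):
        return 1
    def __call__(self, rec):
        return [len(rec['strokes'])]

recording = {'width': 120, 'height': 80, 'strokes': [[(0, 0), (1, 1)]]}
features = []
for algorithm in [BoundingBoxSize(), StrokeCount()]:
    new_features = algorithm(recording)
    assert len(new_features) == algorithm.get_dimension()
    features += new_features
print(features)  # [120, 80, 1]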
def _exchange_refresh_tokens(self):
'Exchanges a refresh token for an access token'
if self.token_cache is not None and 'refresh' in self.token_cache:
# Attempt to use the refresh token to get a new access token.
refresh_form = {
'grant_type': 'refresh_token',
'refresh_token': self.token_cache['refresh'],
'client_id': self.client_id,
'client_secret': self.client_secret,
}
try:
tokens = self._request_tokens_from_token_endpoint(refresh_form)
tokens['refresh'] = self.token_cache['refresh']
return tokens
except OAuth2Exception:
logging.exception(
'Encountered an exception during refresh token flow.')
return None | Exchanges a refresh token for an access token | Below is the the instruction that describes the task:
### Input:
Exchanges a refresh token for an access token
### Response:
def _exchange_refresh_tokens(self):
'Exchanges a refresh token for an access token'
if self.token_cache is not None and 'refresh' in self.token_cache:
# Attempt to use the refresh token to get a new access token.
refresh_form = {
'grant_type': 'refresh_token',
'refresh_token': self.token_cache['refresh'],
'client_id': self.client_id,
'client_secret': self.client_secret,
}
try:
tokens = self._request_tokens_from_token_endpoint(refresh_form)
tokens['refresh'] = self.token_cache['refresh']
return tokens
except OAuth2Exception:
logging.exception(
'Encountered an exception during refresh token flow.')
return None |
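
The form it posts is the standard OAuth 2.0 refresh_token grant (RFC 6749, section 6); building the body is straightforward, shown here with placeholder credentials:

from urllib.parse import urlencode

refresh_form = {
    'grant_type': 'refresh_token',
    'refresh_token': '<cached-refresh-token>',
    'client_id': '<client-id>',
    'client_secret': '<client-secret>',
}
# This body would be POSTed to the provider's token endpoint.
print(urlencode(refresh_form))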
def _path2uri(self, dirpath):
''' Convert directory path to uri '''
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, '.') | Convert directory path to uri | Below is the the instruction that describes the task:
### Input:
Convert directory path to uri
### Response:
def _path2uri(self, dirpath):
''' Convert directory path to uri '''
relpath = dirpath.replace(self.root_path, self.package_name)
if relpath.startswith(os.path.sep):
relpath = relpath[1:]
return relpath.replace(os.path.sep, '.') |
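
Worked example of the path-to-dotted-name conversion, with POSIX separators and a hypothetical package layout:

def path2uri(root_path, package_name, dirpath, sep='/'):
    relpath = dirpath.replace(root_path, package_name)
    if relpath.startswith(sep):
        relpath = relpath[1:]
    return relpath.replace(sep, '.')

print(path2uri('/src/mypkg', 'mypkg', '/src/mypkg/algorithms/statistics'))
# mypkg.algorithms.statistics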
def check_collections_are_supported(saved_model_handler, supported):
"""Checks that SavedModelHandler only uses supported collections."""
for meta_graph in saved_model_handler.meta_graphs:
used_collection_keys = set(meta_graph.collection_def.keys())
unsupported = used_collection_keys - supported
if unsupported:
raise ValueError("Unsupported collections in graph: %s\n"
"Use hub.create_module_spec(..., drop_collections=[...])"
" as appropriate." % list(unsupported)) | Checks that SavedModelHandler only uses supported collections. | Below is the the instruction that describes the task:
### Input:
Checks that SavedModelHandler only uses supported collections.
### Response:
def check_collections_are_supported(saved_model_handler, supported):
"""Checks that SavedModelHandler only uses supported collections."""
for meta_graph in saved_model_handler.meta_graphs:
used_collection_keys = set(meta_graph.collection_def.keys())
unsupported = used_collection_keys - supported
if unsupported:
raise ValueError("Unsupported collections in graph: %s\n"
"Use hub.create_module_spec(..., drop_collections=[...])"
" as appropriate." % list(unsupported)) |
def symbols(self, *args, **kwargs):
"""Lookup multuple Equities as a list.
Parameters
----------
*args : iterable[str]
The ticker symbols to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equities : list[Equity]
The equities that held the given ticker symbols on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when one of the symbols was not held on the current
lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
"""
return [self.symbol(identifier, **kwargs) for identifier in args] | Lookup multiple Equities as a list.
Parameters
----------
*args : iterable[str]
The ticker symbols to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equities : list[Equity]
The equities that held the given ticker symbols on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when one of the symbols was not held on the current
lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date` | Below is the the instruction that describes the task:
### Input:
Lookup multiple Equities as a list.
Parameters
----------
*args : iterable[str]
The ticker symbols to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equities : list[Equity]
The equities that held the given ticker symbols on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when one of the symbols was not held on the current
lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
### Response:
def symbols(self, *args, **kwargs):
"""Lookup multuple Equities as a list.
Parameters
----------
*args : iterable[str]
The ticker symbols to lookup.
country_code : str or None, optional
A country to limit symbol searches to.
Returns
-------
equities : list[Equity]
The equities that held the given ticker symbols on the current
symbol lookup date.
Raises
------
SymbolNotFound
Raised when one of the symbols was not held on the current
lookup date.
See Also
--------
:func:`zipline.api.set_symbol_lookup_date`
"""
return [self.symbol(identifier, **kwargs) for identifier in args] |
def closed_paths(entities, vertices):
"""
Paths are lists of entity indices.
We first generate vertex paths using graph cycle algorithms,
and then convert them to entity paths.
This will also change the ordering of entity.points in place
so a path may be traversed without having to reverse the entity.
Parameters
-------------
entities : (n,) entity objects
Entity objects
vertices : (m, dimension) float
Vertex points in space
Returns
-------------
entity_paths : sequence of (n,) int
Ordered traversals of entities
"""
# get a networkx graph of entities
graph, closed = vertex_graph(entities)
# add entities that are closed as single- entity paths
entity_paths = np.reshape(closed, (-1, 1)).tolist()
# look for cycles in the graph, or closed loops
vertex_paths = np.array(nx.cycles.cycle_basis(graph))
# loop through every vertex cycle
for vertex_path in vertex_paths:
# a path has no length if it has fewer than 2 vertices
if len(vertex_path) < 2:
continue
# convert vertex indices to entity indices
entity_paths.append(
vertex_to_entity_path(vertex_path,
graph,
entities,
vertices))
entity_paths = np.array(entity_paths)
return entity_paths | Paths are lists of entity indices.
We first generate vertex paths using graph cycle algorithms,
and then convert them to entity paths.
This will also change the ordering of entity.points in place
so a path may be traversed without having to reverse the entity.
Parameters
-------------
entities : (n,) entity objects
Entity objects
vertices : (m, dimension) float
Vertex points in space
Returns
-------------
entity_paths : sequence of (n,) int
Ordered traversals of entities | Below is the the instruction that describes the task:
### Input:
Paths are lists of entity indices.
We first generate vertex paths using graph cycle algorithms,
and then convert them to entity paths.
This will also change the ordering of entity.points in place
so a path may be traversed without having to reverse the entity.
Parameters
-------------
entities : (n,) entity objects
Entity objects
vertices : (m, dimension) float
Vertex points in space
Returns
-------------
entity_paths : sequence of (n,) int
Ordered traversals of entities
### Response:
def closed_paths(entities, vertices):
"""
Paths are lists of entity indices.
We first generate vertex paths using graph cycle algorithms,
and then convert them to entity paths.
This will also change the ordering of entity.points in place
so a path may be traversed without having to reverse the entity.
Parameters
-------------
entities : (n,) entity objects
Entity objects
vertices : (m, dimension) float
Vertex points in space
Returns
-------------
entity_paths : sequence of (n,) int
Ordered traversals of entities
"""
# get a networkx graph of entities
graph, closed = vertex_graph(entities)
# add entities that are closed as single- entity paths
entity_paths = np.reshape(closed, (-1, 1)).tolist()
# look for cycles in the graph, or closed loops
vertex_paths = np.array(nx.cycles.cycle_basis(graph))
# loop through every vertex cycle
for vertex_path in vertex_paths:
# a path has no length if it has fewer than 2 vertices
if len(vertex_path) < 2:
continue
# convert vertex indices to entity indices
entity_paths.append(
vertex_to_entity_path(vertex_path,
graph,
entities,
vertices))
entity_paths = np.array(entity_paths)
return entity_paths |
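
The cycle detection rests on `networkx.cycle_basis`; a tiny example of what it returns for a square with one dangling edge (the exact vertex order may differ between networkx versions):

import networkx as nx

# Square 0-1-2-3 plus a dangling edge 3-4; only the square forms a closed loop.
G = nx.Graph([(0, 1), (1, 2), (2, 3), (3, 0), (3, 4)])
print(nx.cycle_basis(G))  # e.g. [[1, 2, 3, 0]]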
def format_packet(command):
"""Format packet to be sent."""
frame_header = b"\xaa"
verify = b"\x0b"
send_delim = b"\xbb"
return frame_header + command.ljust(17, b"\x00") + verify + send_delim | Format packet to be sent. | Below is the the instruction that describes the task:
### Input:
Format packet to be sent.
### Response:
def format_packet(command):
"""Format packet to be sent."""
frame_header = b"\xaa"
verify = b"\x0b"
send_delim = b"\xbb"
return frame_header + command.ljust(17, b"\x00") + verify + send_delim |
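
A quick check of the packet layout: 1 header byte, the command padded to 17 bytes with NULs, 1 verify byte and 1 trailing delimiter, 20 bytes in total:

def format_packet(command):
    frame_header = b"\xaa"
    verify = b"\x0b"
    send_delim = b"\xbb"
    return frame_header + command.ljust(17, b"\x00") + verify + send_delim

packet = format_packet(b"\x01\x02\x03")
print(len(packet))   # 20
print(packet.hex())  # aa010203...0bbb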
def xmoe2_v1_l4k_local_only():
"""With sequence length 4096."""
hparams = xmoe2_v1_l4k()
hparams.decoder_layers = [
"local_att" if l == "att" else l for l in hparams.decoder_layers]
return hparams | With sequence length 4096. | Below is the the instruction that describes the task:
### Input:
With sequence length 4096.
### Response:
def xmoe2_v1_l4k_local_only():
"""With sequence length 4096."""
hparams = xmoe2_v1_l4k()
hparams.decoder_layers = [
"local_att" if l == "att" else l for l in hparams.decoder_layers]
return hparams |
def hmget(self, hashkey, keys, *args):
"""Emulate hmget."""
redis_hash = self._get_hash(hashkey, 'HMGET')
attributes = self._list_or_args(keys, args)
return [redis_hash.get(self._encode(attribute)) for attribute in attributes] | Emulate hmget. | Below is the the instruction that describes the task:
### Input:
Emulate hmget.
### Response:
def hmget(self, hashkey, keys, *args):
"""Emulate hmget."""
redis_hash = self._get_hash(hashkey, 'HMGET')
attributes = self._list_or_args(keys, args)
return [redis_hash.get(self._encode(attribute)) for attribute in attributes] |
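
Stripped of the internal helpers (`_get_hash`, `_encode`, `_list_or_args`), the emulation is just key lookups against a dict of encoded fields; a standalone sketch:

stored_hash = {b'name': b'alice', b'age': b'30'}

def hmget(redis_hash, attributes):
    # Missing fields come back as None, matching real HMGET semantics.
    return [redis_hash.get(attr.encode()) for attr in attributes]

print(hmget(stored_hash, ['name', 'missing', 'age']))  # [b'alice', None, b'30']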
def start(self, skip_choose=False, fixed_workspace_dir=None):
"""
Start the application.
Looks for workspace_location persistent string. If it doesn't find it, uses a default
workspace location.
Then checks to see if that workspace exists. If not and if skip_choose has not been
set to True, asks the user for a workspace location. User may choose new folder or
existing location. This works by putting up the dialog which will either call start
again or exit.
Creates workspace in location if it doesn't exist.
Migrates database to latest version.
Creates document model, resources path, etc.
"""
logging.getLogger("migration").setLevel(logging.INFO)
if fixed_workspace_dir:
workspace_dir = fixed_workspace_dir
else:
documents_dir = self.ui.get_document_location()
workspace_dir = os.path.join(documents_dir, "Nion Swift Libraries")
workspace_dir = self.ui.get_persistent_string("workspace_location", workspace_dir)
welcome_message_enabled = fixed_workspace_dir is None
profile, is_created = Profile.create_profile(pathlib.Path(workspace_dir), welcome_message_enabled, skip_choose)
if not profile:
self.choose_library()
return True
self.workspace_dir = workspace_dir
DocumentModel.DocumentModel.computation_min_period = 0.1
document_model = DocumentModel.DocumentModel(profile=profile)
document_model.create_default_data_groups()
document_model.start_dispatcher()
# parse the hardware aliases file
alias_path = os.path.join(self.workspace_dir, "aliases.ini")
HardwareSource.parse_hardware_aliases_config_file(alias_path)
# create the document controller
document_controller = self.create_document_controller(document_model, "library")
if self.__resources_path is not None:
document_model.create_sample_images(self.__resources_path)
workspace_history = self.ui.get_persistent_object("workspace_history", list())
if workspace_dir in workspace_history:
workspace_history.remove(workspace_dir)
workspace_history.insert(0, workspace_dir)
self.ui.set_persistent_object("workspace_history", workspace_history)
self.ui.set_persistent_string("workspace_location", workspace_dir)
if welcome_message_enabled:
logging.info("Welcome to Nion Swift.")
if is_created and len(document_model.display_items) > 0:
document_controller.selected_display_panel.set_display_panel_display_item(document_model.display_items[0])
document_controller.selected_display_panel.perform_action("set_fill_mode")
return True | Start the application.
Looks for workspace_location persistent string. If it doesn't find it, uses a default
workspace location.
Then checks to see if that workspace exists. If not and if skip_choose has not been
set to True, asks the user for a workspace location. User may choose new folder or
existing location. This works by putting up the dialog which will either call start
again or exit.
Creates workspace in location if it doesn't exist.
Migrates database to latest version.
Creates document model, resources path, etc. | Below is the instruction that describes the task:
### Input:
Start the application.
Looks for workspace_location persistent string. If it doesn't find it, uses a default
workspace location.
Then checks to see if that workspace exists. If not and if skip_choose has not been
set to True, asks the user for a workspace location. User may choose new folder or
existing location. This works by putting up the dialog which will either call start
again or exit.
Creates workspace in location if it doesn't exist.
Migrates database to latest version.
Creates document model, resources path, etc.
### Response:
def start(self, skip_choose=False, fixed_workspace_dir=None):
"""
Start the application.
Looks for workspace_location persistent string. If it doesn't find it, uses a default
workspace location.
Then checks to see if that workspace exists. If not and if skip_choose has not been
set to True, asks the user for a workspace location. User may choose new folder or
existing location. This works by putting up the dialog which will either call start
again or exit.
Creates workspace in location if it doesn't exist.
Migrates database to latest version.
Creates document model, resources path, etc.
"""
logging.getLogger("migration").setLevel(logging.INFO)
if fixed_workspace_dir:
workspace_dir = fixed_workspace_dir
else:
documents_dir = self.ui.get_document_location()
workspace_dir = os.path.join(documents_dir, "Nion Swift Libraries")
workspace_dir = self.ui.get_persistent_string("workspace_location", workspace_dir)
welcome_message_enabled = fixed_workspace_dir is None
profile, is_created = Profile.create_profile(pathlib.Path(workspace_dir), welcome_message_enabled, skip_choose)
if not profile:
self.choose_library()
return True
self.workspace_dir = workspace_dir
DocumentModel.DocumentModel.computation_min_period = 0.1
document_model = DocumentModel.DocumentModel(profile=profile)
document_model.create_default_data_groups()
document_model.start_dispatcher()
# parse the hardware aliases file
alias_path = os.path.join(self.workspace_dir, "aliases.ini")
HardwareSource.parse_hardware_aliases_config_file(alias_path)
# create the document controller
document_controller = self.create_document_controller(document_model, "library")
if self.__resources_path is not None:
document_model.create_sample_images(self.__resources_path)
workspace_history = self.ui.get_persistent_object("workspace_history", list())
if workspace_dir in workspace_history:
workspace_history.remove(workspace_dir)
workspace_history.insert(0, workspace_dir)
self.ui.set_persistent_object("workspace_history", workspace_history)
self.ui.set_persistent_string("workspace_location", workspace_dir)
if welcome_message_enabled:
logging.info("Welcome to Nion Swift.")
if is_created and len(document_model.display_items) > 0:
document_controller.selected_display_panel.set_display_panel_display_item(document_model.display_items[0])
document_controller.selected_display_panel.perform_action("set_fill_mode")
return True |
def eval_objfn(self):
"""Compute components of objective function as well as total
contribution to objective function.
"""
if self.opt['fEvalX']:
rnn = np.sum(self.ss)
else:
rnn = sp.norm_nuclear(self.obfn_fvar())
rl1 = np.sum(np.abs(self.obfn_gvar()))
cns = np.linalg.norm(self.X + self.Y - self.S)
obj = rnn + self.lmbda*rl1
return (obj, rnn, rl1, cns) | Compute components of objective function as well as total
contribution to objective function. | Below is the instruction that describes the task:
### Input:
Compute components of objective function as well as total
contribution to objective function.
### Response:
def eval_objfn(self):
"""Compute components of objective function as well as total
contribution to objective function.
"""
if self.opt['fEvalX']:
rnn = np.sum(self.ss)
else:
rnn = sp.norm_nuclear(self.obfn_fvar())
rl1 = np.sum(np.abs(self.obfn_gvar()))
cns = np.linalg.norm(self.X + self.Y - self.S)
obj = rnn + self.lmbda*rl1
return (obj, rnn, rl1, cns) |
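The three terms look like a robust-PCA-style objective (nuclear norm of X plus lmbda times the l1 norm of Y plus the residual of X + Y = S); a minimal NumPy sketch with random data and an assumed lmbda of 0.1, independent of the solver class above:
import numpy as np
S = np.random.randn(20, 20)
X, Y = S.copy(), np.zeros_like(S)          # a trivially feasible split of S
rnn = np.linalg.norm(X, ord='nuc')         # nuclear-norm term
rl1 = np.abs(Y).sum()                      # l1 term
cns = np.linalg.norm(X + Y - S)            # constraint residual, 0.0 for this split
obj = rnn + 0.1 * rl1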
def _query_for_individual_audio(self, run, tag, sample, index):
"""Builds a URL for accessing the specified audio.
This should be kept in sync with _serve_audio_metadata. Note that the URL is
*not* guaranteed to always return the same audio, since audio may be
unloaded from the reservoir as new audio entries come in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the audio entry. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th sampled audio
in the given run with the given tag.
"""
query_string = urllib.parse.urlencode({
'run': run,
'tag': tag,
'sample': sample,
'index': index,
})
return query_string | Builds a URL for accessing the specified audio.
This should be kept in sync with _serve_audio_metadata. Note that the URL is
*not* guaranteed to always return the same audio, since audio may be
unloaded from the reservoir as new audio entries come in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the audio entry. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th sampled audio
in the given run with the given tag. | Below is the instruction that describes the task:
### Input:
Builds a URL for accessing the specified audio.
This should be kept in sync with _serve_audio_metadata. Note that the URL is
*not* guaranteed to always return the same audio, since audio may be
unloaded from the reservoir as new audio entries come in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the audio entry. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th sampled audio
in the given run with the given tag.
### Response:
def _query_for_individual_audio(self, run, tag, sample, index):
"""Builds a URL for accessing the specified audio.
This should be kept in sync with _serve_audio_metadata. Note that the URL is
*not* guaranteed to always return the same audio, since audio may be
unloaded from the reservoir as new audio entries come in.
Args:
run: The name of the run.
tag: The tag.
index: The index of the audio entry. Negative values are OK.
Returns:
A string representation of a URL that will load the index-th sampled audio
in the given run with the given tag.
"""
query_string = urllib.parse.urlencode({
'run': run,
'tag': tag,
'sample': sample,
'index': index,
})
return query_string |
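The query string itself is plain urllib encoding; a standalone illustration with hypothetical run/tag values:
from urllib.parse import urlencode
qs = urlencode({'run': 'run_1', 'tag': 'audio/waveform', 'sample': 0, 'index': 3})
# e.g. 'run=run_1&tag=audio%2Fwaveform&sample=0&index=3'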
def parse_metric_family(self, response, scraper_config):
"""
Parse the MetricFamily from a valid requests.Response object to provide a MetricFamily object (see [0])
The text format uses iter_lines() generator.
:param response: requests.Response
:return: core.Metric
"""
input_gen = response.iter_lines(chunk_size=self.REQUESTS_CHUNK_SIZE, decode_unicode=True)
if scraper_config['_text_filter_blacklist']:
input_gen = self._text_filter_input(input_gen, scraper_config)
for metric in text_fd_to_metric_families(input_gen):
metric.type = scraper_config['type_overrides'].get(metric.name, metric.type)
if metric.type not in self.METRIC_TYPES:
continue
metric.name = self._remove_metric_prefix(metric.name, scraper_config)
yield metric | Parse the MetricFamily from a valid requests.Response object to provide a MetricFamily object (see [0])
The text format uses iter_lines() generator.
:param response: requests.Response
:return: core.Metric | Below is the instruction that describes the task:
### Input:
Parse the MetricFamily from a valid requests.Response object to provide a MetricFamily object (see [0])
The text format uses iter_lines() generator.
:param response: requests.Response
:return: core.Metric
### Response:
def parse_metric_family(self, response, scraper_config):
"""
Parse the MetricFamily from a valid requests.Response object to provide a MetricFamily object (see [0])
The text format uses iter_lines() generator.
:param response: requests.Response
:return: core.Metric
"""
input_gen = response.iter_lines(chunk_size=self.REQUESTS_CHUNK_SIZE, decode_unicode=True)
if scraper_config['_text_filter_blacklist']:
input_gen = self._text_filter_input(input_gen, scraper_config)
for metric in text_fd_to_metric_families(input_gen):
metric.type = scraper_config['type_overrides'].get(metric.name, metric.type)
if metric.type not in self.METRIC_TYPES:
continue
metric.name = self._remove_metric_prefix(metric.name, scraper_config)
yield metric |
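text_fd_to_metric_families is assumed here to be the prometheus_client.parser function (the snippet does not show its import); it accepts any iterable of exposition-format text lines, so a minimal in-memory sketch, without the check-specific filtering and prefix handling, looks like:
from prometheus_client.parser import text_fd_to_metric_families
payload = ["# TYPE process_threads gauge", "process_threads 12"]   # hypothetical exposition text
for family in text_fd_to_metric_families(payload):
    for sample in family.samples:
        print(family.name, family.type, sample)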
def get_extended_summary(self, s, base=None):
"""Get the extended summary from a docstring
This here is the extended summary
Parameters
----------
s: str
The docstring to use
base: str or None
A key under which the summary shall be stored in the :attr:`params`
attribute. If not None, the summary will be stored in
``base + '.summary_ext'``. Otherwise, it will not be stored at
all
Returns
-------
str
The extracted extended summary"""
# Remove the summary and dedent
s = self._remove_summary(s)
ret = ''
if not self._all_sections_patt.match(s):
m = self._extended_summary_patt.match(s)
if m is not None:
ret = m.group().strip()
if base is not None:
self.params[base + '.summary_ext'] = ret
return ret | Get the extended summary from a docstring
This here is the extended summary
Parameters
----------
s: str
The docstring to use
base: str or None
A key under which the summary shall be stored in the :attr:`params`
attribute. If not None, the summary will be stored in
``base + '.summary_ext'``. Otherwise, it will not be stored at
all
Returns
-------
str
The extracted extended summary | Below is the instruction that describes the task:
### Input:
Get the extended summary from a docstring
This here is the extended summary
Parameters
----------
s: str
The docstring to use
base: str or None
A key under which the summary shall be stored in the :attr:`params`
attribute. If not None, the summary will be stored in
``base + '.summary_ext'``. Otherwise, it will not be stored at
all
Returns
-------
str
The extracted extended summary
### Response:
def get_extended_summary(self, s, base=None):
"""Get the extended summary from a docstring
This here is the extended summary
Parameters
----------
s: str
The docstring to use
base: str or None
A key under which the summary shall be stored in the :attr:`params`
attribute. If not None, the summary will be stored in
``base + '.summary_ext'``. Otherwise, it will not be stored at
all
Returns
-------
str
The extracted extended summary"""
# Remove the summary and dedent
s = self._remove_summary(s)
ret = ''
if not self._all_sections_patt.match(s):
m = self._extended_summary_patt.match(s)
if m is not None:
ret = m.group().strip()
if base is not None:
self.params[base + '.summary_ext'] = ret
return ret |
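A much-simplified standalone sketch of the same idea — keep what sits between the one-line summary and the first numpydoc section header — using a toy regex instead of docrep's compiled patterns (the docstring value is made up):
import re
doc = "Short summary.\n\nThis is the extended summary.\n\nParameters\n----------\nx: int"
body = doc.split("\n\n", 1)[1]                               # drop the one-line summary
extended = re.split(r"\n\w+\n-+\n", "\n" + body)[0].strip()  # cut at the first section header
# extended == 'This is the extended summary.'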
def convert_meas(direction, Rec):
"""
converts measurments tables from magic 2 to 3 (direction=magic3)
or from model 3 to 2.5 (direction=magic2) [not available]
"""
if direction == 'magic3':
columns = meas_magic2_2_magic3_map
MeasRec = {}
for key in columns:
if key in list(Rec.keys()):
# transfer info and change column name to data model 3.0
MeasRec[columns[key]] = Rec[key]
return MeasRec
else: # haven't added this way yet
pass | converts measurments tables from magic 2 to 3 (direction=magic3)
or from model 3 to 2.5 (direction=magic2) [not available] | Below is the instruction that describes the task:
### Input:
converts measurments tables from magic 2 to 3 (direction=magic3)
or from model 3 to 2.5 (direction=magic2) [not available]
### Response:
def convert_meas(direction, Rec):
"""
converts measurments tables from magic 2 to 3 (direction=magic3)
or from model 3 to 2.5 (direction=magic2) [not available]
"""
if direction == 'magic3':
columns = meas_magic2_2_magic3_map
MeasRec = {}
for key in columns:
if key in list(Rec.keys()):
# transfer info and change column name to data model 3.0
MeasRec[columns[key]] = Rec[key]
return MeasRec
else: # haven't added this way yet
pass |
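The magic3 branch is a straight column rename; a standalone sketch with a hypothetical two-entry mapping (the real meas_magic2_2_magic3_map is much larger, and the column names below are only illustrative):
meas_magic2_2_magic3_map = {"er_specimen_name": "specimen",
                            "magic_method_codes": "method_codes"}   # hypothetical subset
rec2 = {"er_specimen_name": "sp01", "magic_method_codes": "LT-AF-Z", "unused_col": 1}
rec3 = {new: rec2[old] for old, new in meas_magic2_2_magic3_map.items() if old in rec2}
# rec3 == {'specimen': 'sp01', 'method_codes': 'LT-AF-Z'}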
def log_instantiation(LOGGER, classname, args, forbidden, with_date=False):
'''
Log the instantiation of an object to the given logger.
:LOGGER: A logger to log to. Please see module "logging".
:classname: The name of the class that is being
instantiated.
:args: A dictionary of arguments passed to the instantiation,
which will be logged on debug level.
:forbidden: A list of arguments whose values should not be
logged, e.g. "password".
:with_date: Optional. Boolean. Indicated whether the initiation
date and time should be logged.
'''
# Info:
if with_date:
LOGGER.info('Instantiating '+classname+' at '+datetime.datetime.now().strftime('%Y-%m-%d_%H:%M'))
else:
LOGGER.info('Instantiating '+classname)
# Debug:
for argname in args:
if args[argname] is not None:
if argname in forbidden:
LOGGER.debug('Param '+argname+'*******')
else:
LOGGER.debug('Param '+argname+'='+str(args[argname])) | Log the instantiation of an object to the given logger.
:LOGGER: A logger to log to. Please see module "logging".
:classname: The name of the class that is being
instantiated.
:args: A dictionary of arguments passed to the instantiation,
which will be logged on debug level.
:forbidden: A list of arguments whose values should not be
logged, e.g. "password".
:with_date: Optional. Boolean. Indicated whether the initiation
date and time should be logged. | Below is the instruction that describes the task:
### Input:
Log the instantiation of an object to the given logger.
:LOGGER: A logger to log to. Please see module "logging".
:classname: The name of the class that is being
instantiated.
:args: A dictionary of arguments passed to the instantiation,
which will be logged on debug level.
:forbidden: A list of arguments whose values should not be
logged, e.g. "password".
:with_date: Optional. Boolean. Indicated whether the initiation
date and time should be logged.
### Response:
def log_instantiation(LOGGER, classname, args, forbidden, with_date=False):
'''
Log the instantiation of an object to the given logger.
:LOGGER: A logger to log to. Please see module "logging".
:classname: The name of the class that is being
instantiated.
:args: A dictionary of arguments passed to the instantiation,
which will be logged on debug level.
:forbidden: A list of arguments whose values should not be
logged, e.g. "password".
:with_date: Optional. Boolean. Indicated whether the initiation
date and time should be logged.
'''
# Info:
if with_date:
LOGGER.info('Instantiating '+classname+' at '+datetime.datetime.now().strftime('%Y-%m-%d_%H:%M'))
else:
LOGGER.info('Instantiating '+classname)
# Debug:
for argname in args:
if args[argname] is not None:
if argname in forbidden:
LOGGER.debug('Param '+argname+'*******')
else:
LOGGER.debug('Param '+argname+'='+str(args[argname])) |
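A hypothetical call to log_instantiation as defined above, masking a password argument (logger name, class name and argument values are all made up):
import logging
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger("myclient")
log_instantiation(logger, "MyClient",
                  {"host": "example.org", "password": "s3cret", "timeout": 30},
                  forbidden=["password"], with_date=True)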
def listar_permissao(self, nome_equipamento, nome_interface):
"""List all VLANS having communication permission to trunk from a port in switch.
Run script 'configurador'.
::
The value of 'stdout' key of return dictionary can have a list of numbers or
number intervals of VLAN´s, comma separated. Examples of possible returns of 'stdout' below:
- 100,103,111,...
- 100-110,...
- 100-110,112,115,...
- 100,103,105-111,113,115-118,...
:param nome_equipamento: Equipment name.
:param nome_interface: Interface name.
:return: Following dictionary:
::
{‘sucesso’: {‘codigo’: < codigo >,
‘descricao’: {'stdout':< stdout >, 'stderr':< stderr >}}}
:raise InvalidParameterError: Equipment name and/or interface name is invalid or none.
:raise EquipamentoNaoExisteError: Equipment does not exist.
:raise LigacaoFrontInterfaceNaoExisteError: There is no interface on front link of informed interface.
:raise InterfaceNaoExisteError: Interface does not exist or is not associated to equipment.
:raise LigacaoFrontNaoTerminaSwitchError: Interface does not have switch connected.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
:raise ScriptError: Failed to run the script.
"""
vlan_map = dict()
vlan_map['nome'] = nome_equipamento
vlan_map['nome_interface'] = nome_interface
code, xml = self.submit({'equipamento': vlan_map}, 'PUT', 'vlan/list/')
return self.response(code, xml) | List all VLANS having communication permission to trunk from a port in switch.
Run script 'configurador'.
::
The value of 'stdout' key of return dictionary can have a list of numbers or
number intervals of VLAN´s, comma separated. Examples of possible returns of 'stdout' below:
- 100,103,111,...
- 100-110,...
- 100-110,112,115,...
- 100,103,105-111,113,115-118,...
:param nome_equipamento: Equipment name.
:param nome_interface: Interface name.
:return: Following dictionary:
::
{‘sucesso’: {‘codigo’: < codigo >,
‘descricao’: {'stdout':< stdout >, 'stderr':< stderr >}}}
:raise InvalidParameterError: Equipment name and/or interface name is invalid or none.
:raise EquipamentoNaoExisteError: Equipment does not exist.
:raise LigacaoFrontInterfaceNaoExisteError: There is no interface on front link of informed interface.
:raise InterfaceNaoExisteError: Interface does not exist or is not associated to equipment.
:raise LigacaoFrontNaoTerminaSwitchError: Interface does not have switch connected.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
:raise ScriptError: Failed to run the script. | Below is the instruction that describes the task:
### Input:
List all VLANS having communication permission to trunk from a port in switch.
Run script 'configurador'.
::
The value of 'stdout' key of return dictionary can have a list of numbers or
number intervals of VLAN´s, comma separated. Examples of possible returns of 'stdout' below:
- 100,103,111,...
- 100-110,...
- 100-110,112,115,...
- 100,103,105-111,113,115-118,...
:param nome_equipamento: Equipment name.
:param nome_interface: Interface name.
:return: Following dictionary:
::
{‘sucesso’: {‘codigo’: < codigo >,
‘descricao’: {'stdout':< stdout >, 'stderr':< stderr >}}}
:raise InvalidParameterError: Equipment name and/or interface name is invalid or none.
:raise EquipamentoNaoExisteError: Equipment does not exist.
:raise LigacaoFrontInterfaceNaoExisteError: There is no interface on front link of informed interface.
:raise InterfaceNaoExisteError: Interface does not exist or is not associated to equipment.
:raise LigacaoFrontNaoTerminaSwitchError: Interface does not have switch connected.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
:raise ScriptError: Failed to run the script.
### Response:
def listar_permissao(self, nome_equipamento, nome_interface):
"""List all VLANS having communication permission to trunk from a port in switch.
Run script 'configurador'.
::
The value of 'stdout' key of return dictionary can have a list of numbers or
number intervals of VLAN´s, comma separated. Examples of possible returns of 'stdout' below:
- 100,103,111,...
- 100-110,...
- 100-110,112,115,...
- 100,103,105-111,113,115-118,...
:param nome_equipamento: Equipment name.
:param nome_interface: Interface name.
:return: Following dictionary:
::
{‘sucesso’: {‘codigo’: < codigo >,
‘descricao’: {'stdout':< stdout >, 'stderr':< stderr >}}}
:raise InvalidParameterError: Equipment name and/or interface name is invalid or none.
:raise EquipamentoNaoExisteError: Equipment does not exist.
:raise LigacaoFrontInterfaceNaoExisteError: There is no interface on front link of informed interface.
:raise InterfaceNaoExisteError: Interface does not exist or is not associated to equipment.
:raise LigacaoFrontNaoTerminaSwitchError: Interface does not have switch connected.
:raise DataBaseError: Networkapi failed to access the database.
:raise XMLError: Networkapi failed to generate the XML response.
:raise ScriptError: Failed to run the script.
"""
vlan_map = dict()
vlan_map['nome'] = nome_equipamento
vlan_map['nome_interface'] = nome_interface
code, xml = self.submit({'equipamento': vlan_map}, 'PUT', 'vlan/list/')
return self.response(code, xml) |
def load_ner_model(lang="en", version="2"):
"""Return a named entity extractor parameters for `lang` and of version `version`
Args:
lang (string): language code.
version (string): version of the parameters to be used.
"""
src_dir = "ner{}".format(version)
p = locate_resource(src_dir, lang)
fh = _open(p)
try:
return pickle.load(fh)
except UnicodeDecodeError:
fh.seek(0)
return pickle.load(fh, encoding='latin1') | Return a named entity extractor parameters for `lang` and of version `version`
Args:
lang (string): language code.
version (string): version of the parameters to be used. | Below is the instruction that describes the task:
### Input:
Return a named entity extractor parameters for `lang` and of version `version`
Args:
lang (string): language code.
version (string): version of the parameters to be used.
### Response:
def load_ner_model(lang="en", version="2"):
"""Return a named entity extractor parameters for `lang` and of version `version`
Args:
lang (string): language code.
version (string): version of the parameters to be used.
"""
src_dir = "ner{}".format(version)
p = locate_resource(src_dir, lang)
fh = _open(p)
try:
return pickle.load(fh)
except UnicodeDecodeError:
fh.seek(0)
return pickle.load(fh, encoding='latin1') |
def run_false_positive_experiment_correlation(seed,
num_neurons = 1,
a = 32,
dim = 4000,
num_samples = 20000,
num_dendrites = 500,
dendrite_length = 20,
num_trials = 1000,
nonlinearity = threshold_nonlinearity(10)):
"""
Run an experiment to test the false positive rate based on the correlation
between bits. Correlation is measured as the average pairwise correlation
between bits for each pattern in the data (across all of the data).
To generate the results shown in the false positive vs. correlation figure,
we used the parameter settings:
1. a = 32, dim = 2000
2. a = 32, dim = 4000
3. a = 64, dim = 4000
In each case, we ran approximately 4000 trials in parallel, and then binned
the results based on correlation. The code for binning can be found in
plot_effect_of_correlation.py. Note that your results may not match ours
exactly, as the number of different seeds used depends on how many processes
are created, but the general trend of the results should be very stable due
to the large number of data points.
"""
numpy.random.seed(seed)
possible_cluster_sizes = range(2, 10)
for trial in range(num_trials):
num_cluster_sizes = numpy.random.choice([1, 1, 2] + range(1, 8), 1)
cluster_sizes = numpy.random.choice(possible_cluster_sizes, num_cluster_sizes, replace = False)
num_cells_per_cluster_size = [numpy.random.randint(dim, 3*dim) for i in range(num_cluster_sizes)]
data = generate_correlated_data(dim = dim,
num_active = a,
num_samples = num_samples,
num_cells_per_cluster_size =
num_cells_per_cluster_size,
cluster_sizes = cluster_sizes)
correlation = get_pattern_correlations(data)
closest_correlations = get_biased_correlations(data, threshold = 10)
print "Generated {} samples with total average pattern correlation {}, biased threshold-10 correlation {}, using cluster sizes {} with cells per cluster size of {}".format(num_samples, correlation, closest_correlations, cluster_sizes, num_cells_per_cluster_size)
fps = []
fns = []
errors = []
for i in range((num_samples/2)/num_dendrites):
current_data = data.getSlice(i*(num_dendrites*2), (i+1)*(num_dendrites*2), 0, dim)
neuron = Neuron(size = dendrite_length*num_dendrites, num_dendrites = num_dendrites, dendrite_length = dendrite_length, dim = dim, nonlinearity = nonlinearity)
labels = numpy.asarray([1 for i in range(num_dendrites)] + [-1 for i in range(num_dendrites)])
neuron.HTM_style_initialize_on_data(current_data, labels)
error, fp, fn = get_error(current_data, labels, [neuron])
fps.append(fp)
fns.append(fn)
errors.append(error)
print "Error at r = {} is {}, with {} false positives out of {} samples".format(correlation, numpy.mean(errors), sum(fps), num_samples/2)
with open("correlation_results_a{}_n{}_s{}.txt".format(a, dim, dendrite_length), "a") as f:
f.write(str(correlation) + ", " + str(sum(fps)) + ", " + str(num_samples/2) + "\n") | Run an experiment to test the false positive rate based on the correlation
between bits. Correlation is measured as the average pairwise correlation
between bits for each pattern in the data (across all of the data).
To generate the results shown in the false positive vs. correlation figure,
we used the parameter settings:
1. a = 32, dim = 2000
2. a = 32, dim = 4000
3. a = 64, dim = 4000
In each case, we ran approximately 4000 trials in parallel, and then binned
the results based on correlation. The code for binning can be found in
plot_effect_of_correlation.py. Note that your results may not match ours
exactly, as the number of different seeds used depends on how many processes
are created, but the general trend of the results should be very stable due
to the large number of data points. | Below is the instruction that describes the task:
### Input:
Run an experiment to test the false positive rate based on the correlation
between bits. Correlation is measured as the average pairwise correlation
between bits for each pattern in the data (across all of the data).
To generate the results shown in the false positive vs. correlation figure,
we used the parameter settings:
1. a = 32, dim = 2000
2. a = 32, dim = 4000
3. a = 64, dim = 4000
In each case, we ran approximately 4000 trials in parallel, and then binned
the results based on correlation. The code for binning can be found in
plot_effect_of_correlation.py. Note that your results may not match ours
exactly, as the number of different seeds used depends on how many processes
are created, but the general trend of the results should be very stable due
to the large number of data points.
### Response:
def run_false_positive_experiment_correlation(seed,
num_neurons = 1,
a = 32,
dim = 4000,
num_samples = 20000,
num_dendrites = 500,
dendrite_length = 20,
num_trials = 1000,
nonlinearity = threshold_nonlinearity(10)):
"""
Run an experiment to test the false positive rate based on the correlation
between bits. Correlation is measured as the average pairwise correlation
between bits for each pattern in the data (across all of the data).
To generate the results shown in the false positive vs. correlation figure,
we used the parameter settings:
1. a = 32, dim = 2000
2. a = 32, dim = 4000
3. a = 64, dim = 4000
In each case, we ran approximately 4000 trials in parallel, and then binned
the results based on correlation. The code for binning can be found in
plot_effect_of_correlation.py. Note that your results may not match ours
exactly, as the number of different seeds used depends on how many processes
are created, but the general trend of the results should be very stable due
to the large number of data points.
"""
numpy.random.seed(seed)
possible_cluster_sizes = range(2, 10)
for trial in range(num_trials):
num_cluster_sizes = numpy.random.choice([1, 1, 2] + range(1, 8), 1)
cluster_sizes = numpy.random.choice(possible_cluster_sizes, num_cluster_sizes, replace = False)
num_cells_per_cluster_size = [numpy.random.randint(dim, 3*dim) for i in range(num_cluster_sizes)]
data = generate_correlated_data(dim = dim,
num_active = a,
num_samples = num_samples,
num_cells_per_cluster_size =
num_cells_per_cluster_size,
cluster_sizes = cluster_sizes)
correlation = get_pattern_correlations(data)
closest_correlations = get_biased_correlations(data, threshold = 10)
print "Generated {} samples with total average pattern correlation {}, biased threshold-10 correlation {}, using cluster sizes {} with cells per cluster size of {}".format(num_samples, correlation, closest_correlations, cluster_sizes, num_cells_per_cluster_size)
fps = []
fns = []
errors = []
for i in range((num_samples/2)/num_dendrites):
current_data = data.getSlice(i*(num_dendrites*2), (i+1)*(num_dendrites*2), 0, dim)
neuron = Neuron(size = dendrite_length*num_dendrites, num_dendrites = num_dendrites, dendrite_length = dendrite_length, dim = dim, nonlinearity = nonlinearity)
labels = numpy.asarray([1 for i in range(num_dendrites)] + [-1 for i in range(num_dendrites)])
neuron.HTM_style_initialize_on_data(current_data, labels)
error, fp, fn = get_error(current_data, labels, [neuron])
fps.append(fp)
fns.append(fn)
errors.append(error)
print "Error at r = {} is {}, with {} false positives out of {} samples".format(correlation, numpy.mean(errors), sum(fps), num_samples/2)
with open("correlation_results_a{}_n{}_s{}.txt".format(a, dim, dendrite_length), "a") as f:
f.write(str(correlation) + ", " + str(sum(fps)) + ", " + str(num_samples/2) + "\n") |
def _tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = self._whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.lower:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = self._whitespace_tokenize(' '.join(split_tokens))
return output_tokens | Tokenizes a piece of text. | Below is the instruction that describes the task:
### Input:
Tokenizes a piece of text.
### Response:
def _tokenize(self, text):
"""Tokenizes a piece of text."""
text = self._clean_text(text)
# This was added on November 1st, 2018 for the multilingual and Chinese
# models. This is also applied to the English models now, but it doesn't
# matter since the English models were not trained on any Chinese data
# and generally don't have any Chinese data in them (there are Chinese
# characters in the vocabulary because Wikipedia does have some Chinese
# words in the English Wikipedia.).
text = self._tokenize_chinese_chars(text)
orig_tokens = self._whitespace_tokenize(text)
split_tokens = []
for token in orig_tokens:
if self.lower:
token = token.lower()
token = self._run_strip_accents(token)
split_tokens.extend(self._run_split_on_punc(token))
output_tokens = self._whitespace_tokenize(' '.join(split_tokens))
return output_tokens |
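The accent stripping done by _run_strip_accents is standard Unicode NFD filtering; the helper below is a standalone re-implementation for illustration, not the class method itself:
import unicodedata
def strip_accents(text):
    return "".join(c for c in unicodedata.normalize("NFD", text)
                   if unicodedata.category(c) != "Mn")   # drop combining marks
strip_accents("Héllo, Wörld")   # -> 'Hello, World'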
def getHostDetailsByIndex(self, index, lanInterfaceId=1, timeout=1):
"""Execute GetGenericHostEntry action to get detailed information's of a connected host.
:param index: the index of the host
:param int lanInterfaceId: the id of the LAN interface
:param float timeout: the timeout to wait for the action to be executed
:return: the detailed information's of a connected host.
:rtype: HostDetails
.. seealso:: :meth:`~simpletr64.actions.Lan.getAmountOfHostsConnected`
"""
namespace = Lan.getServiceType("getHostDetailsByIndex") + str(lanInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "GetGenericHostEntry", timeout=timeout, NewIndex=index)
return HostDetails(results) | Execute GetGenericHostEntry action to get detailed information's of a connected host.
:param index: the index of the host
:param int lanInterfaceId: the id of the LAN interface
:param float timeout: the timeout to wait for the action to be executed
:return: the detailed information's of a connected host.
:rtype: HostDetails
.. seealso:: :meth:`~simpletr64.actions.Lan.getAmountOfHostsConnected` | Below is the instruction that describes the task:
### Input:
Execute GetGenericHostEntry action to get detailed information's of a connected host.
:param index: the index of the host
:param int lanInterfaceId: the id of the LAN interface
:param float timeout: the timeout to wait for the action to be executed
:return: the detailed information's of a connected host.
:rtype: HostDetails
.. seealso:: :meth:`~simpletr64.actions.Lan.getAmountOfHostsConnected`
### Response:
def getHostDetailsByIndex(self, index, lanInterfaceId=1, timeout=1):
"""Execute GetGenericHostEntry action to get detailed information's of a connected host.
:param index: the index of the host
:param int lanInterfaceId: the id of the LAN interface
:param float timeout: the timeout to wait for the action to be executed
:return: the detailed information's of a connected host.
:rtype: HostDetails
.. seealso:: :meth:`~simpletr64.actions.Lan.getAmountOfHostsConnected`
"""
namespace = Lan.getServiceType("getHostDetailsByIndex") + str(lanInterfaceId)
uri = self.getControlURL(namespace)
results = self.execute(uri, namespace, "GetGenericHostEntry", timeout=timeout, NewIndex=index)
return HostDetails(results) |
def stationary_distribution(H, pi=None, P=None):
"""Computes the stationary distribution of a random walk on the given
hypergraph using the iterative approach explained in the paper:
Aurelien Ducournau, Alain Bretto, Random walks in directed hypergraphs and
application to semi-supervised image segmentation,
Computer Vision and Image Understanding, Volume 120, March 2014,
Pages 91-102, ISSN 1077-3142, http://dx.doi.org/10.1016/j.cviu.2013.10.012.
(http://www.sciencedirect.com/science/article/pii/S1077314213002038)
:param H: the hypergraph to find the 'Stationary Distribution'
algorithm on.
:param pi: the initial distribution over the nodes. If not provided,
it will be created with a random distribution.
:param P: the transition matrix for the hypergraph. If not provided,
it will be created.
:returns: list -- list of the stationary probabilities for all nodes
in the hypergraph.
:raises: TypeError -- Algorithm only applicable to undirected hypergraphs
:raises: AssertionError -- Each node must have at least 1 outgoing
hyperedge (even if it's only a self-loop).
"""
if not isinstance(H, DirectedHypergraph):
raise TypeError("Algorithm only applicable to undirected hypergraphs")
for node in H.node_iterator():
if len(H.get_forward_star(node)) == 0:
raise AssertionError("Each node must have at least 1 outgoing \
hyperedge (even if it's only a self-loop).")
indices_to_nodes, nodes_to_indices = \
dmat.get_node_mapping(H)
indices_to_hyperedge_ids, hyperedge_ids_to_indices = \
dmat.get_hyperedge_id_mapping(H)
if P is None:
P = _compute_transition_matrix(H,
nodes_to_indices,
hyperedge_ids_to_indices)
node_count = len(H.get_node_set())
if pi is None:
pi = _create_random_starter(node_count)
pi_star = _create_random_starter(node_count)
while not _has_converged(pi_star, pi):
pi = pi_star
pi_star = pi * P
return pi | Computes the stationary distribution of a random walk on the given
hypergraph using the iterative approach explained in the paper:
Aurelien Ducournau, Alain Bretto, Random walks in directed hypergraphs and
application to semi-supervised image segmentation,
Computer Vision and Image Understanding, Volume 120, March 2014,
Pages 91-102, ISSN 1077-3142, http://dx.doi.org/10.1016/j.cviu.2013.10.012.
(http://www.sciencedirect.com/science/article/pii/S1077314213002038)
:param H: the hypergraph to find the 'Stationary Distribution'
algorithm on.
:param pi: the initial distribution over the nodes. If not provided,
it will be created with a random distribution.
:param P: the transition matrix for the hypergraph. If not provided,
it will be created.
:returns: list -- list of the stationary probabilities for all nodes
in the hypergraph.
:raises: TypeError -- Algorithm only applicable to undirected hypergraphs
:raises: AssertionError -- Each node must have at least 1 outgoing
hyperedge (even if it's only a self-loop). | Below is the instruction that describes the task:
### Input:
Computes the stationary distribution of a random walk on the given
hypergraph using the iterative approach explained in the paper:
Aurelien Ducournau, Alain Bretto, Random walks in directed hypergraphs and
application to semi-supervised image segmentation,
Computer Vision and Image Understanding, Volume 120, March 2014,
Pages 91-102, ISSN 1077-3142, http://dx.doi.org/10.1016/j.cviu.2013.10.012.
(http://www.sciencedirect.com/science/article/pii/S1077314213002038)
:param H: the hypergraph to find the 'Stationary Distribution'
algorithm on.
:param pi: the initial distribution over the nodes. If not provided,
it will be created with a random distribution.
:param P: the transition matrix for the hypergraph. If not provided,
it will be created.
:returns: list -- list of the stationary probabilities for all nodes
in the hypergraph.
:raises: TypeError -- Algorithm only applicable to undirected hypergraphs
:raises: AssertionError -- Each node must have at least 1 outgoing
hyperedge (even if it's only a self-loop).
### Response:
def stationary_distribution(H, pi=None, P=None):
"""Computes the stationary distribution of a random walk on the given
hypergraph using the iterative approach explained in the paper:
Aurelien Ducournau, Alain Bretto, Random walks in directed hypergraphs and
application to semi-supervised image segmentation,
Computer Vision and Image Understanding, Volume 120, March 2014,
Pages 91-102, ISSN 1077-3142, http://dx.doi.org/10.1016/j.cviu.2013.10.012.
(http://www.sciencedirect.com/science/article/pii/S1077314213002038)
:param H: the hypergraph to find the 'Stationary Distribution'
algorithm on.
:param pi: the initial distribution over the nodes. If not provided,
it will be created with a random distribution.
:param P: the transition matrix for the hypergraph. If not provided,
it will be created.
:returns: list -- list of the stationary probabilities for all nodes
in the hypergraph.
:raises: TypeError -- Algorithm only applicable to undirected hypergraphs
:raises: AssertionError -- Each node must have at least 1 outgoing
hyperedge (even if it's only a self-loop).
"""
if not isinstance(H, DirectedHypergraph):
raise TypeError("Algorithm only applicable to undirected hypergraphs")
for node in H.node_iterator():
if len(H.get_forward_star(node)) == 0:
raise AssertionError("Each node must have at least 1 outgoing \
hyperedge (even if it's only a self-loop).")
indices_to_nodes, nodes_to_indices = \
dmat.get_node_mapping(H)
indices_to_hyperedge_ids, hyperedge_ids_to_indices = \
dmat.get_hyperedge_id_mapping(H)
if P is None:
P = _compute_transition_matrix(H,
nodes_to_indices,
hyperedge_ids_to_indices)
node_count = len(H.get_node_set())
if pi is None:
pi = _create_random_starter(node_count)
pi_star = _create_random_starter(node_count)
while not _has_converged(pi_star, pi):
pi = pi_star
pi_star = pi * P
return pi |
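The convergence loop is an ordinary power iteration pi <- pi * P; a standalone NumPy sketch on a hand-made 2-state transition matrix (not the hypergraph-derived P used above):
import numpy as np
P = np.array([[0.9, 0.1],
              [0.5, 0.5]])            # rows sum to 1
pi = np.array([0.5, 0.5])
for _ in range(1000):
    new_pi = pi @ P
    if np.allclose(new_pi, pi):
        break
    pi = new_pi
# pi is approximately [0.8333, 0.1667]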
def load(package, prefix, offset=0, limit=1000):
""" Load lines from the log file with pagination support. """
logs = package.all(LogFile, unicode(prefix))
logs = sorted(logs, key=lambda l: l.name, reverse=True)
seen = 0
record = None
tmp = tempfile.NamedTemporaryFile(suffix='.log')
for log in logs:
shutil.copyfileobj(log.fh(), tmp)
tmp.seek(0)
for line in reversed(list(tmp)):
seen += 1
if seen < offset:
continue
if seen > limit:
tmp.close()
return
try:
d, mo, l, m = line.split(' %s ' % SEP, 4)
if record is not None:
yield record
record = {'time': d, 'module': mo, 'level': l, 'message': m}
except ValueError:
if record is not None:
record['message'] += '\n' + line
tmp.seek(0)
tmp.close()
if record is not None:
yield record | Load lines from the log file with pagination support. | Below is the instruction that describes the task:
### Input:
Load lines from the log file with pagination support.
### Response:
def load(package, prefix, offset=0, limit=1000):
""" Load lines from the log file with pagination support. """
logs = package.all(LogFile, unicode(prefix))
logs = sorted(logs, key=lambda l: l.name, reverse=True)
seen = 0
record = None
tmp = tempfile.NamedTemporaryFile(suffix='.log')
for log in logs:
shutil.copyfileobj(log.fh(), tmp)
tmp.seek(0)
for line in reversed(list(tmp)):
seen += 1
if seen < offset:
continue
if seen > limit:
tmp.close()
return
try:
d, mo, l, m = line.split(' %s ' % SEP, 4)
if record is not None:
yield record
record = {'time': d, 'module': mo, 'level': l, 'message': m}
except ValueError:
if record is not None:
record['message'] += '\n' + line
tmp.seek(0)
tmp.close()
if record is not None:
yield record |
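The offset/limit handling walks the concatenated logs newest-first; the core pagination idea in isolation, with hypothetical in-memory lines and without the temp file or record grouping:
lines = ["line %d" % i for i in range(1, 11)]
offset, limit = 2, 5
page = [l for seen, l in enumerate(reversed(lines), start=1) if offset <= seen <= limit]
# page == ['line 9', 'line 8', 'line 7', 'line 6']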
def _authenticate(self):
"""Determine the hosted zone id for the domain."""
try:
hosted_zones = self.r53_client.list_hosted_zones_by_name()[
'HostedZones'
]
hosted_zone = next(
hz for hz in hosted_zones
if self.filter_zone(hz)
)
self.domain_id = hosted_zone['Id']
except StopIteration:
raise Exception('No domain found') | Determine the hosted zone id for the domain. | Below is the the instruction that describes the task:
### Input:
Determine the hosted zone id for the domain.
### Response:
def _authenticate(self):
"""Determine the hosted zone id for the domain."""
try:
hosted_zones = self.r53_client.list_hosted_zones_by_name()[
'HostedZones'
]
hosted_zone = next(
hz for hz in hosted_zones
if self.filter_zone(hz)
)
self.domain_id = hosted_zone['Id']
except StopIteration:
raise Exception('No domain found') |
def rotate_capture_handler_log(self, name):
''' Force a rotation of a handler's log file
Args:
name:
The name of the handler who's log file should be rotated.
'''
for sc_key, sc in self._stream_capturers.iteritems():
for h in sc[0].capture_handlers:
if h['name'] == name:
sc[0]._rotate_log(h) | Force a rotation of a handler's log file
Args:
name:
The name of the handler who's log file should be rotated. | Below is the instruction that describes the task:
### Input:
Force a rotation of a handler's log file
Args:
name:
The name of the handler who's log file should be rotated.
### Response:
def rotate_capture_handler_log(self, name):
''' Force a rotation of a handler's log file
Args:
name:
The name of the handler who's log file should be rotated.
'''
for sc_key, sc in self._stream_capturers.iteritems():
for h in sc[0].capture_handlers:
if h['name'] == name:
sc[0]._rotate_log(h) |