code (string, length 75 to 104k) | docstring (string, length 1 to 46.9k) | text (string, length 164 to 112k)
---|---|---|
def _set_show_mpls_ldp(self, v, load=False):
"""
Setter method for show_mpls_ldp, mapped from YANG variable /brocade_mpls_rpc/show_mpls_ldp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_mpls_ldp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_mpls_ldp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=show_mpls_ldp.show_mpls_ldp, is_leaf=True, yang_name="show-mpls-ldp", rest_name="show-mpls-ldp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMplsSummary'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """show_mpls_ldp must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=show_mpls_ldp.show_mpls_ldp, is_leaf=True, yang_name="show-mpls-ldp", rest_name="show-mpls-ldp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMplsSummary'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
})
self.__show_mpls_ldp = t
if hasattr(self, '_set'):
self._set() | Setter method for show_mpls_ldp, mapped from YANG variable /brocade_mpls_rpc/show_mpls_ldp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_mpls_ldp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_mpls_ldp() directly. | Below is the instruction that describes the task:
### Input:
Setter method for show_mpls_ldp, mapped from YANG variable /brocade_mpls_rpc/show_mpls_ldp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_mpls_ldp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_mpls_ldp() directly.
### Response:
def _set_show_mpls_ldp(self, v, load=False):
"""
Setter method for show_mpls_ldp, mapped from YANG variable /brocade_mpls_rpc/show_mpls_ldp (rpc)
If this variable is read-only (config: false) in the
source YANG file, then _set_show_mpls_ldp is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_show_mpls_ldp() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=show_mpls_ldp.show_mpls_ldp, is_leaf=True, yang_name="show-mpls-ldp", rest_name="show-mpls-ldp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMplsSummary'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """show_mpls_ldp must be of a type compatible with rpc""",
'defined-type': "rpc",
'generated-type': """YANGDynClass(base=show_mpls_ldp.show_mpls_ldp, is_leaf=True, yang_name="show-mpls-ldp", rest_name="show-mpls-ldp", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions={u'tailf-common': {u'hidden': u'full', u'actionpoint': u'showMplsSummary'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='rpc', is_config=True)""",
})
self.__show_mpls_ldp = t
if hasattr(self, '_set'):
self._set() |
def escape_windows_cmd_string(s):
"""Returns a string that is usable by the Windows cmd.exe.
The escaping is based on details here and empirical testing:
http://www.robvanderwoude.com/escapechars.php
"""
for c in '()%!^<>&|"':
s = s.replace(c, '^' + c)
s = s.replace('/?', '/.')
return s | Returns a string that is usable by the Windows cmd.exe.
The escaping is based on details here and empirical testing:
http://www.robvanderwoude.com/escapechars.php | Below is the instruction that describes the task:
### Input:
Returns a string that is usable by the Windows cmd.exe.
The escaping is based on details here and empirical testing:
http://www.robvanderwoude.com/escapechars.php
### Response:
def escape_windows_cmd_string(s):
"""Returns a string that is usable by the Windows cmd.exe.
The escaping is based on details here and empirical testing:
http://www.robvanderwoude.com/escapechars.php
"""
for c in '()%!^<>&|"':
s = s.replace(c, '^' + c)
s = s.replace('/?', '/.')
return s |
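A brief usage sketch for escape_windows_cmd_string above; the escaped output in the comment was traced by hand from the replacement loop and is illustrative only:
# Usage sketch: assumes escape_windows_cmd_string as defined above is in scope.
# Each cmd.exe metacharacter in '()%!^<>&|"' gets a leading caret.
cmd = 'dir "C:\\Program Files" > out.txt & echo done'
print(escape_windows_cmd_string(cmd))
# -> dir ^"C:\Program Files^" ^> out.txt ^& echo done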
def save_var(name, value):
"""
Save a variable to the table specified by _State.vars_table_name. Key is
the name of the variable, and value is the value.
"""
connection = _State.connection()
_State.reflect_metadata()
vars_table = sqlalchemy.Table(
_State.vars_table_name, _State.metadata,
sqlalchemy.Column('name', sqlalchemy.types.Text, primary_key=True),
sqlalchemy.Column('value_blob', sqlalchemy.types.LargeBinary),
sqlalchemy.Column('type', sqlalchemy.types.Text),
keep_existing=True
)
vars_table.create(bind=connection, checkfirst=True)
column_type = get_column_type(value)
if column_type == sqlalchemy.types.LargeBinary:
value_blob = value
else:
value_blob = unicode(value).encode('utf-8')
values = dict(name=name,
value_blob=value_blob,
# value_blob=Blob(value),
type=column_type.__visit_name__.lower())
vars_table.insert(prefixes=['OR REPLACE']).values(**values).execute() | Save a variable to the table specified by _State.vars_table_name. Key is
the name of the variable, and value is the value. | Below is the instruction that describes the task:
### Input:
Save a variable to the table specified by _State.vars_table_name. Key is
the name of the variable, and value is the value.
### Response:
def save_var(name, value):
"""
Save a variable to the table specified by _State.vars_table_name. Key is
the name of the variable, and value is the value.
"""
connection = _State.connection()
_State.reflect_metadata()
vars_table = sqlalchemy.Table(
_State.vars_table_name, _State.metadata,
sqlalchemy.Column('name', sqlalchemy.types.Text, primary_key=True),
sqlalchemy.Column('value_blob', sqlalchemy.types.LargeBinary),
sqlalchemy.Column('type', sqlalchemy.types.Text),
keep_existing=True
)
vars_table.create(bind=connection, checkfirst=True)
column_type = get_column_type(value)
if column_type == sqlalchemy.types.LargeBinary:
value_blob = value
else:
value_blob = unicode(value).encode('utf-8')
values = dict(name=name,
value_blob=value_blob,
# value_blob=Blob(value),
type=column_type.__visit_name__.lower())
vars_table.insert(prefixes=['OR REPLACE']).values(**values).execute() |
def set_pixel(self, x, y, value):
"""Set pixel at position x, y to the given value. X and Y should be values
of 0 to 8. Value should be OFF, GREEN, RED, or YELLOW.
"""
if x < 0 or x > 7 or y < 0 or y > 7:
# Ignore out of bounds pixels.
return
# Set green LED based on 1st bit in value.
self.set_led(y * 16 + x, 1 if value & GREEN > 0 else 0)
# Set red LED based on 2nd bit in value.
self.set_led(y * 16 + x + 8, 1 if value & RED > 0 else 0) | Set pixel at position x, y to the given value. X and Y should be values
of 0 to 8. Value should be OFF, GREEN, RED, or YELLOW. | Below is the instruction that describes the task:
### Input:
Set pixel at position x, y to the given value. X and Y should be values
of 0 to 8. Value should be OFF, GREEN, RED, or YELLOW.
### Response:
def set_pixel(self, x, y, value):
"""Set pixel at position x, y to the given value. X and Y should be values
of 0 to 8. Value should be OFF, GREEN, RED, or YELLOW.
"""
if x < 0 or x > 7 or y < 0 or y > 7:
# Ignore out of bounds pixels.
return
# Set green LED based on 1st bit in value.
self.set_led(y * 16 + x, 1 if value & GREEN > 0 else 0)
# Set red LED based on 2nd bit in value.
self.set_led(y * 16 + x + 8, 1 if value & RED > 0 else 0) |
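A standalone sketch of the LED index math used by set_pixel above. The concrete GREEN/RED/YELLOW values are assumptions (only the bitmask tests are visible in the source); they are chosen so that YELLOW drives both LEDs.
# Hypothetical color constants, consistent with the `value & GREEN` / `value & RED` tests above.
GREEN, RED = 0x01, 0x02
YELLOW = GREEN | RED

def led_indices(x, y):
    # Each row spans 16 LED slots: the green LED sits at x, the red LED at x + 8.
    return y * 16 + x, y * 16 + x + 8

print(led_indices(3, 2))                         # -> (35, 43)
print(bool(YELLOW & GREEN), bool(YELLOW & RED))  # -> True True (yellow lights both LEDs)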
def convert_string_to_number(value):
"""
Convert strings to numbers
"""
if value is None:
return 1
if isinstance(value, int):
return value
if value.isdigit():
return int(value)
num_list = map(lambda s: NUMBERS[s], re.findall(numbers + '+', value.lower()))
return sum(num_list) | Convert strings to numbers | Below is the instruction that describes the task:
### Input:
Convert strings to numbers
### Response:
def convert_string_to_number(value):
"""
Convert strings to numbers
"""
if value is None:
return 1
if isinstance(value, int):
return value
if value.isdigit():
return int(value)
num_list = map(lambda s: NUMBERS[s], re.findall(numbers + '+', value.lower()))
return sum(num_list) |
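A usage sketch covering the simple branches of convert_string_to_number above. The word-counting branch is skipped here because it relies on the module-level NUMBERS table and `numbers` pattern, which are not shown in this snippet.
# These inputs only exercise the None / int / digit-string branches, so the external
# NUMBERS lookup table is never touched.
print(convert_string_to_number(None))  # -> 1
print(convert_string_to_number(7))     # -> 7
print(convert_string_to_number("42"))  # -> 42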
def get_allow_repeat_items_metadata(self):
"""get the metadata for allow repeat items"""
metadata = dict(self._allow_repeat_items_metadata)
metadata.update({'existing_id_values': self.my_osid_object_form._my_map['allowRepeatItems']})
return Metadata(**metadata) | get the metadata for allow repeat items | Below is the instruction that describes the task:
### Input:
get the metadata for allow repeat items
### Response:
def get_allow_repeat_items_metadata(self):
"""get the metadata for allow repeat items"""
metadata = dict(self._allow_repeat_items_metadata)
metadata.update({'existing_id_values': self.my_osid_object_form._my_map['allowRepeatItems']})
return Metadata(**metadata) |
def describe(table_name, region=None, key=None, keyid=None, profile=None):
'''
Describe a DynamoDB table.
CLI example::
salt myminion boto_dynamodb.describe table_name region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
table = Table(table_name, connection=conn)
return table.describe() | Describe a DynamoDB table.
CLI example::
salt myminion boto_dynamodb.describe table_name region=us-east-1 | Below is the instruction that describes the task:
### Input:
Describe a DynamoDB table.
CLI example::
salt myminion boto_dynamodb.describe table_name region=us-east-1
### Response:
def describe(table_name, region=None, key=None, keyid=None, profile=None):
'''
Describe a DynamoDB table.
CLI example::
salt myminion boto_dynamodb.describe table_name region=us-east-1
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
table = Table(table_name, connection=conn)
return table.describe() |
def is_valid_delta_name(file):
"""Return if a file has a valid name
A delta file name can be:
- pre-all.py
- pre-all.sql
- delta_x.x.x_ddmmyyyy.pre.py
- delta_x.x.x_ddmmyyyy.pre.sql
- delta_x.x.x_ddmmyyyy.py
- delta_x.x.x_ddmmyyyy.sql
- delta_x.x.x_ddmmyyyy.post.py
- delta_x.x.x_ddmmyyyy.post.sql
- post-all.py
- post-all.sql
where x.x.x is the version number and _ddmmyyyy is an optional
description, usually representing the date of the delta file
"""
filename = basename(file)
pattern = re.compile(Delta.FILENAME_PATTERN)
if re.match(pattern, filename):
return True
return False | Return if a file has a valid name
A delta file name can be:
- pre-all.py
- pre-all.sql
- delta_x.x.x_ddmmyyyy.pre.py
- delta_x.x.x_ddmmyyyy.pre.sql
- delta_x.x.x_ddmmyyyy.py
- delta_x.x.x_ddmmyyyy.sql
- delta_x.x.x_ddmmyyyy.post.py
- delta_x.x.x_ddmmyyyy.post.sql
- post-all.py
- post-all.sql
where x.x.x is the version number and _ddmmyyyy is an optional
description, usually representing the date of the delta file | Below is the instruction that describes the task:
### Input:
Return if a file has a valid name
A delta file name can be:
- pre-all.py
- pre-all.sql
- delta_x.x.x_ddmmyyyy.pre.py
- delta_x.x.x_ddmmyyyy.pre.sql
- delta_x.x.x_ddmmyyyy.py
- delta_x.x.x_ddmmyyyy.sql
- delta_x.x.x_ddmmyyyy.post.py
- delta_x.x.x_ddmmyyyy.post.sql
- post-all.py
- post-all.sql
where x.x.x is the version number and _ddmmyyyy is an optional
description, usually representing the date of the delta file
### Response:
def is_valid_delta_name(file):
"""Return if a file has a valid name
A delta file name can be:
- pre-all.py
- pre-all.sql
- delta_x.x.x_ddmmyyyy.pre.py
- delta_x.x.x_ddmmyyyy.pre.sql
- delta_x.x.x_ddmmyyyy.py
- delta_x.x.x_ddmmyyyy.sql
- delta_x.x.x_ddmmyyyy.post.py
- delta_x.x.x_ddmmyyyy.post.sql
- post-all.py
- post-all.sql
where x.x.x is the version number and _ddmmyyyy is an optional
description, usually representing the date of the delta file
"""
filename = basename(file)
pattern = re.compile(Delta.FILENAME_PATTERN)
if re.match(pattern, filename):
return True
return False |
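The real Delta.FILENAME_PATTERN is not shown here; the pattern below is a hypothetical, self-contained stand-in that accepts exactly the name shapes listed in the docstring, included only to illustrate the naming scheme.
import re

# Assumed illustration of the naming rules, not the project's actual Delta.FILENAME_PATTERN.
_DELTA_NAME_RE = re.compile(
    r'^(?:'
    r'(?:pre-all|post-all)\.(?:py|sql)'
    r'|'
    r'delta_\d+\.\d+\.\d+(?:_\w+)?(?:\.(?:pre|post))?\.(?:py|sql)'
    r')$'
)

for name in ('pre-all.sql', 'delta_1.2.3_01012020.pre.py', 'delta_1.2.3.sql', 'readme.txt'):
    print(name, bool(_DELTA_NAME_RE.match(name)))
# prints True for the first three names and False for readme.txt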
def stream_directory(directory,
recursive=False,
patterns='**',
chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming directories.
Returns a buffered generator which encodes a directory as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
directory : str
The filepath of the directory to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk
"""
stream = DirectoryStream(directory,
recursive=recursive,
patterns=patterns,
chunk_size=chunk_size)
return stream.body(), stream.headers | Gets a buffered generator for streaming directories.
Returns a buffered generator which encodes a directory as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
directory : str
The filepath of the directory to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk | Below is the instruction that describes the task:
### Input:
Gets a buffered generator for streaming directories.
Returns a buffered generator which encodes a directory as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
directory : str
The filepath of the directory to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk
### Response:
def stream_directory(directory,
recursive=False,
patterns='**',
chunk_size=default_chunk_size):
"""Gets a buffered generator for streaming directories.
Returns a buffered generator which encodes a directory as
:mimetype:`multipart/form-data` with the corresponding headers.
Parameters
----------
directory : str
The filepath of the directory to stream
recursive : bool
Stream all content within the directory recursively?
patterns : str | list
Single *glob* pattern or list of *glob* patterns and compiled
regular expressions to match the names of the filepaths to keep
chunk_size : int
Maximum size of each stream chunk
"""
stream = DirectoryStream(directory,
recursive=recursive,
patterns=patterns,
chunk_size=chunk_size)
return stream.body(), stream.headers |
def filelist(jottapath, JFS):
"""Get a set() of files from a jottapath (a folder)"""
log.debug("filelist %r", jottapath)
try:
jf = JFS.getObject(jottapath)
except JFSNotFoundError:
return set() # folder does not exist, so pretend it is an empty folder
if not isinstance(jf, JFSFolder):
return False
return set([f.name for f in jf.files() if not f.is_deleted()]) | Get a set() of files from a jottapath (a folder) | Below is the instruction that describes the task:
### Input:
Get a set() of files from a jottapath (a folder)
### Response:
def filelist(jottapath, JFS):
"""Get a set() of files from a jottapath (a folder)"""
log.debug("filelist %r", jottapath)
try:
jf = JFS.getObject(jottapath)
except JFSNotFoundError:
return set() # folder does not exist, so pretend it is an empty folder
if not isinstance(jf, JFSFolder):
return False
return set([f.name for f in jf.files() if not f.is_deleted()]) |
def render_to_message(self, extra_context=None, *args, **kwargs):
"""
Renders and returns an unsent message with the given context.
Any extra keyword arguments passed will be passed through as keyword
arguments to the message constructor.
:param extra_context: Any additional context to use when rendering
templated content.
:type extra_context: :class:`dict`
:returns: A message instance.
:rtype: :attr:`.message_class`
"""
message = super(TemplatedHTMLEmailMessageView, self)\
.render_to_message(extra_context, *args, **kwargs)
if extra_context is None:
extra_context = {}
context = self.get_context_data(**extra_context)
content = self.render_html_body(context)
message.attach_alternative(content, mimetype='text/html')
return message | Renders and returns an unsent message with the given context.
Any extra keyword arguments passed will be passed through as keyword
arguments to the message constructor.
:param extra_context: Any additional context to use when rendering
templated content.
:type extra_context: :class:`dict`
:returns: A message instance.
:rtype: :attr:`.message_class` | Below is the instruction that describes the task:
### Input:
Renders and returns an unsent message with the given context.
Any extra keyword arguments passed will be passed through as keyword
arguments to the message constructor.
:param extra_context: Any additional context to use when rendering
templated content.
:type extra_context: :class:`dict`
:returns: A message instance.
:rtype: :attr:`.message_class`
### Response:
def render_to_message(self, extra_context=None, *args, **kwargs):
"""
Renders and returns an unsent message with the given context.
Any extra keyword arguments passed will be passed through as keyword
arguments to the message constructor.
:param extra_context: Any additional context to use when rendering
templated content.
:type extra_context: :class:`dict`
:returns: A message instance.
:rtype: :attr:`.message_class`
"""
message = super(TemplatedHTMLEmailMessageView, self)\
.render_to_message(extra_context, *args, **kwargs)
if extra_context is None:
extra_context = {}
context = self.get_context_data(**extra_context)
content = self.render_html_body(context)
message.attach_alternative(content, mimetype='text/html')
return message |
def update_from_file(yaml_dict, filepaths):
'''
Override YAML settings with loaded values from filepaths.
- File paths in the list get priority according to their order in the list.
'''
# load YAML settings with only fields in yaml_dict
yaml_dict.update(registry.load(filepaths, list(yaml_dict))) | Override YAML settings with loaded values from filepaths.
- File paths in the list get priority according to their order in the list. | Below is the instruction that describes the task:
### Input:
Override YAML settings with loaded values from filepaths.
- File paths in the list get priority according to their order in the list.
### Response:
def update_from_file(yaml_dict, filepaths):
'''
Override YAML settings with loaded values from filepaths.
- File paths in the list get priority according to their order in the list.
'''
# load YAML settings with only fields in yaml_dict
yaml_dict.update(registry.load(filepaths, list(yaml_dict))) |
def csv(self, jql, limit=1000):
"""
Get issues from jql search result with all related fields
:param jql: JQL query
:param limit: max results in the output file
:return: CSV file
"""
url = 'sr/jira.issueviews:searchrequest-csv-all-fields/temp/SearchRequest.csv?tempMax={limit}&jqlQuery={jql}'.format(
limit=limit, jql=jql)
return self.get(url, not_json_response=True, headers={'Accept': 'application/csv'}) | Get issues from jql search result with all related fields
:param jql: JQL query
:param limit: max results in the output file
:return: CSV file | Below is the instruction that describes the task:
### Input:
Get issues from jql search result with all related fields
:param jql: JQL query
:param limit: max results in the output file
:return: CSV file
### Response:
def csv(self, jql, limit=1000):
"""
Get issues from jql search result with all related fields
:param jql: JQL query
:param limit: max results in the output file
:return: CSV file
"""
url = 'sr/jira.issueviews:searchrequest-csv-all-fields/temp/SearchRequest.csv?tempMax={limit}&jqlQuery={jql}'.format(
limit=limit, jql=jql)
return self.get(url, not_json_response=True, headers={'Accept': 'application/csv'}) |
def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
"""
Add a path based resolver for the given tag.
A path is a list of keys that forms a path
to a node in the representation tree.
Keys can be string values, integers, or None.
"""
Loader.add_path_resolver(tag, path, kind)
Dumper.add_path_resolver(tag, path, kind) | Add a path based resolver for the given tag.
A path is a list of keys that forms a path
to a node in the representation tree.
Keys can be string values, integers, or None. | Below is the instruction that describes the task:
### Input:
Add a path based resolver for the given tag.
A path is a list of keys that forms a path
to a node in the representation tree.
Keys can be string values, integers, or None.
### Response:
def add_path_resolver(tag, path, kind=None, Loader=Loader, Dumper=Dumper):
"""
Add a path based resolver for the given tag.
A path is a list of keys that forms a path
to a node in the representation tree.
Keys can be string values, integers, or None.
"""
Loader.add_path_resolver(tag, path, kind)
Dumper.add_path_resolver(tag, path, kind) |
def get_display_dataframe(self):
'''
Gets list of terms to display that have some interesting diachronic variation.
Returns
-------
pd.DataFrame
e.g.,
term variable frequency trending
2 in 200310 1.0 0.000000
19 for 200310 1.0 0.000000
20 to 200311 1.0 0.000000
'''
X = self.corpus_.get_term_doc_mat()
categories = pd.Series(self.corpus_.get_category_ids())
cat_ar = np.array(self.corpus_.get_categories())
cat_idx_sort = np.argsort(cat_ar)
if self.seasonality_column_:
print('YES')
seasonality_ar = np.array(self.corpus_.get_df()[self.seasonality_column_])
terms = self.corpus_.get_terms()
category_idx_store = self.corpus_.get_category_index_store()
data = {}
seasondata = {}
for i, cati in enumerate(cat_idx_sort):
cat = cat_ar[cati]
if cat >= self.start_category_ and i > self.timesteps_to_lag_:
neg_cats = self.sorted_categores_[i - self.timesteps_to_lag_:i]
neg_mask = categories.isin(category_idx_store.getidxstrictbatch(neg_cats)).values
scores = self._regress_terms(X, cat, categories, category_idx_store, neg_mask, terms)
data[cat] = scores
if self.seasonality_column_:
neg_cats = set(categories[(seasonality_ar == seasonality_ar[cati]) & (categories != categories[cati])])
neg_mask = categories.isin(neg_cats).values
scores = self._regress_terms(X, cat, categories, category_idx_store, neg_mask, terms)
seasondata[cat] = scores
coefs = pd.DataFrame(data)
pos_coefs = (coefs.apply(lambda x: (x > 0) * x, axis=1)
.sum(axis=1)
.sort_values(ascending=False))
term_cat_counts = self.corpus_.get_term_freq_df('')[coefs.columns]
def dense_percentile(x):
# ranks = rankdata(x, 'dense')
return pd.Series(x / x.max(), index=x.index)
rank_df = pd.DataFrame({'coefr': dense_percentile(pos_coefs),
'freqr': dense_percentile(term_cat_counts.max(axis=1)),
'coef': pos_coefs,
'freq': term_cat_counts.max(axis=1)})
if self.seasonality_column_:
seasoncoefs = (pd.DataFrame(seasondata).sum(axis=1))
rank_df['seasoncoefr'] = dense_percentile(seasoncoefs.sort_values(ascending=False) + np.abs(seasoncoefs.min()))
weights = [2, 1, 1]
vals = ['freqr', 'coefr', 'seasoncoefr']
def gethmean(x):
if min(x[vals]) == 0:
return 0
return sum(weights) * 1. / sum([weights[i] / x[val] for i, val in enumerate(vals)])
rank_df['hmean'] = rank_df.apply(gethmean, axis=1)
else:
beta = 0.5
rank_df['hmean'] = (rank_df
.apply(lambda x: 0 if min(x) == 0 else
(1 + beta ** 2) * (x.coefr * x.freqr) / ((beta ** 2 * x.coefr) + x.freqr),
axis=1))
rank_df = rank_df.sort_values(by='hmean', ascending=False)
display_df = pd.merge((term_cat_counts
.loc[rank_df.iloc[:self.num_terms_].index]
.reset_index()
.melt(id_vars=['index'])
.rename(columns={'index': 'term', 'value': 'frequency'})),
(coefs.loc[rank_df.iloc[:self.num_terms_].index]
.reset_index()
.melt(id_vars=['index'])
.rename(columns={'index': 'term', 'value': 'trending'})),
on=['term', 'variable'])
display_df[display_df['frequency'] == 0] = np.nan
display_df = display_df.dropna()
return display_df[display_df.term.isin(rank_df.index)] | Gets list of terms to display that have some interesting diachronic variation.
Returns
-------
pd.DataFrame
e.g.,
term variable frequency trending
2 in 200310 1.0 0.000000
19 for 200310 1.0 0.000000
20 to 200311 1.0 0.000000 | Below is the instruction that describes the task:
### Input:
Gets list of terms to display that have some interesting diachronic variation.
Returns
-------
pd.DataFrame
e.g.,
term variable frequency trending
2 in 200310 1.0 0.000000
19 for 200310 1.0 0.000000
20 to 200311 1.0 0.000000
### Response:
def get_display_dataframe(self):
'''
Gets list of terms to display that have some interesting diachronic variation.
Returns
-------
pd.DataFrame
e.g.,
term variable frequency trending
2 in 200310 1.0 0.000000
19 for 200310 1.0 0.000000
20 to 200311 1.0 0.000000
'''
X = self.corpus_.get_term_doc_mat()
categories = pd.Series(self.corpus_.get_category_ids())
cat_ar = np.array(self.corpus_.get_categories())
cat_idx_sort = np.argsort(cat_ar)
if self.seasonality_column_:
print('YES')
seasonality_ar = np.array(self.corpus_.get_df()[self.seasonality_column_])
terms = self.corpus_.get_terms()
category_idx_store = self.corpus_.get_category_index_store()
data = {}
seasondata = {}
for i, cati in enumerate(cat_idx_sort):
cat = cat_ar[cati]
if cat >= self.start_category_ and i > self.timesteps_to_lag_:
neg_cats = self.sorted_categores_[i - self.timesteps_to_lag_:i]
neg_mask = categories.isin(category_idx_store.getidxstrictbatch(neg_cats)).values
scores = self._regress_terms(X, cat, categories, category_idx_store, neg_mask, terms)
data[cat] = scores
if self.seasonality_column_:
neg_cats = set(categories[(seasonality_ar == seasonality_ar[cati]) & (categories != categories[cati])])
neg_mask = categories.isin(neg_cats).values
scores = self._regress_terms(X, cat, categories, category_idx_store, neg_mask, terms)
seasondata[cat] = scores
coefs = pd.DataFrame(data)
pos_coefs = (coefs.apply(lambda x: (x > 0) * x, axis=1)
.sum(axis=1)
.sort_values(ascending=False))
term_cat_counts = self.corpus_.get_term_freq_df('')[coefs.columns]
def dense_percentile(x):
# ranks = rankdata(x, 'dense')
return pd.Series(x / x.max(), index=x.index)
rank_df = pd.DataFrame({'coefr': dense_percentile(pos_coefs),
'freqr': dense_percentile(term_cat_counts.max(axis=1)),
'coef': pos_coefs,
'freq': term_cat_counts.max(axis=1)})
if self.seasonality_column_:
seasoncoefs = (pd.DataFrame(seasondata).sum(axis=1))
rank_df['seasoncoefr'] = dense_percentile(seasoncoefs.sort_values(ascending=False) + np.abs(seasoncoefs.min()))
weights = [2, 1, 1]
vals = ['freqr', 'coefr', 'seasoncoefr']
def gethmean(x):
if min(x[vals]) == 0:
return 0
return sum(weights) * 1. / sum([weights[i] / x[val] for i, val in enumerate(vals)])
rank_df['hmean'] = rank_df.apply(gethmean, axis=1)
else:
beta = 0.5
rank_df['hmean'] = (rank_df
.apply(lambda x: 0 if min(x) == 0 else
(1 + beta ** 2) * (x.coefr * x.freqr) / ((beta ** 2 * x.coefr) + x.freqr),
axis=1))
rank_df = rank_df.sort_values(by='hmean', ascending=False)
display_df = pd.merge((term_cat_counts
.loc[rank_df.iloc[:self.num_terms_].index]
.reset_index()
.melt(id_vars=['index'])
.rename(columns={'index': 'term', 'value': 'frequency'})),
(coefs.loc[rank_df.iloc[:self.num_terms_].index]
.reset_index()
.melt(id_vars=['index'])
.rename(columns={'index': 'term', 'value': 'trending'})),
on=['term', 'variable'])
display_df[display_df['frequency'] == 0] = np.nan
display_df = display_df.dropna()
return display_df[display_df.term.isin(rank_df.index)] |
def copy(self):
"""
Returns a copy of the distribution.
Returns
-------
LinearGaussianCPD: copy of the distribution
Examples
--------
>>> from pgmpy.factors.continuous import LinearGaussianCPD
>>> cpd = LinearGaussianCPD('Y', [0.2, -2, 3, 7], 9.6, ['X1', 'X2', 'X3'])
>>> copy_cpd = cpd.copy()
>>> copy_cpd.variable
'Y'
>>> copy_cpd.evidence
['X1', 'X2', 'X3']
"""
copy_cpd = LinearGaussianCPD(self.variable, self.beta, self.variance,
list(self.evidence))
return copy_cpd | Returns a copy of the distribution.
Returns
-------
LinearGaussianCPD: copy of the distribution
Examples
--------
>>> from pgmpy.factors.continuous import LinearGaussianCPD
>>> cpd = LinearGaussianCPD('Y', [0.2, -2, 3, 7], 9.6, ['X1', 'X2', 'X3'])
>>> copy_cpd = cpd.copy()
>>> copy_cpd.variable
'Y'
>>> copy_cpd.evidence
['X1', 'X2', 'X3'] | Below is the instruction that describes the task:
### Input:
Returns a copy of the distribution.
Returns
-------
LinearGaussianCPD: copy of the distribution
Examples
--------
>>> from pgmpy.factors.continuous import LinearGaussianCPD
>>> cpd = LinearGaussianCPD('Y', [0.2, -2, 3, 7], 9.6, ['X1', 'X2', 'X3'])
>>> copy_cpd = cpd.copy()
>>> copy_cpd.variable
'Y'
>>> copy_cpd.evidence
['X1', 'X2', 'X3']
### Response:
def copy(self):
"""
Returns a copy of the distribution.
Returns
-------
LinearGaussianCPD: copy of the distribution
Examples
--------
>>> from pgmpy.factors.continuous import LinearGaussianCPD
>>> cpd = LinearGaussianCPD('Y', [0.2, -2, 3, 7], 9.6, ['X1', 'X2', 'X3'])
>>> copy_cpd = cpd.copy()
>>> copy_cpd.variable
'Y'
>>> copy_cpd.evidence
['X1', 'X2', 'X3']
"""
copy_cpd = LinearGaussianCPD(self.variable, self.beta, self.variance,
list(self.evidence))
return copy_cpd |
def get_loc(self, key, method=None, tolerance=None):
"""Adapted from pandas.tseries.index.DatetimeIndex.get_loc"""
if isinstance(key, str):
return self._get_string_slice(key)
else:
return pd.Index.get_loc(self, key, method=method,
tolerance=tolerance) | Adapted from pandas.tseries.index.DatetimeIndex.get_loc | Below is the instruction that describes the task:
### Input:
Adapted from pandas.tseries.index.DatetimeIndex.get_loc
### Response:
def get_loc(self, key, method=None, tolerance=None):
"""Adapted from pandas.tseries.index.DatetimeIndex.get_loc"""
if isinstance(key, str):
return self._get_string_slice(key)
else:
return pd.Index.get_loc(self, key, method=method,
tolerance=tolerance) |
def _buildPointList(self):
"""
Upon connection, build the device point list and properties.
"""
try:
self.properties.pss.value = self.properties.network.read(
"{} device {} protocolServicesSupported".format(
self.properties.address, self.properties.device_id
)
)
except NoResponseFromController as error:
self._log.error("Controller not found, aborting. ({})".format(error))
return ("Not Found", "", [], [])
except SegmentationNotSupported as error:
self._log.warning("Segmentation not supported")
self.segmentation_supported = False
self.new_state(DeviceDisconnected)
self.properties.name = self.properties.network.read(
"{} device {} objectName".format(
self.properties.address, self.properties.device_id
)
)
self._log.info(
"Device {}:[{}] found... building points list".format(
self.properties.device_id, self.properties.name
)
)
try:
self.properties.objects_list, self.points, self.trendlogs = self._discoverPoints(
self.custom_object_list
)
if self.properties.pollDelay > 0:
self.poll(delay=self.properties.pollDelay)
except NoResponseFromController as error:
self._log.error("Cannot retrieve object list, disconnecting...")
self.segmentation_supported = False
self.new_state(DeviceDisconnected)
except IndexError as error:
self._log.error("Device creation failed... disconnecting")
self.new_state(DeviceDisconnected) | Upon connection, build the device point list and properties. | Below is the instruction that describes the task:
### Input:
Upon connection, build the device point list and properties.
### Response:
def _buildPointList(self):
"""
Upon connection, build the device point list and properties.
"""
try:
self.properties.pss.value = self.properties.network.read(
"{} device {} protocolServicesSupported".format(
self.properties.address, self.properties.device_id
)
)
except NoResponseFromController as error:
self._log.error("Controller not found, aborting. ({})".format(error))
return ("Not Found", "", [], [])
except SegmentationNotSupported as error:
self._log.warning("Segmentation not supported")
self.segmentation_supported = False
self.new_state(DeviceDisconnected)
self.properties.name = self.properties.network.read(
"{} device {} objectName".format(
self.properties.address, self.properties.device_id
)
)
self._log.info(
"Device {}:[{}] found... building points list".format(
self.properties.device_id, self.properties.name
)
)
try:
self.properties.objects_list, self.points, self.trendlogs = self._discoverPoints(
self.custom_object_list
)
if self.properties.pollDelay > 0:
self.poll(delay=self.properties.pollDelay)
except NoResponseFromController as error:
self._log.error("Cannot retrieve object list, disconnecting...")
self.segmentation_supported = False
self.new_state(DeviceDisconnected)
except IndexError as error:
self._log.error("Device creation failed... disconnecting")
self.new_state(DeviceDisconnected) |
def _get_database(self, database_name):
"""
Get PyMongo client pointing to the current database.
:return: MongoDB client of the current database.
:raise DataSourceError
"""
try:
return self._client[database_name]
except InvalidName as ex:
raise DataSourceError("Cannot connect to database %s!"
% self._database) from ex | Get PyMongo client pointing to the current database.
:return: MongoDB client of the current database.
:raise DataSourceError | Below is the instruction that describes the task:
### Input:
Get PyMongo client pointing to the current database.
:return: MongoDB client of the current database.
:raise DataSourceError
### Response:
def _get_database(self, database_name):
"""
Get PyMongo client pointing to the current database.
:return: MongoDB client of the current database.
:raise DataSourceError
"""
try:
return self._client[database_name]
except InvalidName as ex:
raise DataSourceError("Cannot connect to database %s!"
% self._database) from ex |
def __upload_title(self, kibiter_major):
"""Upload to Kibiter the title for the dashboard.
The title is shown on top of the dashboard menu, and is usually
the name of the project being dashboarded.
This is done only for Kibiter 6.x.
:param kibiter_major: major version of kibiter
"""
if kibiter_major == "6":
resource = ".kibana/doc/projectname"
data = {"projectname": {"name": self.project_name}}
mapping_resource = ".kibana/_mapping/doc"
mapping = {"dynamic": "true"}
url = urijoin(self.conf['es_enrichment']['url'], resource)
mapping_url = urijoin(self.conf['es_enrichment']['url'],
mapping_resource)
logger.debug("Adding mapping for dashboard title")
res = self.grimoire_con.put(mapping_url, data=json.dumps(mapping),
headers=ES6_HEADER)
try:
res.raise_for_status()
except requests.exceptions.HTTPError:
logger.error("Couldn't create mapping for dashboard title.")
logger.error(res.json())
logger.debug("Uploading dashboard title")
res = self.grimoire_con.post(url, data=json.dumps(data),
headers=ES6_HEADER)
try:
res.raise_for_status()
except requests.exceptions.HTTPError:
logger.error("Couldn't create dashboard title.")
logger.error(res.json()) | Upload to Kibiter the title for the dashboard.
The title is shown on top of the dashboard menu, and is usually
the name of the project being dashboarded.
This is done only for Kibiter 6.x.
:param kibiter_major: major version of kibiter | Below is the instruction that describes the task:
### Input:
Upload to Kibiter the title for the dashboard.
The title is shown on top of the dashboard menu, and is usually
the name of the project being dashboarded.
This is done only for Kibiter 6.x.
:param kibiter_major: major version of kibiter
### Response:
def __upload_title(self, kibiter_major):
"""Upload to Kibiter the title for the dashboard.
The title is shown on top of the dashboard menu, and is usually
the name of the project being dashboarded.
This is done only for Kibiter 6.x.
:param kibiter_major: major version of kibiter
"""
if kibiter_major == "6":
resource = ".kibana/doc/projectname"
data = {"projectname": {"name": self.project_name}}
mapping_resource = ".kibana/_mapping/doc"
mapping = {"dynamic": "true"}
url = urijoin(self.conf['es_enrichment']['url'], resource)
mapping_url = urijoin(self.conf['es_enrichment']['url'],
mapping_resource)
logger.debug("Adding mapping for dashboard title")
res = self.grimoire_con.put(mapping_url, data=json.dumps(mapping),
headers=ES6_HEADER)
try:
res.raise_for_status()
except requests.exceptions.HTTPError:
logger.error("Couldn't create mapping for dashboard title.")
logger.error(res.json())
logger.debug("Uploading dashboard title")
res = self.grimoire_con.post(url, data=json.dumps(data),
headers=ES6_HEADER)
try:
res.raise_for_status()
except requests.exceptions.HTTPError:
logger.error("Couldn't create dashboard title.")
logger.error(res.json()) |
def split_lines(source, maxline=79):
"""Split inputs according to lines.
If a line is short enough, just yield it.
Otherwise, fix it.
"""
result = []
extend = result.extend
append = result.append
line = []
multiline = False
count = 0
find = str.find
for item in source:
index = find(item, '\n')
if index:
line.append(item)
multiline = index > 0
count += len(item)
else:
if line:
if count <= maxline or multiline:
extend(line)
else:
wrap_line(line, maxline, result)
count = 0
multiline = False
line = []
append(item)
return result | Split inputs according to lines.
If a line is short enough, just yield it.
Otherwise, fix it. | Below is the instruction that describes the task:
### Input:
Split inputs according to lines.
If a line is short enough, just yield it.
Otherwise, fix it.
### Response:
def split_lines(source, maxline=79):
"""Split inputs according to lines.
If a line is short enough, just yield it.
Otherwise, fix it.
"""
result = []
extend = result.extend
append = result.append
line = []
multiline = False
count = 0
find = str.find
for item in source:
index = find(item, '\n')
if index:
line.append(item)
multiline = index > 0
count += len(item)
else:
if line:
if count <= maxline or multiline:
extend(line)
else:
wrap_line(line, maxline, result)
count = 0
multiline = False
line = []
append(item)
return result |
def root_urns_for_deletion(self):
"""Roots of the graph of urns marked for deletion."""
roots = set()
for urn in self._urns_for_deletion:
new_root = True
str_urn = utils.SmartUnicode(urn)
fake_roots = []
for root in roots:
str_root = utils.SmartUnicode(root)
if str_urn.startswith(str_root):
new_root = False
break
elif str_root.startswith(str_urn):
fake_roots.append(root)
if new_root:
roots -= set(fake_roots)
roots.add(urn)
return roots | Roots of the graph of urns marked for deletion. | Below is the instruction that describes the task:
### Input:
Roots of the graph of urns marked for deletion.
### Response:
def root_urns_for_deletion(self):
"""Roots of the graph of urns marked for deletion."""
roots = set()
for urn in self._urns_for_deletion:
new_root = True
str_urn = utils.SmartUnicode(urn)
fake_roots = []
for root in roots:
str_root = utils.SmartUnicode(root)
if str_urn.startswith(str_root):
new_root = False
break
elif str_root.startswith(str_urn):
fake_roots.append(root)
if new_root:
roots -= set(fake_roots)
roots.add(urn)
return roots |
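A simplified, string-only sketch of the root-finding logic in root_urns_for_deletion above; utils.SmartUnicode and the instance state are replaced with plain strings, so this is an illustration rather than the original implementation.
def roots_of(urns):
    # Keep only URNs that are not prefixed by an already-kept root, and evict kept
    # roots that turn out to be prefixed by a newly seen URN ("fake roots").
    roots = set()
    for urn in urns:
        new_root = True
        fake_roots = []
        for root in roots:
            if urn.startswith(root):
                new_root = False
                break
            elif root.startswith(urn):
                fake_roots.append(root)
        if new_root:
            roots -= set(fake_roots)
            roots.add(urn)
    return roots

print(roots_of(["aff4:/a/b", "aff4:/a", "aff4:/c"]))  # -> {'aff4:/a', 'aff4:/c'} (set order may vary)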
def wait_for(self, timeout=None):
""" When a timeout should be applied for awaiting use this method.
:param timeout: optional timeout in seconds.
:returns: a future returning the emitted value
"""
from broqer.op import OnEmitFuture # due circular dependency
return self | OnEmitFuture(timeout=timeout) | When a timeout should be applied for awaiting use this method.
:param timeout: optional timeout in seconds.
:returns: a future returning the emitted value | Below is the instruction that describes the task:
### Input:
When a timeout should be applied for awaiting use this method.
:param timeout: optional timeout in seconds.
:returns: a future returning the emitted value
### Response:
def wait_for(self, timeout=None):
""" When a timeout should be applied for awaiting use this method.
:param timeout: optional timeout in seconds.
:returns: a future returning the emitted value
"""
from broqer.op import OnEmitFuture # due circular dependency
return self | OnEmitFuture(timeout=timeout) |
def due(self):
"""
The amount due for this invoice. Takes into account all entities in the invoice.
Can be < 0 if the invoice was overpaid.
"""
invoice_charges = Charge.objects.filter(invoice=self)
invoice_transactions = Transaction.successful.filter(invoice=self)
return total_amount(invoice_charges) - total_amount(invoice_transactions) | The amount due for this invoice. Takes into account all entities in the invoice.
Can be < 0 if the invoice was overpaid. | Below is the instruction that describes the task:
### Input:
The amount due for this invoice. Takes into account all entities in the invoice.
Can be < 0 if the invoice was overpaid.
### Response:
def due(self):
"""
The amount due for this invoice. Takes into account all entities in the invoice.
Can be < 0 if the invoice was overpaid.
"""
invoice_charges = Charge.objects.filter(invoice=self)
invoice_transactions = Transaction.successful.filter(invoice=self)
return total_amount(invoice_charges) - total_amount(invoice_transactions) |
def connect(self, retry=0, delay=0):
"""Initiate connection to CM. Blocks until connected unless ``retry`` is specified.
:param retry: number of retries before returning. Unlimited when set to ``None``
:type retry: :class:`int`
:param delay: delay in seconds before connection attempt
:type delay: :class:`int`
:return: successful connection
:rtype: :class:`bool`
"""
if self.connected:
self._LOG.debug("Connect called, but we are connected?")
return
if self._connecting:
self._LOG.debug("Connect called, but we are already connecting.")
return
self._connecting = True
if delay:
self._LOG.debug("Delayed connect: %d seconds" % delay)
self.emit(self.EVENT_RECONNECT, delay)
self.sleep(delay)
self._LOG.debug("Connect initiated.")
for i, server_addr in enumerate(self.cm_servers):
if retry and i > retry:
return False
start = time()
if self.connection.connect(server_addr):
break
diff = time() - start
self._LOG.debug("Failed to connect. Retrying...")
if diff < 5:
self.sleep(5 - diff)
self.current_server_addr = server_addr
self.connected = True
self.emit(self.EVENT_CONNECTED)
self._recv_loop = gevent.spawn(self._recv_messages)
self._connecting = False
return True | Initiate connection to CM. Blocks until connected unless ``retry`` is specified.
:param retry: number of retries before returning. Unlimited when set to ``None``
:type retry: :class:`int`
:param delay: delay in seconds before connection attempt
:type delay: :class:`int`
:return: successful connection
:rtype: :class:`bool` | Below is the instruction that describes the task:
### Input:
Initiate connection to CM. Blocks until connected unless ``retry`` is specified.
:param retry: number of retries before returning. Unlimited when set to ``None``
:type retry: :class:`int`
:param delay: delay in seconds before connection attempt
:type delay: :class:`int`
:return: successful connection
:rtype: :class:`bool`
### Response:
def connect(self, retry=0, delay=0):
"""Initiate connection to CM. Blocks until connected unless ``retry`` is specified.
:param retry: number of retries before returning. Unlimited when set to ``None``
:type retry: :class:`int`
:param delay: delay in seconds before connection attempt
:type delay: :class:`int`
:return: successful connection
:rtype: :class:`bool`
"""
if self.connected:
self._LOG.debug("Connect called, but we are connected?")
return
if self._connecting:
self._LOG.debug("Connect called, but we are already connecting.")
return
self._connecting = True
if delay:
self._LOG.debug("Delayed connect: %d seconds" % delay)
self.emit(self.EVENT_RECONNECT, delay)
self.sleep(delay)
self._LOG.debug("Connect initiated.")
for i, server_addr in enumerate(self.cm_servers):
if retry and i > retry:
return False
start = time()
if self.connection.connect(server_addr):
break
diff = time() - start
self._LOG.debug("Failed to connect. Retrying...")
if diff < 5:
self.sleep(5 - diff)
self.current_server_addr = server_addr
self.connected = True
self.emit(self.EVENT_CONNECTED)
self._recv_loop = gevent.spawn(self._recv_messages)
self._connecting = False
return True |
def exif_name(self):
'''
Name of file in the form {lat}_{lon}_{ca}_{datetime}_{filename}_{hash}
'''
mapillary_description = json.loads(self.extract_image_description())
lat = None
lon = None
ca = None
date_time = None
if "MAPLatitude" in mapillary_description:
lat = mapillary_description["MAPLatitude"]
if "MAPLongitude" in mapillary_description:
lon = mapillary_description["MAPLongitude"]
if "MAPCompassHeading" in mapillary_description:
if 'TrueHeading' in mapillary_description["MAPCompassHeading"]:
ca = mapillary_description["MAPCompassHeading"]['TrueHeading']
if "MAPCaptureTime" in mapillary_description:
date_time = datetime.datetime.strptime(
mapillary_description["MAPCaptureTime"], "%Y_%m_%d_%H_%M_%S_%f").strftime("%Y-%m-%d-%H-%M-%S-%f")[:-3]
filename = '{}_{}_{}_{}_{}'.format(
lat, lon, ca, date_time, uuid.uuid4())
return filename | Name of file in the form {lat}_{lon}_{ca}_{datetime}_{filename}_{hash} | Below is the instruction that describes the task:
### Input:
Name of file in the form {lat}_{lon}_{ca}_{datetime}_{filename}_{hash}
### Response:
def exif_name(self):
'''
Name of file in the form {lat}_{lon}_{ca}_{datetime}_{filename}_{hash}
'''
mapillary_description = json.loads(self.extract_image_description())
lat = None
lon = None
ca = None
date_time = None
if "MAPLatitude" in mapillary_description:
lat = mapillary_description["MAPLatitude"]
if "MAPLongitude" in mapillary_description:
lon = mapillary_description["MAPLongitude"]
if "MAPCompassHeading" in mapillary_description:
if 'TrueHeading' in mapillary_description["MAPCompassHeading"]:
ca = mapillary_description["MAPCompassHeading"]['TrueHeading']
if "MAPCaptureTime" in mapillary_description:
date_time = datetime.datetime.strptime(
mapillary_description["MAPCaptureTime"], "%Y_%m_%d_%H_%M_%S_%f").strftime("%Y-%m-%d-%H-%M-%S-%f")[:-3]
filename = '{}_{}_{}_{}_{}'.format(
lat, lon, ca, date_time, uuid.uuid4())
return filename |
def doDelete(self, WHAT={}):
"""This function will perform the command -delete."""
if hasattr(WHAT, '_modified'):
self._addDBParam('RECORDID', WHAT.RECORDID)
self._addDBParam('MODID', WHAT.MODID)
elif type(WHAT) == dict and WHAT.has_key('RECORDID'):
self._addDBParam('RECORDID', WHAT['RECORDID'])
else:
raise FMError, 'Python Runtime: Object type (%s) given to function doDelete as argument WHAT cannot be used.' % type(WHAT)
if self._layout == '':
raise FMError, 'No layout was selected'
if self._checkRecordID() == 0:
raise FMError, 'RecordID is missing'
return self._doAction('-delete') | This function will perform the command -delete. | Below is the instruction that describes the task:
### Input:
This function will perform the command -delete.
### Response:
def doDelete(self, WHAT={}):
"""This function will perform the command -delete."""
if hasattr(WHAT, '_modified'):
self._addDBParam('RECORDID', WHAT.RECORDID)
self._addDBParam('MODID', WHAT.MODID)
elif type(WHAT) == dict and WHAT.has_key('RECORDID'):
self._addDBParam('RECORDID', WHAT['RECORDID'])
else:
raise FMError, 'Python Runtime: Object type (%s) given to function doDelete as argument WHAT cannot be used.' % type(WHAT)
if self._layout == '':
raise FMError, 'No layout was selected'
if self._checkRecordID() == 0:
raise FMError, 'RecordID is missing'
return self._doAction('-delete') |
def get_column_flat(self, field, components=None, computed_type='for_observations'):
"""
TODO: add documentation
return a single merged value (hstacked) from all meshes
:parameter str field: name of the mesh columnname
:parameter components:
"""
return self.pack_column_flat(self.get_column(field, components, computed_type),
components,
offset=field=='triangles') | TODO: add documentation
return a single merged value (hstacked) from all meshes
:parameter str field: name of the mesh columnname
:parameter components: | Below is the instruction that describes the task:
### Input:
TODO: add documentation
return a single merged value (hstacked) from all meshes
:parameter str field: name of the mesh columnname
:parameter components:
### Response:
def get_column_flat(self, field, components=None, computed_type='for_observations'):
"""
TODO: add documentation
return a single merged value (hstacked) from all meshes
:parameter str field: name of the mesh columnname
:parameter components:
"""
return self.pack_column_flat(self.get_column(field, components, computed_type),
components,
offset=field=='triangles') |
def get_anchor_href(markup):
"""
Given HTML markup, return a list of hrefs for each anchor tag.
"""
soup = BeautifulSoup(markup, 'lxml')
return ['%s' % link.get('href') for link in soup.find_all('a')] | Given HTML markup, return a list of hrefs for each anchor tag. | Below is the instruction that describes the task:
### Input:
Given HTML markup, return a list of hrefs for each anchor tag.
### Response:
def get_anchor_href(markup):
"""
Given HTML markup, return a list of hrefs for each anchor tag.
"""
soup = BeautifulSoup(markup, 'lxml')
return ['%s' % link.get('href') for link in soup.find_all('a')] |
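A usage sketch for get_anchor_href above; it assumes beautifulsoup4 and lxml are installed, and the output comment follows directly from the list comprehension.
markup = '<p><a href="https://example.com">home</a> <a href="/about">about</a></p>'
print(get_anchor_href(markup))
# -> ['https://example.com', '/about']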
def grabImage(self, index):
"""Gets an image of the item at *index*
:param index: index of an item in the view
:type index: :qtdoc:`QModelIndex`
:returns: :qtdoc:`QPixmap`
"""
# rect = self._rects[index.row()][index.column()]
rect = self.visualRect(index)
pixmap = QtGui.QPixmap()
pixmap = pixmap.grabWidget(self, rect)
return pixmap | Gets an image of the item at *index*
:param index: index of an item in the view
:type index: :qtdoc:`QModelIndex`
:returns: :qtdoc:`QPixmap` | Below is the instruction that describes the task:
### Input:
Gets an image of the item at *index*
:param index: index of an item in the view
:type index: :qtdoc:`QModelIndex`
:returns: :qtdoc:`QPixmap`
### Response:
def grabImage(self, index):
"""Gets an image of the item at *index*
:param index: index of an item in the view
:type index: :qtdoc:`QModelIndex`
:returns: :qtdoc:`QPixmap`
"""
# rect = self._rects[index.row()][index.column()]
rect = self.visualRect(index)
pixmap = QtGui.QPixmap()
pixmap = pixmap.grabWidget(self, rect)
return pixmap |
def _shutdown_proc(p, timeout):
"""Wait for a proc to shut down, then terminate or kill it after `timeout`."""
freq = 10 # how often to check per second
for _ in range(1 + timeout * freq):
ret = p.poll()
if ret is not None:
logging.info("Shutdown gracefully.")
return ret
time.sleep(1 / freq)
logging.warning("Killing the process.")
p.kill()
return p.wait() | Wait for a proc to shut down, then terminate or kill it after `timeout`. | Below is the instruction that describes the task:
### Input:
Wait for a proc to shut down, then terminate or kill it after `timeout`.
### Response:
def _shutdown_proc(p, timeout):
"""Wait for a proc to shut down, then terminate or kill it after `timeout`."""
freq = 10 # how often to check per second
for _ in range(1 + timeout * freq):
ret = p.poll()
if ret is not None:
logging.info("Shutdown gracefully.")
return ret
time.sleep(1 / freq)
logging.warning("Killing the process.")
p.kill()
return p.wait() |
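A usage sketch for _shutdown_proc above, assuming a POSIX system with a `sleep` binary and the function's logging/time imports in scope. The child never exits on its own, so the helper falls through to kill() after roughly `timeout` seconds.
import subprocess

# Start a child that outlives the timeout, then let the helper shut it down.
p = subprocess.Popen(["sleep", "30"])
ret = _shutdown_proc(p, timeout=2)  # polls ~10 times per second for 2 s, then kills
print("return code:", ret)          # a negative code on POSIX means the process was signalled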
def collect_filtered_models(discard, *input_values):
''' Collect a duplicate-free list of all other Bokeh models referred to by
this model, or by any of its references, etc, unless filtered-out by the
provided callable.
Iterate over ``input_values`` and descend through their structure
collecting all nested ``Models`` on the go.
Args:
*discard (Callable[[Model], bool])
a callable which accepts a *Model* instance as its single argument
and returns a boolean stating whether to discard the instance. The
latter means that the instance will not be added to collected
models nor will its references be explored.
*input_values (Model)
Bokeh models to collect other models from
Returns:
None
'''
ids = set([])
collected = []
queued = []
def queue_one(obj):
if obj.id not in ids and not (callable(discard) and discard(obj)):
queued.append(obj)
for value in input_values:
_visit_value_and_its_immediate_references(value, queue_one)
while queued:
obj = queued.pop(0)
if obj.id not in ids:
ids.add(obj.id)
collected.append(obj)
_visit_immediate_value_references(obj, queue_one)
return collected | Collect a duplicate-free list of all other Bokeh models referred to by
this model, or by any of its references, etc, unless filtered-out by the
provided callable.
Iterate over ``input_values`` and descend through their structure
collecting all nested ``Models`` on the go.
Args:
*discard (Callable[[Model], bool])
a callable which accepts a *Model* instance as its single argument
and returns a boolean stating whether to discard the instance. The
latter means that the instance will not be added to collected
models nor will its references be explored.
*input_values (Model)
Bokeh models to collect other models from
Returns:
None | Below is the instruction that describes the task:
### Input:
Collect a duplicate-free list of all other Bokeh models referred to by
this model, or by any of its references, etc, unless filtered-out by the
provided callable.
Iterate over ``input_values`` and descend through their structure
collecting all nested ``Models`` on the go.
Args:
*discard (Callable[[Model], bool])
a callable which accepts a *Model* instance as its single argument
and returns a boolean stating whether to discard the instance. The
latter means that the instance will not be added to collected
models nor will its references be explored.
*input_values (Model)
Bokeh models to collect other models from
Returns:
None
### Response:
def collect_filtered_models(discard, *input_values):
''' Collect a duplicate-free list of all other Bokeh models referred to by
this model, or by any of its references, etc, unless filtered-out by the
provided callable.
Iterate over ``input_values`` and descend through their structure
collecting all nested ``Models`` on the go.
Args:
*discard (Callable[[Model], bool])
a callable which accepts a *Model* instance as its single argument
and returns a boolean stating whether to discard the instance. The
latter means that the instance will not be added to collected
models nor will its references be explored.
*input_values (Model)
Bokeh models to collect other models from
Returns:
None
'''
ids = set([])
collected = []
queued = []
def queue_one(obj):
if obj.id not in ids and not (callable(discard) and discard(obj)):
queued.append(obj)
for value in input_values:
_visit_value_and_its_immediate_references(value, queue_one)
while queued:
obj = queued.pop(0)
if obj.id not in ids:
ids.add(obj.id)
collected.append(obj)
_visit_immediate_value_references(obj, queue_one)
return collected |
async def set_volume(self, vol: int):
""" Sets the player's volume (150% or 1000% limit imposed by lavalink depending on the version). """
if self._lavalink._server_version <= 2:
self.volume = max(min(vol, 150), 0)
else:
self.volume = max(min(vol, 1000), 0)
await self._lavalink.ws.send(op='volume', guildId=self.guild_id, volume=self.volume) | Sets the player's volume (150% or 1000% limit imposed by lavalink depending on the version). | Below is the instruction that describes the task:
### Input:
Sets the player's volume (150% or 1000% limit imposed by lavalink depending on the version).
### Response:
async def set_volume(self, vol: int):
""" Sets the player's volume (150% or 1000% limit imposed by lavalink depending on the version). """
if self._lavalink._server_version <= 2:
self.volume = max(min(vol, 150), 0)
else:
self.volume = max(min(vol, 1000), 0)
await self._lavalink.ws.send(op='volume', guildId=self.guild_id, volume=self.volume) |
def fit_transform(self, X, y=None, **params):
"""Learn vocabulary and return document id matrix.
This is equivalent to fit followed by transform.
Args:
X : iterable
an iterable which yields either str, unicode or file objects.
Returns:
list : document id matrix.
list: label id matrix.
"""
return self.fit(X, y).transform(X, y) | Learn vocabulary and return document id matrix.
This is equivalent to fit followed by transform.
Args:
X : iterable
an iterable which yields either str, unicode or file objects.
Returns:
list : document id matrix.
list: label id matrix. | Below is the instruction that describes the task:
### Input:
Learn vocabulary and return document id matrix.
This is equivalent to fit followed by transform.
Args:
X : iterable
an iterable which yields either str, unicode or file objects.
Returns:
list : document id matrix.
list: label id matrix.
### Response:
def fit_transform(self, X, y=None, **params):
"""Learn vocabulary and return document id matrix.
This is equivalent to fit followed by transform.
Args:
X : iterable
an iterable which yields either str, unicode or file objects.
Returns:
list : document id matrix.
list: label id matrix.
"""
return self.fit(X, y).transform(X, y) |
def wide(self):
"""
``True`` if this instruction needs to be prefixed by the WIDE
opcode.
"""
if not opcode_table[self.opcode].get('can_be_wide'):
return False
if self.operands[0].value >= 255:
return True
if self.opcode == 0x84:
if self.operands[1].value >= 255:
return True
return False | ``True`` if this instruction needs to be prefixed by the WIDE
opcode. | Below is the instruction that describes the task:
### Input:
``True`` if this instruction needs to be prefixed by the WIDE
opcode.
### Response:
def wide(self):
"""
``True`` if this instruction needs to be prefixed by the WIDE
opcode.
"""
if not opcode_table[self.opcode].get('can_be_wide'):
return False
if self.operands[0].value >= 255:
return True
if self.opcode == 0x84:
if self.operands[1].value >= 255:
return True
return False |
def load_html(self, mode, html):
"""Load HTML to this class with the mode specified.
There are two modes that can be used:
* HTML_FILE_MODE: Directly from a local HTML file.
* HTML_STR_MODE: From a valid HTML string.
:param mode: The mode.
:type mode: int
:param html: The html that will be loaded. If the mode is a file,
        then it should be a path to the html file. If the mode is a string,
then it should be a valid HTML string.
:type html: str
"""
# noinspection PyCallByClass,PyTypeChecker,PyArgumentList
self._html_loaded_flag = False
if mode == HTML_FILE_MODE:
self.setUrl(QtCore.QUrl.fromLocalFile(html))
elif mode == HTML_STR_MODE:
self.setHtml(html)
else:
raise InvalidParameterError('The mode is not supported.')
counter = 0
sleep_period = 0.1 # sec
timeout = 20 # it's generous enough!
while not self._html_loaded_flag and counter < timeout:
# Block until the event loop is done
counter += sleep_period
time.sleep(sleep_period)
# noinspection PyArgumentList
QgsApplication.processEvents() | Load HTML to this class with the mode specified.
There are two modes that can be used:
* HTML_FILE_MODE: Directly from a local HTML file.
* HTML_STR_MODE: From a valid HTML string.
:param mode: The mode.
:type mode: int
:param html: The html that will be loaded. If the mode is a file,
then it should be a path to the html file. If the mode is a string,
then it should be a valid HTML string.
:type html: str | Below is the instruction that describes the task:
### Input:
Load HTML to this class with the mode specified.
There are two modes that can be used:
* HTML_FILE_MODE: Directly from a local HTML file.
* HTML_STR_MODE: From a valid HTML string.
:param mode: The mode.
:type mode: int
:param html: The html that will be loaded. If the mode is a file,
then it should be a path to the html file. If the mode is a string,
then it should be a valid HTML string.
:type html: str
### Response:
def load_html(self, mode, html):
"""Load HTML to this class with the mode specified.
There are two modes that can be used:
* HTML_FILE_MODE: Directly from a local HTML file.
* HTML_STR_MODE: From a valid HTML string.
:param mode: The mode.
:type mode: int
:param html: The html that will be loaded. If the mode is a file,
        then it should be a path to the html file. If the mode is a string,
then it should be a valid HTML string.
:type html: str
"""
# noinspection PyCallByClass,PyTypeChecker,PyArgumentList
self._html_loaded_flag = False
if mode == HTML_FILE_MODE:
self.setUrl(QtCore.QUrl.fromLocalFile(html))
elif mode == HTML_STR_MODE:
self.setHtml(html)
else:
raise InvalidParameterError('The mode is not supported.')
counter = 0
sleep_period = 0.1 # sec
timeout = 20 # it's generous enough!
while not self._html_loaded_flag and counter < timeout:
# Block until the event loop is done
counter += sleep_period
time.sleep(sleep_period)
# noinspection PyArgumentList
QgsApplication.processEvents() |
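
The blocking wait at the end of load_html is a plain poll-with-timeout; a framework-free sketch of the same idea follows, with the flag, timeout and sleep period made up for illustration:

import time

def wait_for(flag_is_set, timeout=20.0, sleep_period=0.1):
    # Poll until the flag reads True or the timeout elapses.
    waited = 0.0
    while not flag_is_set() and waited < timeout:
        time.sleep(sleep_period)
        waited += sleep_period
    return flag_is_set()

state = {"loaded": False}
state["loaded"] = True          # normally flipped by a loadFinished-style callback
print(wait_for(lambda: state["loaded"], timeout=1.0))  # True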
def create_char(self, location, bitmap):
"""Create a new character.
The HD44780 supports up to 8 custom characters (location 0-7).
:param location: The place in memory where the character is stored.
Values need to be integers between 0 and 7.
:type location: int
:param bitmap: The bitmap containing the character. This should be a
tuple of 8 numbers, each representing a 5 pixel row.
:type bitmap: tuple of int
:raises AssertionError: Raised when an invalid location is passed in or
when bitmap has an incorrect size.
Example:
.. sourcecode:: python
>>> smiley = (
... 0b00000,
... 0b01010,
... 0b01010,
... 0b00000,
... 0b10001,
... 0b10001,
... 0b01110,
... 0b00000,
... )
>>> lcd.create_char(0, smiley)
"""
assert 0 <= location <= 7, 'Only locations 0-7 are valid.'
assert len(bitmap) == 8, 'Bitmap should have exactly 8 rows.'
# Store previous position
pos = self.cursor_pos
# Write character to CGRAM
self.command(c.LCD_SETCGRAMADDR | location << 3)
for row in bitmap:
self._send_data(row)
# Restore cursor pos
self.cursor_pos = pos | Create a new character.
The HD44780 supports up to 8 custom characters (location 0-7).
:param location: The place in memory where the character is stored.
Values need to be integers between 0 and 7.
:type location: int
:param bitmap: The bitmap containing the character. This should be a
tuple of 8 numbers, each representing a 5 pixel row.
:type bitmap: tuple of int
:raises AssertionError: Raised when an invalid location is passed in or
when bitmap has an incorrect size.
Example:
.. sourcecode:: python
>>> smiley = (
... 0b00000,
... 0b01010,
... 0b01010,
... 0b00000,
... 0b10001,
... 0b10001,
... 0b01110,
... 0b00000,
... )
>>> lcd.create_char(0, smiley) | Below is the instruction that describes the task:
### Input:
Create a new character.
The HD44780 supports up to 8 custom characters (location 0-7).
:param location: The place in memory where the character is stored.
Values need to be integers between 0 and 7.
:type location: int
:param bitmap: The bitmap containing the character. This should be a
tuple of 8 numbers, each representing a 5 pixel row.
:type bitmap: tuple of int
:raises AssertionError: Raised when an invalid location is passed in or
when bitmap has an incorrect size.
Example:
.. sourcecode:: python
>>> smiley = (
... 0b00000,
... 0b01010,
... 0b01010,
... 0b00000,
... 0b10001,
... 0b10001,
... 0b01110,
... 0b00000,
... )
>>> lcd.create_char(0, smiley)
### Response:
def create_char(self, location, bitmap):
"""Create a new character.
The HD44780 supports up to 8 custom characters (location 0-7).
:param location: The place in memory where the character is stored.
Values need to be integers between 0 and 7.
:type location: int
:param bitmap: The bitmap containing the character. This should be a
tuple of 8 numbers, each representing a 5 pixel row.
:type bitmap: tuple of int
:raises AssertionError: Raised when an invalid location is passed in or
when bitmap has an incorrect size.
Example:
.. sourcecode:: python
>>> smiley = (
... 0b00000,
... 0b01010,
... 0b01010,
... 0b00000,
... 0b10001,
... 0b10001,
... 0b01110,
... 0b00000,
... )
>>> lcd.create_char(0, smiley)
"""
assert 0 <= location <= 7, 'Only locations 0-7 are valid.'
assert len(bitmap) == 8, 'Bitmap should have exactly 8 rows.'
# Store previous position
pos = self.cursor_pos
# Write character to CGRAM
self.command(c.LCD_SETCGRAMADDR | location << 3)
for row in bitmap:
self._send_data(row)
# Restore cursor pos
self.cursor_pos = pos |
def _parse_canonical_regex(doc):
"""Decode a JSON regex to bson.regex.Regex."""
regex = doc['$regularExpression']
if len(doc) != 1:
raise TypeError('Bad $regularExpression, extra field(s): %s' % (doc,))
if len(regex) != 2:
raise TypeError('Bad $regularExpression must include only "pattern"'
'and "options" components: %s' % (doc,))
    return Regex(regex['pattern'], regex['options']) | Decode a JSON regex to bson.regex.Regex. | Below is the instruction that describes the task:
### Input:
Decode a JSON regex to bson.regex.Regex.
### Response:
def _parse_canonical_regex(doc):
"""Decode a JSON regex to bson.regex.Regex."""
regex = doc['$regularExpression']
if len(doc) != 1:
raise TypeError('Bad $regularExpression, extra field(s): %s' % (doc,))
if len(regex) != 2:
raise TypeError('Bad $regularExpression must include only "pattern"'
'and "options" components: %s' % (doc,))
return Regex(regex['pattern'], regex['options']) |
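
A small usage sketch, assuming _parse_canonical_regex is importable as defined above and that the bson package (which provides bson.regex.Regex) is installed; the extended-JSON fragment is made up:

doc = {"$regularExpression": {"pattern": "^foo", "options": "i"}}
regex = _parse_canonical_regex(doc)
print(regex.pattern)            # ^foo

# A document with an extra top-level field is rejected:
try:
    _parse_canonical_regex({"$regularExpression": doc["$regularExpression"], "extra": 1})
except TypeError as exc:
    print(exc)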
def _list_po_to_dict(tokens) -> ListAbundance:
"""Convert a list parse object to a node.
:type tokens: ParseResult
"""
func = tokens[FUNCTION]
dsl = FUNC_TO_LIST_DSL[func]
members = [parse_result_to_dsl(token) for token in tokens[MEMBERS]]
return dsl(members) | Convert a list parse object to a node.
    :type tokens: ParseResult | Below is the instruction that describes the task:
### Input:
Convert a list parse object to a node.
:type tokens: ParseResult
### Response:
def _list_po_to_dict(tokens) -> ListAbundance:
"""Convert a list parse object to a node.
:type tokens: ParseResult
"""
func = tokens[FUNCTION]
dsl = FUNC_TO_LIST_DSL[func]
members = [parse_result_to_dsl(token) for token in tokens[MEMBERS]]
return dsl(members) |
def calc_model(cortex, model_argument, model_hemi=Ellipsis, radius=np.pi/3):
'''
    calc_model loads the appropriate model object given the model argument, which may be given the name
of the model or a model object itself.
Required afferent parameters:
@ model_argument Must be either a RegisteredRetinotopyModel object or the name of a model that
can be loaded.
Optional afferent parameters:
@ model_hemi May be used to specify the hemisphere of the model; this is usually only used
when the fsaverage_sym hemisphere is desired, in which case this should be set to None; if
left at the default value (Ellipsis), then it will use the hemisphere of the cortex param.
Provided efferent values:
@ model Will be the RegisteredRetinotopyModel object to which the mesh should be registered.
'''
if pimms.is_str(model_argument):
h = cortex.chirality if model_hemi is Ellipsis else \
None if model_hemi is None else \
model_hemi
model = retinotopy_model(model_argument, hemi=h, radius=radius)
else:
model = model_argument
if not isinstance(model, RegisteredRetinotopyModel):
raise ValueError('model must be a RegisteredRetinotopyModel')
    return model | calc_model loads the appropriate model object given the model argument, which may be given the name
of the model or a model object itself.
Required afferent parameters:
@ model_argument Must be either a RegisteredRetinotopyModel object or the name of a model that
can be loaded.
Optional afferent parameters:
@ model_hemi May be used to specify the hemisphere of the model; this is usually only used
when the fsaverage_sym hemisphere is desired, in which case this should be set to None; if
left at the default value (Ellipsis), then it will use the hemisphere of the cortex param.
Provided efferent values:
@ model Will be the RegisteredRetinotopyModel object to which the mesh should be registered. | Below is the instruction that describes the task:
### Input:
calc_model loads the appropriate model object given the model argument, which may be given the name
of the model or a model object itself.
Required afferent parameters:
@ model_argument Must be either a RegisteredRetinotopyModel object or the name of a model that
can be loaded.
Optional afferent parameters:
@ model_hemi May be used to specify the hemisphere of the model; this is usually only used
when the fsaverage_sym hemisphere is desired, in which case this should be set to None; if
left at the default value (Ellipsis), then it will use the hemisphere of the cortex param.
Provided efferent values:
@ model Will be the RegisteredRetinotopyModel object to which the mesh should be registered.
### Response:
def calc_model(cortex, model_argument, model_hemi=Ellipsis, radius=np.pi/3):
'''
    calc_model loads the appropriate model object given the model argument, which may be given the name
of the model or a model object itself.
Required afferent parameters:
@ model_argument Must be either a RegisteredRetinotopyModel object or the name of a model that
can be loaded.
Optional afferent parameters:
@ model_hemi May be used to specify the hemisphere of the model; this is usually only used
when the fsaverage_sym hemisphere is desired, in which case this should be set to None; if
left at the default value (Ellipsis), then it will use the hemisphere of the cortex param.
Provided efferent values:
@ model Will be the RegisteredRetinotopyModel object to which the mesh should be registered.
'''
if pimms.is_str(model_argument):
h = cortex.chirality if model_hemi is Ellipsis else \
None if model_hemi is None else \
model_hemi
model = retinotopy_model(model_argument, hemi=h, radius=radius)
else:
model = model_argument
if not isinstance(model, RegisteredRetinotopyModel):
raise ValueError('model must be a RegisteredRetinotopyModel')
return model |
def enable_vxlan_feature(self, nexus_host, nve_int_num, src_intf):
"""Enable VXLAN on the switch."""
# Configure the "feature" commands and NVE interface
# (without "member" subcommand configuration).
# The Nexus 9K will not allow the "interface nve" configuration
# until the "feature nv overlay" command is issued and installed.
# To get around the N9K failing on the "interface nve" command
# send the two XML snippets down separately.
starttime = time.time()
# Do CLI 'feature nv overlay'
self.send_edit_string(nexus_host, snipp.PATH_VXLAN_STATE,
(snipp.BODY_VXLAN_STATE % "enabled"))
# Do CLI 'feature vn-segment-vlan-based'
self.send_edit_string(nexus_host, snipp.PATH_VNSEG_STATE,
(snipp.BODY_VNSEG_STATE % "enabled"))
# Do CLI 'int nve1' to Create nve1
self.send_edit_string(
nexus_host,
(snipp.PATH_NVE_CREATE % nve_int_num),
(snipp.BODY_NVE_CREATE % nve_int_num))
# Do CLI 'no shut
# source-interface loopback %s'
# beneath int nve1
self.send_edit_string(
nexus_host,
(snipp.PATH_NVE_CREATE % nve_int_num),
(snipp.BODY_NVE_ADD_LOOPBACK % ("enabled", src_intf)))
self.capture_and_print_timeshot(
starttime, "enable_vxlan",
            switch=nexus_host) | Enable VXLAN on the switch. | Below is the instruction that describes the task:
### Input:
Enable VXLAN on the switch.
### Response:
def enable_vxlan_feature(self, nexus_host, nve_int_num, src_intf):
"""Enable VXLAN on the switch."""
# Configure the "feature" commands and NVE interface
# (without "member" subcommand configuration).
# The Nexus 9K will not allow the "interface nve" configuration
# until the "feature nv overlay" command is issued and installed.
# To get around the N9K failing on the "interface nve" command
# send the two XML snippets down separately.
starttime = time.time()
# Do CLI 'feature nv overlay'
self.send_edit_string(nexus_host, snipp.PATH_VXLAN_STATE,
(snipp.BODY_VXLAN_STATE % "enabled"))
# Do CLI 'feature vn-segment-vlan-based'
self.send_edit_string(nexus_host, snipp.PATH_VNSEG_STATE,
(snipp.BODY_VNSEG_STATE % "enabled"))
# Do CLI 'int nve1' to Create nve1
self.send_edit_string(
nexus_host,
(snipp.PATH_NVE_CREATE % nve_int_num),
(snipp.BODY_NVE_CREATE % nve_int_num))
# Do CLI 'no shut
# source-interface loopback %s'
# beneath int nve1
self.send_edit_string(
nexus_host,
(snipp.PATH_NVE_CREATE % nve_int_num),
(snipp.BODY_NVE_ADD_LOOPBACK % ("enabled", src_intf)))
self.capture_and_print_timeshot(
starttime, "enable_vxlan",
switch=nexus_host) |
def source(self):
"""
Returns the single source name for a variant collection if it is unique,
otherwise raises an error.
"""
if len(self.sources) == 0:
raise ValueError("No source associated with %s" % self.__class__.__name__)
elif len(self.sources) > 1:
raise ValueError("Multiple sources for %s" % self.__class__.__name__)
return list(self.sources)[0] | Returns the single source name for a variant collection if it is unique,
otherwise raises an error. | Below is the instruction that describes the task:
### Input:
Returns the single source name for a variant collection if it is unique,
otherwise raises an error.
### Response:
def source(self):
"""
Returns the single source name for a variant collection if it is unique,
otherwise raises an error.
"""
if len(self.sources) == 0:
raise ValueError("No source associated with %s" % self.__class__.__name__)
elif len(self.sources) > 1:
raise ValueError("Multiple sources for %s" % self.__class__.__name__)
return list(self.sources)[0] |
def validate_location_instance_valid_for_arc(sender, instance, action, reverse, pk_set, *args, **kwargs):
'''
Evaluates attempts to add location instances to arc, ensuring they are from same outline.
'''
if action == 'pre_add':
if reverse:
# Fetch arc definition through link.
for apk in pk_set:
arc_node = ArcElementNode.objects.get(pk=apk)
if arc_node.parent_outline != instance.outline:
raise IntegrityError(_('Location instance must be from same outline as arc element.'))
else:
for lpk in pk_set:
loc_instance = LocationInstance.objects.get(pk=lpk)
if loc_instance.outline != instance.parent_outline:
                    raise IntegrityError(_('Location Instance must be from the same outline as arc element.')) | Evaluates attempts to add location instances to arc, ensuring they are from same outline. | Below is the instruction that describes the task:
### Input:
Evaluates attempts to add location instances to arc, ensuring they are from same outline.
### Response:
def validate_location_instance_valid_for_arc(sender, instance, action, reverse, pk_set, *args, **kwargs):
'''
Evaluates attempts to add location instances to arc, ensuring they are from same outline.
'''
if action == 'pre_add':
if reverse:
# Fetch arc definition through link.
for apk in pk_set:
arc_node = ArcElementNode.objects.get(pk=apk)
if arc_node.parent_outline != instance.outline:
raise IntegrityError(_('Location instance must be from same outline as arc element.'))
else:
for lpk in pk_set:
loc_instance = LocationInstance.objects.get(pk=lpk)
if loc_instance.outline != instance.parent_outline:
raise IntegrityError(_('Location Instance must be from the same outline as arc element.')) |
def find_one(cls, *args, **kw):
"""Get a single document from the collection this class is bound to.
Additional arguments are processed according to `_prepare_find` prior to passing to PyMongo, where positional
parameters are interpreted as query fragments, parametric keyword arguments combined, and other keyword
arguments passed along with minor transformation.
Automatically calls `to_mongo` with the retrieved data.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find_one
"""
if len(args) == 1 and not isinstance(args[0], Filter):
args = (getattr(cls, cls.__pk__) == args[0], )
Doc, collection, query, options = cls._prepare_find(*args, **kw)
result = Doc.from_mongo(collection.find_one(query, **options))
return result | Get a single document from the collection this class is bound to.
Additional arguments are processed according to `_prepare_find` prior to passing to PyMongo, where positional
parameters are interpreted as query fragments, parametric keyword arguments combined, and other keyword
arguments passed along with minor transformation.
Automatically calls `to_mongo` with the retrieved data.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find_one | Below is the instruction that describes the task:
### Input:
Get a single document from the collection this class is bound to.
Additional arguments are processed according to `_prepare_find` prior to passing to PyMongo, where positional
parameters are interpreted as query fragments, parametric keyword arguments combined, and other keyword
arguments passed along with minor transformation.
Automatically calls `to_mongo` with the retrieved data.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find_one
### Response:
def find_one(cls, *args, **kw):
"""Get a single document from the collection this class is bound to.
Additional arguments are processed according to `_prepare_find` prior to passing to PyMongo, where positional
parameters are interpreted as query fragments, parametric keyword arguments combined, and other keyword
arguments passed along with minor transformation.
Automatically calls `to_mongo` with the retrieved data.
https://api.mongodb.com/python/current/api/pymongo/collection.html#pymongo.collection.Collection.find_one
"""
if len(args) == 1 and not isinstance(args[0], Filter):
args = (getattr(cls, cls.__pk__) == args[0], )
Doc, collection, query, options = cls._prepare_find(*args, **kw)
result = Doc.from_mongo(collection.find_one(query, **options))
return result |
def onCancelButton(self, event):
"""
Quit grid with warning if unsaved changes present
"""
if self.grid.changes:
dlg1 = wx.MessageDialog(self, caption="Message:",
message="Are you sure you want to exit this grid?\nYour changes will not be saved.\n ",
style=wx.OK|wx.CANCEL)
result = dlg1.ShowModal()
if result == wx.ID_OK:
dlg1.Destroy()
self.Destroy()
else:
self.Destroy()
if self.main_frame:
self.main_frame.Show()
            self.main_frame.Raise() | Quit grid with warning if unsaved changes present | Below is the instruction that describes the task:
### Input:
Quit grid with warning if unsaved changes present
### Response:
def onCancelButton(self, event):
"""
Quit grid with warning if unsaved changes present
"""
if self.grid.changes:
dlg1 = wx.MessageDialog(self, caption="Message:",
message="Are you sure you want to exit this grid?\nYour changes will not be saved.\n ",
style=wx.OK|wx.CANCEL)
result = dlg1.ShowModal()
if result == wx.ID_OK:
dlg1.Destroy()
self.Destroy()
else:
self.Destroy()
if self.main_frame:
self.main_frame.Show()
self.main_frame.Raise() |
def find_node_type(ast, node_type):
"""Return list of array references in AST."""
if type(ast) is node_type:
return [ast]
elif type(ast) is list:
return reduce(operator.add, list(map(lambda a: find_node_type(a, node_type), ast)), [])
elif ast is None:
return []
else:
return reduce(operator.add,
                      [find_node_type(o[1], node_type) for o in ast.children()], []) | Return list of array references in AST. | Below is the instruction that describes the task:
### Input:
Return list of array references in AST.
### Response:
def find_node_type(ast, node_type):
"""Return list of array references in AST."""
if type(ast) is node_type:
return [ast]
elif type(ast) is list:
return reduce(operator.add, list(map(lambda a: find_node_type(a, node_type), ast)), [])
elif ast is None:
return []
else:
return reduce(operator.add,
[find_node_type(o[1], node_type) for o in ast.children()], []) |
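
A hedged usage sketch: find_node_type only needs nodes exposing a children() method that yields (name, node) pairs, which pycparser's c_ast nodes do. This assumes pycparser is installed and that reduce and operator are imported at module level as in the original source:

from pycparser import c_parser, c_ast

src = "void f(double *a, double *b) { int i; for (i = 0; i < 10; i++) a[i] = b[i]; }"
ast = c_parser.CParser().parse(src)
refs = find_node_type(ast, c_ast.ArrayRef)
print(len(refs))  # 2 array references: a[i] and b[i]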
def zoom2D(xi, yi, zi, xi_zoom=3., yi_zoom=3., order=3, mode="nearest", cval=0.):
"""Zoom a 2D array, with axes.
Parameters
----------
xi : 1D array
x axis points.
yi : 1D array
y axis points.
zi : 2D array
array values. Shape of (x, y).
xi_zoom : float (optional)
Zoom factor along x axis. Default is 3.
yi_zoom : float (optional)
Zoom factor along y axis. Default is 3.
order : int (optional)
The order of the spline interpolation, between 0 and 5. Default is 3.
mode : {'constant', 'nearest', 'reflect', or 'wrap'}
Points outside the boundaries of the input are filled according to the
given mode. Default is nearest.
cval : scalar (optional)
Value used for constant mode. Default is 0.0.
"""
xi = ndimage.interpolation.zoom(xi, xi_zoom, order=order, mode="nearest")
yi = ndimage.interpolation.zoom(yi, yi_zoom, order=order, mode="nearest")
zi = ndimage.interpolation.zoom(zi, (xi_zoom, yi_zoom), order=order, mode=mode, cval=cval)
return xi, yi, zi | Zoom a 2D array, with axes.
Parameters
----------
xi : 1D array
x axis points.
yi : 1D array
y axis points.
zi : 2D array
array values. Shape of (x, y).
xi_zoom : float (optional)
Zoom factor along x axis. Default is 3.
yi_zoom : float (optional)
Zoom factor along y axis. Default is 3.
order : int (optional)
The order of the spline interpolation, between 0 and 5. Default is 3.
mode : {'constant', 'nearest', 'reflect', or 'wrap'}
Points outside the boundaries of the input are filled according to the
given mode. Default is nearest.
cval : scalar (optional)
Value used for constant mode. Default is 0.0. | Below is the instruction that describes the task:
### Input:
Zoom a 2D array, with axes.
Parameters
----------
xi : 1D array
x axis points.
yi : 1D array
y axis points.
zi : 2D array
array values. Shape of (x, y).
xi_zoom : float (optional)
Zoom factor along x axis. Default is 3.
yi_zoom : float (optional)
Zoom factor along y axis. Default is 3.
order : int (optional)
The order of the spline interpolation, between 0 and 5. Default is 3.
mode : {'constant', 'nearest', 'reflect', or 'wrap'}
Points outside the boundaries of the input are filled according to the
given mode. Default is nearest.
cval : scalar (optional)
Value used for constant mode. Default is 0.0.
### Response:
def zoom2D(xi, yi, zi, xi_zoom=3., yi_zoom=3., order=3, mode="nearest", cval=0.):
"""Zoom a 2D array, with axes.
Parameters
----------
xi : 1D array
x axis points.
yi : 1D array
y axis points.
zi : 2D array
array values. Shape of (x, y).
xi_zoom : float (optional)
Zoom factor along x axis. Default is 3.
yi_zoom : float (optional)
Zoom factor along y axis. Default is 3.
order : int (optional)
The order of the spline interpolation, between 0 and 5. Default is 3.
mode : {'constant', 'nearest', 'reflect', or 'wrap'}
Points outside the boundaries of the input are filled according to the
given mode. Default is nearest.
cval : scalar (optional)
Value used for constant mode. Default is 0.0.
"""
xi = ndimage.interpolation.zoom(xi, xi_zoom, order=order, mode="nearest")
yi = ndimage.interpolation.zoom(yi, yi_zoom, order=order, mode="nearest")
zi = ndimage.interpolation.zoom(zi, (xi_zoom, yi_zoom), order=order, mode=mode, cval=cval)
return xi, yi, zi |
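
A short usage sketch, assuming zoom2D is defined as above, that the module-level `from scipy import ndimage` import it relies on is present, and that the installed scipy version still exposes ndimage.interpolation; the axes and values are made up:

import numpy as np

xi = np.linspace(0.0, 1.0, 5)
yi = np.linspace(0.0, 2.0, 4)
zi = np.outer(np.sin(xi), np.cos(yi))      # shape (5, 4), i.e. (x, y)

xi2, yi2, zi2 = zoom2D(xi, yi, zi, xi_zoom=2., yi_zoom=2.)
print(zi.shape, zi2.shape)                 # (5, 4) (10, 8)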
def V_vertical_torispherical_concave(D, f, k, h):
r'''Calculates volume of a vertical tank with a concave torispherical bottom,
according to [1]_. No provision for the top of the tank is made here.
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_1(h=a_1 + a_2 -h),\; 0 \le h < a_2
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_2(h=a_1 + a_2 -h),\; a_2 \le h < a_1 + a_2
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + 0,\; h \ge a_1 + a_2
.. math::
v_1 = \frac{\pi}{4}\left(\frac{2a_1^3}{3} + \frac{a_1D_1^2}{2}\right)
+\pi u\left[\left(\frac{D}{2}-kD\right)^2 +s\right]
+ \frac{\pi tu^2}{2} - \frac{\pi u^3}{3} + \pi D(1-2k)\left[
\frac{2u-t}{4}\sqrt{s+tu-u^2} + \frac{t\sqrt{s}}{4}
+ \frac{k^2D^2}{2}\left(\cos^{-1}\frac{t-2u}{2kD}-\alpha\right)\right]
.. math::
v_2 = \frac{\pi h^2}{4}\left(2a_1 + \frac{D_1^2}{2a_1} - \frac{4h}{3}\right)
.. math::
\alpha = \sin^{-1}\frac{1-2k}{2(f-k)}
.. math::
a_1 = fD(1-\cos\alpha)
.. math::
a_2 = kD\cos\alpha
.. math::
D_1 = 2fD\sin\alpha
.. math::
s = (kD\sin\alpha)^2
.. math::
t = 2a_2
.. math::
u = h - fD(1-\cos\alpha)
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
f : float
Dish-radius parameter; fD = dish radius [1/m]
k : float
knuckle-radius parameter ; kD = knuckle radius [1/m]
h : float
Height, as measured up to where the fluid ends, [m]
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_vertical_torispherical_concave(D=113., f=0.71, k=0.081, h=15)/231
103.88569287163769
References
----------
.. [1] Jones, D. "Compute Fluid Volumes in Vertical Tanks." Chemical
Processing. December 18, 2003.
http://www.chemicalprocessing.com/articles/2003/193/
'''
alpha = asin((1-2*k)/(2.*(f-k)))
a1 = f*D*(1-cos(alpha))
a2 = k*D*cos(alpha)
D1 = 2*f*D*sin(alpha)
s = (k*D*sin(alpha))**2
t = 2*a2
def V1(h):
u = h-f*D*(1-cos(alpha))
v1 = pi/4*(2*a1**3/3. + a1*D1**2/2.) + pi*u*((D/2.-k*D)**2 +s)
v1 += pi*t*u**2/2. - pi*u**3/3.
v1 += pi*D*(1-2*k)*((2*u-t)/4.*(s+t*u-u**2)**0.5 + t*s**0.5/4.
+ k**2*D**2/2.*(acos((t-2*u)/(2*k*D)) -alpha))
return v1
def V2(h):
v2 = pi*h**2/4.*(2*a1 + D1**2/(2.*a1) - 4*h/3.)
return v2
if 0 <= h < a2:
Vf = pi*D**2*h/4 - V1(a1+a2) + V1(a1+a2-h)
elif a2 <= h < a1 + a2:
Vf = pi*D**2*h/4 - V1(a1+a2) + V2(a1+a2-h)
else:
Vf = pi*D**2*h/4 - V1(a1+a2)
return Vf | r'''Calculates volume of a vertical tank with a concave torispherical bottom,
according to [1]_. No provision for the top of the tank is made here.
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_1(h=a_1 + a_2 -h),\; 0 \le h < a_2
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_2(h=a_1 + a_2 -h),\; a_2 \le h < a_1 + a_2
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + 0,\; h \ge a_1 + a_2
.. math::
v_1 = \frac{\pi}{4}\left(\frac{2a_1^3}{3} + \frac{a_1D_1^2}{2}\right)
+\pi u\left[\left(\frac{D}{2}-kD\right)^2 +s\right]
+ \frac{\pi tu^2}{2} - \frac{\pi u^3}{3} + \pi D(1-2k)\left[
\frac{2u-t}{4}\sqrt{s+tu-u^2} + \frac{t\sqrt{s}}{4}
+ \frac{k^2D^2}{2}\left(\cos^{-1}\frac{t-2u}{2kD}-\alpha\right)\right]
.. math::
v_2 = \frac{\pi h^2}{4}\left(2a_1 + \frac{D_1^2}{2a_1} - \frac{4h}{3}\right)
.. math::
\alpha = \sin^{-1}\frac{1-2k}{2(f-k)}
.. math::
a_1 = fD(1-\cos\alpha)
.. math::
a_2 = kD\cos\alpha
.. math::
D_1 = 2fD\sin\alpha
.. math::
s = (kD\sin\alpha)^2
.. math::
t = 2a_2
.. math::
u = h - fD(1-\cos\alpha)
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
f : float
Dish-radius parameter; fD = dish radius [1/m]
k : float
knuckle-radius parameter ; kD = knuckle radius [1/m]
h : float
Height, as measured up to where the fluid ends, [m]
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_vertical_torispherical_concave(D=113., f=0.71, k=0.081, h=15)/231
103.88569287163769
References
----------
.. [1] Jones, D. "Compute Fluid Volumes in Vertical Tanks." Chemical
Processing. December 18, 2003.
http://www.chemicalprocessing.com/articles/2003/193/ | Below is the instruction that describes the task:
### Input:
r'''Calculates volume of a vertical tank with a concave torispherical bottom,
according to [1]_. No provision for the top of the tank is made here.
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_1(h=a_1 + a_2 -h),\; 0 \le h < a_2
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_2(h=a_1 + a_2 -h),\; a_2 \le h < a_1 + a_2
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + 0,\; h \ge a_1 + a_2
.. math::
v_1 = \frac{\pi}{4}\left(\frac{2a_1^3}{3} + \frac{a_1D_1^2}{2}\right)
+\pi u\left[\left(\frac{D}{2}-kD\right)^2 +s\right]
+ \frac{\pi tu^2}{2} - \frac{\pi u^3}{3} + \pi D(1-2k)\left[
\frac{2u-t}{4}\sqrt{s+tu-u^2} + \frac{t\sqrt{s}}{4}
+ \frac{k^2D^2}{2}\left(\cos^{-1}\frac{t-2u}{2kD}-\alpha\right)\right]
.. math::
v_2 = \frac{\pi h^2}{4}\left(2a_1 + \frac{D_1^2}{2a_1} - \frac{4h}{3}\right)
.. math::
\alpha = \sin^{-1}\frac{1-2k}{2(f-k)}
.. math::
a_1 = fD(1-\cos\alpha)
.. math::
a_2 = kD\cos\alpha
.. math::
D_1 = 2fD\sin\alpha
.. math::
s = (kD\sin\alpha)^2
.. math::
t = 2a_2
.. math::
u = h - fD(1-\cos\alpha)
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
f : float
Dish-radius parameter; fD = dish radius [1/m]
k : float
knuckle-radius parameter ; kD = knuckle radius [1/m]
h : float
Height, as measured up to where the fluid ends, [m]
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_vertical_torispherical_concave(D=113., f=0.71, k=0.081, h=15)/231
103.88569287163769
References
----------
.. [1] Jones, D. "Compute Fluid Volumes in Vertical Tanks." Chemical
Processing. December 18, 2003.
http://www.chemicalprocessing.com/articles/2003/193/
### Response:
def V_vertical_torispherical_concave(D, f, k, h):
r'''Calculates volume of a vertical tank with a concave torispherical bottom,
according to [1]_. No provision for the top of the tank is made here.
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_1(h=a_1 + a_2 -h),\; 0 \le h < a_2
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + v_2(h=a_1 + a_2 -h),\; a_2 \le h < a_1 + a_2
.. math::
V = \frac{\pi D^2 h}{4} - v_1(h=a_1+a_2) + 0,\; h \ge a_1 + a_2
.. math::
v_1 = \frac{\pi}{4}\left(\frac{2a_1^3}{3} + \frac{a_1D_1^2}{2}\right)
+\pi u\left[\left(\frac{D}{2}-kD\right)^2 +s\right]
+ \frac{\pi tu^2}{2} - \frac{\pi u^3}{3} + \pi D(1-2k)\left[
\frac{2u-t}{4}\sqrt{s+tu-u^2} + \frac{t\sqrt{s}}{4}
+ \frac{k^2D^2}{2}\left(\cos^{-1}\frac{t-2u}{2kD}-\alpha\right)\right]
.. math::
v_2 = \frac{\pi h^2}{4}\left(2a_1 + \frac{D_1^2}{2a_1} - \frac{4h}{3}\right)
.. math::
\alpha = \sin^{-1}\frac{1-2k}{2(f-k)}
.. math::
a_1 = fD(1-\cos\alpha)
.. math::
a_2 = kD\cos\alpha
.. math::
D_1 = 2fD\sin\alpha
.. math::
s = (kD\sin\alpha)^2
.. math::
t = 2a_2
.. math::
u = h - fD(1-\cos\alpha)
Parameters
----------
D : float
Diameter of the main cylindrical section, [m]
f : float
Dish-radius parameter; fD = dish radius [1/m]
k : float
knuckle-radius parameter ; kD = knuckle radius [1/m]
h : float
Height, as measured up to where the fluid ends, [m]
Returns
-------
V : float
Volume [m^3]
Examples
--------
Matching example from [1]_, with inputs in inches and volume in gallons.
>>> V_vertical_torispherical_concave(D=113., f=0.71, k=0.081, h=15)/231
103.88569287163769
References
----------
.. [1] Jones, D. "Compute Fluid Volumes in Vertical Tanks." Chemical
Processing. December 18, 2003.
http://www.chemicalprocessing.com/articles/2003/193/
'''
alpha = asin((1-2*k)/(2.*(f-k)))
a1 = f*D*(1-cos(alpha))
a2 = k*D*cos(alpha)
D1 = 2*f*D*sin(alpha)
s = (k*D*sin(alpha))**2
t = 2*a2
def V1(h):
u = h-f*D*(1-cos(alpha))
v1 = pi/4*(2*a1**3/3. + a1*D1**2/2.) + pi*u*((D/2.-k*D)**2 +s)
v1 += pi*t*u**2/2. - pi*u**3/3.
v1 += pi*D*(1-2*k)*((2*u-t)/4.*(s+t*u-u**2)**0.5 + t*s**0.5/4.
+ k**2*D**2/2.*(acos((t-2*u)/(2*k*D)) -alpha))
return v1
def V2(h):
v2 = pi*h**2/4.*(2*a1 + D1**2/(2.*a1) - 4*h/3.)
return v2
if 0 <= h < a2:
Vf = pi*D**2*h/4 - V1(a1+a2) + V1(a1+a2-h)
elif a2 <= h < a1 + a2:
Vf = pi*D**2*h/4 - V1(a1+a2) + V2(a1+a2-h)
else:
Vf = pi*D**2*h/4 - V1(a1+a2)
return Vf |
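
Reproducing the docstring's worked example (inputs in inches, result converted to US gallons), assuming the function above is importable:

V_in3 = V_vertical_torispherical_concave(D=113., f=0.71, k=0.081, h=15)
print(V_in3 / 231)    # ~103.886, matching the docstring example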
def fly(self):
"""
Generate doc tree.
"""
dst_dir = Path(self.conf_file).parent.abspath
package_dir = Path(dst_dir, self.package.shortname)
# delete existing api document
try:
if package_dir.exists():
shutil.rmtree(package_dir.abspath)
except Exception as e:
print("'%s' can't be removed! Error: %s" % (package_dir, e))
# create .rst files
for pkg, parent, sub_packages, sub_modules in self.package.walk():
if not is_ignored(pkg, self.ignored_package):
dir_path = Path(*([dst_dir, ] + pkg.fullname.split(".")))
init_path = Path(dir_path, "__init__.rst")
make_dir(dir_path.abspath)
make_file(
init_path.abspath,
self.generate_package_content(pkg),
)
for mod in sub_modules:
if not is_ignored(mod, self.ignored_package):
module_path = Path(dir_path, mod.shortname + ".rst")
make_file(
module_path.abspath,
self.generate_module_content(mod),
                    ) | Generate doc tree. | Below is the instruction that describes the task:
### Input:
Generate doc tree.
### Response:
def fly(self):
"""
Generate doc tree.
"""
dst_dir = Path(self.conf_file).parent.abspath
package_dir = Path(dst_dir, self.package.shortname)
# delete existing api document
try:
if package_dir.exists():
shutil.rmtree(package_dir.abspath)
except Exception as e:
print("'%s' can't be removed! Error: %s" % (package_dir, e))
# create .rst files
for pkg, parent, sub_packages, sub_modules in self.package.walk():
if not is_ignored(pkg, self.ignored_package):
dir_path = Path(*([dst_dir, ] + pkg.fullname.split(".")))
init_path = Path(dir_path, "__init__.rst")
make_dir(dir_path.abspath)
make_file(
init_path.abspath,
self.generate_package_content(pkg),
)
for mod in sub_modules:
if not is_ignored(mod, self.ignored_package):
module_path = Path(dir_path, mod.shortname + ".rst")
make_file(
module_path.abspath,
self.generate_module_content(mod),
) |
def render(self, renderer=None, **kwargs):
"""Render the navigational item using a renderer.
:param renderer: An object implementing the :class:`~.Renderer`
interface.
:return: A markupsafe string with the rendered result.
"""
return Markup(get_renderer(current_app, renderer)(**kwargs).visit(
self)) | Render the navigational item using a renderer.
:param renderer: An object implementing the :class:`~.Renderer`
interface.
:return: A markupsafe string with the rendered result. | Below is the instruction that describes the task:
### Input:
Render the navigational item using a renderer.
:param renderer: An object implementing the :class:`~.Renderer`
interface.
:return: A markupsafe string with the rendered result.
### Response:
def render(self, renderer=None, **kwargs):
"""Render the navigational item using a renderer.
:param renderer: An object implementing the :class:`~.Renderer`
interface.
:return: A markupsafe string with the rendered result.
"""
return Markup(get_renderer(current_app, renderer)(**kwargs).visit(
self)) |
def main():
"""
NAME
fishqq.py
DESCRIPTION
makes qq plot from dec,inc input data
INPUT FORMAT
takes dec/inc pairs in space delimited file
SYNTAX
fishqq.py [command line options]
OPTIONS
-h help message
-f FILE, specify file on command line
-F FILE, specify output file for statistics
-sav save and quit [saves as input file name plus fmt extension]
-fmt specify format for output [png, eps, svg, pdf]
OUTPUT:
Dec Inc N Mu Mu_crit Me Me_crit Y/N
where direction is the principal component and Y/N is Fisherian or not
separate lines for each mode with N >=10 (N and R)
"""
fmt,plot='svg',0
outfile=""
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
elif '-f' in sys.argv: # ask for filename
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
outfile=open(sys.argv[ind+1],'w') # open output file
if '-sav' in sys.argv: plot=1
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
DIs,nDIs,rDIs= [],[],[] # set up list for data
for line in data: # read in the data from standard input
if '\t' in line:
rec=line.split('\t') # split each line on space to get records
else:
rec=line.split() # split each line on space to get records
DIs.append([float(rec[0]),float(rec[1])]) # append data to Inc
# split into two modes
ppars=pmag.doprinc(DIs) # get principal directions
for rec in DIs:
angle=pmag.angle([rec[0],rec[1]],[ppars['dec'],ppars['inc']])
if angle>90.:
rDIs.append(rec)
else:
nDIs.append(rec)
#
if len(rDIs) >=10 or len(nDIs) >=10:
D1,I1=[],[]
QQ={'unf1':1,'exp1':2}
pmagplotlib.plot_init(QQ['unf1'],5,5)
pmagplotlib.plot_init(QQ['exp1'],5,5)
if len(nDIs) < 10:
ppars=pmag.doprinc(rDIs) # get principal directions
Drbar,Irbar=ppars['dec']-180.,-ppars['inc']
Nr=len(rDIs)
for di in rDIs:
d,irot=pmag.dotilt(di[0],di[1],Drbar-180.,90.-Irbar) # rotate to mean
drot=d-180.
if drot<0:drot=drot+360.
D1.append(drot)
I1.append(irot)
Dtit='Mode 2 Declinations'
Itit='Mode 2 Inclinations'
else:
ppars=pmag.doprinc(nDIs) # get principal directions
Dnbar,Inbar=ppars['dec'],ppars['inc']
Nn=len(nDIs)
for di in nDIs:
d,irot=pmag.dotilt(di[0],di[1],Dnbar-180.,90.-Inbar) # rotate to mean
drot=d-180.
if drot<0:drot=drot+360.
D1.append(drot)
I1.append(irot)
Dtit='Mode 1 Declinations'
Itit='Mode 1 Inclinations'
Mu_n,Mu_ncr=pmagplotlib.plot_qq_unf(QQ['unf1'],D1,Dtit) # make plot
Me_n,Me_ncr=pmagplotlib.plot_qq_exp(QQ['exp1'],I1,Itit) # make plot
#print Mu_n,Mu_ncr,Me_n, Me_ncr
if outfile!="":
# Dec Inc N Mu Mu_crit Me Me_crit Y/N
if Mu_n<=Mu_ncr and Me_n<=Me_ncr:
F='Y'
else:
F='N'
outstring='%7.1f %7.1f %i %5.3f %5.3f %5.3f %5.3f %s \n'%(Dnbar,Inbar,Nn,Mu_n,Mu_ncr,Me_n,Me_ncr,F)
outfile.write(outstring)
else:
print('you need N> 10 for at least one mode')
sys.exit()
if len(rDIs)>10 and len(nDIs)>10:
D2,I2=[],[]
QQ['unf2']=3
QQ['exp2']=4
pmagplotlib.plot_init(QQ['unf2'],5,5)
pmagplotlib.plot_init(QQ['exp2'],5,5)
ppars=pmag.doprinc(rDIs) # get principal directions
Drbar,Irbar=ppars['dec']-180.,-ppars['inc']
Nr=len(rDIs)
for di in rDIs:
d,irot=pmag.dotilt(di[0],di[1],Drbar-180.,90.-Irbar) # rotate to mean
drot=d-180.
if drot<0:drot=drot+360.
D2.append(drot)
I2.append(irot)
Dtit='Mode 2 Declinations'
Itit='Mode 2 Inclinations'
Mu_r,Mu_rcr=pmagplotlib.plot_qq_unf(QQ['unf2'],D2,Dtit) # make plot
Me_r,Me_rcr=pmagplotlib.plot_qq_exp(QQ['exp2'],I2,Itit) # make plot
if outfile!="":
# Dec Inc N Mu Mu_crit Me Me_crit Y/N
if Mu_r<=Mu_rcr and Me_r<=Me_rcr:
F='Y'
else:
F='N'
outstring='%7.1f %7.1f %i %5.3f %5.3f %5.3f %5.3f %s \n'%(Drbar,Irbar,Nr,Mu_r,Mu_rcr,Me_r,Me_rcr,F)
outfile.write(outstring)
files={}
for key in list(QQ.keys()):
files[key]=file+'_'+key+'.'+fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles={}
titles['eq']='Equal Area Plot'
EQ = pmagplotlib.add_borders(EQ,titles,black,purple)
pmagplotlib.save_plots(QQ,files)
elif plot==1:
pmagplotlib.save_plots(QQ,files)
else:
pmagplotlib.draw_figs(QQ)
ans=input(" S[a]ve to save plot, [q]uit without saving: ")
if ans=="a": pmagplotlib.save_plots(QQ,files) | NAME
fishqq.py
DESCRIPTION
makes qq plot from dec,inc input data
INPUT FORMAT
takes dec/inc pairs in space delimited file
SYNTAX
fishqq.py [command line options]
OPTIONS
-h help message
-f FILE, specify file on command line
-F FILE, specify output file for statistics
-sav save and quit [saves as input file name plus fmt extension]
-fmt specify format for output [png, eps, svg, pdf]
OUTPUT:
Dec Inc N Mu Mu_crit Me Me_crit Y/N
where direction is the principal component and Y/N is Fisherian or not
separate lines for each mode with N >=10 (N and R) | Below is the instruction that describes the task:
### Input:
NAME
fishqq.py
DESCRIPTION
makes qq plot from dec,inc input data
INPUT FORMAT
takes dec/inc pairs in space delimited file
SYNTAX
fishqq.py [command line options]
OPTIONS
-h help message
-f FILE, specify file on command line
-F FILE, specify output file for statistics
-sav save and quit [saves as input file name plus fmt extension]
-fmt specify format for output [png, eps, svg, pdf]
OUTPUT:
Dec Inc N Mu Mu_crit Me Me_crit Y/N
where direction is the principal component and Y/N is Fisherian or not
separate lines for each mode with N >=10 (N and R)
### Response:
def main():
"""
NAME
fishqq.py
DESCRIPTION
makes qq plot from dec,inc input data
INPUT FORMAT
takes dec/inc pairs in space delimited file
SYNTAX
fishqq.py [command line options]
OPTIONS
-h help message
-f FILE, specify file on command line
-F FILE, specify output file for statistics
-sav save and quit [saves as input file name plus fmt extension]
-fmt specify format for output [png, eps, svg, pdf]
OUTPUT:
Dec Inc N Mu Mu_crit Me Me_crit Y/N
where direction is the principal component and Y/N is Fisherian or not
separate lines for each mode with N >=10 (N and R)
"""
fmt,plot='svg',0
outfile=""
if '-h' in sys.argv: # check if help is needed
print(main.__doc__)
sys.exit() # graceful quit
elif '-f' in sys.argv: # ask for filename
ind=sys.argv.index('-f')
file=sys.argv[ind+1]
f=open(file,'r')
data=f.readlines()
if '-F' in sys.argv:
ind=sys.argv.index('-F')
outfile=open(sys.argv[ind+1],'w') # open output file
if '-sav' in sys.argv: plot=1
if '-fmt' in sys.argv:
ind=sys.argv.index('-fmt')
fmt=sys.argv[ind+1]
DIs,nDIs,rDIs= [],[],[] # set up list for data
for line in data: # read in the data from standard input
if '\t' in line:
rec=line.split('\t') # split each line on space to get records
else:
rec=line.split() # split each line on space to get records
DIs.append([float(rec[0]),float(rec[1])]) # append data to Inc
# split into two modes
ppars=pmag.doprinc(DIs) # get principal directions
for rec in DIs:
angle=pmag.angle([rec[0],rec[1]],[ppars['dec'],ppars['inc']])
if angle>90.:
rDIs.append(rec)
else:
nDIs.append(rec)
#
if len(rDIs) >=10 or len(nDIs) >=10:
D1,I1=[],[]
QQ={'unf1':1,'exp1':2}
pmagplotlib.plot_init(QQ['unf1'],5,5)
pmagplotlib.plot_init(QQ['exp1'],5,5)
if len(nDIs) < 10:
ppars=pmag.doprinc(rDIs) # get principal directions
Drbar,Irbar=ppars['dec']-180.,-ppars['inc']
Nr=len(rDIs)
for di in rDIs:
d,irot=pmag.dotilt(di[0],di[1],Drbar-180.,90.-Irbar) # rotate to mean
drot=d-180.
if drot<0:drot=drot+360.
D1.append(drot)
I1.append(irot)
Dtit='Mode 2 Declinations'
Itit='Mode 2 Inclinations'
else:
ppars=pmag.doprinc(nDIs) # get principal directions
Dnbar,Inbar=ppars['dec'],ppars['inc']
Nn=len(nDIs)
for di in nDIs:
d,irot=pmag.dotilt(di[0],di[1],Dnbar-180.,90.-Inbar) # rotate to mean
drot=d-180.
if drot<0:drot=drot+360.
D1.append(drot)
I1.append(irot)
Dtit='Mode 1 Declinations'
Itit='Mode 1 Inclinations'
Mu_n,Mu_ncr=pmagplotlib.plot_qq_unf(QQ['unf1'],D1,Dtit) # make plot
Me_n,Me_ncr=pmagplotlib.plot_qq_exp(QQ['exp1'],I1,Itit) # make plot
#print Mu_n,Mu_ncr,Me_n, Me_ncr
if outfile!="":
# Dec Inc N Mu Mu_crit Me Me_crit Y/N
if Mu_n<=Mu_ncr and Me_n<=Me_ncr:
F='Y'
else:
F='N'
outstring='%7.1f %7.1f %i %5.3f %5.3f %5.3f %5.3f %s \n'%(Dnbar,Inbar,Nn,Mu_n,Mu_ncr,Me_n,Me_ncr,F)
outfile.write(outstring)
else:
print('you need N> 10 for at least one mode')
sys.exit()
if len(rDIs)>10 and len(nDIs)>10:
D2,I2=[],[]
QQ['unf2']=3
QQ['exp2']=4
pmagplotlib.plot_init(QQ['unf2'],5,5)
pmagplotlib.plot_init(QQ['exp2'],5,5)
ppars=pmag.doprinc(rDIs) # get principal directions
Drbar,Irbar=ppars['dec']-180.,-ppars['inc']
Nr=len(rDIs)
for di in rDIs:
d,irot=pmag.dotilt(di[0],di[1],Drbar-180.,90.-Irbar) # rotate to mean
drot=d-180.
if drot<0:drot=drot+360.
D2.append(drot)
I2.append(irot)
Dtit='Mode 2 Declinations'
Itit='Mode 2 Inclinations'
Mu_r,Mu_rcr=pmagplotlib.plot_qq_unf(QQ['unf2'],D2,Dtit) # make plot
Me_r,Me_rcr=pmagplotlib.plot_qq_exp(QQ['exp2'],I2,Itit) # make plot
if outfile!="":
# Dec Inc N Mu Mu_crit Me Me_crit Y/N
if Mu_r<=Mu_rcr and Me_r<=Me_rcr:
F='Y'
else:
F='N'
outstring='%7.1f %7.1f %i %5.3f %5.3f %5.3f %5.3f %s \n'%(Drbar,Irbar,Nr,Mu_r,Mu_rcr,Me_r,Me_rcr,F)
outfile.write(outstring)
files={}
for key in list(QQ.keys()):
files[key]=file+'_'+key+'.'+fmt
if pmagplotlib.isServer:
black = '#000000'
purple = '#800080'
titles={}
titles['eq']='Equal Area Plot'
EQ = pmagplotlib.add_borders(EQ,titles,black,purple)
pmagplotlib.save_plots(QQ,files)
elif plot==1:
pmagplotlib.save_plots(QQ,files)
else:
pmagplotlib.draw_figs(QQ)
ans=input(" S[a]ve to save plot, [q]uit without saving: ")
if ans=="a": pmagplotlib.save_plots(QQ,files) |
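
The mode split above (normal vs. reversed directions) hinges on the angle between each direction and the principal direction; below is a self-contained numpy sketch of just that step, using a fixed reference direction in place of pmag.doprinc and made-up dec/inc pairs:

import numpy as np

def to_xyz(dec, inc):
    d, i = np.radians(dec), np.radians(inc)
    return np.array([np.cos(i) * np.cos(d), np.cos(i) * np.sin(d), np.sin(i)])

ref = to_xyz(350.0, 55.0)                  # stand-in for the principal direction
dirs = [(355.0, 50.0), (10.0, 60.0), (175.0, -52.0)]

normal, reversed_ = [], []
for dec, inc in dirs:
    cosang = np.clip(np.dot(to_xyz(dec, inc), ref), -1.0, 1.0)
    angle = np.degrees(np.arccos(cosang))
    (reversed_ if angle > 90.0 else normal).append((dec, inc))

print(normal)      # [(355.0, 50.0), (10.0, 60.0)]
print(reversed_)   # [(175.0, -52.0)]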
def show_vcs_output_vcs_nodes_vcs_node_info_switch_fcf_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_vcs = ET.Element("show_vcs")
config = show_vcs
output = ET.SubElement(show_vcs, "output")
vcs_nodes = ET.SubElement(output, "vcs-nodes")
vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
switch_fcf_mac = ET.SubElement(vcs_node_info, "switch-fcf-mac")
switch_fcf_mac.text = kwargs.pop('switch_fcf_mac')
callback = kwargs.pop('callback', self._callback)
        return callback(config) | Auto Generated Code | Below is the instruction that describes the task:
### Input:
Auto Generated Code
### Response:
def show_vcs_output_vcs_nodes_vcs_node_info_switch_fcf_mac(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
show_vcs = ET.Element("show_vcs")
config = show_vcs
output = ET.SubElement(show_vcs, "output")
vcs_nodes = ET.SubElement(output, "vcs-nodes")
vcs_node_info = ET.SubElement(vcs_nodes, "vcs-node-info")
switch_fcf_mac = ET.SubElement(vcs_node_info, "switch-fcf-mac")
switch_fcf_mac.text = kwargs.pop('switch_fcf_mac')
callback = kwargs.pop('callback', self._callback)
return callback(config) |
def seoify_hyperlink(hyperlink):
"""Modify a hyperlink to make it SEO-friendly by replacing
hyphens with spaces and trimming multiple spaces.
:param hyperlink: URL to attempt to grab SEO from """
last_slash = hyperlink.rfind('/')
return re.sub(r' +|-', ' ', hyperlink[last_slash + 1:]) | Modify a hyperlink to make it SEO-friendly by replacing
hyphens with spaces and trimming multiple spaces.
:param hyperlink: URL to attempt to grab SEO from | Below is the instruction that describes the task:
### Input:
Modify a hyperlink to make it SEO-friendly by replacing
hyphens with spaces and trimming multiple spaces.
:param hyperlink: URL to attempt to grab SEO from
### Response:
def seoify_hyperlink(hyperlink):
"""Modify a hyperlink to make it SEO-friendly by replacing
hyphens with spaces and trimming multiple spaces.
:param hyperlink: URL to attempt to grab SEO from """
last_slash = hyperlink.rfind('/')
return re.sub(r' +|-', ' ', hyperlink[last_slash + 1:]) |
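
A quick check of the behaviour, assuming seoify_hyperlink is importable as defined above; the URLs are made up:

print(seoify_hyperlink("https://example.com/articles/my-sample-page"))
# my sample page
print(seoify_hyperlink("https://example.com/a--double"))
# a  double  (each hyphen becomes one space; only runs of spaces are collapsed)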
def update(self, scope, at=0):
"""Update scope. Add another scope to this one.
Args:
scope (Scope): Scope object
Kwargs:
at (int): Level to update
"""
if hasattr(scope, '_mixins') and not at:
self._mixins.update(scope._mixins)
self[at]['__variables__'].update(scope[at]['__variables__'])
self[at]['__blocks__'].extend(scope[at]['__blocks__'])
self[at]['__names__'].extend(scope[at]['__names__']) | Update scope. Add another scope to this one.
Args:
scope (Scope): Scope object
Kwargs:
at (int): Level to update | Below is the instruction that describes the task:
### Input:
Update scope. Add another scope to this one.
Args:
scope (Scope): Scope object
Kwargs:
at (int): Level to update
### Response:
def update(self, scope, at=0):
"""Update scope. Add another scope to this one.
Args:
scope (Scope): Scope object
Kwargs:
at (int): Level to update
"""
if hasattr(scope, '_mixins') and not at:
self._mixins.update(scope._mixins)
self[at]['__variables__'].update(scope[at]['__variables__'])
self[at]['__blocks__'].extend(scope[at]['__blocks__'])
self[at]['__names__'].extend(scope[at]['__names__']) |
def transaction_status(transaction):
"""Returns a FormattedItem describing the given transaction.
:param item: An object capable of having an active transaction
"""
if not transaction or not transaction.get('transactionStatus'):
return blank()
return FormattedItem(
transaction['transactionStatus'].get('name'),
transaction['transactionStatus'].get('friendlyName')) | Returns a FormattedItem describing the given transaction.
:param item: An object capable of having an active transaction | Below is the instruction that describes the task:
### Input:
Returns a FormattedItem describing the given transaction.
:param item: An object capable of having an active transaction
### Response:
def transaction_status(transaction):
"""Returns a FormattedItem describing the given transaction.
:param item: An object capable of having an active transaction
"""
if not transaction or not transaction.get('transactionStatus'):
return blank()
return FormattedItem(
transaction['transactionStatus'].get('name'),
transaction['transactionStatus'].get('friendlyName')) |
def listify_string(something):
"""Takes *something* and make it a list.
*something* is either a list of strings or a string, in which case the
function returns a list containing the string.
If *something* is None, an empty list is returned.
"""
if isinstance(something, (str, six.text_type)):
return [something]
elif something is not None:
return list(something)
else:
        return list() | Takes *something* and makes it a list.
*something* is either a list of strings or a string, in which case the
function returns a list containing the string.
If *something* is None, an empty list is returned. | Below is the instruction that describes the task:
### Input:
Takes *something* and makes it a list.
*something* is either a list of strings or a string, in which case the
function returns a list containing the string.
If *something* is None, an empty list is returned.
### Response:
def listify_string(something):
"""Takes *something* and make it a list.
*something* is either a list of strings or a string, in which case the
function returns a list containing the string.
If *something* is None, an empty list is returned.
"""
if isinstance(something, (str, six.text_type)):
return [something]
elif something is not None:
return list(something)
else:
return list() |
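
Behaviour of the three branches, assuming listify_string is importable as defined above (it relies on six for the Python 2 text type):

print(listify_string("platform_name"))    # ['platform_name']
print(listify_string(("a", "b")))         # ['a', 'b']
print(listify_string(None))               # []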
def get_gradebook_column_gradebook_assignment_session(self, proxy):
"""Gets the session for assigning gradebook column to gradebook mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnGradebookAssignmentSession)
- a ``GradebookColumnGradebookAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_gradebook_column_gradebook_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_gradebook_assignment()`` is
``true``.*
"""
if not self.supports_gradebook_column_gradebook_assignment():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.GradebookColumnGradebookAssignmentSession(proxy=proxy, runtime=self._runtime) | Gets the session for assigning gradebook column to gradebook mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnGradebookAssignmentSession)
- a ``GradebookColumnGradebookAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_gradebook_column_gradebook_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_gradebook_assignment()`` is
``true``.* | Below is the instruction that describes the task:
### Input:
Gets the session for assigning gradebook column to gradebook mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnGradebookAssignmentSession)
- a ``GradebookColumnGradebookAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_gradebook_column_gradebook_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_gradebook_assignment()`` is
``true``.*
### Response:
def get_gradebook_column_gradebook_assignment_session(self, proxy):
"""Gets the session for assigning gradebook column to gradebook mappings.
arg: proxy (osid.proxy.Proxy): a proxy
return: (osid.grading.GradebookColumnGradebookAssignmentSession)
- a ``GradebookColumnGradebookAssignmentSession``
raise: NullArgument - ``proxy`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unimplemented -
``supports_gradebook_column_gradebook_assignment()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_gradebook_assignment()`` is
``true``.*
"""
if not self.supports_gradebook_column_gradebook_assignment():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.GradebookColumnGradebookAssignmentSession(proxy=proxy, runtime=self._runtime) |
def profile_config_set(name, config_key, config_value,
remote_addr=None,
cert=None, key=None, verify_cert=True):
''' Set a profile config item.
name :
The name of the profile to set the config item to.
config_key :
The items key.
config_value :
Its items value.
remote_addr :
        A URL to a remote server; you also have to give cert and key if
        you provide remote_addr and it is a TCP address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
        Whether to verify the cert; this is True by default,
        but in most cases you will want to turn it off, as LXD
        normally uses self-signed certificates.
CLI Example:
.. code-block:: bash
$ salt '*' lxd.profile_config_set autostart boot.autostart 0
'''
profile = profile_get(
name,
remote_addr,
cert,
key,
verify_cert,
_raw=True
)
return _set_property_dict_item(
profile, 'config', config_key, config_value
) | Set a profile config item.
name :
The name of the profile to set the config item to.
config_key :
The items key.
config_value :
Its items value.
remote_addr :
A URL to a remote server; you also have to give cert and key if
you provide remote_addr and it is a TCP address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert; this is True by default,
but in most cases you will want to turn it off, as LXD
normally uses self-signed certificates.
CLI Example:
.. code-block:: bash
$ salt '*' lxd.profile_config_set autostart boot.autostart 0 | Below is the instruction that describes the task:
### Input:
Set a profile config item.
name :
The name of the profile to set the config item to.
config_key :
The items key.
config_value :
Its items value.
remote_addr :
A URL to a remote server; you also have to give cert and key if
you provide remote_addr and it is a TCP address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
        Whether to verify the cert. This is True by default, but in most
        cases you want to turn it off, as LXD normally uses self-signed
        certificates.
CLI Example:
.. code-block:: bash
$ salt '*' lxd.profile_config_set autostart boot.autostart 0
### Response:
def profile_config_set(name, config_key, config_value,
remote_addr=None,
cert=None, key=None, verify_cert=True):
''' Set a profile config item.
name :
The name of the profile to set the config item to.
config_key :
        The item's key.
    config_value :
        The item's value.
    remote_addr :
        A URL to a remote server. You also have to give cert and key if
        you provide remote_addr and it is a TCP address.
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
        Whether to verify the cert. This is True by default, but in most
        cases you want to turn it off, as LXD normally uses self-signed
        certificates.
CLI Example:
.. code-block:: bash
$ salt '*' lxd.profile_config_set autostart boot.autostart 0
'''
profile = profile_get(
name,
remote_addr,
cert,
key,
verify_cert,
_raw=True
)
return _set_property_dict_item(
profile, 'config', config_key, config_value
) |
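A hedged usage sketch for the salt execution module function above: in practice it is reached through salt's client layer rather than imported directly. The minion id and the exact arguments are placeholders taken from the CLI example, not part of the entry.

import salt.client

# Hypothetical minion id; arguments mirror the docstring's CLI example.
local = salt.client.LocalClient()
result = local.cmd("minion1", "lxd.profile_config_set",
                   ["autostart", "boot.autostart", "0"])
print(result)   # dict keyed by minion id with the module's return value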
def community_post_votes(self, post_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/votes#list-votes"
api_path = "/api/v2/community/posts/{post_id}/votes.json"
api_path = api_path.format(post_id=post_id)
return self.call(api_path, **kwargs) | https://developer.zendesk.com/rest_api/docs/help_center/votes#list-votes | Below is the the instruction that describes the task:
### Input:
https://developer.zendesk.com/rest_api/docs/help_center/votes#list-votes
### Response:
def community_post_votes(self, post_id, **kwargs):
"https://developer.zendesk.com/rest_api/docs/help_center/votes#list-votes"
api_path = "/api/v2/community/posts/{post_id}/votes.json"
api_path = api_path.format(post_id=post_id)
return self.call(api_path, **kwargs) |
def get_content_charset(self, failobj=None):
"""Return the charset parameter of the Content-Type header.
The returned string is always coerced to lower case. If there is no
Content-Type header, or if that header has no charset parameter,
failobj is returned.
"""
missing = object()
charset = self.get_param('charset', missing)
if charset is missing:
return failobj
if isinstance(charset, tuple):
# RFC 2231 encoded, so decode it, and it better end up as ascii.
pcharset = charset[0] or 'us-ascii'
try:
# LookupError will be raised if the charset isn't known to
# Python. UnicodeError will be raised if the encoded text
# contains a character not in the charset.
as_bytes = charset[2].encode('raw-unicode-escape')
charset = str(as_bytes, pcharset)
except (LookupError, UnicodeError):
charset = charset[2]
# charset characters must be in us-ascii range
try:
charset.encode('us-ascii')
except UnicodeError:
return failobj
# RFC 2046, $4.1.2 says charsets are not case sensitive
return charset.lower() | Return the charset parameter of the Content-Type header.
The returned string is always coerced to lower case. If there is no
Content-Type header, or if that header has no charset parameter,
failobj is returned. | Below is the the instruction that describes the task:
### Input:
Return the charset parameter of the Content-Type header.
The returned string is always coerced to lower case. If there is no
Content-Type header, or if that header has no charset parameter,
failobj is returned.
### Response:
def get_content_charset(self, failobj=None):
"""Return the charset parameter of the Content-Type header.
The returned string is always coerced to lower case. If there is no
Content-Type header, or if that header has no charset parameter,
failobj is returned.
"""
missing = object()
charset = self.get_param('charset', missing)
if charset is missing:
return failobj
if isinstance(charset, tuple):
# RFC 2231 encoded, so decode it, and it better end up as ascii.
pcharset = charset[0] or 'us-ascii'
try:
# LookupError will be raised if the charset isn't known to
# Python. UnicodeError will be raised if the encoded text
# contains a character not in the charset.
as_bytes = charset[2].encode('raw-unicode-escape')
charset = str(as_bytes, pcharset)
except (LookupError, UnicodeError):
charset = charset[2]
# charset characters must be in us-ascii range
try:
charset.encode('us-ascii')
except UnicodeError:
return failobj
# RFC 2046, $4.1.2 says charsets are not case sensitive
return charset.lower() |
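This reads like the standard library's email.message.Message.get_content_charset; a quick sketch of the documented behaviour (lower-cased charset, failobj fallback), checked against CPython's own implementation:

from email import message_from_string

msg = message_from_string("Content-Type: text/plain; charset=UTF-8\r\n\r\nhi")
print(msg.get_content_charset())              # 'utf-8' -- always lower-cased

plain = message_from_string("Content-Type: text/plain\r\n\r\nhi")
print(plain.get_content_charset("us-ascii"))  # no charset param -> failobj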
def permission_to_pyramid_acls(permissions):
"""
Returns a list of permissions in a format understood by pyramid
:param permissions:
:return:
"""
acls = []
for perm in permissions:
if perm.type == "user":
acls.append((Allow, perm.user.id, perm.perm_name))
elif perm.type == "group":
acls.append((Allow, "group:%s" % perm.group.id, perm.perm_name))
return acls | Returns a list of permissions in a format understood by pyramid
:param permissions:
:return: | Below is the the instruction that describes the task:
### Input:
Returns a list of permissions in a format understood by pyramid
:param permissions:
:return:
### Response:
def permission_to_pyramid_acls(permissions):
"""
Returns a list of permissions in a format understood by pyramid
:param permissions:
:return:
"""
acls = []
for perm in permissions:
if perm.type == "user":
acls.append((Allow, perm.user.id, perm.perm_name))
elif perm.type == "group":
acls.append((Allow, "group:%s" % perm.group.id, perm.perm_name))
return acls |
def run_rsync_project():
"""
    Copies the project from the git repository to its destination folder.
This has the nice side effect of rsync deleting all ``.pyc`` files and
removing other files that might have been left behind by sys admins messing
around on the server.
Usage::
fab <server> run_rsync_project
"""
excludes = ''
for exclude in settings.RSYNC_EXCLUDES:
excludes += " --exclude '{0}'".format(exclude)
command = "rsync -avz --stats --delete {0} {1} {2}".format(
excludes, settings.FAB_SETTING('SERVER_REPO_PROJECT_ROOT'),
settings.FAB_SETTING('SERVER_APP_ROOT'))
    run(command) | Copies the project from the git repository to its destination folder.
This has the nice side effect of rsync deleting all ``.pyc`` files and
removing other files that might have been left behind by sys admins messing
around on the server.
Usage::
fab <server> run_rsync_project | Below is the the instruction that describes the task:
### Input:
    Copies the project from the git repository to its destination folder.
This has the nice side effect of rsync deleting all ``.pyc`` files and
removing other files that might have been left behind by sys admins messing
around on the server.
Usage::
fab <server> run_rsync_project
### Response:
def run_rsync_project():
"""
    Copies the project from the git repository to its destination folder.
This has the nice side effect of rsync deleting all ``.pyc`` files and
removing other files that might have been left behind by sys admins messing
around on the server.
Usage::
fab <server> run_rsync_project
"""
excludes = ''
for exclude in settings.RSYNC_EXCLUDES:
excludes += " --exclude '{0}'".format(exclude)
command = "rsync -avz --stats --delete {0} {1} {2}".format(
excludes, settings.FAB_SETTING('SERVER_REPO_PROJECT_ROOT'),
settings.FAB_SETTING('SERVER_APP_ROOT'))
run(command) |
def refresh_modules(self, module_string=None, exact=True):
"""
Update modules.
if module_string is None all modules are refreshed
if module_string then modules with the exact name or those starting
with the given string depending on exact parameter will be refreshed.
If a module is an i3status one then we refresh i3status.
To prevent abuse, we rate limit this function to 100ms for full
refreshes.
"""
if not module_string:
if time.time() > (self.last_refresh_ts + 0.1):
self.last_refresh_ts = time.time()
else:
# rate limiting
return
update_i3status = False
for name, module in self.output_modules.items():
if (
module_string is None
or (exact and name == module_string)
or (not exact and name.startswith(module_string))
):
if module["type"] == "py3status":
if self.config["debug"]:
self.log("refresh py3status module {}".format(name))
module["module"].force_update()
else:
if self.config["debug"]:
self.log("refresh i3status module {}".format(name))
update_i3status = True
if update_i3status:
self.i3status_thread.refresh_i3status() | Update modules.
if module_string is None all modules are refreshed
if module_string then modules with the exact name or those starting
with the given string depending on exact parameter will be refreshed.
If a module is an i3status one then we refresh i3status.
To prevent abuse, we rate limit this function to 100ms for full
refreshes. | Below is the the instruction that describes the task:
### Input:
Update modules.
if module_string is None all modules are refreshed
if module_string then modules with the exact name or those starting
with the given string depending on exact parameter will be refreshed.
If a module is an i3status one then we refresh i3status.
To prevent abuse, we rate limit this function to 100ms for full
refreshes.
### Response:
def refresh_modules(self, module_string=None, exact=True):
"""
Update modules.
if module_string is None all modules are refreshed
if module_string then modules with the exact name or those starting
with the given string depending on exact parameter will be refreshed.
If a module is an i3status one then we refresh i3status.
To prevent abuse, we rate limit this function to 100ms for full
refreshes.
"""
if not module_string:
if time.time() > (self.last_refresh_ts + 0.1):
self.last_refresh_ts = time.time()
else:
# rate limiting
return
update_i3status = False
for name, module in self.output_modules.items():
if (
module_string is None
or (exact and name == module_string)
or (not exact and name.startswith(module_string))
):
if module["type"] == "py3status":
if self.config["debug"]:
self.log("refresh py3status module {}".format(name))
module["module"].force_update()
else:
if self.config["debug"]:
self.log("refresh i3status module {}".format(name))
update_i3status = True
if update_i3status:
self.i3status_thread.refresh_i3status() |
def update_milestone(id, **kwargs):
"""
Update a ProductMilestone
"""
data = update_milestone_raw(id, **kwargs)
if data:
return utils.format_json(data) | Update a ProductMilestone | Below is the the instruction that describes the task:
### Input:
Update a ProductMilestone
### Response:
def update_milestone(id, **kwargs):
"""
Update a ProductMilestone
"""
data = update_milestone_raw(id, **kwargs)
if data:
return utils.format_json(data) |
def space_list(args):
''' List accessible workspaces, in TSV form: <namespace><TAB>workspace'''
r = fapi.list_workspaces()
fapi._check_response_code(r, 200)
spaces = []
project = args.project
if project:
project = re.compile('^' + project)
for space in r.json():
ns = space['workspace']['namespace']
if project and not project.match(ns):
continue
ws = space['workspace']['name']
spaces.append(ns + '\t' + ws)
# Sort for easier downstream viewing, ignoring case
return sorted(spaces, key=lambda s: s.lower()) | List accessible workspaces, in TSV form: <namespace><TAB>workspace | Below is the the instruction that describes the task:
### Input:
List accessible workspaces, in TSV form: <namespace><TAB>workspace
### Response:
def space_list(args):
''' List accessible workspaces, in TSV form: <namespace><TAB>workspace'''
r = fapi.list_workspaces()
fapi._check_response_code(r, 200)
spaces = []
project = args.project
if project:
project = re.compile('^' + project)
for space in r.json():
ns = space['workspace']['namespace']
if project and not project.match(ns):
continue
ws = space['workspace']['name']
spaces.append(ns + '\t' + ws)
# Sort for easier downstream viewing, ignoring case
return sorted(spaces, key=lambda s: s.lower()) |
async def processClaims(self, allClaims: Dict[ID, Claims]):
"""
Processes and saves received Claims.
:param claims: claims to be processed and saved for each claim
definition.
"""
res = []
for schemaId, (claim_signature, claim_attributes) in allClaims.items():
res.append(await self.processClaim(schemaId, claim_attributes, claim_signature))
return res | Processes and saves received Claims.
:param claims: claims to be processed and saved for each claim
definition. | Below is the the instruction that describes the task:
### Input:
Processes and saves received Claims.
:param claims: claims to be processed and saved for each claim
definition.
### Response:
async def processClaims(self, allClaims: Dict[ID, Claims]):
"""
Processes and saves received Claims.
:param claims: claims to be processed and saved for each claim
definition.
"""
res = []
for schemaId, (claim_signature, claim_attributes) in allClaims.items():
res.append(await self.processClaim(schemaId, claim_attributes, claim_signature))
return res |
def add_done_callback(self, future, callback):
""" Add a callback on a future object put here so it can be
implemented with different event loops.
Parameters
-----------
future: Future or Deferred
Future implementation for the current EventLoop
callback: callable
Callback to invoke when the future is done
"""
if future is None:
raise bridge.BridgeReferenceError(
"Tried to add a callback to a nonexistent Future. "
"Make sure you pass the `returns` argument to your JavaMethod")
return self.loop.add_done_callback(future, callback) | Add a callback on a future object put here so it can be
implemented with different event loops.
Parameters
-----------
future: Future or Deferred
Future implementation for the current EventLoop
callback: callable
Callback to invoke when the future is done | Below is the the instruction that describes the task:
### Input:
Add a callback on a future object put here so it can be
implemented with different event loops.
Parameters
-----------
future: Future or Deferred
Future implementation for the current EventLoop
callback: callable
Callback to invoke when the future is done
### Response:
def add_done_callback(self, future, callback):
""" Add a callback on a future object put here so it can be
implemented with different event loops.
Parameters
-----------
future: Future or Deferred
Future implementation for the current EventLoop
callback: callable
Callback to invoke when the future is done
"""
if future is None:
raise bridge.BridgeReferenceError(
"Tried to add a callback to a nonexistent Future. "
"Make sure you pass the `returns` argument to your JavaMethod")
return self.loop.add_done_callback(future, callback) |
def _IfdEntryFactory(stream_rdr, offset):
"""
Return an |_IfdEntry| subclass instance containing the value of the
directory entry at *offset* in *stream_rdr*.
"""
ifd_entry_classes = {
TIFF_FLD.ASCII: _AsciiIfdEntry,
TIFF_FLD.SHORT: _ShortIfdEntry,
TIFF_FLD.LONG: _LongIfdEntry,
TIFF_FLD.RATIONAL: _RationalIfdEntry,
}
field_type = stream_rdr.read_short(offset, 2)
if field_type in ifd_entry_classes:
entry_cls = ifd_entry_classes[field_type]
else:
entry_cls = _IfdEntry
return entry_cls.from_stream(stream_rdr, offset) | Return an |_IfdEntry| subclass instance containing the value of the
directory entry at *offset* in *stream_rdr*. | Below is the the instruction that describes the task:
### Input:
Return an |_IfdEntry| subclass instance containing the value of the
directory entry at *offset* in *stream_rdr*.
### Response:
def _IfdEntryFactory(stream_rdr, offset):
"""
Return an |_IfdEntry| subclass instance containing the value of the
directory entry at *offset* in *stream_rdr*.
"""
ifd_entry_classes = {
TIFF_FLD.ASCII: _AsciiIfdEntry,
TIFF_FLD.SHORT: _ShortIfdEntry,
TIFF_FLD.LONG: _LongIfdEntry,
TIFF_FLD.RATIONAL: _RationalIfdEntry,
}
field_type = stream_rdr.read_short(offset, 2)
if field_type in ifd_entry_classes:
entry_cls = ifd_entry_classes[field_type]
else:
entry_cls = _IfdEntry
return entry_cls.from_stream(stream_rdr, offset) |
def sample(self, data, sample_size=15000,
blocked_proportion=0.5, original_length=None):
'''Draw a sample of record pairs from the dataset
(a mix of random pairs & pairs of similar records)
and initialize active learning with this sample
Arguments: data -- Dictionary of records, where the keys are
record_ids and the values are dictionaries with the keys being
field names
sample_size -- Size of the sample to draw
blocked_proportion -- Proportion of the sample that will be blocked
original_length -- Length of original data, should be set if `data` is
a sample of full data
'''
self._checkData(data)
self.active_learner = self.ActiveLearner(self.data_model)
self.active_learner.sample_combo(data, blocked_proportion,
sample_size, original_length) | Draw a sample of record pairs from the dataset
(a mix of random pairs & pairs of similar records)
and initialize active learning with this sample
Arguments: data -- Dictionary of records, where the keys are
record_ids and the values are dictionaries with the keys being
field names
sample_size -- Size of the sample to draw
blocked_proportion -- Proportion of the sample that will be blocked
original_length -- Length of original data, should be set if `data` is
a sample of full data | Below is the the instruction that describes the task:
### Input:
Draw a sample of record pairs from the dataset
(a mix of random pairs & pairs of similar records)
and initialize active learning with this sample
Arguments: data -- Dictionary of records, where the keys are
record_ids and the values are dictionaries with the keys being
field names
sample_size -- Size of the sample to draw
blocked_proportion -- Proportion of the sample that will be blocked
original_length -- Length of original data, should be set if `data` is
a sample of full data
### Response:
def sample(self, data, sample_size=15000,
blocked_proportion=0.5, original_length=None):
'''Draw a sample of record pairs from the dataset
(a mix of random pairs & pairs of similar records)
and initialize active learning with this sample
Arguments: data -- Dictionary of records, where the keys are
record_ids and the values are dictionaries with the keys being
field names
sample_size -- Size of the sample to draw
blocked_proportion -- Proportion of the sample that will be blocked
original_length -- Length of original data, should be set if `data` is
a sample of full data
'''
self._checkData(data)
self.active_learner = self.ActiveLearner(self.data_model)
self.active_learner.sample_combo(data, blocked_proportion,
sample_size, original_length) |
def find_disulfide_bridges(self, threshold=3.0):
"""Run Biopython's search_ss_bonds to find potential disulfide bridges for each chain and store in ChainProp.
Will add a list of tuple pairs into the annotations field, looks like this::
[ ((' ', 79, ' '), (' ', 110, ' ')),
((' ', 174, ' '), (' ', 180, ' ')),
((' ', 369, ' '), (' ', 377, ' '))]
Where each pair is a pair of cysteine residues close together in space.
"""
if self.structure:
parsed = self.structure
else:
parsed = self.parse_structure()
if not parsed:
log.error('{}: unable to open structure to find S-S bridges'.format(self.id))
return
disulfide_bridges = ssbio.protein.structure.properties.residues.search_ss_bonds(parsed.first_model,
threshold=threshold)
if not disulfide_bridges:
log.debug('{}: no disulfide bridges found'.format(self.id))
for chain, bridges in disulfide_bridges.items():
self.chains.get_by_id(chain).seq_record.annotations['SSBOND-biopython'] = disulfide_bridges[chain]
log.debug('{}: found {} disulfide bridges'.format(chain, len(bridges)))
log.debug('{}: stored disulfide bridges in the chain\'s seq_record letter_annotations'.format(chain)) | Run Biopython's search_ss_bonds to find potential disulfide bridges for each chain and store in ChainProp.
Will add a list of tuple pairs into the annotations field, looks like this::
[ ((' ', 79, ' '), (' ', 110, ' ')),
((' ', 174, ' '), (' ', 180, ' ')),
((' ', 369, ' '), (' ', 377, ' '))]
Where each pair is a pair of cysteine residues close together in space. | Below is the the instruction that describes the task:
### Input:
Run Biopython's search_ss_bonds to find potential disulfide bridges for each chain and store in ChainProp.
Will add a list of tuple pairs into the annotations field, looks like this::
[ ((' ', 79, ' '), (' ', 110, ' ')),
((' ', 174, ' '), (' ', 180, ' ')),
((' ', 369, ' '), (' ', 377, ' '))]
Where each pair is a pair of cysteine residues close together in space.
### Response:
def find_disulfide_bridges(self, threshold=3.0):
"""Run Biopython's search_ss_bonds to find potential disulfide bridges for each chain and store in ChainProp.
Will add a list of tuple pairs into the annotations field, looks like this::
[ ((' ', 79, ' '), (' ', 110, ' ')),
((' ', 174, ' '), (' ', 180, ' ')),
((' ', 369, ' '), (' ', 377, ' '))]
Where each pair is a pair of cysteine residues close together in space.
"""
if self.structure:
parsed = self.structure
else:
parsed = self.parse_structure()
if not parsed:
log.error('{}: unable to open structure to find S-S bridges'.format(self.id))
return
disulfide_bridges = ssbio.protein.structure.properties.residues.search_ss_bonds(parsed.first_model,
threshold=threshold)
if not disulfide_bridges:
log.debug('{}: no disulfide bridges found'.format(self.id))
for chain, bridges in disulfide_bridges.items():
self.chains.get_by_id(chain).seq_record.annotations['SSBOND-biopython'] = disulfide_bridges[chain]
log.debug('{}: found {} disulfide bridges'.format(chain, len(bridges)))
log.debug('{}: stored disulfide bridges in the chain\'s seq_record letter_annotations'.format(chain)) |
def load_file(self, filename):
"""Load and parse a RiveScript document.
:param str filename: The path to a RiveScript file.
"""
self._say("Loading file: " + filename)
fh = codecs.open(filename, 'r', 'utf-8')
lines = fh.readlines()
fh.close()
self._say("Parsing " + str(len(lines)) + " lines of code from " + filename)
self._parse(filename, lines) | Load and parse a RiveScript document.
:param str filename: The path to a RiveScript file. | Below is the the instruction that describes the task:
### Input:
Load and parse a RiveScript document.
:param str filename: The path to a RiveScript file.
### Response:
def load_file(self, filename):
"""Load and parse a RiveScript document.
:param str filename: The path to a RiveScript file.
"""
self._say("Loading file: " + filename)
fh = codecs.open(filename, 'r', 'utf-8')
lines = fh.readlines()
fh.close()
self._say("Parsing " + str(len(lines)) + " lines of code from " + filename)
self._parse(filename, lines) |
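A minimal usage sketch for the parser entry point above, assuming the rivescript package is installed; the brain file name is made up.

from rivescript import RiveScript

bot = RiveScript()
bot.load_file("example.rive")   # parse a single RiveScript document
bot.sort_replies()              # sorting is required before matching triggers
print(bot.reply("localuser", "hello bot"))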
def get_uri(source):
"""
Check a media source as a valid file or uri and return the proper uri
"""
import gst
src_info = source_info(source)
if src_info['is_file']: # Is this a file?
return get_uri(src_info['uri'])
elif gst.uri_is_valid(source): # Is this a valid URI source for Gstreamer
uri_protocol = gst.uri_get_protocol(source)
if gst.uri_protocol_is_supported(gst.URI_SRC, uri_protocol):
return source
else:
raise IOError('Invalid URI source for Gstreamer')
else:
raise IOError('Failed getting uri for path %s: no such file' % source) | Check a media source as a valid file or uri and return the proper uri | Below is the the instruction that describes the task:
### Input:
Check a media source as a valid file or uri and return the proper uri
### Response:
def get_uri(source):
"""
Check a media source as a valid file or uri and return the proper uri
"""
import gst
src_info = source_info(source)
if src_info['is_file']: # Is this a file?
return get_uri(src_info['uri'])
elif gst.uri_is_valid(source): # Is this a valid URI source for Gstreamer
uri_protocol = gst.uri_get_protocol(source)
if gst.uri_protocol_is_supported(gst.URI_SRC, uri_protocol):
return source
else:
raise IOError('Invalid URI source for Gstreamer')
else:
raise IOError('Failed getting uri for path %s: no such file' % source) |
def subscribe(self, requested_timeout=None, auto_renew=False):
"""Subscribe to the service.
If requested_timeout is provided, a subscription valid for that number
of seconds will be requested, but not guaranteed. Check
`timeout` on return to find out what period of validity is
actually allocated.
Note:
SoCo will try to unsubscribe any subscriptions which are still
subscribed on program termination, but it is good practice for
you to clean up by making sure that you call :meth:`unsubscribe`
yourself.
Args:
requested_timeout(int, optional): The timeout to be requested.
auto_renew (bool, optional): If `True`, renew the subscription
automatically shortly before timeout. Default `False`.
"""
class AutoRenewThread(threading.Thread):
"""Used by the auto_renew code to renew a subscription from within
a thread.
"""
def __init__(self, interval, stop_flag, sub, *args, **kwargs):
super(AutoRenewThread, self).__init__(*args, **kwargs)
self.interval = interval
self.sub = sub
self.stop_flag = stop_flag
self.daemon = True
def run(self):
sub = self.sub
stop_flag = self.stop_flag
interval = self.interval
while not stop_flag.wait(interval):
log.info("Autorenewing subscription %s", sub.sid)
sub.renew()
# TIMEOUT is provided for in the UPnP spec, but it is not clear if
# Sonos pays any attention to it. A timeout of 86400 secs always seems
# to be allocated
self.requested_timeout = requested_timeout
if self._has_been_unsubscribed:
raise SoCoException(
'Cannot resubscribe instance once unsubscribed')
service = self.service
# The event listener must be running, so start it if not
if not event_listener.is_running:
event_listener.start(service.soco)
# an event subscription looks like this:
# SUBSCRIBE publisher path HTTP/1.1
# HOST: publisher host:publisher port
# CALLBACK: <delivery URL>
# NT: upnp:event
# TIMEOUT: Second-requested subscription duration (optional)
# pylint: disable=unbalanced-tuple-unpacking
ip_address, port = event_listener.address
if config.EVENT_ADVERTISE_IP:
ip_address = config.EVENT_ADVERTISE_IP
headers = {
'Callback': '<http://{}:{}>'.format(ip_address, port),
'NT': 'upnp:event'
}
if requested_timeout is not None:
headers["TIMEOUT"] = "Second-{}".format(requested_timeout)
# Lock out EventNotifyHandler during registration
with _subscriptions_lock:
response = requests.request(
'SUBSCRIBE', service.base_url + service.event_subscription_url,
headers=headers)
response.raise_for_status()
self.sid = response.headers['sid']
timeout = response.headers['timeout']
# According to the spec, timeout can be "infinite" or "second-123"
# where 123 is a number of seconds. Sonos uses "Second-123" (with
# a capital letter)
if timeout.lower() == 'infinite':
self.timeout = None
else:
self.timeout = int(timeout.lstrip('Second-'))
self._timestamp = time.time()
self.is_subscribed = True
log.info(
"Subscribed to %s, sid: %s",
service.base_url + service.event_subscription_url, self.sid)
# Add the subscription to the master dict so it can be looked up
# by sid
_subscriptions[self.sid] = self
# Register this subscription to be unsubscribed at exit if still alive
# This will not happen if exit is abnormal (eg in response to a
# signal or fatal interpreter error - see the docs for `atexit`).
atexit.register(self.unsubscribe)
# Set up auto_renew
if not auto_renew:
return
# Autorenew just before expiry, say at 85% of self.timeout seconds
interval = self.timeout * 85 / 100
auto_renew_thread = AutoRenewThread(
interval, self._auto_renew_thread_flag, self)
auto_renew_thread.start() | Subscribe to the service.
If requested_timeout is provided, a subscription valid for that number
of seconds will be requested, but not guaranteed. Check
`timeout` on return to find out what period of validity is
actually allocated.
Note:
SoCo will try to unsubscribe any subscriptions which are still
subscribed on program termination, but it is good practice for
you to clean up by making sure that you call :meth:`unsubscribe`
yourself.
Args:
requested_timeout(int, optional): The timeout to be requested.
auto_renew (bool, optional): If `True`, renew the subscription
automatically shortly before timeout. Default `False`. | Below is the the instruction that describes the task:
### Input:
Subscribe to the service.
If requested_timeout is provided, a subscription valid for that number
of seconds will be requested, but not guaranteed. Check
`timeout` on return to find out what period of validity is
actually allocated.
Note:
SoCo will try to unsubscribe any subscriptions which are still
subscribed on program termination, but it is good practice for
you to clean up by making sure that you call :meth:`unsubscribe`
yourself.
Args:
requested_timeout(int, optional): The timeout to be requested.
auto_renew (bool, optional): If `True`, renew the subscription
automatically shortly before timeout. Default `False`.
### Response:
def subscribe(self, requested_timeout=None, auto_renew=False):
"""Subscribe to the service.
If requested_timeout is provided, a subscription valid for that number
of seconds will be requested, but not guaranteed. Check
`timeout` on return to find out what period of validity is
actually allocated.
Note:
SoCo will try to unsubscribe any subscriptions which are still
subscribed on program termination, but it is good practice for
you to clean up by making sure that you call :meth:`unsubscribe`
yourself.
Args:
requested_timeout(int, optional): The timeout to be requested.
auto_renew (bool, optional): If `True`, renew the subscription
automatically shortly before timeout. Default `False`.
"""
class AutoRenewThread(threading.Thread):
"""Used by the auto_renew code to renew a subscription from within
a thread.
"""
def __init__(self, interval, stop_flag, sub, *args, **kwargs):
super(AutoRenewThread, self).__init__(*args, **kwargs)
self.interval = interval
self.sub = sub
self.stop_flag = stop_flag
self.daemon = True
def run(self):
sub = self.sub
stop_flag = self.stop_flag
interval = self.interval
while not stop_flag.wait(interval):
log.info("Autorenewing subscription %s", sub.sid)
sub.renew()
# TIMEOUT is provided for in the UPnP spec, but it is not clear if
# Sonos pays any attention to it. A timeout of 86400 secs always seems
# to be allocated
self.requested_timeout = requested_timeout
if self._has_been_unsubscribed:
raise SoCoException(
'Cannot resubscribe instance once unsubscribed')
service = self.service
# The event listener must be running, so start it if not
if not event_listener.is_running:
event_listener.start(service.soco)
# an event subscription looks like this:
# SUBSCRIBE publisher path HTTP/1.1
# HOST: publisher host:publisher port
# CALLBACK: <delivery URL>
# NT: upnp:event
# TIMEOUT: Second-requested subscription duration (optional)
# pylint: disable=unbalanced-tuple-unpacking
ip_address, port = event_listener.address
if config.EVENT_ADVERTISE_IP:
ip_address = config.EVENT_ADVERTISE_IP
headers = {
'Callback': '<http://{}:{}>'.format(ip_address, port),
'NT': 'upnp:event'
}
if requested_timeout is not None:
headers["TIMEOUT"] = "Second-{}".format(requested_timeout)
# Lock out EventNotifyHandler during registration
with _subscriptions_lock:
response = requests.request(
'SUBSCRIBE', service.base_url + service.event_subscription_url,
headers=headers)
response.raise_for_status()
self.sid = response.headers['sid']
timeout = response.headers['timeout']
# According to the spec, timeout can be "infinite" or "second-123"
# where 123 is a number of seconds. Sonos uses "Second-123" (with
# a capital letter)
if timeout.lower() == 'infinite':
self.timeout = None
else:
self.timeout = int(timeout.lstrip('Second-'))
self._timestamp = time.time()
self.is_subscribed = True
log.info(
"Subscribed to %s, sid: %s",
service.base_url + service.event_subscription_url, self.sid)
# Add the subscription to the master dict so it can be looked up
# by sid
_subscriptions[self.sid] = self
# Register this subscription to be unsubscribed at exit if still alive
# This will not happen if exit is abnormal (eg in response to a
# signal or fatal interpreter error - see the docs for `atexit`).
atexit.register(self.unsubscribe)
# Set up auto_renew
if not auto_renew:
return
# Autorenew just before expiry, say at 85% of self.timeout seconds
interval = self.timeout * 85 / 100
auto_renew_thread = AutoRenewThread(
interval, self._auto_renew_thread_flag, self)
auto_renew_thread.start() |
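A rough usage sketch for the subscription flow above, assuming the soco package and a reachable player on the LAN; the discovery call and the choice of service are illustrative.

from soco.discovery import any_soco

device = any_soco()   # pick any Sonos player that responds on the LAN
sub = device.avTransport.subscribe(requested_timeout=120, auto_renew=True)
print(sub.sid, sub.timeout)
event = sub.events.get(timeout=5)   # notifications arrive on the subscription's queue
print(event.variables)
sub.unsubscribe()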
def getDatabaseFileSize(self):
""" Return the file size of the database as a pretty string. """
if DISABLE_PERSISTENT_CACHING:
return "?"
size = os.path.getsize(self.__db_filepath)
if size > 1000000000:
size = "%0.3fGB" % (size / 1000000000)
elif size > 1000000:
size = "%0.2fMB" % (size / 1000000)
elif size > 1000:
size = "%uKB" % (size // 1000)
else:
size = "%uB" % (size)
return size | Return the file size of the database as a pretty string. | Below is the the instruction that describes the task:
### Input:
Return the file size of the database as a pretty string.
### Response:
def getDatabaseFileSize(self):
""" Return the file size of the database as a pretty string. """
if DISABLE_PERSISTENT_CACHING:
return "?"
size = os.path.getsize(self.__db_filepath)
if size > 1000000000:
size = "%0.3fGB" % (size / 1000000000)
elif size > 1000000:
size = "%0.2fMB" % (size / 1000000)
elif size > 1000:
size = "%uKB" % (size // 1000)
else:
size = "%uB" % (size)
return size |
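The size-formatting rules above are easy to sanity-check in isolation; a standalone sketch (Python 3 division, decimal units as in the original):

def pretty_size(size):
    # Same thresholds and format strings as getDatabaseFileSize above.
    if size > 1000000000:
        return "%0.3fGB" % (size / 1000000000)
    elif size > 1000000:
        return "%0.2fMB" % (size / 1000000)
    elif size > 1000:
        return "%uKB" % (size // 1000)
    return "%uB" % size

print(pretty_size(512))          # 512B
print(pretty_size(2048))         # 2KB
print(pretty_size(3500000))      # 3.50MB
print(pretty_size(1234567890))   # 1.235GB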
def extractFromURL(url,
cache=False,
cacheDir='_cache',
verbose=False,
encoding=None,
filters=None,
userAgent=None,
timeout=5,
blur=5,
ignore_robotstxt=False,
only_mime_types=None,
raw=False):
"""
Extracts text from a URL.
Parameters:
url := string
Remote URL or local filename where HTML will be read.
cache := bool
True=store and retrieve url from cache
False=always retrieve url from the web
cacheDir := str
Directory where cached url contents will be stored.
verbose := bool
True=print logging messages
False=print no output
encoding := string
The encoding of the page contents.
If none given, it will attempt to guess the encoding.
See http://docs.python.org/howto/unicode.html for further info
on Python Unicode and encoding support.
filters := string
Comma-delimited list of filters to apply before parsing.
only_mime_types := list of strings
A list of mime-types to limit parsing to.
If the mime-type of the raw-content retrieved does not match
one of these, a value of None will be returned.
"""
blur = int(blur)
try:
import chardet
except ImportError as e:
raise ImportError(("%s\nYou need to install chardet.\n" + \
"e.g. sudo pip install chardet") % e)
if only_mime_types and isinstance(only_mime_types, six.text_type):
only_mime_types = only_mime_types.split(',')
# Load url from cache if enabled.
if cache:
if not os.path.isdir(cacheDir):
cache_perms = 488 # 750 in octal, '-rwxr-x---'
os.makedirs(cacheDir, cache_perms)
cache_key = generate_key(url)
cached_content = cache_get(cacheDir, cache_key)
if cached_content:
return cached_content
if not ignore_robotstxt:
if not check_robotstxt(url, cache, cacheDir, userAgent=userAgent):
if verbose: print("Request denied by robots.txt")
return ''
# Otherwise download the url.
if verbose: print('Reading %s...' % url)
html = fetch(
url,
timeout=timeout,
userAgent=userAgent,
only_mime_types=only_mime_types)
if not html:
return ''
# If no encoding guess given, then attempt to determine
# encoding automatically.
if not encoding:
if isinstance(html, unicode):
html = html.encode('utf8', 'replace')
encoding_opinion = chardet.detect(html)
encoding = encoding_opinion['encoding']
if verbose: print('Using encoding %s.' % encoding)
# Save raw contents to cache if enabled.
if verbose: print('Read %i characters.' % len(html))
if cache:
raw_key = generate_key(url, "%s.raw")
cache_set(cacheDir, raw_key, html)
# Apply filters.
if filters:
filter_names = map(str.strip, filters.split(','))
for filter_name in filter_names:
fltr = get_filter(filter_name)
html = fltr(html)
# Clean up HTML.
html = tidyHTML(html)
if verbose: print('Extracted %i characters.' % len(html))
# Convert to Unicode.
if not html:
return ''
html = unicode(html, encoding=encoding, errors='replace')
if raw:
return html
# Extract text from HTML.
res = extractFromHTML(html, blur=blur)
assert isinstance(res, unicode)
# Save extracted text to cache if enabled.
res = res.encode(encoding, 'ignore')
if cache:
cache_set(cacheDir, cache_key, res)
return res | Extracts text from a URL.
Parameters:
url := string
Remote URL or local filename where HTML will be read.
cache := bool
True=store and retrieve url from cache
False=always retrieve url from the web
cacheDir := str
Directory where cached url contents will be stored.
verbose := bool
True=print logging messages
False=print no output
encoding := string
The encoding of the page contents.
If none given, it will attempt to guess the encoding.
See http://docs.python.org/howto/unicode.html for further info
on Python Unicode and encoding support.
filters := string
Comma-delimited list of filters to apply before parsing.
only_mime_types := list of strings
A list of mime-types to limit parsing to.
If the mime-type of the raw-content retrieved does not match
one of these, a value of None will be returned. | Below is the the instruction that describes the task:
### Input:
Extracts text from a URL.
Parameters:
url := string
Remote URL or local filename where HTML will be read.
cache := bool
True=store and retrieve url from cache
False=always retrieve url from the web
cacheDir := str
Directory where cached url contents will be stored.
verbose := bool
True=print logging messages
False=print no output
encoding := string
The encoding of the page contents.
If none given, it will attempt to guess the encoding.
See http://docs.python.org/howto/unicode.html for further info
on Python Unicode and encoding support.
filters := string
Comma-delimited list of filters to apply before parsing.
only_mime_types := list of strings
A list of mime-types to limit parsing to.
If the mime-type of the raw-content retrieved does not match
one of these, a value of None will be returned.
### Response:
def extractFromURL(url,
cache=False,
cacheDir='_cache',
verbose=False,
encoding=None,
filters=None,
userAgent=None,
timeout=5,
blur=5,
ignore_robotstxt=False,
only_mime_types=None,
raw=False):
"""
Extracts text from a URL.
Parameters:
url := string
Remote URL or local filename where HTML will be read.
cache := bool
True=store and retrieve url from cache
False=always retrieve url from the web
cacheDir := str
Directory where cached url contents will be stored.
verbose := bool
True=print logging messages
False=print no output
encoding := string
The encoding of the page contents.
If none given, it will attempt to guess the encoding.
See http://docs.python.org/howto/unicode.html for further info
on Python Unicode and encoding support.
filters := string
Comma-delimited list of filters to apply before parsing.
only_mime_types := list of strings
A list of mime-types to limit parsing to.
If the mime-type of the raw-content retrieved does not match
one of these, a value of None will be returned.
"""
blur = int(blur)
try:
import chardet
except ImportError as e:
raise ImportError(("%s\nYou need to install chardet.\n" + \
"e.g. sudo pip install chardet") % e)
if only_mime_types and isinstance(only_mime_types, six.text_type):
only_mime_types = only_mime_types.split(',')
# Load url from cache if enabled.
if cache:
if not os.path.isdir(cacheDir):
cache_perms = 488 # 750 in octal, '-rwxr-x---'
os.makedirs(cacheDir, cache_perms)
cache_key = generate_key(url)
cached_content = cache_get(cacheDir, cache_key)
if cached_content:
return cached_content
if not ignore_robotstxt:
if not check_robotstxt(url, cache, cacheDir, userAgent=userAgent):
if verbose: print("Request denied by robots.txt")
return ''
# Otherwise download the url.
if verbose: print('Reading %s...' % url)
html = fetch(
url,
timeout=timeout,
userAgent=userAgent,
only_mime_types=only_mime_types)
if not html:
return ''
# If no encoding guess given, then attempt to determine
# encoding automatically.
if not encoding:
if isinstance(html, unicode):
html = html.encode('utf8', 'replace')
encoding_opinion = chardet.detect(html)
encoding = encoding_opinion['encoding']
if verbose: print('Using encoding %s.' % encoding)
# Save raw contents to cache if enabled.
if verbose: print('Read %i characters.' % len(html))
if cache:
raw_key = generate_key(url, "%s.raw")
cache_set(cacheDir, raw_key, html)
# Apply filters.
if filters:
filter_names = map(str.strip, filters.split(','))
for filter_name in filter_names:
fltr = get_filter(filter_name)
html = fltr(html)
# Clean up HTML.
html = tidyHTML(html)
if verbose: print('Extracted %i characters.' % len(html))
# Convert to Unicode.
if not html:
return ''
html = unicode(html, encoding=encoding, errors='replace')
if raw:
return html
# Extract text from HTML.
res = extractFromHTML(html, blur=blur)
assert isinstance(res, unicode)
# Save extracted text to cache if enabled.
res = res.encode(encoding, 'ignore')
if cache:
cache_set(cacheDir, cache_key, res)
return res |
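A hedged call sketch for the extractor above, using only parameters from its own signature and run from the module that defines it; the URL and user agent are placeholders.

text = extractFromURL(
    "https://example.com/",
    cache=True,
    cacheDir="_cache",
    userAgent="my-crawler/0.1",
    only_mime_types=["text/html"],
    verbose=True,
)
print(text[:200])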
def successors(self):
"""Yield Compounds below self in the hierarchy.
Yields
-------
mb.Compound
The next Particle below self in the hierarchy
"""
if not self.children:
return
for part in self.children:
# Parts local to the current Compound.
yield part
# Parts further down the hierarchy.
for subpart in part.successors():
yield subpart | Yield Compounds below self in the hierarchy.
Yields
-------
mb.Compound
The next Particle below self in the hierarchy | Below is the the instruction that describes the task:
### Input:
Yield Compounds below self in the hierarchy.
Yields
-------
mb.Compound
The next Particle below self in the hierarchy
### Response:
def successors(self):
"""Yield Compounds below self in the hierarchy.
Yields
-------
mb.Compound
The next Particle below self in the hierarchy
"""
if not self.children:
return
for part in self.children:
# Parts local to the current Compound.
yield part
# Parts further down the hierarchy.
for subpart in part.successors():
yield subpart |
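A small walk-through of the generator above, assuming the mbuild package; the compound names are made up.

import mbuild as mb

parent = mb.Compound(name="parent")
child = mb.Compound(name="child")
grandchild = mb.Compound(name="grandchild")
child.add(grandchild)
parent.add(child)

# Each child is yielded before its own successors (pre-order traversal).
print([c.name for c in parent.successors()])   # ['child', 'grandchild']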
def _CreateComplexTypeFromData(
self, elem_type, type_is_override, data, set_type_attrs):
"""Initialize a SOAP element with specific data.
Args:
elem_type: The type of the element to create.
type_is_override: A boolean specifying if the type is being overridden.
data: The data to hydrate the type with.
set_type_attrs: A boolean indicating whether or not attributes that end
in .Type should be set. This is only necessary for batch job service.
Returns:
      A fully initialized SOAP element.
"""
elem_arguments = dict(elem_type.elements)
# A post order traversal of the original data, need to instantiate from
# the bottom up.
instantiated_arguments = {
k: self._PackArgumentsHelper(elem_arguments[k], v, set_type_attrs)
for k, v in data if k != 'xsi_type'}
if set_type_attrs:
found_type_attr = next((e_name for e_name, _ in elem_type.elements
if e_name.endswith('.Type')), None)
if found_type_attr and type_is_override:
instantiated_arguments[found_type_attr] = elem_type.qname.localname
# Now go back through the tree instantiating SOAP types as we go.
return elem_type(**instantiated_arguments) | Initialize a SOAP element with specific data.
Args:
elem_type: The type of the element to create.
type_is_override: A boolean specifying if the type is being overridden.
data: The data to hydrate the type with.
set_type_attrs: A boolean indicating whether or not attributes that end
in .Type should be set. This is only necessary for batch job service.
Returns:
      A fully initialized SOAP element. | Below is the the instruction that describes the task:
### Input:
Initialize a SOAP element with specific data.
Args:
elem_type: The type of the element to create.
type_is_override: A boolean specifying if the type is being overridden.
data: The data to hydrate the type with.
set_type_attrs: A boolean indicating whether or not attributes that end
in .Type should be set. This is only necessary for batch job service.
Returns:
      A fully initialized SOAP element.
### Response:
def _CreateComplexTypeFromData(
self, elem_type, type_is_override, data, set_type_attrs):
"""Initialize a SOAP element with specific data.
Args:
elem_type: The type of the element to create.
type_is_override: A boolean specifying if the type is being overridden.
data: The data to hydrate the type with.
set_type_attrs: A boolean indicating whether or not attributes that end
in .Type should be set. This is only necessary for batch job service.
Returns:
      A fully initialized SOAP element.
"""
elem_arguments = dict(elem_type.elements)
# A post order traversal of the original data, need to instantiate from
# the bottom up.
instantiated_arguments = {
k: self._PackArgumentsHelper(elem_arguments[k], v, set_type_attrs)
for k, v in data if k != 'xsi_type'}
if set_type_attrs:
found_type_attr = next((e_name for e_name, _ in elem_type.elements
if e_name.endswith('.Type')), None)
if found_type_attr and type_is_override:
instantiated_arguments[found_type_attr] = elem_type.qname.localname
# Now go back through the tree instantiating SOAP types as we go.
return elem_type(**instantiated_arguments) |
def stylize(self, images, style=None, verbose=True, max_size=800, batch_size = 4):
"""
Stylize an SFrame of Images given a style index or a list of
styles.
Parameters
----------
images : SFrame | Image
A dataset that has the same content image column that was used
during training.
style : int or list, optional
The selected style or list of styles to use on the ``images``. If
`None`, all styles will be applied to each image in ``images``.
verbose : bool, optional
If True, print progress updates.
max_size : int or tuple
Max input image size that will not get resized during stylization.
Images with a side larger than this value, will be scaled down, due
to time and memory constraints. If tuple, interpreted as (max
width, max height). Without resizing, larger input images take more
time to stylize. Resizing can effect the quality of the final
stylized image.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
Returns
-------
out : SFrame or SArray or turicreate.Image
If ``style`` is a list, an SFrame is always returned. If ``style``
is a single integer, the output type will match the input type
(Image, SArray, or SFrame).
See Also
--------
create
Examples
--------
>>> image = tc.Image("/path/to/image.jpg")
>>> stylized_images = model.stylize(image, style=[0, 1])
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
+--------+-------+------------------------+
[2 rows x 3 columns]
>>> images = tc.image_analysis.load_images('/path/to/images')
>>> stylized_images = model.stylize(images)
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
| 0 | 2 | Height: 256 Width: 256 |
| 0 | 3 | Height: 256 Width: 256 |
| 1 | 0 | Height: 640 Width: 648 |
| 1 | 1 | Height: 640 Width: 648 |
| 1 | 2 | Height: 640 Width: 648 |
| 1 | 3 | Height: 640 Width: 648 |
+--------+-------+------------------------+
[8 rows x 3 columns]
"""
if(batch_size < 1):
raise _ToolkitError("'batch_size' must be greater than or equal to 1")
from ._sframe_loader import SFrameSTIter as _SFrameSTIter
import mxnet as _mx
from mxnet import gluon as _gluon
from .._mxnet import _mxnet_utils
set_of_all_idx = self._style_indices()
style, single_style = self._style_input_check(style)
if isinstance(max_size, _six.integer_types):
input_shape = (max_size, max_size)
else:
# Outward-facing, we use (width, height), but internally we use
# (height, width)
input_shape = max_size[::-1]
images, unpack = self._canonize_content_input(images, single_style=single_style)
dataset_size = len(images)
output_size = dataset_size * len(style)
batch_size_each = min(batch_size, output_size)
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=batch_size_each)
if num_mxnet_gpus == 0:
# CPU processing prefers native size to prevent stylizing
# unnecessary regions
batch_size_each = 1
loader_type = 'favor-native-size'
else:
# GPU processing prefers batches of same size, using padding
# for smaller images
loader_type = 'pad'
self._model.batch_size = batch_size_each
self._model.hybridize()
ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size_each)
batch_size = max(num_mxnet_gpus, 1) * batch_size_each
last_time = 0
if dataset_size == 0:
raise _ToolkitError("SFrame cannot be empty")
content_feature = _tkutl._find_only_image_column(images)
_raise_error_if_not_training_sframe(images, content_feature)
max_h = 0
max_w = 0
oversized_count = 0
for img in images[content_feature]:
if img.height > input_shape[0] or img.width > input_shape[1]:
oversized_count += 1
max_h = max(img.height, max_h)
max_w = max(img.width, max_w)
if input_shape[0] > max_h:
input_shape = (max_h, input_shape[1])
if input_shape[1] > max_w:
input_shape = (input_shape[0], max_w)
# If we find large images, let's switch to sequential iterator
# pre-processing, to prevent memory issues.
sequential = max(max_h, max_w) > 2000
if verbose and output_size != 1:
print('Stylizing {} image(s) using {} style(s)'.format(dataset_size, len(style)))
if oversized_count > 0:
print('Scaling down {} image(s) exceeding {}x{}'.format(oversized_count, input_shape[1], input_shape[0]))
content_images_loader = _SFrameSTIter(images, batch_size,
shuffle=False,
feature_column=content_feature,
input_shape=input_shape,
num_epochs=1,
loader_type=loader_type,
repeat_each_image=len(style),
sequential=sequential)
sb = _tc.SFrameBuilder([int, int, _tc.Image],
column_names=['row_id', 'style', 'stylized_{}'.format(self.content_feature)])
count = 0
for i, batch in enumerate(content_images_loader):
if loader_type == 'favor-native-size':
c_data = [batch.data[0][0].expand_dims(0)]
else:
c_data = _gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
indices_data = _gluon.utils.split_and_load(_mx.nd.array(batch.repeat_indices, dtype=_np.int64),
ctx_list=ctx, batch_axis=0)
outputs = []
for b_img, b_indices in zip(c_data, indices_data):
mx_style = _mx.nd.array(style, dtype=_np.int64, ctx=b_indices.context)
b_batch_styles = mx_style[b_indices]
output = self._model(b_img, b_batch_styles)
outputs.append(output)
image_data = _np.concatenate([
(output.asnumpy().transpose(0, 2, 3, 1) * 255).astype(_np.uint8)
for output in outputs], axis=0)
batch_styles = [style[idx] for idx in batch.repeat_indices]
for b in range(batch_size - (batch.pad or 0)):
image = image_data[b]
# Crop to remove added padding
crop = batch.crop[b]
cropped_image = image[crop[0]:crop[1], crop[2]:crop[3]]
tc_img = _tc.Image(_image_data=cropped_image.tobytes(),
_width=cropped_image.shape[1],
_height=cropped_image.shape[0],
_channels=cropped_image.shape[2],
_format_enum=2,
_image_data_size=cropped_image.size)
sb.append([batch.indices[b], batch_styles[b], tc_img])
count += 1
cur_time = _time.time()
if verbose and output_size != 1 and (cur_time > last_time + 10 or count == output_size):
print('Stylizing {curr_image:{width}d}/{max_n:{width}d}'.
format(curr_image=count, max_n=output_size, width=len(str(output_size))))
last_time = cur_time
return unpack(sb.close()) | Stylize an SFrame of Images given a style index or a list of
styles.
Parameters
----------
images : SFrame | Image
A dataset that has the same content image column that was used
during training.
style : int or list, optional
The selected style or list of styles to use on the ``images``. If
`None`, all styles will be applied to each image in ``images``.
verbose : bool, optional
If True, print progress updates.
max_size : int or tuple
Max input image size that will not get resized during stylization.
Images with a side larger than this value, will be scaled down, due
to time and memory constraints. If tuple, interpreted as (max
width, max height). Without resizing, larger input images take more
time to stylize. Resizing can effect the quality of the final
stylized image.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
Returns
-------
out : SFrame or SArray or turicreate.Image
If ``style`` is a list, an SFrame is always returned. If ``style``
is a single integer, the output type will match the input type
(Image, SArray, or SFrame).
See Also
--------
create
Examples
--------
>>> image = tc.Image("/path/to/image.jpg")
>>> stylized_images = model.stylize(image, style=[0, 1])
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
+--------+-------+------------------------+
[2 rows x 3 columns]
>>> images = tc.image_analysis.load_images('/path/to/images')
>>> stylized_images = model.stylize(images)
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
| 0 | 2 | Height: 256 Width: 256 |
| 0 | 3 | Height: 256 Width: 256 |
| 1 | 0 | Height: 640 Width: 648 |
| 1 | 1 | Height: 640 Width: 648 |
| 1 | 2 | Height: 640 Width: 648 |
| 1 | 3 | Height: 640 Width: 648 |
+--------+-------+------------------------+
[8 rows x 3 columns] | Below is the the instruction that describes the task:
### Input:
Stylize an SFrame of Images given a style index or a list of
styles.
Parameters
----------
images : SFrame | Image
A dataset that has the same content image column that was used
during training.
style : int or list, optional
The selected style or list of styles to use on the ``images``. If
`None`, all styles will be applied to each image in ``images``.
verbose : bool, optional
If True, print progress updates.
max_size : int or tuple
Max input image size that will not get resized during stylization.
Images with a side larger than this value, will be scaled down, due
to time and memory constraints. If tuple, interpreted as (max
width, max height). Without resizing, larger input images take more
time to stylize. Resizing can effect the quality of the final
stylized image.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
Returns
-------
out : SFrame or SArray or turicreate.Image
If ``style`` is a list, an SFrame is always returned. If ``style``
is a single integer, the output type will match the input type
(Image, SArray, or SFrame).
See Also
--------
create
Examples
--------
>>> image = tc.Image("/path/to/image.jpg")
>>> stylized_images = model.stylize(image, style=[0, 1])
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
+--------+-------+------------------------+
[2 rows x 3 columns]
>>> images = tc.image_analysis.load_images('/path/to/images')
>>> stylized_images = model.stylize(images)
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
| 0 | 2 | Height: 256 Width: 256 |
| 0 | 3 | Height: 256 Width: 256 |
| 1 | 0 | Height: 640 Width: 648 |
| 1 | 1 | Height: 640 Width: 648 |
| 1 | 2 | Height: 640 Width: 648 |
| 1 | 3 | Height: 640 Width: 648 |
+--------+-------+------------------------+
[8 rows x 3 columns]
### Response:
def stylize(self, images, style=None, verbose=True, max_size=800, batch_size = 4):
"""
Stylize an SFrame of Images given a style index or a list of
styles.
Parameters
----------
images : SFrame | Image
A dataset that has the same content image column that was used
during training.
style : int or list, optional
The selected style or list of styles to use on the ``images``. If
`None`, all styles will be applied to each image in ``images``.
verbose : bool, optional
If True, print progress updates.
max_size : int or tuple
Max input image size that will not get resized during stylization.
Images with a side larger than this value, will be scaled down, due
to time and memory constraints. If tuple, interpreted as (max
width, max height). Without resizing, larger input images take more
time to stylize. Resizing can effect the quality of the final
stylized image.
batch_size : int, optional
If you are getting memory errors, try decreasing this value. If you
have a powerful computer, increasing this value may improve
performance.
Returns
-------
out : SFrame or SArray or turicreate.Image
If ``style`` is a list, an SFrame is always returned. If ``style``
is a single integer, the output type will match the input type
(Image, SArray, or SFrame).
See Also
--------
create
Examples
--------
>>> image = tc.Image("/path/to/image.jpg")
>>> stylized_images = model.stylize(image, style=[0, 1])
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
+--------+-------+------------------------+
[2 rows x 3 columns]
>>> images = tc.image_analysis.load_images('/path/to/images')
>>> stylized_images = model.stylize(images)
Data:
+--------+-------+------------------------+
| row_id | style | stylized_image |
+--------+-------+------------------------+
| 0 | 0 | Height: 256 Width: 256 |
| 0 | 1 | Height: 256 Width: 256 |
| 0 | 2 | Height: 256 Width: 256 |
| 0 | 3 | Height: 256 Width: 256 |
| 1 | 0 | Height: 640 Width: 648 |
| 1 | 1 | Height: 640 Width: 648 |
| 1 | 2 | Height: 640 Width: 648 |
| 1 | 3 | Height: 640 Width: 648 |
+--------+-------+------------------------+
[8 rows x 3 columns]
"""
if(batch_size < 1):
raise _ToolkitError("'batch_size' must be greater than or equal to 1")
from ._sframe_loader import SFrameSTIter as _SFrameSTIter
import mxnet as _mx
from mxnet import gluon as _gluon
from .._mxnet import _mxnet_utils
set_of_all_idx = self._style_indices()
style, single_style = self._style_input_check(style)
if isinstance(max_size, _six.integer_types):
input_shape = (max_size, max_size)
else:
# Outward-facing, we use (width, height), but internally we use
# (height, width)
input_shape = max_size[::-1]
images, unpack = self._canonize_content_input(images, single_style=single_style)
dataset_size = len(images)
output_size = dataset_size * len(style)
batch_size_each = min(batch_size, output_size)
num_mxnet_gpus = _mxnet_utils.get_num_gpus_in_use(max_devices=batch_size_each)
if num_mxnet_gpus == 0:
# CPU processing prefers native size to prevent stylizing
# unnecessary regions
batch_size_each = 1
loader_type = 'favor-native-size'
else:
# GPU processing prefers batches of same size, using padding
# for smaller images
loader_type = 'pad'
self._model.batch_size = batch_size_each
self._model.hybridize()
ctx = _mxnet_utils.get_mxnet_context(max_devices=batch_size_each)
batch_size = max(num_mxnet_gpus, 1) * batch_size_each
last_time = 0
if dataset_size == 0:
raise _ToolkitError("SFrame cannot be empty")
content_feature = _tkutl._find_only_image_column(images)
_raise_error_if_not_training_sframe(images, content_feature)
max_h = 0
max_w = 0
oversized_count = 0
for img in images[content_feature]:
if img.height > input_shape[0] or img.width > input_shape[1]:
oversized_count += 1
max_h = max(img.height, max_h)
max_w = max(img.width, max_w)
if input_shape[0] > max_h:
input_shape = (max_h, input_shape[1])
if input_shape[1] > max_w:
input_shape = (input_shape[0], max_w)
# If we find large images, let's switch to sequential iterator
# pre-processing, to prevent memory issues.
sequential = max(max_h, max_w) > 2000
if verbose and output_size != 1:
print('Stylizing {} image(s) using {} style(s)'.format(dataset_size, len(style)))
if oversized_count > 0:
print('Scaling down {} image(s) exceeding {}x{}'.format(oversized_count, input_shape[1], input_shape[0]))
content_images_loader = _SFrameSTIter(images, batch_size,
shuffle=False,
feature_column=content_feature,
input_shape=input_shape,
num_epochs=1,
loader_type=loader_type,
repeat_each_image=len(style),
sequential=sequential)
sb = _tc.SFrameBuilder([int, int, _tc.Image],
column_names=['row_id', 'style', 'stylized_{}'.format(self.content_feature)])
count = 0
for i, batch in enumerate(content_images_loader):
if loader_type == 'favor-native-size':
c_data = [batch.data[0][0].expand_dims(0)]
else:
c_data = _gluon.utils.split_and_load(batch.data[0], ctx_list=ctx, batch_axis=0)
indices_data = _gluon.utils.split_and_load(_mx.nd.array(batch.repeat_indices, dtype=_np.int64),
ctx_list=ctx, batch_axis=0)
outputs = []
for b_img, b_indices in zip(c_data, indices_data):
mx_style = _mx.nd.array(style, dtype=_np.int64, ctx=b_indices.context)
b_batch_styles = mx_style[b_indices]
output = self._model(b_img, b_batch_styles)
outputs.append(output)
image_data = _np.concatenate([
(output.asnumpy().transpose(0, 2, 3, 1) * 255).astype(_np.uint8)
for output in outputs], axis=0)
batch_styles = [style[idx] for idx in batch.repeat_indices]
for b in range(batch_size - (batch.pad or 0)):
image = image_data[b]
# Crop to remove added padding
crop = batch.crop[b]
cropped_image = image[crop[0]:crop[1], crop[2]:crop[3]]
tc_img = _tc.Image(_image_data=cropped_image.tobytes(),
_width=cropped_image.shape[1],
_height=cropped_image.shape[0],
_channels=cropped_image.shape[2],
_format_enum=2,
_image_data_size=cropped_image.size)
sb.append([batch.indices[b], batch_styles[b], tc_img])
count += 1
cur_time = _time.time()
if verbose and output_size != 1 and (cur_time > last_time + 10 or count == output_size):
print('Stylizing {curr_image:{width}d}/{max_n:{width}d}'.
format(curr_image=count, max_n=output_size, width=len(str(output_size))))
last_time = cur_time
return unpack(sb.close()) |
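One subtlety in the implementation above is the orientation of a tuple ``max_size``: the public argument is documented as (max width, max height), but the model internally works with (height, width), hence the ``max_size[::-1]`` reversal. A minimal sketch of that step (the numbers are made up for illustration):
max_size = (800, 600)            # passed in as (max width, max height)
input_shape = max_size[::-1]     # stored internally as (height, width)
assert input_shape == (600, 800)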
def pick_flat_z(data):
"""Generate a 2D array of the quasiparticle weight by only selecting the
first particle data"""
zmes = []
for i in data['zeta']:
zmes.append(i[:, 0])
return np.asarray(zmes) | Generate a 2D array of the quasiparticle weight by only selecting the
first particle data | Below is the the instruction that describes the task:
### Input:
Generate a 2D array of the quasiparticle weight by only selecting the
first particle data
### Response:
def pick_flat_z(data):
"""Generate a 2D array of the quasiparticle weight by only selecting the
first particle data"""
zmes = []
for i in data['zeta']:
zmes.append(i[:, 0])
return np.asarray(zmes) |
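A short, self-contained sketch of the helper above, assuming the function is importable and that data['zeta'] holds one 2D array per parameter point with particles along the second axis; only column 0 (the first particle) is kept:
import numpy as np
# hypothetical input: three parameter points, two particles each
data = {'zeta': [np.array([[0.9, 0.8]]),
                 np.array([[0.7, 0.6]]),
                 np.array([[0.5, 0.4]])]}
zmes = pick_flat_z(data)
print(zmes.shape)   # (3, 1) -- only the first-particle weights remain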
def get_covalent_bonds(self, tol=0.2):
"""
Determines the covalent bonds in a molecule.
Args:
tol (float): The tolerance used to determine bonds in a structure. See
CovalentBond.is_bonded.
Returns:
List of bonds
"""
bonds = []
for site1, site2 in itertools.combinations(self._sites, 2):
if CovalentBond.is_bonded(site1, site2, tol):
bonds.append(CovalentBond(site1, site2))
return bonds | Determines the covalent bonds in a molecule.
Args:
tol (float): The tolerance used to determine bonds in a structure. See
CovalentBond.is_bonded.
Returns:
List of bonds | Below is the the instruction that describes the task:
### Input:
Determines the covalent bonds in a molecule.
Args:
tol (float): The tolerance used to determine bonds in a structure. See
CovalentBond.is_bonded.
Returns:
List of bonds
### Response:
def get_covalent_bonds(self, tol=0.2):
"""
Determines the covalent bonds in a molecule.
Args:
tol (float): The tolerance used to determine bonds in a structure. See
CovalentBond.is_bonded.
Returns:
List of bonds
"""
bonds = []
for site1, site2 in itertools.combinations(self._sites, 2):
if CovalentBond.is_bonded(site1, site2, tol):
bonds.append(CovalentBond(site1, site2))
return bonds |
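A hedged usage sketch for the method above with pymatgen (the import path may differ slightly between pymatgen versions, and the H2 geometry is only an illustrative value):
from pymatgen.core.structure import Molecule
# H2 with a ~0.74 Angstrom bond length
h2 = Molecule(["H", "H"], [[0.0, 0.0, 0.0], [0.0, 0.0, 0.74]])
for bond in h2.get_covalent_bonds(tol=0.2):
    print(bond)   # one CovalentBond between the two H sites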
def clear_preview(self):
"""stub"""
try:
rm = self.my_osid_object._get_provider_manager('REPOSITORY')
except AttributeError:
rm = self.my_osid_object_form._get_provider_manager('REPOSITORY')
try:
aas = rm.get_asset_admin_session_for_repository(
Id(self.my_osid_object._my_map['assignedBankIds'][0]))
except AttributeError:
# for update forms
aas = rm.get_asset_admin_session_for_repository(
Id(self.my_osid_object_form._my_map['assignedBankIds'][0]))
if 'preview' not in self.my_osid_object_form._my_map['fileIds']:
raise NotFound()
aas.delete_asset(
Id(self.my_osid_object_form._my_map['fileIds']['preview']['assetId']))
del self.my_osid_object_form._my_map['fileIds']['preview'] | stub | Below is the the instruction that describes the task:
### Input:
stub
### Response:
def clear_preview(self):
"""stub"""
try:
rm = self.my_osid_object._get_provider_manager('REPOSITORY')
except AttributeError:
rm = self.my_osid_object_form._get_provider_manager('REPOSITORY')
try:
aas = rm.get_asset_admin_session_for_repository(
Id(self.my_osid_object._my_map['assignedBankIds'][0]))
except AttributeError:
# for update forms
aas = rm.get_asset_admin_session_for_repository(
Id(self.my_osid_object_form._my_map['assignedBankIds'][0]))
if 'preview' not in self.my_osid_object_form._my_map['fileIds']:
raise NotFound()
aas.delete_asset(
Id(self.my_osid_object_form._my_map['fileIds']['preview']['assetId']))
del self.my_osid_object_form._my_map['fileIds']['preview'] |
def _contextualise(self):
'''Determine contextual idents (cidents)'''
# loop through hierarchy identifying unique lineages
# TODO: gain other contextual information, not just ident
deja_vues = []
for rank in reversed(self.taxonomy):
# return named clades -- '' are ignored
clades = [e for e in self.hierarchy[rank] if e[1]]
# print 'Rank: {0} - {1}'.format(rank, len(clades))
# get unique lineages at this level
uniques = [e for e in clades if len(e[0]) == 1]
# remove those already seen
uniques = [e for e in uniques if e[0][0].ident not in deja_vues]
# add each to self[ident]['cident']
for e in uniques:
ident = e[0][0].ident
self[ident]['cident'] = e[1]
deja_vues.append(ident) | Determine contextual idents (cidents) | Below is the the instruction that describes the task:
### Input:
Determine contextual idents (cidents)
### Response:
def _contextualise(self):
'''Determine contextual idents (cidents)'''
# loop through hierarchy identifying unique lineages
# TODO: gain other contextual information, not just ident
deja_vues = []
for rank in reversed(self.taxonomy):
# return named clades -- '' are ignored
clades = [e for e in self.hierarchy[rank] if e[1]]
# print 'Rank: {0} - {1}'.format(rank, len(clades))
# get unique lineages at this level
uniques = [e for e in clades if len(e[0]) == 1]
# remove those already seen
uniques = [e for e in uniques if e[0][0].ident not in deja_vues]
# add each to self[ident]['cident']
for e in uniques:
ident = e[0][0].ident
self[ident]['cident'] = e[1]
deja_vues.append(ident) |
def close_hover(self, element, use_js=False):
"""
Close hover by moving to a set offset "away" from the element being hovered.
:param element: element that triggered the hover to open
:param use_js: use javascript to close hover
:return: None
"""
try:
if use_js:
self._js_hover('mouseout', element)
else:
actions = ActionChains(self.driver)
actions.move_to_element_with_offset(element, -100, -100)
actions.reset_actions()
except (StaleElementReferenceException, MoveTargetOutOfBoundsException):
return True | Close hover by moving to a set offset "away" from the element being hovered.
:param element: element that triggered the hover to open
:param use_js: use javascript to close hover
:return: None | Below is the the instruction that describes the task:
### Input:
Close hover by moving to a set offset "away" from the element being hovered.
:param element: element that triggered the hover to open
:param use_js: use javascript to close hover
:return: None
### Response:
def close_hover(self, element, use_js=False):
"""
Close hover by moving to a set offset "away" from the element being hovered.
:param element: element that triggered the hover to open
:param use_js: use javascript to close hover
:return: None
"""
try:
if use_js:
self._js_hover('mouseout', element)
else:
actions = ActionChains(self.driver)
actions.move_to_element_with_offset(element, -100, -100)
actions.reset_actions()
except (StaleElementReferenceException, MoveTargetOutOfBoundsException):
return True |
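Worth noting: the non-JavaScript branch above builds an ActionChains sequence and then calls reset_actions() without ever calling perform(), so the pointer move may never actually execute. A hedged sketch of the offset-move idea performed explicitly (driver setup and selector are placeholders; depending on the Selenium version the offset origin is the element's top-left corner or its center):
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.common.by import By

driver = webdriver.Firefox()                     # any WebDriver works
driver.get("https://example.com")                # placeholder URL
element = driver.find_element(By.TAG_NAME, "a")  # element that triggered the hover
ActionChains(driver).move_to_element_with_offset(element, -100, -100).perform()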
def get_entity_propnames(entity):
""" Get entity property names
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set
"""
ins = entity if isinstance(entity, InstanceState) else inspect(entity)
return set(
ins.mapper.column_attrs.keys() + # Columns
ins.mapper.relationships.keys() # Relationships
) | Get entity property names
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set | Below is the the instruction that describes the task:
### Input:
Get entity property names
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set
### Response:
def get_entity_propnames(entity):
""" Get entity property names
:param entity: Entity
:type entity: sqlalchemy.ext.declarative.api.DeclarativeMeta
:returns: Set of entity property names
:rtype: set
"""
ins = entity if isinstance(entity, InstanceState) else inspect(entity)
return set(
ins.mapper.column_attrs.keys() + # Columns
ins.mapper.relationships.keys() # Relationships
) |
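A hedged usage sketch for the helper above with a small declarative model (import paths may vary by SQLAlchemy version; the model and relationship names are invented):
from sqlalchemy import Column, ForeignKey, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()

class User(Base):
    __tablename__ = 'users'
    id = Column(Integer, primary_key=True)
    name = Column(String)
    addresses = relationship('Address')

class Address(Base):
    __tablename__ = 'addresses'
    id = Column(Integer, primary_key=True)
    user_id = Column(Integer, ForeignKey('users.id'))

print(get_entity_propnames(User))   # {'id', 'name', 'addresses'} (order may vary)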
def CreateGroup(self, GroupName):
"""Creates a custom contact group.
:Parameters:
GroupName : unicode
Group name.
:return: A group object.
:rtype: `Group`
:see: `DeleteGroup`
"""
groups = self.CustomGroups
self._DoCommand('CREATE GROUP %s' % tounicode(GroupName))
for g in self.CustomGroups:
if g not in groups and g.DisplayName == GroupName:
return g
raise SkypeError(0, 'Group creating failed') | Creates a custom contact group.
:Parameters:
GroupName : unicode
Group name.
:return: A group object.
:rtype: `Group`
:see: `DeleteGroup` | Below is the the instruction that describes the task:
### Input:
Creates a custom contact group.
:Parameters:
GroupName : unicode
Group name.
:return: A group object.
:rtype: `Group`
:see: `DeleteGroup`
### Response:
def CreateGroup(self, GroupName):
"""Creates a custom contact group.
:Parameters:
GroupName : unicode
Group name.
:return: A group object.
:rtype: `Group`
:see: `DeleteGroup`
"""
groups = self.CustomGroups
self._DoCommand('CREATE GROUP %s' % tounicode(GroupName))
for g in self.CustomGroups:
if g not in groups and g.DisplayName == GroupName:
return g
raise SkypeError(0, 'Group creating failed') |
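A hedged usage sketch, assuming a running, logged-in Skype client that Skype4Py can attach to (the group name is arbitrary):
import Skype4Py

skype = Skype4Py.Skype()
skype.Attach()                           # connect to the running Skype client
group = skype.CreateGroup(u'Coworkers')
print(group.DisplayName)                 # u'Coworkers'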
def add_sink(self, sink):
"""Add a vehicle data sink to the instance. ``sink`` should be a
sub-class of ``DataSink`` or at least have a ``receive(message,
**kwargs)`` method.
The sink will be started if it is startable. (i.e. it has a ``start()``
method).
"""
if sink is not None:
self.sinks.add(sink)
if hasattr(sink, 'start'):
sink.start() | Add a vehicle data sink to the instance. ``sink`` should be a
sub-class of ``DataSink`` or at least have a ``receive(message,
**kwargs)`` method.
The sink will be started if it is startable. (i.e. it has a ``start()``
method). | Below is the the instruction that describes the task:
### Input:
Add a vehicle data sink to the instance. ``sink`` should be a
sub-class of ``DataSink`` or at least have a ``receive(message,
**kwargs)`` method.
The sink will be started if it is startable. (i.e. it has a ``start()``
method).
### Response:
def add_sink(self, sink):
"""Add a vehicle data sink to the instance. ``sink`` should be a
sub-class of ``DataSink`` or at least have a ``receive(message,
**kwargs)`` method.
The sink will be started if it is startable. (i.e. it has a ``start()``
method).
"""
if sink is not None:
self.sinks.add(sink)
if hasattr(sink, 'start'):
sink.start() |
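Because the only hard requirement is a receive(message, **kwargs) method (start() is optional), a minimal custom sink can be sketched as below; source_owner is a hypothetical stand-in for whatever object defines add_sink, since its class is not shown here:
class PrintSink(object):
    """Minimal sink: any object with receive(message, **kwargs) qualifies."""

    def receive(self, message, **kwargs):
        print(message)

    def start(self):
        # optional; add_sink calls this automatically because it exists
        print("sink started")

source_owner.add_sink(PrintSink())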
def managed(name,
venv_bin=None,
requirements=None,
system_site_packages=False,
distribute=False,
use_wheel=False,
clear=False,
python=None,
extra_search_dir=None,
never_download=None,
prompt=None,
user=None,
cwd=None,
index_url=None,
extra_index_url=None,
pre_releases=False,
no_deps=False,
pip_download=None,
pip_download_cache=None,
pip_exists_action=None,
pip_ignore_installed=False,
proxy=None,
use_vt=False,
env_vars=None,
no_use_wheel=False,
pip_upgrade=False,
pip_pkgs=None,
pip_no_cache_dir=False,
pip_cache_dir=None,
process_dependency_links=False,
no_binary=None,
**kwargs):
'''
Create a virtualenv and optionally manage it with pip
name
Path to the virtualenv.
venv_bin: virtualenv
The name (and optionally path) of the virtualenv command. This can also
be set globally in the minion config file as ``virtualenv.venv_bin``.
requirements: None
Path to a pip requirements file. If the path begins with ``salt://``
the file will be transferred from the master file server.
use_wheel: False
Prefer wheel archives (requires pip >= 1.4).
python : None
Python executable used to build the virtualenv
user: None
The user under which to run virtualenv and pip.
cwd: None
Path to the working directory where `pip install` is executed.
no_deps: False
Pass `--no-deps` to `pip install`.
pip_exists_action: None
Default action of pip when a path already exists: (s)witch, (i)gnore,
(w)ipe, (b)ackup.
proxy: None
Proxy address which is passed to `pip install`.
env_vars: None
Set environment variables that some builds will depend on. For example,
a Python C-module may have a Makefile that needs INCLUDE_PATH set to
pick up a header file while compiling.
no_use_wheel: False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
pip_upgrade: False
Pass `--upgrade` to `pip install`.
pip_pkgs: None
As an alternative to `requirements`, pass a list of pip packages that
should be installed.
process_dependency_links: False
Run pip install with the --process_dependency_links flag.
.. versionadded:: 2017.7.0
Also accepts any kwargs that the virtualenv module will. However, some
kwargs, such as the ``pip`` option, require ``- distribute: True``.
.. code-block:: yaml
/var/www/myvirtualenv.com:
virtualenv.managed:
- system_site_packages: False
- requirements: salt://REQUIREMENTS.txt
- env_vars:
PATH_VAR: '/usr/local/bin/'
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if 'virtualenv.create' not in __salt__:
ret['result'] = False
ret['comment'] = 'Virtualenv was not detected on this system'
return ret
if salt.utils.platform.is_windows():
venv_py = os.path.join(name, 'Scripts', 'python.exe')
else:
venv_py = os.path.join(name, 'bin', 'python')
venv_exists = os.path.exists(venv_py)
# Bail out early if the specified requirements file can't be found
if requirements and requirements.startswith('salt://'):
cached_requirements = __salt__['cp.is_cached'](requirements, __env__)
if not cached_requirements:
# It's not cached, let's cache it.
cached_requirements = __salt__['cp.cache_file'](
requirements, __env__
)
# Check if the master version has changed.
if cached_requirements and __salt__['cp.hash_file'](requirements, __env__) != \
__salt__['cp.hash_file'](cached_requirements, __env__):
cached_requirements = __salt__['cp.cache_file'](
requirements, __env__
)
if not cached_requirements:
ret.update({
'result': False,
'comment': 'pip requirements file \'{0}\' not found'.format(
requirements
)
})
return ret
requirements = cached_requirements
# If it already exists, grab the version for posterity
if venv_exists and clear:
ret['changes']['cleared_packages'] = \
__salt__['pip.freeze'](bin_env=name)
ret['changes']['old'] = \
__salt__['cmd.run_stderr']('{0} -V'.format(venv_py)).strip('\n')
# Create (or clear) the virtualenv
if __opts__['test']:
if venv_exists and clear:
ret['result'] = None
ret['comment'] = 'Virtualenv {0} is set to be cleared'.format(name)
return ret
if venv_exists and not clear:
ret['comment'] = 'Virtualenv {0} is already created'.format(name)
return ret
ret['result'] = None
ret['comment'] = 'Virtualenv {0} is set to be created'.format(name)
return ret
if not venv_exists or (venv_exists and clear):
try:
venv_ret = __salt__['virtualenv.create'](
name,
venv_bin=venv_bin,
system_site_packages=system_site_packages,
distribute=distribute,
clear=clear,
python=python,
extra_search_dir=extra_search_dir,
never_download=never_download,
prompt=prompt,
user=user,
use_vt=use_vt,
**kwargs
)
except CommandNotFoundError as err:
ret['result'] = False
ret['comment'] = 'Failed to create virtualenv: {0}'.format(err)
return ret
if venv_ret['retcode'] != 0:
ret['result'] = False
ret['comment'] = venv_ret['stdout'] + venv_ret['stderr']
return ret
ret['result'] = True
ret['changes']['new'] = __salt__['cmd.run_stderr'](
'{0} -V'.format(venv_py)).strip('\n')
if clear:
ret['comment'] = 'Cleared existing virtualenv'
else:
ret['comment'] = 'Created new virtualenv'
elif venv_exists:
ret['comment'] = 'virtualenv exists'
# Check that the pip binary supports the 'use_wheel' option
if use_wheel:
min_version = '1.4'
max_version = '9.0.3'
cur_version = __salt__['pip.version'](bin_env=name)
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_use_wheel' option
if no_use_wheel:
min_version = '1.4'
max_version = '9.0.3'
cur_version = __salt__['pip.version'](bin_env=name)
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'no_use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_binary' option
if no_binary:
min_version = '7.0.0'
cur_version = __salt__['pip.version'](bin_env=name)
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
if too_low:
ret['result'] = False
ret['comment'] = ('The \'no_binary\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
return ret
# Populate the venv via a requirements file
if requirements or pip_pkgs:
try:
before = set(__salt__['pip.freeze'](bin_env=name, user=user, use_vt=use_vt))
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = exc.strerror
return ret
if requirements:
if isinstance(requirements, six.string_types):
req_canary = requirements.split(',')[0]
elif isinstance(requirements, list):
req_canary = requirements[0]
else:
raise TypeError(
'pip requirements must be either a string or a list'
)
if req_canary != os.path.abspath(req_canary):
cwd = os.path.dirname(os.path.abspath(req_canary))
pip_ret = __salt__['pip.install'](
pkgs=pip_pkgs,
requirements=requirements,
process_dependency_links=process_dependency_links,
bin_env=name,
use_wheel=use_wheel,
no_use_wheel=no_use_wheel,
no_binary=no_binary,
user=user,
cwd=cwd,
index_url=index_url,
extra_index_url=extra_index_url,
download=pip_download,
download_cache=pip_download_cache,
pre_releases=pre_releases,
exists_action=pip_exists_action,
ignore_installed=pip_ignore_installed,
upgrade=pip_upgrade,
no_deps=no_deps,
proxy=proxy,
use_vt=use_vt,
env_vars=env_vars,
no_cache_dir=pip_no_cache_dir,
cache_dir=pip_cache_dir,
**kwargs
)
ret['result'] &= pip_ret['retcode'] == 0
if pip_ret['retcode'] > 0:
ret['comment'] = '{0}\n{1}\n{2}'.format(ret['comment'],
pip_ret['stdout'],
pip_ret['stderr'])
after = set(__salt__['pip.freeze'](bin_env=name))
new = list(after - before)
old = list(before - after)
if new or old:
ret['changes']['packages'] = {
'new': new if new else '',
'old': old if old else ''}
return ret | Create a virtualenv and optionally manage it with pip
name
Path to the virtualenv.
venv_bin: virtualenv
The name (and optionally path) of the virtualenv command. This can also
be set globally in the minion config file as ``virtualenv.venv_bin``.
requirements: None
Path to a pip requirements file. If the path begins with ``salt://``
the file will be transferred from the master file server.
use_wheel: False
Prefer wheel archives (requires pip >= 1.4).
python : None
Python executable used to build the virtualenv
user: None
The user under which to run virtualenv and pip.
cwd: None
Path to the working directory where `pip install` is executed.
no_deps: False
Pass `--no-deps` to `pip install`.
pip_exists_action: None
Default action of pip when a path already exists: (s)witch, (i)gnore,
(w)ipe, (b)ackup.
proxy: None
Proxy address which is passed to `pip install`.
env_vars: None
Set environment variables that some builds will depend on. For example,
a Python C-module may have a Makefile that needs INCLUDE_PATH set to
pick up a header file while compiling.
no_use_wheel: False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
pip_upgrade: False
Pass `--upgrade` to `pip install`.
pip_pkgs: None
As an alternative to `requirements`, pass a list of pip packages that
should be installed.
process_dependency_links: False
Run pip install with the --process_dependency_links flag.
.. versionadded:: 2017.7.0
Also accepts any kwargs that the virtualenv module will. However, some
kwargs, such as the ``pip`` option, require ``- distribute: True``.
.. code-block:: yaml
/var/www/myvirtualenv.com:
virtualenv.managed:
- system_site_packages: False
- requirements: salt://REQUIREMENTS.txt
- env_vars:
PATH_VAR: '/usr/local/bin/' | Below is the the instruction that describes the task:
### Input:
Create a virtualenv and optionally manage it with pip
name
Path to the virtualenv.
venv_bin: virtualenv
The name (and optionally path) of the virtualenv command. This can also
be set globally in the minion config file as ``virtualenv.venv_bin``.
requirements: None
Path to a pip requirements file. If the path begins with ``salt://``
the file will be transferred from the master file server.
use_wheel: False
Prefer wheel archives (requires pip >= 1.4).
python : None
Python executable used to build the virtualenv
user: None
The user under which to run virtualenv and pip.
cwd: None
Path to the working directory where `pip install` is executed.
no_deps: False
Pass `--no-deps` to `pip install`.
pip_exists_action: None
Default action of pip when a path already exists: (s)witch, (i)gnore,
(w)ipe, (b)ackup.
proxy: None
Proxy address which is passed to `pip install`.
env_vars: None
Set environment variables that some builds will depend on. For example,
a Python C-module may have a Makefile that needs INCLUDE_PATH set to
pick up a header file while compiling.
no_use_wheel: False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
pip_upgrade: False
Pass `--upgrade` to `pip install`.
pip_pkgs: None
As an alternative to `requirements`, pass a list of pip packages that
should be installed.
process_dependency_links: False
Run pip install with the --process_dependency_links flag.
.. versionadded:: 2017.7.0
Also accepts any kwargs that the virtualenv module will. However, some
kwargs, such as the ``pip`` option, require ``- distribute: True``.
.. code-block:: yaml
/var/www/myvirtualenv.com:
virtualenv.managed:
- system_site_packages: False
- requirements: salt://REQUIREMENTS.txt
- env_vars:
PATH_VAR: '/usr/local/bin/'
### Response:
def managed(name,
venv_bin=None,
requirements=None,
system_site_packages=False,
distribute=False,
use_wheel=False,
clear=False,
python=None,
extra_search_dir=None,
never_download=None,
prompt=None,
user=None,
cwd=None,
index_url=None,
extra_index_url=None,
pre_releases=False,
no_deps=False,
pip_download=None,
pip_download_cache=None,
pip_exists_action=None,
pip_ignore_installed=False,
proxy=None,
use_vt=False,
env_vars=None,
no_use_wheel=False,
pip_upgrade=False,
pip_pkgs=None,
pip_no_cache_dir=False,
pip_cache_dir=None,
process_dependency_links=False,
no_binary=None,
**kwargs):
'''
Create a virtualenv and optionally manage it with pip
name
Path to the virtualenv.
venv_bin: virtualenv
The name (and optionally path) of the virtualenv command. This can also
be set globally in the minion config file as ``virtualenv.venv_bin``.
requirements: None
Path to a pip requirements file. If the path begins with ``salt://``
the file will be transferred from the master file server.
use_wheel: False
Prefer wheel archives (requires pip >= 1.4).
python : None
Python executable used to build the virtualenv
user: None
The user under which to run virtualenv and pip.
cwd: None
Path to the working directory where `pip install` is executed.
no_deps: False
Pass `--no-deps` to `pip install`.
pip_exists_action: None
Default action of pip when a path already exists: (s)witch, (i)gnore,
(w)ipe, (b)ackup.
proxy: None
Proxy address which is passed to `pip install`.
env_vars: None
Set environment variables that some builds will depend on. For example,
a Python C-module may have a Makefile that needs INCLUDE_PATH set to
pick up a header file while compiling.
no_use_wheel: False
Force to not use wheel archives (requires pip>=1.4)
no_binary
Force to not use binary packages (requires pip >= 7.0.0)
Accepts either :all: to disable all binary packages, :none: to empty the set,
or a list of one or more packages
pip_upgrade: False
Pass `--upgrade` to `pip install`.
pip_pkgs: None
As an alternative to `requirements`, pass a list of pip packages that
should be installed.
process_dependency_links: False
Run pip install with the --process_dependency_links flag.
.. versionadded:: 2017.7.0
Also accepts any kwargs that the virtualenv module will. However, some
kwargs, such as the ``pip`` option, require ``- distribute: True``.
.. code-block:: yaml
/var/www/myvirtualenv.com:
virtualenv.managed:
- system_site_packages: False
- requirements: salt://REQUIREMENTS.txt
- env_vars:
PATH_VAR: '/usr/local/bin/'
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
if 'virtualenv.create' not in __salt__:
ret['result'] = False
ret['comment'] = 'Virtualenv was not detected on this system'
return ret
if salt.utils.platform.is_windows():
venv_py = os.path.join(name, 'Scripts', 'python.exe')
else:
venv_py = os.path.join(name, 'bin', 'python')
venv_exists = os.path.exists(venv_py)
# Bail out early if the specified requirements file can't be found
if requirements and requirements.startswith('salt://'):
cached_requirements = __salt__['cp.is_cached'](requirements, __env__)
if not cached_requirements:
# It's not cached, let's cache it.
cached_requirements = __salt__['cp.cache_file'](
requirements, __env__
)
# Check if the master version has changed.
if cached_requirements and __salt__['cp.hash_file'](requirements, __env__) != \
__salt__['cp.hash_file'](cached_requirements, __env__):
cached_requirements = __salt__['cp.cache_file'](
requirements, __env__
)
if not cached_requirements:
ret.update({
'result': False,
'comment': 'pip requirements file \'{0}\' not found'.format(
requirements
)
})
return ret
requirements = cached_requirements
# If it already exists, grab the version for posterity
if venv_exists and clear:
ret['changes']['cleared_packages'] = \
__salt__['pip.freeze'](bin_env=name)
ret['changes']['old'] = \
__salt__['cmd.run_stderr']('{0} -V'.format(venv_py)).strip('\n')
# Create (or clear) the virtualenv
if __opts__['test']:
if venv_exists and clear:
ret['result'] = None
ret['comment'] = 'Virtualenv {0} is set to be cleared'.format(name)
return ret
if venv_exists and not clear:
ret['comment'] = 'Virtualenv {0} is already created'.format(name)
return ret
ret['result'] = None
ret['comment'] = 'Virtualenv {0} is set to be created'.format(name)
return ret
if not venv_exists or (venv_exists and clear):
try:
venv_ret = __salt__['virtualenv.create'](
name,
venv_bin=venv_bin,
system_site_packages=system_site_packages,
distribute=distribute,
clear=clear,
python=python,
extra_search_dir=extra_search_dir,
never_download=never_download,
prompt=prompt,
user=user,
use_vt=use_vt,
**kwargs
)
except CommandNotFoundError as err:
ret['result'] = False
ret['comment'] = 'Failed to create virtualenv: {0}'.format(err)
return ret
if venv_ret['retcode'] != 0:
ret['result'] = False
ret['comment'] = venv_ret['stdout'] + venv_ret['stderr']
return ret
ret['result'] = True
ret['changes']['new'] = __salt__['cmd.run_stderr'](
'{0} -V'.format(venv_py)).strip('\n')
if clear:
ret['comment'] = 'Cleared existing virtualenv'
else:
ret['comment'] = 'Created new virtualenv'
elif venv_exists:
ret['comment'] = 'virtualenv exists'
# Check that the pip binary supports the 'use_wheel' option
if use_wheel:
min_version = '1.4'
max_version = '9.0.3'
cur_version = __salt__['pip.version'](bin_env=name)
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_use_wheel' option
if no_use_wheel:
min_version = '1.4'
max_version = '9.0.3'
cur_version = __salt__['pip.version'](bin_env=name)
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
too_high = salt.utils.versions.compare(ver1=cur_version, oper='>', ver2=max_version)
if too_low or too_high:
ret['result'] = False
ret['comment'] = ('The \'no_use_wheel\' option is only supported in '
'pip between {0} and {1}. The version of pip detected '
'was {2}.').format(min_version, max_version, cur_version)
return ret
# Check that the pip binary supports the 'no_binary' option
if no_binary:
min_version = '7.0.0'
cur_version = __salt__['pip.version'](bin_env=name)
too_low = salt.utils.versions.compare(ver1=cur_version, oper='<', ver2=min_version)
if too_low:
ret['result'] = False
ret['comment'] = ('The \'no_binary\' option is only supported in '
'pip {0} and newer. The version of pip detected '
'was {1}.').format(min_version, cur_version)
return ret
# Populate the venv via a requirements file
if requirements or pip_pkgs:
try:
before = set(__salt__['pip.freeze'](bin_env=name, user=user, use_vt=use_vt))
except CommandExecutionError as exc:
ret['result'] = False
ret['comment'] = exc.strerror
return ret
if requirements:
if isinstance(requirements, six.string_types):
req_canary = requirements.split(',')[0]
elif isinstance(requirements, list):
req_canary = requirements[0]
else:
raise TypeError(
'pip requirements must be either a string or a list'
)
if req_canary != os.path.abspath(req_canary):
cwd = os.path.dirname(os.path.abspath(req_canary))
pip_ret = __salt__['pip.install'](
pkgs=pip_pkgs,
requirements=requirements,
process_dependency_links=process_dependency_links,
bin_env=name,
use_wheel=use_wheel,
no_use_wheel=no_use_wheel,
no_binary=no_binary,
user=user,
cwd=cwd,
index_url=index_url,
extra_index_url=extra_index_url,
download=pip_download,
download_cache=pip_download_cache,
pre_releases=pre_releases,
exists_action=pip_exists_action,
ignore_installed=pip_ignore_installed,
upgrade=pip_upgrade,
no_deps=no_deps,
proxy=proxy,
use_vt=use_vt,
env_vars=env_vars,
no_cache_dir=pip_no_cache_dir,
cache_dir=pip_cache_dir,
**kwargs
)
ret['result'] &= pip_ret['retcode'] == 0
if pip_ret['retcode'] > 0:
ret['comment'] = '{0}\n{1}\n{2}'.format(ret['comment'],
pip_ret['stdout'],
pip_ret['stderr'])
after = set(__salt__['pip.freeze'](bin_env=name))
new = list(after - before)
old = list(before - after)
if new or old:
ret['changes']['packages'] = {
'new': new if new else '',
'old': old if old else ''}
return ret |
def _crps_cdf_single(x, cdf_or_dist, xmin=None, xmax=None, tol=1e-6):
"""
See crps_cdf for docs.
"""
# TODO: this function is pretty slow. Look for clever ways to speed it up.
# allow for directly passing in scipy.stats distribution objects.
cdf = getattr(cdf_or_dist, 'cdf', cdf_or_dist)
assert callable(cdf)
# if bounds aren't given, discover them
if xmin is None or xmax is None:
# Note that infinite values for xmin and xmax are valid, but
# it slows down the resulting quadrature significantly.
xmin, xmax = _discover_bounds(cdf)
# make sure the bounds haven't clipped the cdf.
if (tol is not None) and (cdf(xmin) >= tol) or (cdf(xmax) <= (1. - tol)):
raise ValueError('CDF does not meet tolerance requirements at %s '
'extreme(s)! Consider using function defaults '
'or using infinities at the bounds. '
% ('lower' if cdf(xmin) >= tol else 'upper'))
# CRPS = int_-inf^inf (F(y) - H(x))**2 dy
# = int_-inf^x F(y)**2 dy + int_x^inf (1 - F(y))**2 dy
def lhs(y):
# left hand side of CRPS integral
return np.square(cdf(y))
# use quadrature to integrate the lhs
lhs_int, lhs_tol = integrate.quad(lhs, xmin, x)
# make sure the resulting CRPS will be within tolerance
if (tol is not None) and (lhs_tol >= 0.5 * tol):
raise ValueError('Lower integral did not evaluate to within tolerance! '
'Tolerance achieved: %f , Value of integral: %f \n'
'Consider setting the lower bound to -np.inf.' %
(lhs_tol, lhs_int))
def rhs(y):
# right hand side of CRPS integral
return np.square(1. - cdf(y))
rhs_int, rhs_tol = integrate.quad(rhs, x, xmax)
# make sure the resulting CRPS will be within tolerance
if (tol is not None) and (rhs_tol >= 0.5 * tol):
raise ValueError('Upper integral did not evaluate to within tolerance! \n'
'Tolerance achieved: %f , Value of integral: %f \n'
'Consider setting the upper bound to np.inf or if '
'you already have, set warn_level to `ignore`.' %
(rhs_tol, rhs_int))
return lhs_int + rhs_int | See crps_cdf for docs. | Below is the the instruction that describes the task:
### Input:
See crps_cdf for docs.
### Response:
def _crps_cdf_single(x, cdf_or_dist, xmin=None, xmax=None, tol=1e-6):
"""
See crps_cdf for docs.
"""
# TODO: this function is pretty slow. Look for clever ways to speed it up.
# allow for directly passing in scipy.stats distribution objects.
cdf = getattr(cdf_or_dist, 'cdf', cdf_or_dist)
assert callable(cdf)
# if bounds aren't given, discover them
if xmin is None or xmax is None:
# Note that infinite values for xmin and xmax are valid, but
# it slows down the resulting quadrature significantly.
xmin, xmax = _discover_bounds(cdf)
# make sure the bounds haven't clipped the cdf.
if (tol is not None) and (cdf(xmin) >= tol) or (cdf(xmax) <= (1. - tol)):
raise ValueError('CDF does not meet tolerance requirements at %s '
'extreme(s)! Consider using function defaults '
'or using infinities at the bounds. '
% ('lower' if cdf(xmin) >= tol else 'upper'))
# CRPS = int_-inf^inf (F(y) - H(x))**2 dy
# = int_-inf^x F(y)**2 dy + int_x^inf (1 - F(y))**2 dy
def lhs(y):
# left hand side of CRPS integral
return np.square(cdf(y))
# use quadrature to integrate the lhs
lhs_int, lhs_tol = integrate.quad(lhs, xmin, x)
# make sure the resulting CRPS will be within tolerance
if (tol is not None) and (lhs_tol >= 0.5 * tol):
raise ValueError('Lower integral did not evaluate to within tolerance! '
'Tolerance achieved: %f , Value of integral: %f \n'
'Consider setting the lower bound to -np.inf.' %
(lhs_tol, lhs_int))
def rhs(y):
# right hand side of CRPS integral
return np.square(1. - cdf(y))
rhs_int, rhs_tol = integrate.quad(rhs, x, xmax)
# make sure the resulting CRPS will be within tolerance
if (tol is not None) and (rhs_tol >= 0.5 * tol):
raise ValueError('Upper integral did not evaluate to within tolerance! \n'
'Tolerance achieved: %f , Value of integral: %f \n'
'Consider setting the upper bound to np.inf or if '
'you already have, set warn_level to `ignore`.' %
(rhs_tol, rhs_int))
return lhs_int + rhs_int |
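As a hedged sanity check, assuming the helper above is importable, the quadrature can be compared against the known closed form of the CRPS for a standard normal, CRPS(N(0,1), x) = x(2*Phi(x) - 1) + 2*phi(x) - 1/sqrt(pi); at x = 0 both should come out near 0.2337:
import numpy as np
from scipy import stats

x = 0.0
closed_form = x * (2 * stats.norm.cdf(x) - 1) + 2 * stats.norm.pdf(x) - 1 / np.sqrt(np.pi)
numeric = _crps_cdf_single(x, stats.norm, xmin=-10.0, xmax=10.0)
print(closed_form, numeric)   # both approximately 0.2337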
def mean(data, n=3, **kwargs):
"""The mean forecast for the next point is the mean value of the previous ``n`` points in
the series.
Args:
data (np.array): Observed data, presumed to be ordered in time.
n (int): period over which to calculate the mean
Returns:
float: a single-valued forecast for the next value in the series.
"""
# don't start averaging until we've seen n points
if len(data[-n:]) < n:
forecast = np.nan
else:
# nb: we'll keep the forecast as a float
forecast = np.mean(data[-n:])
return forecast | The mean forecast for the next point is the mean value of the previous ``n`` points in
the series.
Args:
data (np.array): Observed data, presumed to be ordered in time.
n (int): period over which to calculate the mean
Returns:
float: a single-valued forecast for the next value in the series. | Below is the the instruction that describes the task:
### Input:
The mean forecast for the next point is the mean value of the previous ``n`` points in
the series.
Args:
data (np.array): Observed data, presumed to be ordered in time.
n (int): period over which to calculate the mean
Returns:
float: a single-valued forecast for the next value in the series.
### Response:
def mean(data, n=3, **kwargs):
"""The mean forecast for the next point is the mean value of the previous ``n`` points in
the series.
Args:
data (np.array): Observed data, presumed to be ordered in time.
n (int): period over which to calculate the mean
Returns:
float: a single-valued forecast for the next value in the series.
"""
# don't start averaging until we've seen n points
if len(data[-n:]) < n:
forecast = np.nan
else:
# nb: we'll keep the forecast as a float
forecast = np.mean(data[-n:])
return forecast |
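A tiny worked example of the forecast rule above: with n=3 the next-point forecast is simply the average of the three most recent observations, and NaN is returned until at least n points have been seen.
import numpy as np

history = np.array([10.0, 12.0, 11.0, 13.0])
print(mean(history, n=3))      # (12.0 + 11.0 + 13.0) / 3 = 12.0
print(mean(history[:2], n=3))  # nan -- only two observations so far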
def create_core(self, thing_name, config_file, region=None,
cert_dir=None, account_id=None,
policy_name='ggc-default-policy', profile_name=None):
"""
Using the `thing_name` value, creates a Thing in AWS IoT, attaches and
downloads new keys & certs to the certificate directory, then records
the created information in the local config file for inclusion in the
Greengrass Group as a Greengrass Core.
:param thing_name: the name of the thing to create and use as a
Greengrass Core
:param config_file: config file used to track the Greengrass Core in the
group
:param region: the region in which to create the new core.
[default: us-west-2]
:param cert_dir: the directory in which to store the thing's keys and
certs. If `None` then use the current directory.
:param account_id: the account_id in which to create the new core.
[default: None]
:param policy_name: the name of the policy to associate with the device.
[default: 'ggc-default-policy']
:param profile_name: the name of the `awscli` profile to use.
[default: None]
"""
config = GroupConfigFile(config_file=config_file)
if config.is_fresh() is False:
raise ValueError(
"Config file already tracking previously created core or group"
)
if region is None:
region = self._region
if account_id is None:
account_id = self._account_id
keys_cert, thing = self.create_thing(thing_name, region, cert_dir)
cert_arn = keys_cert['certificateArn']
config['core'] = {
'thing_arn': thing['thingArn'],
'cert_arn': cert_arn,
'cert_id': keys_cert['certificateId'],
'thing_name': thing_name
}
logging.debug("create_core cfg:{0}".format(config))
logging.info("Thing:'{0}' associated with cert:'{1}'".format(
thing_name, cert_arn))
core_policy = self.get_core_policy(
core_name=thing_name, account_id=account_id, region=region)
iot_client = _get_iot_session(region=region, profile_name=profile_name)
self._create_attach_thing_policy(
cert_arn, core_policy,
iot_client=iot_client, policy_name=policy_name
)
misc = config['misc']
misc['policy_name'] = policy_name
config['misc'] = misc | Using the `thing_name` value, creates a Thing in AWS IoT, attaches and
downloads new keys & certs to the certificate directory, then records
the created information in the local config file for inclusion in the
Greengrass Group as a Greengrass Core.
:param thing_name: the name of the thing to create and use as a
Greengrass Core
:param config_file: config file used to track the Greengrass Core in the
group
:param region: the region in which to create the new core.
[default: us-west-2]
:param cert_dir: the directory in which to store the thing's keys and
certs. If `None` then use the current directory.
:param account_id: the account_id in which to create the new core.
[default: None]
:param policy_name: the name of the policy to associate with the device.
[default: 'ggc-default-policy']
:param profile_name: the name of the `awscli` profile to use.
[default: None] | Below is the the instruction that describes the task:
### Input:
Using the `thing_name` value, creates a Thing in AWS IoT, attaches and
downloads new keys & certs to the certificate directory, then records
the created information in the local config file for inclusion in the
Greengrass Group as a Greengrass Core.
:param thing_name: the name of the thing to create and use as a
Greengrass Core
:param config_file: config file used to track the Greengrass Core in the
group
:param region: the region in which to create the new core.
[default: us-west-2]
:param cert_dir: the directory in which to store the thing's keys and
certs. If `None` then use the current directory.
:param account_id: the account_id in which to create the new core.
[default: None]
:param policy_name: the name of the policy to associate with the device.
[default: 'ggc-default-policy']
:param profile_name: the name of the `awscli` profile to use.
[default: None]
### Response:
def create_core(self, thing_name, config_file, region=None,
cert_dir=None, account_id=None,
policy_name='ggc-default-policy', profile_name=None):
"""
Using the `thing_name` value, creates a Thing in AWS IoT, attaches and
downloads new keys & certs to the certificate directory, then records
the created information in the local config file for inclusion in the
Greengrass Group as a Greengrass Core.
:param thing_name: the name of the thing to create and use as a
Greengrass Core
:param config_file: config file used to track the Greengrass Core in the
group
:param region: the region in which to create the new core.
[default: us-west-2]
:param cert_dir: the directory in which to store the thing's keys and
certs. If `None` then use the current directory.
:param account_id: the account_id in which to create the new core.
[default: None]
:param policy_name: the name of the policy to associate with the device.
[default: 'ggc-default-policy']
:param profile_name: the name of the `awscli` profile to use.
[default: None]
"""
config = GroupConfigFile(config_file=config_file)
if config.is_fresh() is False:
raise ValueError(
"Config file already tracking previously created core or group"
)
if region is None:
region = self._region
if account_id is None:
account_id = self._account_id
keys_cert, thing = self.create_thing(thing_name, region, cert_dir)
cert_arn = keys_cert['certificateArn']
config['core'] = {
'thing_arn': thing['thingArn'],
'cert_arn': cert_arn,
'cert_id': keys_cert['certificateId'],
'thing_name': thing_name
}
logging.debug("create_core cfg:{0}".format(config))
logging.info("Thing:'{0}' associated with cert:'{1}'".format(
thing_name, cert_arn))
core_policy = self.get_core_policy(
core_name=thing_name, account_id=account_id, region=region)
iot_client = _get_iot_session(region=region, profile_name=profile_name)
self._create_attach_thing_policy(
cert_arn, core_policy,
iot_client=iot_client, policy_name=policy_name
)
misc = config['misc']
misc['policy_name'] = policy_name
config['misc'] = misc |
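A hedged sketch of the calling pattern; GroupSetup is an invented stand-in for whatever class defines create_core, and the file, directory, and thing names are placeholders:
gg = GroupSetup()                        # hypothetical owner of create_core
gg.create_core(
    thing_name='my-ggc-core',
    config_file='group-config.json',     # must be a fresh GroupConfigFile
    cert_dir='certs/',
    policy_name='ggc-default-policy',
)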
def showLayer(self, title='', debugText=''):
"""
Shows the single layer.
:param title: A string with the title of the window where to render the image.
:param debugText: A string with some text to render over the image.
:rtype: Nothing.
"""
img = PIL.Image.fromarray(self.data, 'RGBA')
if debugText!='':
draw = PIL.ImageDraw.Draw(img)
font = PIL.ImageFont.truetype("DejaVuSansMono.ttf", 24)
draw.text((0, 0),debugText,(255,255,255),font=font)
img.show(title=title) | Shows the single layer.
:param title: A string with the title of the window where to render the image.
:param debugText: A string with some text to render over the image.
:rtype: Nothing. | Below is the the instruction that describes the task:
### Input:
Shows the single layer.
:param title: A string with the title of the window where to render the image.
:param debugText: A string with some text to render over the image.
:rtype: Nothing.
### Response:
def showLayer(self, title='', debugText=''):
"""
Shows the single layer.
:param title: A string with the title of the window where to render the image.
:param debugText: A string with some text to render over the image.
:rtype: Nothing.
"""
img = PIL.Image.fromarray(self.data, 'RGBA')
if debugText!='':
draw = PIL.ImageDraw.Draw(img)
font = PIL.ImageFont.truetype("DejaVuSansMono.ttf", 24)
draw.text((0, 0),debugText,(255,255,255),font=font)
img.show(title=title) |
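A hedged sketch of driving the method above; Layer is a hypothetical stand-in for whatever class holds the RGBA data array, and the debug text only renders if PIL can locate DejaVuSansMono.ttf:
import numpy as np

layer = Layer()                                     # hypothetical owner of showLayer
layer.data = np.zeros((64, 64, 4), dtype=np.uint8)  # 64x64 transparent black layer
layer.data[..., 0] = 255                            # red channel
layer.data[..., 3] = 255                            # make it opaque
layer.showLayer(title='preview', debugText='red test layer')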
def solveConsPrefShock(solution_next,IncomeDstn,PrefShkDstn,
LivPrb,DiscFac,CRRA,Rfree,PermGroFac,BoroCnstArt,
aXtraGrid,vFuncBool,CubicBool):
'''
Solves a single period of a consumption-saving model with preference shocks
to marginal utility. Problem is solved using the method of endogenous gridpoints.
Parameters
----------
solution_next : ConsumerSolution
The solution to the succeeding one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
PrefShkDstn : [np.array]
Discrete distribution of the multiplicative utility shifter. Order:
probabilities, preference shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
solution: ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using linear splines), a marginal value
function vPfunc, a minimum acceptable level of normalized market re-
sources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin
and MPCmax. It might also have a value function vFunc. The consumption
function is defined over normalized market resources and the preference
shock, c = cFunc(m,PrefShk), but the (marginal) value function is defined
unconditionally on the shock, just before it is revealed.
'''
solver = ConsPrefShockSolver(solution_next,IncomeDstn,PrefShkDstn,LivPrb,
DiscFac,CRRA,Rfree,PermGroFac,BoroCnstArt,aXtraGrid,
vFuncBool,CubicBool)
solver.prepareToSolve()
solution = solver.solve()
return solution | Solves a single period of a consumption-saving model with preference shocks
to marginal utility. Problem is solved using the method of endogenous gridpoints.
Parameters
----------
solution_next : ConsumerSolution
The solution to the succeeding one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
PrefShkDstn : [np.array]
Discrete distribution of the multiplicative utility shifter. Order:
probabilities, preference shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
solution: ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using linear splines), a marginal value
function vPfunc, a minimum acceptable level of normalized market re-
sources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin
and MPCmax. It might also have a value function vFunc. The consumption
function is defined over normalized market resources and the preference
shock, c = cFunc(m,PrefShk), but the (marginal) value function is defined
unconditionally on the shock, just before it is revealed. | Below is the the instruction that describes the task:
### Input:
Solves a single period of a consumption-saving model with preference shocks
to marginal utility. Problem is solved using the method of endogenous gridpoints.
Parameters
----------
solution_next : ConsumerSolution
The solution to the succeeding one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
PrefShkDstn : [np.array]
Discrete distribution of the multiplicative utility shifter. Order:
probabilities, preference shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
solution: ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using linear splines), a marginal value
function vPfunc, a minimum acceptable level of normalized market re-
sources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin
and MPCmax. It might also have a value function vFunc. The consumption
function is defined over normalized market resources and the preference
shock, c = cFunc(m,PrefShk), but the (marginal) value function is defined
unconditionally on the shock, just before it is revealed.
### Response:
def solveConsPrefShock(solution_next,IncomeDstn,PrefShkDstn,
LivPrb,DiscFac,CRRA,Rfree,PermGroFac,BoroCnstArt,
aXtraGrid,vFuncBool,CubicBool):
'''
Solves a single period of a consumption-saving model with preference shocks
to marginal utility. Problem is solved using the method of endogenous gridpoints.
Parameters
----------
solution_next : ConsumerSolution
The solution to the succeeding one period problem.
IncomeDstn : [np.array]
A list containing three arrays of floats, representing a discrete
approximation to the income process between the period being solved
and the one immediately following (in solution_next). Order: event
probabilities, permanent shocks, transitory shocks.
PrefShkDstn : [np.array]
Discrete distribution of the multiplicative utility shifter. Order:
probabilities, preference shocks.
LivPrb : float
Survival probability; likelihood of being alive at the beginning of
the succeeding period.
DiscFac : float
Intertemporal discount factor for future utility.
CRRA : float
Coefficient of relative risk aversion.
Rfree : float
Risk free interest factor on end-of-period assets.
PermGroFac : float
Expected permanent income growth factor at the end of this period.
BoroCnstArt: float or None
Borrowing constraint for the minimum allowable assets to end the
period with. If it is less than the natural borrowing constraint,
then it is irrelevant; BoroCnstArt=None indicates no artificial bor-
rowing constraint.
aXtraGrid: np.array
Array of "extra" end-of-period asset values-- assets above the
absolute minimum acceptable level.
vFuncBool: boolean
An indicator for whether the value function should be computed and
included in the reported solution.
CubicBool: boolean
An indicator for whether the solver should use cubic or linear inter-
polation.
Returns
-------
solution: ConsumerSolution
The solution to the single period consumption-saving problem. Includes
a consumption function cFunc (using linear splines), a marginal value
function vPfunc, a minimum acceptable level of normalized market re-
sources mNrmMin, normalized human wealth hNrm, and bounding MPCs MPCmin
and MPCmax. It might also have a value function vFunc. The consumption
function is defined over normalized market resources and the preference
shock, c = cFunc(m,PrefShk), but the (marginal) value function is defined
unconditionally on the shock, just before it is revealed.
'''
solver = ConsPrefShockSolver(solution_next,IncomeDstn,PrefShkDstn,LivPrb,
DiscFac,CRRA,Rfree,PermGroFac,BoroCnstArt,aXtraGrid,
vFuncBool,CubicBool)
solver.prepareToSolve()
solution = solver.solve()
return solution |
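For orientation, a hedged, illustrative call of the solver above (the successor solution, the distribution objects, and the asset grid are placeholders assumed to have been built elsewhere, e.g. by the agent-type setup code; the numeric parameters are arbitrary):

    solution_t = solveConsPrefShock(solution_next=solution_tp1,
                                    IncomeDstn=IncomeDstn, PrefShkDstn=PrefShkDstn,
                                    LivPrb=0.98, DiscFac=0.96, CRRA=2.0,
                                    Rfree=1.03, PermGroFac=1.01, BoroCnstArt=None,
                                    aXtraGrid=aXtraGrid, vFuncBool=False, CubicBool=False)
    c = solution_t.cFunc(1.5, 1.0)  # consumption at m = 1.5 under a unit preference shock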
def _pyshark_read_frame(self):
"""Read frames."""
from pcapkit.toolkit.pyshark import packet2dict, tcp_traceflow
# fetch PyShark packet
packet = next(self._extmp)
# def _pyshark_packet2chain(packet):
# """Fetch PyShark packet protocol chain."""
# return ':'.join(map(lambda layer: layer.layer_name.upper(), packet.layers))
# verbose output
self._frnum = int(packet.number)
self._proto = packet.frame_info.protocols
if self._flag_v:
print(f' - Frame {self._frnum:>3d}: {self._proto}')
# write plist
frnum = f'Frame {self._frnum}'
if not self._flag_q:
info = packet2dict(packet)
if self._flag_f:
ofile = self._ofile(f'{self._ofnm}/{frnum}.{self._fext}')
ofile(info, name=frnum)
else:
self._ofile(info, name=frnum)
# record frames
if self._flag_d:
setattr(packet, 'packet2dict', packet2dict)
self._frame.append(packet)
# trace flows
if self._flag_t:
flag, data = tcp_traceflow(packet)
if flag:
self._trace(data)
return packet | Read frames. | Below is the the instruction that describes the task:
### Input:
Read frames.
### Response:
def _pyshark_read_frame(self):
"""Read frames."""
from pcapkit.toolkit.pyshark import packet2dict, tcp_traceflow
# fetch PyShark packet
packet = next(self._extmp)
# def _pyshark_packet2chain(packet):
# """Fetch PyShark packet protocol chain."""
# return ':'.join(map(lambda layer: layer.layer_name.upper(), packet.layers))
# verbose output
self._frnum = int(packet.number)
self._proto = packet.frame_info.protocols
if self._flag_v:
print(f' - Frame {self._frnum:>3d}: {self._proto}')
# write plist
frnum = f'Frame {self._frnum}'
if not self._flag_q:
info = packet2dict(packet)
if self._flag_f:
ofile = self._ofile(f'{self._ofnm}/{frnum}.{self._fext}')
ofile(info, name=frnum)
else:
self._ofile(info, name=frnum)
# record frames
if self._flag_d:
setattr(packet, 'packet2dict', packet2dict)
self._frame.append(packet)
# trace flows
if self._flag_t:
flag, data = tcp_traceflow(packet)
if flag:
self._trace(data)
return packet |
def to_bedtool(iterator):
"""
Convert any iterator into a pybedtools.BedTool object.
Note that the supplied iterator is not consumed by this function. To save
to a temp file or to a known location, use the `.saveas()` method of the
returned BedTool object.
"""
def gen():
for i in iterator:
yield helpers.asinterval(i)
return pybedtools.BedTool(gen()) | Convert any iterator into a pybedtools.BedTool object.
Note that the supplied iterator is not consumed by this function. To save
to a temp file or to a known location, use the `.saveas()` method of the
returned BedTool object. | Below is the the instruction that describes the task:
### Input:
Convert any iterator into a pybedtools.BedTool object.
Note that the supplied iterator is not consumed by this function. To save
to a temp file or to a known location, use the `.saveas()` method of the
returned BedTool object.
### Response:
def to_bedtool(iterator):
"""
Convert any iterator into a pybedtools.BedTool object.
Note that the supplied iterator is not consumed by this function. To save
to a temp file or to a known location, use the `.saveas()` method of the
returned BedTool object.
"""
def gen():
for i in iterator:
yield helpers.asinterval(i)
return pybedtools.BedTool(gen()) |
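A hypothetical usage sketch (the database path and feature type are assumptions, not from the source): converting gffutils features into a BedTool and then materializing it with saveas(), since the generator-backed BedTool would otherwise be consumed lazily.

    import gffutils

    db = gffutils.FeatureDB('annotation.db')          # assumed, pre-built gffutils database
    genes = to_bedtool(db.features_of_type('gene'))   # wraps the iterator; nothing is consumed yet
    genes.saveas('genes.bed')                         # write the intervals to a known location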
def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep
"""
layer_purpose = self.parent.step_kw_purpose.selected_purpose()
if layer_purpose != layer_purpose_aggregation:
subcategory = self.parent.step_kw_subcategory.\
selected_subcategory()
else:
subcategory = {'key': None}
# Has layer groups, go to field mapping
field_groups = get_field_groups(
layer_purpose['key'], subcategory['key'])
compulsory_field = get_compulsory_fields(
layer_purpose['key'], subcategory['key'])
# It's aggregation and has field_groups.
if field_groups and layer_purpose == layer_purpose_aggregation:
return self.parent.step_kw_fields_mapping
# It has field_groups and the compulsory field is population count.
if field_groups and compulsory_field == population_count_field:
return self.parent.step_kw_fields_mapping
# Has classifications, go to multi classifications
if subcategory.get('classifications'):
if layer_purpose == layer_purpose_hazard:
return self.parent.step_kw_multi_classifications
elif layer_purpose == layer_purpose_exposure:
return self.parent.step_kw_classification
# Check if it can go to inasafe field step
non_compulsory_fields = get_non_compulsory_fields(
layer_purpose['key'], subcategory['key'])
if not skip_inasafe_field(self.parent.layer, non_compulsory_fields):
return self.parent.step_kw_inasafe_fields
# Check if it can go to inasafe default field step
default_inasafe_fields = get_fields(
layer_purpose['key'],
subcategory['key'],
replace_null=True,
in_group=False)
if default_inasafe_fields:
return self.parent.step_kw_default_inasafe_fields
# Any other case
return self.parent.step_kw_source | Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep | Below is the the instruction that describes the task:
### Input:
Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep
### Response:
def get_next_step(self):
"""Find the proper step when user clicks the Next button.
:returns: The step to be switched to
:rtype: WizardStep
"""
layer_purpose = self.parent.step_kw_purpose.selected_purpose()
if layer_purpose != layer_purpose_aggregation:
subcategory = self.parent.step_kw_subcategory.\
selected_subcategory()
else:
subcategory = {'key': None}
# Has layer groups, go to field mapping
field_groups = get_field_groups(
layer_purpose['key'], subcategory['key'])
compulsory_field = get_compulsory_fields(
layer_purpose['key'], subcategory['key'])
# It's aggregation and has field_groups.
if field_groups and layer_purpose == layer_purpose_aggregation:
return self.parent.step_kw_fields_mapping
# It has field_groups and the compulsory field is population count.
if field_groups and compulsory_field == population_count_field:
return self.parent.step_kw_fields_mapping
# Has classifications, go to multi classifications
if subcategory.get('classifications'):
if layer_purpose == layer_purpose_hazard:
return self.parent.step_kw_multi_classifications
elif layer_purpose == layer_purpose_exposure:
return self.parent.step_kw_classification
# Check if it can go to inasafe field step
non_compulsory_fields = get_non_compulsory_fields(
layer_purpose['key'], subcategory['key'])
if not skip_inasafe_field(self.parent.layer, non_compulsory_fields):
return self.parent.step_kw_inasafe_fields
# Check if it can go to inasafe default field step
default_inasafe_fields = get_fields(
layer_purpose['key'],
subcategory['key'],
replace_null=True,
in_group=False)
if default_inasafe_fields:
return self.parent.step_kw_default_inasafe_fields
# Any other case
return self.parent.step_kw_source |
def nth(series, n, order_by=None):
"""
Returns the nth value of a series.
Args:
series (pandas.Series): column to summarize.
n (integer): position of desired value. Returns `NaN` if out of range.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization.
"""
if order_by is not None:
series = order_series_by(series, order_by)
try:
return series.iloc[n]
except:
return np.nan | Returns the nth value of a series.
Args:
series (pandas.Series): column to summarize.
n (integer): position of desired value. Returns `NaN` if out of range.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization. | Below is the the instruction that describes the task:
### Input:
Returns the nth value of a series.
Args:
series (pandas.Series): column to summarize.
n (integer): position of desired value. Returns `NaN` if out of range.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization.
### Response:
def nth(series, n, order_by=None):
"""
Returns the nth value of a series.
Args:
series (pandas.Series): column to summarize.
n (integer): position of desired value. Returns `NaN` if out of range.
Kwargs:
order_by: a pandas.Series or list of series (can be symbolic) to order
the input series by before summarization.
"""
if order_by is not None:
series = order_series_by(series, order_by)
try:
return series.iloc[n]
except:
return np.nan |
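A few illustrative calls of the function above (assumed, not from the source; the order_by path relies on the module's order_series_by helper sorting ascending by default):

    import pandas as pd

    s = pd.Series([30, 10, 20])
    nth(s, 0)               # 30 -- positional, not label-based
    nth(s, 1, order_by=s)   # 20 -- second value after ordering by the series itself
    nth(s, 5)               # NaN -- out-of-range positions fall into the except branch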
def list_customer_users(self, customer_id):
"""List all users from a specified customer id."""
content = self._fetch("/customer/users/%s" % customer_id)
return map(lambda x: FastlyUser(self, x), content) | List all users from a specified customer id. | Below is the the instruction that describes the task:
### Input:
List all users from a specified customer id.
### Response:
def list_customer_users(self, customer_id):
"""List all users from a specified customer id."""
content = self._fetch("/customer/users/%s" % customer_id)
return map(lambda x: FastlyUser(self, x), content) |
def create_control(self, pid, callback, callback_parsed=None):
"""Create a control for this Thing with a local point id (pid) and a control request feedback
Returns a new [Control](Point.m.html#IoticAgent.IOT.Point.Control) object
or the existing one if the Control already exists
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`pid` (required) (string) local id of your Control
`callback` (required) (function reference) callback function to invoke on receipt of a control request.
The callback receives a single dict argument, with keys of:
#!python
'data' # (decoded or raw bytes)
'mime' # (None, unless payload was not decoded and has a mime type)
'subId' # (the global id of the associated subscription)
'entityLid' # (local id of the Thing to which the control belongs)
'lid' # (local id of control)
'confirm' # (whether a confirmation is expected)
'requestId' # (required for sending confirmation)
`callback_parsed` (optional) (function reference) callback function to invoke on receipt of control data. This
is equivalent to `callback` except the dict includes the `parsed` key which holds the set of values in a
[PointDataObject](./Point.m.html#IoticAgent.IOT.Point.PointDataObject) instance. If both
`callback_parsed` and `callback` have been specified, the former takes precedence and `callback` is only called
if the point data could not be parsed according to its current value description.
`NOTE`: `callback_parsed` can only be used if `auto_encode_decode` is enabled for the client instance.
"""
logger.info("create_control(pid=\"%s\", control_cb=%s) [lid=%s]", pid, callback, self.__lid)
if callback_parsed:
callback = self._client._get_parsed_control_callback(callback_parsed, callback)
return self.__create_point(R_CONTROL, pid, control_cb=callback) | Create a control for this Thing with a local point id (pid) and a control request feedback
Returns a new [Control](Point.m.html#IoticAgent.IOT.Point.Control) object
or the existing one if the Control already exists
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`pid` (required) (string) local id of your Control
`callback` (required) (function reference) callback function to invoke on receipt of a control request.
The callback receives a single dict argument, with keys of:
#!python
'data' # (decoded or raw bytes)
'mime' # (None, unless payload was not decoded and has a mime type)
'subId' # (the global id of the associated subscripion)
'entityLid' # (local id of the Thing to which the control belongs)
'lid' # (local id of control)
'confirm' # (whether a confirmation is expected)
'requestId' # (required for sending confirmation)
`callback_parsed` (optional) (function reference) callback function to invoke on receipt of control data. This
is equivalent to `callback` except the dict includes the `parsed` key which holds the set of values in a
[PointDataObject](./Point.m.html#IoticAgent.IOT.Point.PointDataObject) instance. If both
`callback_parsed` and `callback` have been specified, the former takes precedence and `callback` is only called
if the point data could not be parsed according to its current value description.
`NOTE`: `callback_parsed` can only be used if `auto_encode_decode` is enabled for the client instance. | Below is the the instruction that describes the task:
### Input:
Create a control for this Thing with a local point id (pid) and a control request feedback
Returns a new [Control](Point.m.html#IoticAgent.IOT.Point.Control) object
or the existing one if the Control already exists
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`pid` (required) (string) local id of your Control
`callback` (required) (function reference) callback function to invoke on receipt of a control request.
The callback receives a single dict argument, with keys of:
#!python
'data' # (decoded or raw bytes)
'mime' # (None, unless payload was not decoded and has a mime type)
'subId' # (the global id of the associated subscription)
'entityLid' # (local id of the Thing to which the control belongs)
'lid' # (local id of control)
'confirm' # (whether a confirmation is expected)
'requestId' # (required for sending confirmation)
`callback_parsed` (optional) (function reference) callback function to invoke on receipt of control data. This
is equivalent to `callback` except the dict includes the `parsed` key which holds the set of values in a
[PointDataObject](./Point.m.html#IoticAgent.IOT.Point.PointDataObject) instance. If both
`callback_parsed` and `callback` have been specified, the former takes precedence and `callback` is only called
if the point data could not be parsed according to its current value description.
`NOTE`: `callback_parsed` can only be used if `auto_encode_decode` is enabled for the client instance.
### Response:
def create_control(self, pid, callback, callback_parsed=None):
"""Create a control for this Thing with a local point id (pid) and a control request feedback
Returns a new [Control](Point.m.html#IoticAgent.IOT.Point.Control) object
or the existing one if the Control already exists
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
`pid` (required) (string) local id of your Control
`callback` (required) (function reference) callback function to invoke on receipt of a control request.
The callback receives a single dict argument, with keys of:
#!python
'data' # (decoded or raw bytes)
'mime' # (None, unless payload was not decoded and has a mime type)
'subId' # (the global id of the associated subscription)
'entityLid' # (local id of the Thing to which the control belongs)
'lid' # (local id of control)
'confirm' # (whether a confirmation is expected)
'requestId' # (required for sending confirmation)
`callback_parsed` (optional) (function reference) callback function to invoke on receipt of control data. This
is equivalent to `callback` except the dict includes the `parsed` key which holds the set of values in a
[PointDataObject](./Point.m.html#IoticAgent.IOT.Point.PointDataObject) instance. If both
`callback_parsed` and `callback` have been specified, the former takes precedence and `callback` is only called
if the point data could not be parsed according to its current value description.
`NOTE`: `callback_parsed` can only be used if `auto_encode_decode` is enabled for the client instance.
"""
logger.info("create_control(pid=\"%s\", control_cb=%s) [lid=%s]", pid, callback, self.__lid)
if callback_parsed:
callback = self._client._get_parsed_control_callback(callback_parsed, callback)
return self.__create_point(R_CONTROL, pid, control_cb=callback) |
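A hedged wiring example for the callback API described above (the thing instance and the point id 'dimmer' are assumptions; the confirmation call itself is omitted because its exact API is not shown here):

    def on_control_request(args):
        # args carries the keys listed in the docstring above
        print(args['lid'], args['data'])
        if args['confirm']:
            pass  # a confirmation would be sent separately, keyed on args['requestId']

    control = thing.create_control('dimmer', on_control_request)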
def discard_member(self, member, pipe=None):
"""
Remove *member* from the collection, unconditionally.
"""
pipe = self.redis if pipe is None else pipe
pipe.zrem(self.key, self._pickle(member)) | Remove *member* from the collection, unconditionally. | Below is the the instruction that describes the task:
### Input:
Remove *member* from the collection, unconditionally.
### Response:
def discard_member(self, member, pipe=None):
"""
Remove *member* from the collection, unconditionally.
"""
pipe = self.redis if pipe is None else pipe
pipe.zrem(self.key, self._pickle(member)) |
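A small assumed usage, batching several removals through one Redis pipeline so the ZREM commands go out together (collection is presumed to be an instance of the class this method belongs to, with the redis attribute used above):

    pipe = collection.redis.pipeline()
    collection.discard_member('alice', pipe=pipe)
    collection.discard_member('bob', pipe=pipe)
    pipe.execute()  # both removals are issued in one round trip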
def HA2(credentials, request, algorithm):
"""Create HA2 md5 hash
If the qop directive's value is "auth" or is unspecified, then HA2:
HA2 = md5(A2) = MD5(method:digestURI)
If the qop directive's value is "auth-int" , then HA2 is
HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody))
"""
if credentials.get("qop") == "auth" or credentials.get('qop') is None:
return H(b":".join([request['method'].encode('utf-8'), request['uri'].encode('utf-8')]), algorithm)
elif credentials.get("qop") == "auth-int":
for k in 'method', 'uri', 'body':
if k not in request:
raise ValueError("%s required" % k)
A2 = b":".join([request['method'].encode('utf-8'),
request['uri'].encode('utf-8'),
H(request['body'], algorithm).encode('utf-8')])
return H(A2, algorithm)
raise ValueError | Create HA2 md5 hash
If the qop directive's value is "auth" or is unspecified, then HA2:
HA2 = md5(A2) = MD5(method:digestURI)
If the qop directive's value is "auth-int" , then HA2 is
HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody)) | Below is the the instruction that describes the task:
### Input:
Create HA2 md5 hash
If the qop directive's value is "auth" or is unspecified, then HA2:
HA2 = md5(A2) = MD5(method:digestURI)
If the qop directive's value is "auth-int" , then HA2 is
HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody))
### Response:
def HA2(credentials, request, algorithm):
"""Create HA2 md5 hash
If the qop directive's value is "auth" or is unspecified, then HA2:
HA2 = md5(A2) = MD5(method:digestURI)
If the qop directive's value is "auth-int" , then HA2 is
HA2 = md5(A2) = MD5(method:digestURI:MD5(entityBody))
"""
if credentials.get("qop") == "auth" or credentials.get('qop') is None:
return H(b":".join([request['method'].encode('utf-8'), request['uri'].encode('utf-8')]), algorithm)
elif credentials.get("qop") == "auth-int":
for k in 'method', 'uri', 'body':
if k not in request:
raise ValueError("%s required" % k)
A2 = b":".join([request['method'].encode('utf-8'),
request['uri'].encode('utf-8'),
H(request['body'], algorithm).encode('utf-8')])
return H(A2, algorithm)
raise ValueError |
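A brief hedged example of the qop="auth" branch, assuming H is the module's helper that hashes bytes with the named algorithm and returns a hex-digest string (as the calls above imply):

    credentials = {'qop': 'auth'}                          # plain "auth": the request body is not hashed
    request = {'method': 'GET', 'uri': '/dir/index.html'}
    ha2 = HA2(credentials, request, 'MD5')                 # same as H(b'GET:/dir/index.html', 'MD5')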