code (stringlengths 75–104k) | docstring (stringlengths 1–46.9k) | text (stringlengths 164–112k) |
---|---|---|
def write_event(self, event):
"""Writes an ``Event`` object to Splunk.
:param event: An ``Event`` object.
"""
if not self.header_written:
self._out.write("<stream>")
self.header_written = True
event.write_to(self._out) | Writes an ``Event`` object to Splunk.
:param event: An ``Event`` object. | Below is the instruction that describes the task:
### Input:
Writes an ``Event`` object to Splunk.
:param event: An ``Event`` object.
### Response:
def write_event(self, event):
"""Writes an ``Event`` object to Splunk.
:param event: An ``Event`` object.
"""
if not self.header_written:
self._out.write("<stream>")
self.header_written = True
event.write_to(self._out) |
def build_agg_vec(agg_vec, **source):
""" Builds a combined aggregation vector based on various classifications
This function builds an aggregation vector based on the order in agg_vec.
The naming and actual mapping is given in source, either explicitly or by
pointing to a folder with the mapping.
>>> build_agg_vec(['EU', 'OECD'], path = 'test')
['EU', 'EU', 'EU', 'OECD', 'REST', 'REST']
>>> build_agg_vec(['OECD', 'EU'], path = 'test', miss='RoW')
['OECD', 'EU', 'OECD', 'OECD', 'RoW', 'RoW']
>>> build_agg_vec(['EU', 'orig_regions'], path = 'test')
['EU', 'EU', 'EU', 'reg4', 'reg5', 'reg6']
>>> build_agg_vec(['supreg1', 'other'], path = 'test',
... other = [None, None, 'other1', 'other1', 'other2', 'other2'])
['supreg1', 'supreg1', 'other1', 'other1', 'other2', 'other2']
Parameters
----------
agg_vec : list
A list of sectors or regions to which the IOSystem shall be aggregated.
The order in agg_vec is important:
If a string was assigned to one specific entry it will not be
overwritten if it is given in the next vector, e.g. ['EU', 'OECD']
would aggregate first into EU and the remaining one into OECD, whereas
['OECD', 'EU'] would first aggregate all countries into OECD and then
the remaining countries into EU.
source : list or string
Definition of the vectors in agg_vec. The input vectors (either in the
file or given as list for the entries in agg_vec) must be as long as
the desired output with a string for every position which should be
aggregated and None for every position which should not be used.
Special keywords:
- path : Path to a folder with concordance matrices.
The files in the folder can have any extension but must be
in text format (tab separated) with one entry per row.
The last column in the file will be taken as aggregation
vectors (other columns can be used for documentation).
Values must be given for every entry in the original
classification (string None for all values not used). If
the same entry is given in source and as a text file in
path, then the one in source will be used.
Two special path entries are available so far:
- 'exio2'
Concordance matrices for EXIOBASE 2.0
- 'test'
Concordance matrices for the test IO system
If an entry is not found in source and no path is given
the current directory will be searched for the definition.
- miss : Entry to use for missing values, default: 'REST'
Returns
-------
list (aggregation vector)
"""
# build a dict with aggregation vectors in source and folder
if type(agg_vec) is str:
agg_vec = [agg_vec]
agg_dict = dict()
for entry in agg_vec:
try:
agg_dict[entry] = source[entry]
except KeyError:
folder = source.get('path', './')
folder = os.path.join(PYMRIO_PATH[folder], 'concordance')
for file in os.listdir(folder):
if entry == os.path.splitext(file)[0]:
_tmp = np.genfromtxt(os.path.join(folder, file), dtype=str)
if _tmp.ndim == 1:
agg_dict[entry] = [None if ee == 'None'
else ee for ee in _tmp.tolist()]
else:
agg_dict[entry] = [None if ee == 'None'
else ee
for ee in _tmp[:, -1].tolist()]
break
else:
logging.error(
'Aggregation vector -- {} -- not found'
.format(str(entry)))
# build the summary aggregation vector
def _rep(ll, ii, vv): ll[ii] = vv
miss_val = source.get('miss', 'REST')
vec_list = [agg_dict[ee] for ee in agg_vec]
out = [None, ] * len(vec_list[0])
for currvec in vec_list:
if len(currvec) != len(out):
logging.warning('Inconsistent vector length')
[_rep(out, ind, val) for ind, val in
enumerate(currvec) if not out[ind]]
[_rep(out, ind, miss_val) for ind, val in enumerate(out) if not val]
return out | Builds a combined aggregation vector based on various classifications
This function builds an aggregation vector based on the order in agg_vec.
The naming and actual mapping is given in source, either explicitly or by
pointing to a folder with the mapping.
>>> build_agg_vec(['EU', 'OECD'], path = 'test')
['EU', 'EU', 'EU', 'OECD', 'REST', 'REST']
>>> build_agg_vec(['OECD', 'EU'], path = 'test', miss='RoW')
['OECD', 'EU', 'OECD', 'OECD', 'RoW', 'RoW']
>>> build_agg_vec(['EU', 'orig_regions'], path = 'test')
['EU', 'EU', 'EU', 'reg4', 'reg5', 'reg6']
>>> build_agg_vec(['supreg1', 'other'], path = 'test',
... other = [None, None, 'other1', 'other1', 'other2', 'other2'])
['supreg1', 'supreg1', 'other1', 'other1', 'other2', 'other2']
Parameters
----------
agg_vec : list
A list of sectors or regions to which the IOSystem shall be aggregated.
The order in agg_vec is important:
If a string was assigned to one specific entry it will not be
overwritten if it is given in the next vector, e.g. ['EU', 'OECD']
would aggregate first into EU and the remaining one into OECD, whereas
['OECD', 'EU'] would first aggregate all countries into OECD and then
the remaining countries into EU.
source : list or string
Definition of the vectors in agg_vec. The input vectors (either in the
file or given as list for the entries in agg_vec) must be as long as
the desired output with a string for every position which should be
aggregated and None for every position which should not be used.
Special keywords:
- path : Path to a folder with concordance matrices.
The files in the folder can have any extension but must be
in text format (tab separated) with one entry per row.
The last column in the file will be taken as aggregation
vectors (other columns can be used for documentation).
Values must be given for every entry in the original
classification (string None for all values not used). If
the same entry is given in source and as a text file in
path, then the one in source will be used.
Two special path entries are available so far:
- 'exio2'
Concordance matrices for EXIOBASE 2.0
- 'test'
Concordance matrices for the test IO system
If an entry is not found in source and no path is given
the current directory will be searched for the definition.
- miss : Entry to use for missing values, default: 'REST'
Returns
-------
list (aggregation vector) | Below is the instruction that describes the task:
### Input:
Builds a combined aggregation vector based on various classifications
This function builds an aggregation vector based on the order in agg_vec.
The naming and actual mapping is given in source, either explicitly or by
pointing to a folder with the mapping.
>>> build_agg_vec(['EU', 'OECD'], path = 'test')
['EU', 'EU', 'EU', 'OECD', 'REST', 'REST']
>>> build_agg_vec(['OECD', 'EU'], path = 'test', miss='RoW')
['OECD', 'EU', 'OECD', 'OECD', 'RoW', 'RoW']
>>> build_agg_vec(['EU', 'orig_regions'], path = 'test')
['EU', 'EU', 'EU', 'reg4', 'reg5', 'reg6']
>>> build_agg_vec(['supreg1', 'other'], path = 'test',
... other = [None, None, 'other1', 'other1', 'other2', 'other2'])
['supreg1', 'supreg1', 'other1', 'other1', 'other2', 'other2']
Parameters
----------
agg_vec : list
A list of sectors or regions to which the IOSystem shall be aggregated.
The order in agg_vec is important:
If a string was assigned to one specific entry it will not be
overwritten if it is given in the next vector, e.g. ['EU', 'OECD']
would aggregate first into EU and the remaining one into OECD, whereas
['OECD', 'EU'] would first aggregate all countries into OECD and then
the remaining countries into EU.
source : list or string
Definition of the vectors in agg_vec. The input vectors (either in the
file or given as list for the entries in agg_vec) must be as long as
the desired output with a string for every position which should be
aggregated and None for every position which should not be used.
Special keywords:
- path : Path to a folder with concordance matrices.
The files in the folder can have any extension but must be
in text format (tab separated) with one entry per row.
The last column in the file will be taken as aggregation
vectors (other columns can be used for documentation).
Values must be given for every entry in the original
classification (string None for all values not used). If
the same entry is given in source and as a text file in
path, then the one in source will be used.
Two special path entries are available so far:
- 'exio2'
Concordance matrices for EXIOBASE 2.0
- 'test'
Concordance matrices for the test IO system
If an entry is not found in source and no path is given
the current directory will be searched for the definition.
- miss : Entry to use for missing values, default: 'REST'
Returns
-------
list (aggregation vector)
### Response:
def build_agg_vec(agg_vec, **source):
""" Builds a combined aggregation vector based on various classifications
This function builds an aggregation vector based on the order in agg_vec.
The naming and actual mapping is given in source, either explicitly or by
pointing to a folder with the mapping.
>>> build_agg_vec(['EU', 'OECD'], path = 'test')
['EU', 'EU', 'EU', 'OECD', 'REST', 'REST']
>>> build_agg_vec(['OECD', 'EU'], path = 'test', miss='RoW')
['OECD', 'EU', 'OECD', 'OECD', 'RoW', 'RoW']
>>> build_agg_vec(['EU', 'orig_regions'], path = 'test')
['EU', 'EU', 'EU', 'reg4', 'reg5', 'reg6']
>>> build_agg_vec(['supreg1', 'other'], path = 'test',
... other = [None, None, 'other1', 'other1', 'other2', 'other2'])
['supreg1', 'supreg1', 'other1', 'other1', 'other2', 'other2']
Parameters
----------
agg_vec : list
A list of sectors or regions to which the IOSystem shall be aggregated.
The order in agg_vec is important:
If a string was assigned to one specific entry it will not be
overwritten if it is given in the next vector, e.g. ['EU', 'OECD']
would aggregate first into EU and the remaining one into OECD, whereas
['OECD', 'EU'] would first aggregate all countries into OECD and then
the remaining countries into EU.
source : list or string
Definition of the vectors in agg_vec. The input vectors (either in the
file or given as list for the entries in agg_vec) must be as long as
the desired output with a string for every position which should be
aggregated and None for every position which should not be used.
Special keywords:
- path : Path to a folder with concordance matrices.
The files in the folder can have any extension but must be
in text format (tab separated) with one entry per row.
The last column in the file will be taken as aggregation
vectors (other columns can be used for documentation).
Values must be given for every entry in the original
classification (string None for all values not used). If
the same entry is given in source and as a text file in
path, then the one in source will be used.
Two special path entries are available so far:
- 'exio2'
Concordance matrices for EXIOBASE 2.0
- 'test'
Concordance matrices for the test IO system
If an entry is not found in source and no path is given
the current directory will be searched for the definition.
- miss : Entry to use for missing values, default: 'REST'
Returns
-------
list (aggregation vector)
"""
# build a dict with aggregation vectors in source and folder
if type(agg_vec) is str:
agg_vec = [agg_vec]
agg_dict = dict()
for entry in agg_vec:
try:
agg_dict[entry] = source[entry]
except KeyError:
folder = source.get('path', './')
folder = os.path.join(PYMRIO_PATH[folder], 'concordance')
for file in os.listdir(folder):
if entry == os.path.splitext(file)[0]:
_tmp = np.genfromtxt(os.path.join(folder, file), dtype=str)
if _tmp.ndim == 1:
agg_dict[entry] = [None if ee == 'None'
else ee for ee in _tmp.tolist()]
else:
agg_dict[entry] = [None if ee == 'None'
else ee
for ee in _tmp[:, -1].tolist()]
break
else:
logging.error(
'Aggregation vector -- {} -- not found'
.format(str(entry)))
# build the summary aggregation vector
def _rep(ll, ii, vv): ll[ii] = vv
miss_val = source.get('miss', 'REST')
vec_list = [agg_dict[ee] for ee in agg_vec]
out = [None, ] * len(vec_list[0])
for currvec in vec_list:
if len(currvec) != len(out):
logging.warning('Inconsistent vector length')
[_rep(out, ind, val) for ind, val in
enumerate(currvec) if not out[ind]]
[_rep(out, ind, miss_val) for ind, val in enumerate(out) if not val]
return out |
def _current_user_manager(self, session=None):
"""Return the current user, or SYSTEM user."""
if session is None:
session = db.session()
try:
user = g.user
except Exception:
return session.query(User).get(0)
if sa.orm.object_session(user) is not session:
# this can happen when called from a celery task during development
# (with CELERY_ALWAYS_EAGER=True): the task SA session is not
# app.db.session, and we should not attach this object to
# the other session, because it can make weird, hard-to-debug
# errors related to session.identity_map.
return session.query(User).get(user.id)
else:
return user | Return the current user, or SYSTEM user. | Below is the instruction that describes the task:
### Input:
Return the current user, or SYSTEM user.
### Response:
def _current_user_manager(self, session=None):
"""Return the current user, or SYSTEM user."""
if session is None:
session = db.session()
try:
user = g.user
except Exception:
return session.query(User).get(0)
if sa.orm.object_session(user) is not session:
# this can happen when called from a celery task during development
# (with CELERY_ALWAYS_EAGER=True): the task SA session is not
# app.db.session, and we should not attach this object to
# the other session, because it can make weird, hard-to-debug
# errors related to session.identity_map.
return session.query(User).get(user.id)
else:
return user |
def create_txt_record(self, name, values, ttl=60, weight=None, region=None,
set_identifier=None):
"""
Creates a TXT record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
record set is routed to the associated location. Ranges from 0-255.
:keyword str region: *For latency-based record sets*. The Amazon EC2 region
where the resource that is specified in this resource record set
resides.
:keyword str set_identifier: *For weighted and latency resource record
sets only*. An identifier that differentiates among multiple
resource record sets that have the same combination of DNS name
and type. 1-128 chars.
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
``rrset`` is the newly created TXTResourceRecordSet instance.
"""
self._halt_if_already_deleted()
# Grab the params/kwargs here for brevity's sake.
values = locals()
del values['self']
return self._add_record(TXTResourceRecordSet, **values) | Creates a TXT record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
record set is routed to the associated location. Ranges from 0-255.
:keyword str region: *For latency-based record sets*. The Amazon EC2 region
where the resource that is specified in this resource record set
resides.
:keyword str set_identifier: *For weighted and latency resource record
sets only*. An identifier that differentiates among multiple
resource record sets that have the same combination of DNS name
and type. 1-128 chars.
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
``rrset`` is the newly created TXTResourceRecordSet instance. | Below is the instruction that describes the task:
### Input:
Creates a TXT record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
record set is routed to the associated location. Ranges from 0-255.
:keyword str region: *For latency-based record sets*. The Amazon EC2 region
where the resource that is specified in this resource record set
resides.
:keyword str set_identifier: *For weighted and latency resource record
sets only*. An identifier that differentiates among multiple
resource record sets that have the same combination of DNS name
and type. 1-128 chars.
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
``rrset`` is the newly created TXTResourceRecordSet instance.
### Response:
def create_txt_record(self, name, values, ttl=60, weight=None, region=None,
set_identifier=None):
"""
Creates a TXT record attached to this hosted zone.
:param str name: The fully qualified name of the record to add.
:param list values: A list of value strings for the record.
:keyword int ttl: The time-to-live of the record (in seconds).
:keyword int weight: *For weighted record sets only*. Among resource record
sets that have the same combination of DNS name and type, a value
that determines what portion of traffic for the current resource
record set is routed to the associated location. Ranges from 0-255.
:keyword str region: *For latency-based record sets*. The Amazon EC2 region
where the resource that is specified in this resource record set
resides.
:keyword str set_identifier: *For weighted and latency resource record
sets only*. An identifier that differentiates among multiple
resource record sets that have the same combination of DNS name
and type. 1-128 chars.
:rtype: tuple
:returns: A tuple in the form of ``(rrset, change_info)``, where
``rrset`` is the newly created TXTResourceRecordSet instance.
"""
self._halt_if_already_deleted()
# Grab the params/kwargs here for brevity's sake.
values = locals()
del values['self']
return self._add_record(TXTResourceRecordSet, **values) |
def zGetRefresh(self):
"""Copy lens in UI to headless ZOS COM server"""
OpticalSystem._dde_link.zGetRefresh()
OpticalSystem._dde_link.zSaveFile(self._sync_ui_file)
self._iopticalsystem.LoadFile (self._sync_ui_file, False) | Copy lens in UI to headless ZOS COM server | Below is the instruction that describes the task:
### Input:
Copy lens in UI to headless ZOS COM server
### Response:
def zGetRefresh(self):
"""Copy lens in UI to headless ZOS COM server"""
OpticalSystem._dde_link.zGetRefresh()
OpticalSystem._dde_link.zSaveFile(self._sync_ui_file)
self._iopticalsystem.LoadFile (self._sync_ui_file, False) |
def get_events_df(self, num_items=100, params=None, columns=None, drop_columns=None, convert_ips=True):
"""
Get events as pandas DataFrame
:param int num_items: Max items to retrieve
:param dict params: Additional params dictionary according to:
https://www.alienvault.com/documentation/api/usm-anywhere-api.htm#/events
:param list columns: list of columns to include in DataFrame
:returns pandas.DataFrame: dataframe of events
"""
if params and set(params.keys()) - VALID_EVENT_PARAMS:
self.log.error(f"Invalid event query parameters: {set(params.keys()) - VALID_EVENT_PARAMS}")
return None
events = self._retrieve_items(item_type="events", num_items=num_items, params=params)
df = self._to_df(events, columns, drop_columns, convert_ips)
return df | Get events as pandas DataFrame
:param int num_items: Max items to retrieve
:param dict params: Additional params dictionary according to:
https://www.alienvault.com/documentation/api/usm-anywhere-api.htm#/events
:param list columns: list of columns to include in DataFrame
:returns pandas.DataFrame: dataframe of events | Below is the instruction that describes the task:
### Input:
Get events as pandas DataFrame
:param int num_items: Max items to retrieve
:param dict params: Additional params dictionary according to:
https://www.alienvault.com/documentation/api/usm-anywhere-api.htm#/events
:param list columns: list of columns to include in DataFrame
:returns pandas.DataFrame: dataframe of events
### Response:
def get_events_df(self, num_items=100, params=None, columns=None, drop_columns=None, convert_ips=True):
"""
Get events as pandas DataFrame
:param int num_items: Max items to retrieve
:param dict params: Additional params dictionary according to:
https://www.alienvault.com/documentation/api/usm-anywhere-api.htm#/events
:param list columns: list of columns to include in DataFrame
:returns pandas.DataFrame: dataframe of events
"""
if params and set(params.keys()) - VALID_EVENT_PARAMS:
self.log.error(f"Invalid event query parameters: {set(params.keys()) - VALID_EVENT_PARAMS}")
return None
events = self._retrieve_items(item_type="events", num_items=num_items, params=params)
df = self._to_df(events, columns, drop_columns, convert_ips)
return df |
def write_file(self, filename=None, buffer=None, fileobj=None):
"""Write this NBT file to a file."""
closefile = True
if buffer:
self.filename = None
self.file = buffer
closefile = False
elif filename:
self.filename = filename
self.file = GzipFile(filename, "wb")
elif fileobj:
self.filename = None
self.file = GzipFile(fileobj=fileobj, mode="wb")
elif self.filename:
self.file = GzipFile(self.filename, "wb")
elif not self.file:
raise ValueError(
"NBTFile.write_file(): Need to specify either a "
"filename or a file object"
)
# Render tree to file
TAG_Byte(self.id)._render_buffer(self.file)
TAG_String(self.name)._render_buffer(self.file)
self._render_buffer(self.file)
# make sure the file is complete
try:
self.file.flush()
except (AttributeError, IOError):
pass
if closefile:
try:
self.file.close()
except (AttributeError, IOError):
pass | Write this NBT file to a file. | Below is the instruction that describes the task:
### Input:
Write this NBT file to a file.
### Response:
def write_file(self, filename=None, buffer=None, fileobj=None):
"""Write this NBT file to a file."""
closefile = True
if buffer:
self.filename = None
self.file = buffer
closefile = False
elif filename:
self.filename = filename
self.file = GzipFile(filename, "wb")
elif fileobj:
self.filename = None
self.file = GzipFile(fileobj=fileobj, mode="wb")
elif self.filename:
self.file = GzipFile(self.filename, "wb")
elif not self.file:
raise ValueError(
"NBTFile.write_file(): Need to specify either a "
"filename or a file object"
)
# Render tree to file
TAG_Byte(self.id)._render_buffer(self.file)
TAG_String(self.name)._render_buffer(self.file)
self._render_buffer(self.file)
# make sure the file is complete
try:
self.file.flush()
except (AttributeError, IOError):
pass
if closefile:
try:
self.file.close()
except (AttributeError, IOError):
pass |
def set_event(self,
simulation_start=None,
simulation_duration=None,
simulation_end=None,
rain_intensity=2,
rain_duration=timedelta(seconds=30*60),
event_type='EVENT',
):
"""
Initializes event for GSSHA model
"""
# ADD TEMPORAL EVENT INFORMATION
if event_type == 'LONG_TERM':
self.event = LongTermMode(self.project_manager,
self.db_session,
self.project_directory,
simulation_start=simulation_start,
simulation_end=simulation_end,
simulation_duration=simulation_duration,
)
else: # 'EVENT'
self.event = EventMode(self.project_manager,
self.db_session,
self.project_directory,
simulation_start=simulation_start,
simulation_duration=simulation_duration,
)
self.event.add_uniform_precip_event(intensity=rain_intensity,
duration=rain_duration) | Initializes event for GSSHA model | Below is the instruction that describes the task:
### Input:
Initializes event for GSSHA model
### Response:
def set_event(self,
simulation_start=None,
simulation_duration=None,
simulation_end=None,
rain_intensity=2,
rain_duration=timedelta(seconds=30*60),
event_type='EVENT',
):
"""
Initializes event for GSSHA model
"""
# ADD TEMPORAL EVENT INFORMATION
if event_type == 'LONG_TERM':
self.event = LongTermMode(self.project_manager,
self.db_session,
self.project_directory,
simulation_start=simulation_start,
simulation_end=simulation_end,
simulation_duration=simulation_duration,
)
else: # 'EVENT'
self.event = EventMode(self.project_manager,
self.db_session,
self.project_directory,
simulation_start=simulation_start,
simulation_duration=simulation_duration,
)
self.event.add_uniform_precip_event(intensity=rain_intensity,
duration=rain_duration) |
def series_fetch_by_relname(self, name, column):
""" Construct DataFrame with component timeseries data from filtered
table data.
Parameters
----------
name : str
Component name.
column : str
Component field with timevarying data.
Returns
-------
pd.DataFrame
Component data.
"""
ormclass = self._mapped[name]
# TODO: This is implemented in a not very robust way.
id_column = re.findall(r'[A-Z][^A-Z]*', name)[0] + '_' + 'id'
id_column = id_column.lower()
query = self.session.query(
getattr(ormclass, id_column),
getattr(ormclass, column)[self.start_snapshot: self.end_snapshot].
label(column)).filter(and_(
ormclass.scn_name == self.scn_name,
ormclass.temp_id == self.temp_id))
if self.version:
query = query.filter(ormclass.version == self.version)
df = pd.io.sql.read_sql(query.statement,
self.session.bind,
columns=[column],
index_col=id_column)
df.index = df.index.astype(str)
# change of format to fit pypsa
df = df[column].apply(pd.Series).transpose()
try:
assert not df.empty
df.index = self.timeindex
except AssertionError:
print("No data for %s in column %s." % (name, column))
return df | Construct DataFrame with component timeseries data from filtered
table data.
Parameters
----------
name : str
Component name.
column : str
Component field with timevarying data.
Returns
-------
pd.DataFrame
Component data. | Below is the instruction that describes the task:
### Input:
Construct DataFrame with component timeseries data from filtered
table data.
Parameters
----------
name : str
Component name.
column : str
Component field with timevarying data.
Returns
-------
pd.DataFrame
Component data.
### Response:
def series_fetch_by_relname(self, name, column):
""" Construct DataFrame with component timeseries data from filtered
table data.
Parameters
----------
name : str
Component name.
column : str
Component field with timevarying data.
Returns
-------
pd.DataFrame
Component data.
"""
ormclass = self._mapped[name]
# TODO: This is implemented in a not very robust way.
id_column = re.findall(r'[A-Z][^A-Z]*', name)[0] + '_' + 'id'
id_column = id_column.lower()
query = self.session.query(
getattr(ormclass, id_column),
getattr(ormclass, column)[self.start_snapshot: self.end_snapshot].
label(column)).filter(and_(
ormclass.scn_name == self.scn_name,
ormclass.temp_id == self.temp_id))
if self.version:
query = query.filter(ormclass.version == self.version)
df = pd.io.sql.read_sql(query.statement,
self.session.bind,
columns=[column],
index_col=id_column)
df.index = df.index.astype(str)
# change of format to fit pypsa
df = df[column].apply(pd.Series).transpose()
try:
assert not df.empty
df.index = self.timeindex
except AssertionError:
print("No data for %s in column %s." % (name, column))
return df |
def Reynolds(V, D, rho=None, mu=None, nu=None):
r'''Calculates Reynolds number or `Re` for a fluid with the given
properties for the specified velocity and diameter.
.. math::
Re = \frac{D \cdot V}{\nu} = \frac{\rho V D}{\mu}
Inputs either of any of the following sets:
* V, D, density `rho` and dynamic viscosity `mu`
* V, D, and kinematic viscosity `nu`
Parameters
----------
V : float
Velocity [m/s]
D : float
Diameter [m]
rho : float, optional
Density, [kg/m^3]
mu : float, optional
Dynamic viscosity, [Pa*s]
nu : float, optional
Kinematic viscosity, [m^2/s]
Returns
-------
Re : float
Reynolds number []
Notes
-----
.. math::
Re = \frac{\text{Momentum}}{\text{Viscosity}}
An error is raised if none of the required input sets are provided.
Examples
--------
>>> Reynolds(2.5, 0.25, 1.1613, 1.9E-5)
38200.65789473684
>>> Reynolds(2.5, 0.25, nu=1.636e-05)
38202.93398533008
References
----------
.. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
Eighth Edition. McGraw-Hill Professional, 2007.
.. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
Applications. Boston: McGraw Hill Higher Education, 2006.
'''
if rho and mu:
nu = mu/rho
elif not nu:
raise Exception('Either density and dynamic viscosity, or kinematic viscosity, \
is needed')
return V*D/nu | r'''Calculates Reynolds number or `Re` for a fluid with the given
properties for the specified velocity and diameter.
.. math::
Re = \frac{D \cdot V}{\nu} = \frac{\rho V D}{\mu}
Inputs either of any of the following sets:
* V, D, density `rho` and dynamic viscosity `mu`
* V, D, and kinematic viscosity `nu`
Parameters
----------
V : float
Velocity [m/s]
D : float
Diameter [m]
rho : float, optional
Density, [kg/m^3]
mu : float, optional
Dynamic viscosity, [Pa*s]
nu : float, optional
Kinematic viscosity, [m^2/s]
Returns
-------
Re : float
Reynolds number []
Notes
-----
.. math::
Re = \frac{\text{Momentum}}{\text{Viscosity}}
An error is raised if none of the required input sets are provided.
Examples
--------
>>> Reynolds(2.5, 0.25, 1.1613, 1.9E-5)
38200.65789473684
>>> Reynolds(2.5, 0.25, nu=1.636e-05)
38202.93398533008
References
----------
.. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
Eighth Edition. McGraw-Hill Professional, 2007.
.. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
Applications. Boston: McGraw Hill Higher Education, 2006. | Below is the instruction that describes the task:
### Input:
r'''Calculates Reynolds number or `Re` for a fluid with the given
properties for the specified velocity and diameter.
.. math::
Re = \frac{D \cdot V}{\nu} = \frac{\rho V D}{\mu}
Inputs either of any of the following sets:
* V, D, density `rho` and kinematic viscosity `mu`
* V, D, and dynamic viscosity `nu`
Parameters
----------
V : float
Velocity [m/s]
D : float
Diameter [m]
rho : float, optional
Density, [kg/m^3]
mu : float, optional
Dynamic viscosity, [Pa*s]
nu : float, optional
Kinematic viscosity, [m^2/s]
Returns
-------
Re : float
Reynolds number []
Notes
-----
.. math::
Re = \frac{\text{Momentum}}{\text{Viscosity}}
An error is raised if none of the required input sets are provided.
Examples
--------
>>> Reynolds(2.5, 0.25, 1.1613, 1.9E-5)
38200.65789473684
>>> Reynolds(2.5, 0.25, nu=1.636e-05)
38202.93398533008
References
----------
.. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
Eighth Edition. McGraw-Hill Professional, 2007.
.. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
Applications. Boston: McGraw Hill Higher Education, 2006.
### Response:
def Reynolds(V, D, rho=None, mu=None, nu=None):
r'''Calculates Reynolds number or `Re` for a fluid with the given
properties for the specified velocity and diameter.
.. math::
Re = \frac{D \cdot V}{\nu} = \frac{\rho V D}{\mu}
Inputs either of any of the following sets:
* V, D, density `rho` and dynamic viscosity `mu`
* V, D, and kinematic viscosity `nu`
Parameters
----------
V : float
Velocity [m/s]
D : float
Diameter [m]
rho : float, optional
Density, [kg/m^3]
mu : float, optional
Dynamic viscosity, [Pa*s]
nu : float, optional
Kinematic viscosity, [m^2/s]
Returns
-------
Re : float
Reynolds number []
Notes
-----
.. math::
Re = \frac{\text{Momentum}}{\text{Viscosity}}
An error is raised if none of the required input sets are provided.
Examples
--------
>>> Reynolds(2.5, 0.25, 1.1613, 1.9E-5)
38200.65789473684
>>> Reynolds(2.5, 0.25, nu=1.636e-05)
38202.93398533008
References
----------
.. [1] Green, Don, and Robert Perry. Perry's Chemical Engineers' Handbook,
Eighth Edition. McGraw-Hill Professional, 2007.
.. [2] Cengel, Yunus, and John Cimbala. Fluid Mechanics: Fundamentals and
Applications. Boston: McGraw Hill Higher Education, 2006.
'''
if rho and mu:
nu = mu/rho
elif not nu:
raise Exception('Either density and dynamic viscosity, or kinematic viscosity, \
is needed')
return V*D/nu |
def get_database(self, database_name=None, username=None, password=None):
"""
Get a pymongo database handle, after authenticating.
Authenticates using the username/password in the DB URI given to
__init__() unless username/password is supplied as arguments.
:param database_name: (optional) Name of database
:param username: (optional) Username to login with
:param password: (optional) Password to login with
:return: Pymongo database object
"""
if database_name is None:
database_name = self._database_name
if database_name is None:
raise ValueError('No database_name supplied, and no default provided to __init__')
db = self._connection[database_name]
if username and password:
db.authenticate(username, password)
elif self._parsed_uri.get("username", None):
if 'authSource' in self._options and self._options['authSource'] is not None:
db.authenticate(
self._parsed_uri.get("username", None),
self._parsed_uri.get("password", None),
source=self._options['authSource']
)
else:
db.authenticate(
self._parsed_uri.get("username", None),
self._parsed_uri.get("password", None)
)
return db | Get a pymongo database handle, after authenticating.
Authenticates using the username/password in the DB URI given to
__init__() unless username/password is supplied as arguments.
:param database_name: (optional) Name of database
:param username: (optional) Username to login with
:param password: (optional) Password to login with
:return: Pymongo database object | Below is the instruction that describes the task:
### Input:
Get a pymongo database handle, after authenticating.
Authenticates using the username/password in the DB URI given to
__init__() unless username/password is supplied as arguments.
:param database_name: (optional) Name of database
:param username: (optional) Username to login with
:param password: (optional) Password to login with
:return: Pymongo database object
### Response:
def get_database(self, database_name=None, username=None, password=None):
"""
Get a pymongo database handle, after authenticating.
Authenticates using the username/password in the DB URI given to
__init__() unless username/password is supplied as arguments.
:param database_name: (optional) Name of database
:param username: (optional) Username to login with
:param password: (optional) Password to login with
:return: Pymongo database object
"""
if database_name is None:
database_name = self._database_name
if database_name is None:
raise ValueError('No database_name supplied, and no default provided to __init__')
db = self._connection[database_name]
if username and password:
db.authenticate(username, password)
elif self._parsed_uri.get("username", None):
if 'authSource' in self._options and self._options['authSource'] is not None:
db.authenticate(
self._parsed_uri.get("username", None),
self._parsed_uri.get("password", None),
source=self._options['authSource']
)
else:
db.authenticate(
self._parsed_uri.get("username", None),
self._parsed_uri.get("password", None)
)
return db |
def doInteractions(self, number=1):
""" Directly maps the agents and the tasks.
"""
t0 = time.time()
for _ in range(number):
self._oneInteraction()
elapsed = time.time() - t0
logger.info("%d interactions executed in %.3fs." % (number, elapsed))
return self.stepid | Directly maps the agents and the tasks. | Below is the instruction that describes the task:
### Input:
Directly maps the agents and the tasks.
### Response:
def doInteractions(self, number=1):
""" Directly maps the agents and the tasks.
"""
t0 = time.time()
for _ in range(number):
self._oneInteraction()
elapsed = time.time() - t0
logger.info("%d interactions executed in %.3fs." % (number, elapsed))
return self.stepid |
def check_dependencies(model, model_queue, avaliable_models):
""" Check that all the dependencies for this model are already in the queue. """
# A list of allowed links: existing fields, itself and the special case ContentType
allowed_links = [m.model.__name__ for m in model_queue] + [model.__name__, 'ContentType']
# For each ForeignKey or ManyToMany field, check that a link is possible
for field in model._meta.fields:
if not field.remote_field:
continue
if field.remote_field.model.__name__ not in allowed_links:
if field.remote_field.model not in avaliable_models:
continue
return False
for field in model._meta.many_to_many:
if not field.remote_field:
continue
if field.remote_field.model.__name__ not in allowed_links:
return False
return True | Check that all the dependencies for this model are already in the queue. | Below is the instruction that describes the task:
### Input:
Check that all the dependencies for this model are already in the queue.
### Response:
def check_dependencies(model, model_queue, avaliable_models):
""" Check that all the dependencies for this model are already in the queue. """
# A list of allowed links: existing fields, itself and the special case ContentType
allowed_links = [m.model.__name__ for m in model_queue] + [model.__name__, 'ContentType']
# For each ForeignKey or ManyToMany field, check that a link is possible
for field in model._meta.fields:
if not field.remote_field:
continue
if field.remote_field.model.__name__ not in allowed_links:
if field.remote_field.model not in avaliable_models:
continue
return False
for field in model._meta.many_to_many:
if not field.remote_field:
continue
if field.remote_field.model.__name__ not in allowed_links:
return False
return True |
def build_project(self):
""" Build IAR project """
# > IarBuild [project_path] -build [project_name]
proj_path = join(getcwd(), self.workspace['files']['ewp'])
if proj_path.split('.')[-1] != 'ewp':
proj_path += '.ewp'
if not os.path.exists(proj_path):
logger.debug("The file: %s does not exist, exported prior to building?" % proj_path)
return -1
logger.debug("Building IAR project: %s" % proj_path)
args = [join(self.env_settings.get_env_settings('iar'), 'IarBuild.exe'), proj_path, '-build', os.path.splitext(os.path.basename(self.workspace['files']['ewp']))[0]]
logger.debug(args)
try:
p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
except:
logger.error("Project: %s build failed. Please check IARBUILD path in the user_settings.py file." % self.workspace['files']['ewp'])
return -1
else:
build_log_path = os.path.join(os.path.dirname(proj_path),'build_log.txt')
with open(build_log_path, 'w') as f:
f.write(output)
num_errors = self._parse_subprocess_output(output)
if num_errors == 0:
logger.info("Project: %s build completed." % self.workspace['files']['ewp'])
return 0
else:
logger.error("Project: %s build failed with %d errors" %
(self.workspace['files']['ewp'], num_errors))
return -1 | Build IAR project | Below is the instruction that describes the task:
### Input:
Build IAR project
### Response:
def build_project(self):
""" Build IAR project """
# > IarBuild [project_path] -build [project_name]
proj_path = join(getcwd(), self.workspace['files']['ewp'])
if proj_path.split('.')[-1] != 'ewp':
proj_path += '.ewp'
if not os.path.exists(proj_path):
logger.debug("The file: %s does not exist, exported prior to building?" % proj_path)
return -1
logger.debug("Building IAR project: %s" % proj_path)
args = [join(self.env_settings.get_env_settings('iar'), 'IarBuild.exe'), proj_path, '-build', os.path.splitext(os.path.basename(self.workspace['files']['ewp']))[0]]
logger.debug(args)
try:
p = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE)
output, err = p.communicate()
except:
logger.error("Project: %s build failed. Please check IARBUILD path in the user_settings.py file." % self.workspace['files']['ewp'])
return -1
else:
build_log_path = os.path.join(os.path.dirname(proj_path),'build_log.txt')
with open(build_log_path, 'w') as f:
f.write(output)
num_errors = self._parse_subprocess_output(output)
if num_errors == 0:
logger.info("Project: %s build completed." % self.workspace['files']['ewp'])
return 0
else:
logger.error("Project: %s build failed with %d errors" %
(self.workspace['files']['ewp'], num_errors))
return -1 |
def _prepare_temp_dir(self):
"""Cleans up and prepares the temporary directory."""
if not shell_call(['sudo', 'rm', '-rf', os.path.join(self._temp_dir, '*')]):
logging.error('Failed to cleanup temporary directory.')
sys.exit(1)
# NOTE: we do not create self._extracted_submission_dir
# this is intentional because self._tmp_extracted_dir or its subdir
# will be renamed into self._extracted_submission_dir
os.mkdir(self._tmp_extracted_dir)
os.mkdir(self._sample_input_dir)
os.mkdir(self._sample_output_dir)
# make output dir world writable
shell_call(['chmod', 'a+rwX', '-R', self._sample_output_dir]) | Cleans up and prepares the temporary directory. | Below is the instruction that describes the task:
### Input:
Cleans up and prepares the temporary directory.
### Response:
def _prepare_temp_dir(self):
"""Cleans up and prepares the temporary directory."""
if not shell_call(['sudo', 'rm', '-rf', os.path.join(self._temp_dir, '*')]):
logging.error('Failed to cleanup temporary directory.')
sys.exit(1)
# NOTE: we do not create self._extracted_submission_dir
# this is intentional because self._tmp_extracted_dir or its subdir
# will be renamed into self._extracted_submission_dir
os.mkdir(self._tmp_extracted_dir)
os.mkdir(self._sample_input_dir)
os.mkdir(self._sample_output_dir)
# make output dir world writable
shell_call(['chmod', 'a+rwX', '-R', self._sample_output_dir]) |
def range_expr(arg):
'''
Accepts a range expression which generates a range of values for a variable.
Linear space range: "linspace:1,2,10" (start, stop, num) as in numpy.linspace
Pythonic range: "range:1,10,2" (start, stop[, step]) as in Python's range
Case range: "case:a,b,c" (comma-separated strings)
'''
key, value = arg.split('=', maxsplit=1)
assert _rx_range_key.match(key), 'The key must be a valid slug string.'
try:
if value.startswith('case:'):
return key, value[5:].split(',')
elif value.startswith('linspace:'):
start, stop, num = value[9:].split(',')
return key, tuple(drange(Decimal(start), Decimal(stop), int(num)))
elif value.startswith('range:'):
range_args = map(int, value[6:].split(','))
return key, tuple(range(*range_args))
else:
raise ArgumentTypeError('Unrecognized range expression type')
except ValueError as e:
raise ArgumentTypeError(str(e)) | Accepts a range expression which generates a range of values for a variable.
Linear space range: "linspace:1,2,10" (start, stop, num) as in numpy.linspace
Pythonic range: "range:1,10,2" (start, stop[, step]) as in Python's range
Case range: "case:a,b,c" (comma-separated strings) | Below is the instruction that describes the task:
### Input:
Accepts a range expression which generates a range of values for a variable.
Linear space range: "linspace:1,2,10" (start, stop, num) as in numpy.linspace
Pythonic range: "range:1,10,2" (start, stop[, step]) as in Python's range
Case range: "case:a,b,c" (comma-separated strings)
### Response:
def range_expr(arg):
'''
Accepts a range expression which generates a range of values for a variable.
Linear space range: "linspace:1,2,10" (start, stop, num) as in numpy.linspace
Pythonic range: "range:1,10,2" (start, stop[, step]) as in Python's range
Case range: "case:a,b,c" (comma-separated strings)
'''
key, value = arg.split('=', maxsplit=1)
assert _rx_range_key.match(key), 'The key must be a valid slug string.'
try:
if value.startswith('case:'):
return key, value[5:].split(',')
elif value.startswith('linspace:'):
start, stop, num = value[9:].split(',')
return key, tuple(drange(Decimal(start), Decimal(stop), int(num)))
elif value.startswith('range:'):
range_args = map(int, value[6:].split(','))
return key, tuple(range(*range_args))
else:
raise ArgumentTypeError('Unrecognized range expression type')
except ValueError as e:
raise ArgumentTypeError(str(e)) |
def deconvolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
w_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None, with_bias=True,
apply_w=None, apply_b=None):
"""
Deconvolution layer.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of deconvolution kernels (which is equal to the number of output channels). For example, to apply deconvolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply deconvolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
apply_w (function): Lambda, function, or callable object applied to the weights.
apply_b (function): Lambda, function, or callable object applied to the bias.
Returns:
:class:`~nnabla.Variable`: N-D array. See :obj:`~nnabla.functions.deconvolution` for the output shape.
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(outmaps, inp.shape[base_axis], tuple(kernel)), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", (inp.shape[base_axis], outmaps // group) + tuple(kernel),
w_init, True, not fix_parameters)
if apply_w is not None:
w = apply_w(w)
b = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
if apply_b is not None:
b = apply_b(b)
return F.deconvolution(inp, w, b, base_axis, pad, stride, dilation, group) | Deconvolution layer.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of deconvolution kernels (which is equal to the number of output channels). For example, to apply deconvolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply deconvolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
apply_w (function): Lambda, function, or callable object applied to the weights.
apply_b (function): Lambda, function, or callable object applied to the bias.
Returns:
:class:`~nnabla.Variable`: N-D array. See :obj:`~nnabla.functions.deconvolution` for the output shape. | Below is the instruction that describes the task:
### Input:
Deconvolution layer.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of deconvolution kernels (which is equal to the number of output channels). For example, to apply deconvolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply deconvolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
apply_w (function): Lambda, function, or callable object applied to the weights.
apply_b (function): Lambda, function, or callable object applied to the bias.
Returns:
:class:`~nnabla.Variable`: N-D array. See :obj:`~nnabla.functions.deconvolution` for the output shape.
### Response:
def deconvolution(inp, outmaps, kernel,
pad=None, stride=None, dilation=None, group=1,
w_init=None, b_init=None,
base_axis=1, fix_parameters=False, rng=None, with_bias=True,
apply_w=None, apply_b=None):
"""
Deconvolution layer.
Args:
inp (~nnabla.Variable): N-D array.
outmaps (int): Number of deconvolution kernels (which is equal to the number of output channels). For example, to apply deconvolution on an input with 16 types of filters, specify 16.
kernel (:obj:`tuple` of :obj:`int`): Convolution kernel size. For example, to apply deconvolution on an image with a 3 (height) by 5 (width) two-dimensional kernel, specify (3,5).
pad (:obj:`tuple` of :obj:`int`): Padding sizes for dimensions.
stride (:obj:`tuple` of :obj:`int`): Stride sizes for dimensions.
dilation (:obj:`tuple` of :obj:`int`): Dilation sizes for dimensions.
group (int): Number of groups of channels. This makes connections across channels sparser by grouping connections along map direction.
w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`.
b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`.
base_axis (int): Dimensions up to `base_axis` are treated as the sample dimensions.
fix_parameters (bool): When set to `True`, the weights and biases will not be updated.
rng (numpy.random.RandomState): Random generator for Initializer.
with_bias (bool): Specify whether to include the bias term.
apply_w (function): Lambda, function, or callable object applied to the weights.
apply_b (function): Lambda, function, or callable object applied to the bias.
Returns:
:class:`~nnabla.Variable`: N-D array. See :obj:`~nnabla.functions.deconvolution` for the output shape.
"""
if w_init is None:
w_init = UniformInitializer(
calc_uniform_lim_glorot(outmaps, inp.shape[base_axis], tuple(kernel)), rng=rng)
if with_bias and b_init is None:
b_init = ConstantInitializer()
w = get_parameter_or_create(
"W", (inp.shape[base_axis], outmaps // group) + tuple(kernel),
w_init, True, not fix_parameters)
if apply_w is not None:
w = apply_w(w)
b = None
if with_bias:
b = get_parameter_or_create(
"b", (outmaps,), b_init, True, not fix_parameters)
if apply_b is not None:
b = apply_b(b)
return F.deconvolution(inp, w, b, base_axis, pad, stride, dilation, group) |
def cachedir_index_del(minion_id, base=None):
'''
Delete an entry from the cachedir index. This generally only needs to happen
when an instance is deleted.
'''
base = init_cachedir(base)
index_file = os.path.join(base, 'index.p')
lock_file(index_file)
if os.path.exists(index_file):
mode = 'rb' if six.PY3 else 'r'
with salt.utils.files.fopen(index_file, mode) as fh_:
index = salt.utils.data.decode(
salt.utils.msgpack.load(fh_, encoding=MSGPACK_ENCODING))
else:
return
if minion_id in index:
del index[minion_id]
mode = 'wb' if six.PY3 else 'w'
with salt.utils.files.fopen(index_file, mode) as fh_:
salt.utils.msgpack.dump(index, fh_, encoding=MSGPACK_ENCODING)
unlock_file(index_file) | Delete an entry from the cachedir index. This generally only needs to happen
when an instance is deleted. | Below is the instruction that describes the task:
### Input:
Delete an entry from the cachedir index. This generally only needs to happen
when an instance is deleted.
### Response:
def cachedir_index_del(minion_id, base=None):
'''
Delete an entry from the cachedir index. This generally only needs to happen
when an instance is deleted.
'''
base = init_cachedir(base)
index_file = os.path.join(base, 'index.p')
lock_file(index_file)
if os.path.exists(index_file):
mode = 'rb' if six.PY3 else 'r'
with salt.utils.files.fopen(index_file, mode) as fh_:
index = salt.utils.data.decode(
salt.utils.msgpack.load(fh_, encoding=MSGPACK_ENCODING))
else:
return
if minion_id in index:
del index[minion_id]
mode = 'wb' if six.PY3 else 'w'
with salt.utils.files.fopen(index_file, mode) as fh_:
salt.utils.msgpack.dump(index, fh_, encoding=MSGPACK_ENCODING)
unlock_file(index_file) |
def instance():
"""Return a PyVabamorf instance.
It returns the previously initialized instance or creates a new
one if nothing exists. Also creates a new instance in case the
process has been forked.
"""
if not hasattr(Vabamorf, 'pid') or Vabamorf.pid != os.getpid():
Vabamorf.pid = os.getpid()
Vabamorf.morf = Vabamorf()
return Vabamorf.morf | Return a PyVabamorf instance.
It returns the previously initialized instance or creates a new
one if nothing exists. Also creates a new instance in case the
process has been forked. | Below is the instruction that describes the task:
### Input:
Return a PyVabamorf instance.
It returns the previously initialized instance or creates a new
one if nothing exists. Also creates a new instance in case the
process has been forked.
### Response:
def instance():
"""Return a PyVabamorf instance.
It returns the previously initialized instance or creates a new
one if nothing exists. Also creates a new instance in case the
process has been forked.
"""
if not hasattr(Vabamorf, 'pid') or Vabamorf.pid != os.getpid():
Vabamorf.pid = os.getpid()
Vabamorf.morf = Vabamorf()
return Vabamorf.morf |
def Calls(self, conditions=None):
"""Find the methods that evaluate data that meets this condition.
Args:
conditions: A tuple of (artifact, os_name, cpe, label)
Returns:
A list of methods that evaluate the data.
"""
results = set()
if conditions is None:
conditions = [None]
for condition in conditions:
for c in self.Match(*condition):
results.update(self._registry.get(c, []))
return results | Find the methods that evaluate data that meets this condition.
Args:
conditions: A tuple of (artifact, os_name, cpe, label)
Returns:
A list of methods that evaluate the data. | Below is the instruction that describes the task:
### Input:
Find the methods that evaluate data that meets this condition.
Args:
conditions: A tuple of (artifact, os_name, cpe, label)
Returns:
A list of methods that evaluate the data.
### Response:
def Calls(self, conditions=None):
"""Find the methods that evaluate data that meets this condition.
Args:
conditions: A tuple of (artifact, os_name, cpe, label)
Returns:
A list of methods that evaluate the data.
"""
results = set()
if conditions is None:
conditions = [None]
for condition in conditions:
for c in self.Match(*condition):
results.update(self._registry.get(c, []))
return results |
def list_members(self, retrieve_all=True, **_params):
"""Fetches a list of all load balancer members for a project."""
# Pass filters in "params" argument to do_request
return self.list('members', self.members_path, retrieve_all,
**_params) | Fetches a list of all load balancer members for a project. | Below is the the instruction that describes the task:
### Input:
Fetches a list of all load balancer members for a project.
### Response:
def list_members(self, retrieve_all=True, **_params):
"""Fetches a list of all load balancer members for a project."""
# Pass filters in "params" argument to do_request
return self.list('members', self.members_path, retrieve_all,
**_params) |
def focus_prev_matching(self, querystring):
"""focus previous matching message in depth first order"""
self.focus_property(lambda x: x._message.matches(querystring),
self._tree.prev_position) | focus previous matching message in depth first order | Below is the the instruction that describes the task:
### Input:
focus previous matching message in depth first order
### Response:
def focus_prev_matching(self, querystring):
"""focus previous matching message in depth first order"""
self.focus_property(lambda x: x._message.matches(querystring),
self._tree.prev_position) |
def raw_query(self, query, query_parameters=None):
"""
        To get all the documents that match the query
@param str query: The rql query
@param dict query_parameters: Add query parameters to the query {key : value}
"""
self.assert_no_raw_query()
if len(self._where_tokens) != 0 or len(self._select_tokens) != 0 or len(
self._order_by_tokens) != 0 or len(self._group_by_tokens) != 0:
raise InvalidOperationException(
"You can only use raw_query on a new query, without applying any operations "
"(such as where, select, order_by, group_by, etc)")
if query_parameters:
self.query_parameters = query_parameters
self._query = query
        return self | To get all the documents that match the query
@param str query: The rql query
@param dict query_parameters: Add query parameters to the query {key : value} | Below is the the instruction that describes the task:
### Input:
To get all the documents that match the query
@param str query: The rql query
@param dict query_parameters: Add query parameters to the query {key : value}
### Response:
def raw_query(self, query, query_parameters=None):
"""
        To get all the documents that match the query
@param str query: The rql query
@param dict query_parameters: Add query parameters to the query {key : value}
"""
self.assert_no_raw_query()
if len(self._where_tokens) != 0 or len(self._select_tokens) != 0 or len(
self._order_by_tokens) != 0 or len(self._group_by_tokens) != 0:
raise InvalidOperationException(
"You can only use raw_query on a new query, without applying any operations "
"(such as where, select, order_by, group_by, etc)")
if query_parameters:
self.query_parameters = query_parameters
self._query = query
return self |
def step5(self):
"""step5() removes a final -e if m() > 1, and changes -ll to -l if
m() > 1.
"""
self.j = self.k
if self.b[self.k] == "e":
a = self.m()
if a > 1 or (a == 1 and not self.cvc(self.k - 1)):
self.k = self.k - 1
if self.b[self.k] == "l" and self.doublec(self.k) and self.m() > 1:
self.k = self.k - 1 | step5() removes a final -e if m() > 1, and changes -ll to -l if
m() > 1. | Below is the the instruction that describes the task:
### Input:
step5() removes a final -e if m() > 1, and changes -ll to -l if
m() > 1.
### Response:
def step5(self):
"""step5() removes a final -e if m() > 1, and changes -ll to -l if
m() > 1.
"""
self.j = self.k
if self.b[self.k] == "e":
a = self.m()
if a > 1 or (a == 1 and not self.cvc(self.k - 1)):
self.k = self.k - 1
if self.b[self.k] == "l" and self.doublec(self.k) and self.m() > 1:
self.k = self.k - 1 |
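A standalone sketch of the same two rules for the common m() > 1 case, where m() is the Porter measure of the stem; the helper below is hypothetical and only mirrors the happy path of step5:

def step5_like(stem, measure):
    # Rule 1: drop a trailing 'e' when the measure is large enough.
    if stem.endswith("e") and measure > 1:
        stem = stem[:-1]          # e.g. "probate" -> "probat"
    # Rule 2: collapse a trailing double 'l'.
    if stem.endswith("ll") and measure > 1:
        stem = stem[:-1]          # e.g. "controll" -> "control"
    return stem
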
def YamlLoader(string):
"""Load an AFF4 object from a serialized YAML representation."""
representation = yaml.Parse(string)
result_cls = aff4.FACTORY.AFF4Object(representation["aff4_class"])
aff4_attributes = {}
for predicate, values in iteritems(representation["attributes"]):
attribute = aff4.Attribute.PREDICATES[predicate]
tmp = aff4_attributes[attribute] = []
for rdfvalue_cls_name, value, age in values:
rdfvalue_cls = aff4.FACTORY.RDFValue(rdfvalue_cls_name)
value = rdfvalue_cls(value, age=rdfvalue.RDFDatetime(age))
tmp.append(value)
# Ensure the object is dirty so when we save it, it can be written to the data
# store.
result = result_cls(
urn=representation["_urn"],
clone=aff4_attributes,
mode="rw",
age=representation["age_policy"])
result.new_attributes, result.synced_attributes = result.synced_attributes, {}
result._dirty = True # pylint: disable=protected-access
return result | Load an AFF4 object from a serialized YAML representation. | Below is the the instruction that describes the task:
### Input:
Load an AFF4 object from a serialized YAML representation.
### Response:
def YamlLoader(string):
"""Load an AFF4 object from a serialized YAML representation."""
representation = yaml.Parse(string)
result_cls = aff4.FACTORY.AFF4Object(representation["aff4_class"])
aff4_attributes = {}
for predicate, values in iteritems(representation["attributes"]):
attribute = aff4.Attribute.PREDICATES[predicate]
tmp = aff4_attributes[attribute] = []
for rdfvalue_cls_name, value, age in values:
rdfvalue_cls = aff4.FACTORY.RDFValue(rdfvalue_cls_name)
value = rdfvalue_cls(value, age=rdfvalue.RDFDatetime(age))
tmp.append(value)
# Ensure the object is dirty so when we save it, it can be written to the data
# store.
result = result_cls(
urn=representation["_urn"],
clone=aff4_attributes,
mode="rw",
age=representation["age_policy"])
result.new_attributes, result.synced_attributes = result.synced_attributes, {}
result._dirty = True # pylint: disable=protected-access
return result |
def init_default(required, default, optional_default):
"""
Returns optional default if field is not required and
default was not provided.
:param bool required: whether the field is required in a given model.
:param default: default provided by creator of field.
:param optional_default: default for the data type if none provided.
:return: default or optional default based on inputs
"""
if not required and default == NOTHING:
default = optional_default
return default | Returns optional default if field is not required and
default was not provided.
:param bool required: whether the field is required in a given model.
:param default: default provided by creator of field.
:param optional_default: default for the data type if none provided.
:return: default or optional default based on inputs | Below is the the instruction that describes the task:
### Input:
Returns optional default if field is not required and
default was not provided.
:param bool required: whether the field is required in a given model.
:param default: default provided by creator of field.
:param optional_default: default for the data type if none provided.
:return: default or optional default based on inputs
### Response:
def init_default(required, default, optional_default):
"""
Returns optional default if field is not required and
default was not provided.
:param bool required: whether the field is required in a given model.
:param default: default provided by creator of field.
:param optional_default: default for the data type if none provided.
:return: default or optional default based on inputs
"""
if not required and default == NOTHING:
default = optional_default
return default |
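A usage sketch, assuming NOTHING is the module's own "no default supplied" sentinel imported alongside init_default:

assert init_default(required=False, default=NOTHING, optional_default=0) == 0       # falls back to the type default
assert init_default(required=False, default=42, optional_default=0) == 42           # an explicit default wins
assert init_default(required=True, default=NOTHING, optional_default=0) is NOTHING  # required fields are left untouched
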
def to_envvars(self):
"""
Export property values to a dictionary with environment variable names as keys.
"""
export = {}
for prop_name in self.profile_properties:
prop = self._get_prop(prop_name)
value = self[prop_name]
if value is not None:
export[prop.get_envvar(self)] = prop.to_str(self, value)
if self._profile_parent_name:
export[
f"{self._envvar_prefix}PARENT_PROFILE".upper()
] = self._profile_parent_name
return export | Export property values to a dictionary with environment variable names as keys. | Below is the the instruction that describes the task:
### Input:
Export property values to a dictionary with environment variable names as keys.
### Response:
def to_envvars(self):
"""
Export property values to a dictionary with environment variable names as keys.
"""
export = {}
for prop_name in self.profile_properties:
prop = self._get_prop(prop_name)
value = self[prop_name]
if value is not None:
export[prop.get_envvar(self)] = prop.to_str(self, value)
if self._profile_parent_name:
export[
f"{self._envvar_prefix}PARENT_PROFILE".upper()
] = self._profile_parent_name
return export |
def get_ip_info(ip_str):
"""
Given a string, it returns a tuple of (IP, Routable).
"""
ip = None
is_routable_ip = False
if is_valid_ip(ip_str):
ip = ip_str
is_routable_ip = is_public_ip(ip)
return ip, is_routable_ip | Given a string, it returns a tuple of (IP, Routable). | Below is the the instruction that describes the task:
### Input:
Given a string, it returns a tuple of (IP, Routable).
### Response:
def get_ip_info(ip_str):
"""
Given a string, it returns a tuple of (IP, Routable).
"""
ip = None
is_routable_ip = False
if is_valid_ip(ip_str):
ip = ip_str
is_routable_ip = is_public_ip(ip)
return ip, is_routable_ip |
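Usage sketch; is_valid_ip and is_public_ip are module-level helpers assumed to be defined next to get_ip_info:

get_ip_info('8.8.8.8')      # ('8.8.8.8', True)   - syntactically valid and public
get_ip_info('10.0.0.1')     # ('10.0.0.1', False) - valid but in a private range
get_ip_info('not-an-ip')    # (None, False)       - invalid input yields no IP
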
def update_detector(self, detector_id, detector):
"""Update an existing detector.
Args:
detector_id (string): the ID of the detector.
detector (object): the detector model object. Will be serialized as
JSON.
Returns:
dictionary of the response (updated detector model).
"""
resp = self._put(self._u(self._DETECTOR_ENDPOINT_SUFFIX, detector_id),
data=detector)
resp.raise_for_status()
return resp.json() | Update an existing detector.
Args:
detector_id (string): the ID of the detector.
detector (object): the detector model object. Will be serialized as
JSON.
Returns:
dictionary of the response (updated detector model). | Below is the the instruction that describes the task:
### Input:
Update an existing detector.
Args:
detector_id (string): the ID of the detector.
detector (object): the detector model object. Will be serialized as
JSON.
Returns:
dictionary of the response (updated detector model).
### Response:
def update_detector(self, detector_id, detector):
"""Update an existing detector.
Args:
detector_id (string): the ID of the detector.
detector (object): the detector model object. Will be serialized as
JSON.
Returns:
dictionary of the response (updated detector model).
"""
resp = self._put(self._u(self._DETECTOR_ENDPOINT_SUFFIX, detector_id),
data=detector)
resp.raise_for_status()
return resp.json() |
def zk_client(host, scheme, credential):
""" returns a connected (and possibly authenticated) ZK client """
if not re.match(r".*:\d+$", host):
host = "%s:%d" % (host, DEFAULT_ZK_PORT)
client = KazooClient(hosts=host)
client.start()
if scheme != "":
client.add_auth(scheme, credential)
return client | returns a connected (and possibly authenticated) ZK client | Below is the the instruction that describes the task:
### Input:
returns a connected (and possibly authenticated) ZK client
### Response:
def zk_client(host, scheme, credential):
""" returns a connected (and possibly authenticated) ZK client """
if not re.match(r".*:\d+$", host):
host = "%s:%d" % (host, DEFAULT_ZK_PORT)
client = KazooClient(hosts=host)
client.start()
if scheme != "":
client.add_auth(scheme, credential)
return client |
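A hedged usage sketch (Kazoo client); DEFAULT_ZK_PORT is the module constant appended when the host string carries no port, and the hostnames below are illustrative:

# No auth: the default port is appended because 'zk1.example.com' has no ':<port>' suffix.
client = zk_client('zk1.example.com', '', '')
print(client.get_children('/'))
client.stop()

# Digest auth: scheme and credential are passed straight to Kazoo's add_auth().
client = zk_client('zk1.example.com:2181', 'digest', 'user:secret')
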
def set_stim_by_index(self, index):
"""Sets the stimulus to be generated to the one referenced by index
:param index: index number of stimulus to set from this class's internal list of stimuli
:type index: int
"""
# remove any current components
self.stimulus.clearComponents()
# add one to index because of tone curve
self.stimulus.insertComponent(self.stim_components[index]) | Sets the stimulus to be generated to the one referenced by index
:param index: index number of stimulus to set from this class's internal list of stimuli
:type index: int | Below is the the instruction that describes the task:
### Input:
Sets the stimulus to be generated to the one referenced by index
:param index: index number of stimulus to set from this class's internal list of stimuli
:type index: int
### Response:
def set_stim_by_index(self, index):
"""Sets the stimulus to be generated to the one referenced by index
:param index: index number of stimulus to set from this class's internal list of stimuli
:type index: int
"""
# remove any current components
self.stimulus.clearComponents()
# add one to index because of tone curve
self.stimulus.insertComponent(self.stim_components[index]) |
def create(lr, alpha, momentum=0, weight_decay=0, epsilon=1e-8):
""" Vel factory function """
return RMSpropTFFactory(lr=lr, alpha=alpha, momentum=momentum, weight_decay=weight_decay, eps=float(epsilon)) | Vel factory function | Below is the the instruction that describes the task:
### Input:
Vel factory function
### Response:
def create(lr, alpha, momentum=0, weight_decay=0, epsilon=1e-8):
""" Vel factory function """
return RMSpropTFFactory(lr=lr, alpha=alpha, momentum=momentum, weight_decay=weight_decay, eps=float(epsilon)) |
def blobs(n_variables=11, n_centers=5, cluster_std=1.0, n_observations=640) -> AnnData:
"""Gaussian Blobs.
Parameters
----------
n_variables : `int`, optional (default: 11)
Dimension of feature space.
n_centers : `int`, optional (default: 5)
Number of cluster centers.
cluster_std : `float`, optional (default: 1.0)
Standard deviation of clusters.
n_observations : `int`, optional (default: 640)
Number of observations. By default, this is the same observation number as in
``sc.datasets.krumsiek11()``.
Returns
-------
Annotated data matrix containing a observation annotation 'blobs' that
indicates cluster identity.
"""
import sklearn.datasets
X, y = sklearn.datasets.make_blobs(n_samples=n_observations,
n_features=n_variables,
centers=n_centers,
cluster_std=cluster_std,
random_state=0)
return AnnData(X, obs={'blobs': y.astype(str)}) | Gaussian Blobs.
Parameters
----------
n_variables : `int`, optional (default: 11)
Dimension of feature space.
n_centers : `int`, optional (default: 5)
Number of cluster centers.
cluster_std : `float`, optional (default: 1.0)
Standard deviation of clusters.
n_observations : `int`, optional (default: 640)
Number of observations. By default, this is the same observation number as in
``sc.datasets.krumsiek11()``.
Returns
-------
Annotated data matrix containing a observation annotation 'blobs' that
indicates cluster identity. | Below is the the instruction that describes the task:
### Input:
Gaussian Blobs.
Parameters
----------
n_variables : `int`, optional (default: 11)
Dimension of feature space.
n_centers : `int`, optional (default: 5)
Number of cluster centers.
cluster_std : `float`, optional (default: 1.0)
Standard deviation of clusters.
n_observations : `int`, optional (default: 640)
Number of observations. By default, this is the same observation number as in
``sc.datasets.krumsiek11()``.
Returns
-------
Annotated data matrix containing a observation annotation 'blobs' that
indicates cluster identity.
### Response:
def blobs(n_variables=11, n_centers=5, cluster_std=1.0, n_observations=640) -> AnnData:
"""Gaussian Blobs.
Parameters
----------
n_variables : `int`, optional (default: 11)
Dimension of feature space.
n_centers : `int`, optional (default: 5)
Number of cluster centers.
cluster_std : `float`, optional (default: 1.0)
Standard deviation of clusters.
n_observations : `int`, optional (default: 640)
Number of observations. By default, this is the same observation number as in
``sc.datasets.krumsiek11()``.
Returns
-------
Annotated data matrix containing a observation annotation 'blobs' that
indicates cluster identity.
"""
import sklearn.datasets
X, y = sklearn.datasets.make_blobs(n_samples=n_observations,
n_features=n_variables,
centers=n_centers,
cluster_std=cluster_std,
random_state=0)
return AnnData(X, obs={'blobs': y.astype(str)}) |
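Usage sketch for the dataset helper; the cluster label ends up as the obs column 'blobs':

adata = blobs(n_variables=11, n_centers=3, n_observations=300)
print(adata.shape)                         # (300, 11)
print(adata.obs['blobs'].value_counts())   # roughly 100 observations per cluster label
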
def _encode_request(self, request):
"""Encode a request object"""
return pickle.dumps(request_to_dict(request, self.spider), protocol=-1) | Encode a request object | Below is the the instruction that describes the task:
### Input:
Encode a request object
### Response:
def _encode_request(self, request):
"""Encode a request object"""
return pickle.dumps(request_to_dict(request, self.spider), protocol=-1) |
def check_calendar_dates(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.calendar_dates``.
"""
table = "calendar_dates"
problems = []
# Preliminary checks
if feed.calendar_dates is None:
return problems
f = feed.calendar_dates.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service_id
problems = check_column(problems, table, f, "service_id", valid_str)
# Check date
problems = check_column(problems, table, f, "date", valid_date)
# No duplicate (service_id, date) pairs allowed
cond = f[["service_id", "date"]].duplicated()
problems = check_table(
problems, table, f, cond, "Repeated pair (service_id, date)"
)
# Check exception_type
v = lambda x: x in [1, 2]
problems = check_column(problems, table, f, "exception_type", v)
return format_problems(problems, as_df=as_df) | Analog of :func:`check_agency` for ``feed.calendar_dates``. | Below is the the instruction that describes the task:
### Input:
Analog of :func:`check_agency` for ``feed.calendar_dates``.
### Response:
def check_calendar_dates(
feed: "Feed", *, as_df: bool = False, include_warnings: bool = False
) -> List:
"""
Analog of :func:`check_agency` for ``feed.calendar_dates``.
"""
table = "calendar_dates"
problems = []
# Preliminary checks
if feed.calendar_dates is None:
return problems
f = feed.calendar_dates.copy()
problems = check_for_required_columns(problems, table, f)
if problems:
return format_problems(problems, as_df=as_df)
if include_warnings:
problems = check_for_invalid_columns(problems, table, f)
# Check service_id
problems = check_column(problems, table, f, "service_id", valid_str)
# Check date
problems = check_column(problems, table, f, "date", valid_date)
# No duplicate (service_id, date) pairs allowed
cond = f[["service_id", "date"]].duplicated()
problems = check_table(
problems, table, f, cond, "Repeated pair (service_id, date)"
)
# Check exception_type
v = lambda x: x in [1, 2]
problems = check_column(problems, table, f, "exception_type", v)
return format_problems(problems, as_df=as_df) |
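A hedged usage sketch, assuming a gtfs_kit-style Feed object that already carries a calendar_dates table:

# feed is assumed to have been built elsewhere, e.g. from a GTFS archive.
problems = check_calendar_dates(feed, as_df=True, include_warnings=True)
# An empty result means the table validated; otherwise each entry names the
# table, a message such as "Repeated pair (service_id, date)", and the
# offending rows.
print(problems)
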
def get(self, attr_name, *args):
""" Get the most retrieval attribute in the configuration file. This method
will recursively look through the configuration file for the attribute specified
and return the last found value or None. The values can be referenced by
the key name provided in the configuration file or that value normalized with
snake_casing.
Usage::
>>> from freight_forwarder.config import Config
>>>
>>> config = Config()
>>> thing = config.get('thing', 'grandparent', 'parent')
:param attr_name: A :string: The configuration property name to get.
        :param *args: A :tuple: of :strings: parent objects in which to look for attr. This is optional.
:return attr value:
"""
if not isinstance(attr_name, six.string_types):
raise TypeError('attr_name must be a str.')
# allow retrieval of data with alias or normalized name
if '-' in attr_name:
attr_name = attr_name.replace('-', '_')
parent_attr = self
attr = getattr(parent_attr, attr_name, None)
for arg in args:
if not isinstance(arg, six.string_types):
raise TypeError(
'each additional argument must be a string. {0} was not a string'.format(arg)
)
if hasattr(parent_attr, arg):
parent_attr = getattr(parent_attr, arg)
if hasattr(parent_attr, attr_name):
attr = getattr(parent_attr, attr_name)
else:
pass
        return attr | Get the most specific value of an attribute in the configuration file. This method
will recursively look through the configuration file for the attribute specified
and return the last found value or None. The values can be referenced by
the key name provided in the configuration file or that value normalized with
snake_casing.
Usage::
>>> from freight_forwarder.config import Config
>>>
>>> config = Config()
>>> thing = config.get('thing', 'grandparent', 'parent')
:param attr_name: A :string: The configuration property name to get.
        :param *args: A :tuple: of :strings: parent objects in which to look for attr. This is optional.
:return attr value: | Below is the the instruction that describes the task:
### Input:
Get the most specific value of an attribute in the configuration file. This method
will recursively look through the configuration file for the attribute specified
and return the last found value or None. The values can be referenced by
the key name provided in the configuration file or that value normalized with
snake_casing.
Usage::
>>> from freight_forwarder.config import Config
>>>
>>> config = Config()
>>> thing = config.get('thing', 'grandparent', 'parent')
:param attr_name: A :string: The configuration property name to get.
        :param *args: A :tuple: of :strings: parent objects in which to look for attr. This is optional.
:return attr value:
### Response:
def get(self, attr_name, *args):
""" Get the most retrieval attribute in the configuration file. This method
will recursively look through the configuration file for the attribute specified
and return the last found value or None. The values can be referenced by
the key name provided in the configuration file or that value normalized with
snake_casing.
Usage::
>>> from freight_forwarder.config import Config
>>>
>>> config = Config()
>>> thing = config.get('thing', 'grandparent', 'parent')
:param attr_name: A :string: The configuration property name to get.
        :param *args: A :tuple: of :strings: parent objects in which to look for attr. This is optional.
:return attr value:
"""
if not isinstance(attr_name, six.string_types):
raise TypeError('attr_name must be a str.')
# allow retrieval of data with alias or normalized name
if '-' in attr_name:
attr_name = attr_name.replace('-', '_')
parent_attr = self
attr = getattr(parent_attr, attr_name, None)
for arg in args:
if not isinstance(arg, six.string_types):
raise TypeError(
'each additional argument must be a string. {0} was not a string'.format(arg)
)
if hasattr(parent_attr, arg):
parent_attr = getattr(parent_attr, arg)
if hasattr(parent_attr, attr_name):
attr = getattr(parent_attr, attr_name)
else:
pass
return attr |
def count_alleles(self, max_allele=None, subpop=None):
"""Count the number of calls of each allele per variant.
Parameters
----------
max_allele : int, optional
The highest allele index to count. Alleles above this will be
ignored.
subpop : sequence of ints, optional
Indices of samples to include in count.
Returns
-------
ac : AlleleCountsArray
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
... [[0, 2], [1, 1]],
... [[2, 2], [-1, -1]]])
>>> g.count_alleles()
<AlleleCountsArray shape=(3, 3) dtype=int32>
3 1 0
1 2 1
0 0 2
>>> g.count_alleles(max_allele=1)
<AlleleCountsArray shape=(3, 2) dtype=int32>
3 1
1 2
0 0
"""
# check inputs
subpop = _normalize_subpop_arg(subpop, self.shape[1])
# determine alleles to count
if max_allele is None:
max_allele = self.max()
# use optimisations
values = memoryview_safe(self.values)
mask = memoryview_safe(self.mask).view(dtype='u1') if self.mask is not None else None
if subpop is None and mask is None:
ac = genotype_array_count_alleles(values, max_allele)
elif subpop is None:
ac = genotype_array_count_alleles_masked(values, mask, max_allele)
elif mask is None:
ac = genotype_array_count_alleles_subpop(values, max_allele, subpop)
else:
ac = genotype_array_count_alleles_subpop_masked(values, mask, max_allele, subpop)
return AlleleCountsArray(ac, copy=False) | Count the number of calls of each allele per variant.
Parameters
----------
max_allele : int, optional
The highest allele index to count. Alleles above this will be
ignored.
subpop : sequence of ints, optional
Indices of samples to include in count.
Returns
-------
ac : AlleleCountsArray
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
... [[0, 2], [1, 1]],
... [[2, 2], [-1, -1]]])
>>> g.count_alleles()
<AlleleCountsArray shape=(3, 3) dtype=int32>
3 1 0
1 2 1
0 0 2
>>> g.count_alleles(max_allele=1)
<AlleleCountsArray shape=(3, 2) dtype=int32>
3 1
1 2
0 0 | Below is the the instruction that describes the task:
### Input:
Count the number of calls of each allele per variant.
Parameters
----------
max_allele : int, optional
The highest allele index to count. Alleles above this will be
ignored.
subpop : sequence of ints, optional
Indices of samples to include in count.
Returns
-------
ac : AlleleCountsArray
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
... [[0, 2], [1, 1]],
... [[2, 2], [-1, -1]]])
>>> g.count_alleles()
<AlleleCountsArray shape=(3, 3) dtype=int32>
3 1 0
1 2 1
0 0 2
>>> g.count_alleles(max_allele=1)
<AlleleCountsArray shape=(3, 2) dtype=int32>
3 1
1 2
0 0
### Response:
def count_alleles(self, max_allele=None, subpop=None):
"""Count the number of calls of each allele per variant.
Parameters
----------
max_allele : int, optional
The highest allele index to count. Alleles above this will be
ignored.
subpop : sequence of ints, optional
Indices of samples to include in count.
Returns
-------
ac : AlleleCountsArray
Examples
--------
>>> import allel
>>> g = allel.GenotypeArray([[[0, 0], [0, 1]],
... [[0, 2], [1, 1]],
... [[2, 2], [-1, -1]]])
>>> g.count_alleles()
<AlleleCountsArray shape=(3, 3) dtype=int32>
3 1 0
1 2 1
0 0 2
>>> g.count_alleles(max_allele=1)
<AlleleCountsArray shape=(3, 2) dtype=int32>
3 1
1 2
0 0
"""
# check inputs
subpop = _normalize_subpop_arg(subpop, self.shape[1])
# determine alleles to count
if max_allele is None:
max_allele = self.max()
# use optimisations
values = memoryview_safe(self.values)
mask = memoryview_safe(self.mask).view(dtype='u1') if self.mask is not None else None
if subpop is None and mask is None:
ac = genotype_array_count_alleles(values, max_allele)
elif subpop is None:
ac = genotype_array_count_alleles_masked(values, mask, max_allele)
elif mask is None:
ac = genotype_array_count_alleles_subpop(values, max_allele, subpop)
else:
ac = genotype_array_count_alleles_subpop_masked(values, mask, max_allele, subpop)
return AlleleCountsArray(ac, copy=False) |
def select_catalogue(self, selector, distance,
distance_metric='joyner-boore', upper_eq_depth=None,
lower_eq_depth=None):
'''
Selects earthquakes within a distance of the fault
:param selector:
Populated instance of :class:
`openquake.hmtk.seismicity.selector.CatalogueSelector`
:param distance:
Distance from point (km) for selection
:param str distance_metric
Choice of fault source distance metric 'joyner-boore' or 'rupture'
:param float upper_eq_depth:
Upper hypocentral depth of hypocentres to be selected
:param float lower_eq_depth:
Lower hypocentral depth of hypocentres to be selected
'''
if selector.catalogue.get_number_events() < 1:
raise ValueError('No events found in catalogue!')
# rupture metric is selected and dip != 90 or 'rupture'
if ('rupture' in distance_metric) and (fabs(self.dip - 90) > 1E-5):
# Use rupture distance
self.catalogue = selector.within_rupture_distance(
self.geometry,
distance,
upper_depth=upper_eq_depth,
lower_depth=lower_eq_depth)
else:
# Use Joyner-Boore distance
self.catalogue = selector.within_joyner_boore_distance(
self.geometry,
distance,
upper_depth=upper_eq_depth,
lower_depth=lower_eq_depth)
if self.catalogue.get_number_events() < 5:
# Throw a warning regarding the small number of earthquakes in
# the source!
warnings.warn('Source %s (%s) has fewer than 5 events'
% (self.id, self.name)) | Selects earthquakes within a distance of the fault
:param selector:
Populated instance of :class:
`openquake.hmtk.seismicity.selector.CatalogueSelector`
:param distance:
Distance from point (km) for selection
:param str distance_metric
Choice of fault source distance metric 'joyner-boore' or 'rupture'
:param float upper_eq_depth:
Upper hypocentral depth of hypocentres to be selected
:param float lower_eq_depth:
Lower hypocentral depth of hypocentres to be selected | Below is the the instruction that describes the task:
### Input:
Selects earthquakes within a distance of the fault
:param selector:
Populated instance of :class:
`openquake.hmtk.seismicity.selector.CatalogueSelector`
:param distance:
Distance from point (km) for selection
:param str distance_metric
Choice of fault source distance metric 'joyner-boore' or 'rupture'
:param float upper_eq_depth:
Upper hypocentral depth of hypocentres to be selected
:param float lower_eq_depth:
Lower hypocentral depth of hypocentres to be selected
### Response:
def select_catalogue(self, selector, distance,
distance_metric='joyner-boore', upper_eq_depth=None,
lower_eq_depth=None):
'''
Selects earthquakes within a distance of the fault
:param selector:
Populated instance of :class:
`openquake.hmtk.seismicity.selector.CatalogueSelector`
:param distance:
Distance from point (km) for selection
:param str distance_metric
Choice of fault source distance metric 'joyner-boore' or 'rupture'
:param float upper_eq_depth:
Upper hypocentral depth of hypocentres to be selected
:param float lower_eq_depth:
Lower hypocentral depth of hypocentres to be selected
'''
if selector.catalogue.get_number_events() < 1:
raise ValueError('No events found in catalogue!')
# rupture metric is selected and dip != 90 or 'rupture'
if ('rupture' in distance_metric) and (fabs(self.dip - 90) > 1E-5):
# Use rupture distance
self.catalogue = selector.within_rupture_distance(
self.geometry,
distance,
upper_depth=upper_eq_depth,
lower_depth=lower_eq_depth)
else:
# Use Joyner-Boore distance
self.catalogue = selector.within_joyner_boore_distance(
self.geometry,
distance,
upper_depth=upper_eq_depth,
lower_depth=lower_eq_depth)
if self.catalogue.get_number_events() < 5:
# Throw a warning regarding the small number of earthquakes in
# the source!
warnings.warn('Source %s (%s) has fewer than 5 events'
% (self.id, self.name)) |
def from_dict(self, document):
"""Create image group object from JSON document retrieved from database.
Parameters
----------
document : JSON
Json document in database
Returns
-------
ImageGroupHandle
Handle for image group object
"""
# Get object attributes from Json document
identifier = str(document['_id'])
# Create list of group images from Json
images = list()
for grp_image in document['images']:
images.append(GroupImage(
grp_image['identifier'],
grp_image['folder'],
grp_image['name'],
os.path.join(
self.image_manager.get_directory(grp_image['identifier']),
grp_image['name']
)
))
# Create list of properties and add group size
properties = document['properties']
properties[PROPERTY_GROUPSIZE] = len(document['images'])
# Directories are simply named by object identifier
directory = os.path.join(self.directory, identifier)
# Create image group handle.
return ImageGroupHandle(
identifier,
properties,
directory,
images,
attribute.attributes_from_dict(document['options']),
timestamp=datetime.datetime.strptime(
document['timestamp'],
'%Y-%m-%dT%H:%M:%S.%f'
),
is_active=document['active']
) | Create image group object from JSON document retrieved from database.
Parameters
----------
document : JSON
Json document in database
Returns
-------
ImageGroupHandle
Handle for image group object | Below is the the instruction that describes the task:
### Input:
Create image group object from JSON document retrieved from database.
Parameters
----------
document : JSON
Json document in database
Returns
-------
ImageGroupHandle
Handle for image group object
### Response:
def from_dict(self, document):
"""Create image group object from JSON document retrieved from database.
Parameters
----------
document : JSON
Json document in database
Returns
-------
ImageGroupHandle
Handle for image group object
"""
# Get object attributes from Json document
identifier = str(document['_id'])
# Create list of group images from Json
images = list()
for grp_image in document['images']:
images.append(GroupImage(
grp_image['identifier'],
grp_image['folder'],
grp_image['name'],
os.path.join(
self.image_manager.get_directory(grp_image['identifier']),
grp_image['name']
)
))
# Create list of properties and add group size
properties = document['properties']
properties[PROPERTY_GROUPSIZE] = len(document['images'])
# Directories are simply named by object identifier
directory = os.path.join(self.directory, identifier)
# Create image group handle.
return ImageGroupHandle(
identifier,
properties,
directory,
images,
attribute.attributes_from_dict(document['options']),
timestamp=datetime.datetime.strptime(
document['timestamp'],
'%Y-%m-%dT%H:%M:%S.%f'
),
is_active=document['active']
) |
def check_image_evaluation(self, image, show_history=False, detail=False, tag=None, policy=None):
'''**Description**
Check the latest policy evaluation for an image
**Arguments**
- image: Input image can be in the following formats: registry/repo:tag
- show_history: Show all previous policy evaluations
- detail: Show detailed policy evaluation report
- tag: Specify which TAG is evaluated for a given image ID or Image Digest
        - policy: Specify which POLICY to use for the evaluation (defaults to the currently active policy)
**Success Return Value**
A JSON object representing the evaluation status.
'''
itype, _, image_digest = self._discover_inputimage(image)
if not image_digest:
return [False, "could not get image record from anchore"]
if not tag and itype != 'tag':
return [False, "input image name is not a tag, and no --tag is specified"]
thetag = tag if tag else image
url = "{base_url}/api/scanning/v1/anchore/images/{image_digest}/check?history={history}&detail={detail}&tag={tag}{policy_id}"
url = url.format(
base_url=self.url,
image_digest=image_digest,
history=str(show_history).lower(),
detail=str(detail).lower(),
tag=thetag,
policy_id=("&policyId=%s" % policy) if policy else "")
res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
return [True, res.json()] | **Description**
Check the latest policy evaluation for an image
**Arguments**
- image: Input image can be in the following formats: registry/repo:tag
- show_history: Show all previous policy evaluations
- detail: Show detailed policy evaluation report
- tag: Specify which TAG is evaluated for a given image ID or Image Digest
        - policy: Specify which POLICY to use for the evaluation (defaults to the currently active policy)
**Success Return Value**
A JSON object representing the evaluation status. | Below is the the instruction that describes the task:
### Input:
**Description**
Check the latest policy evaluation for an image
**Arguments**
- image: Input image can be in the following formats: registry/repo:tag
- show_history: Show all previous policy evaluations
- detail: Show detailed policy evaluation report
- tag: Specify which TAG is evaluated for a given image ID or Image Digest
        - policy: Specify which POLICY to use for the evaluation (defaults to the currently active policy)
**Success Return Value**
A JSON object representing the evaluation status.
### Response:
def check_image_evaluation(self, image, show_history=False, detail=False, tag=None, policy=None):
'''**Description**
Check the latest policy evaluation for an image
**Arguments**
- image: Input image can be in the following formats: registry/repo:tag
- show_history: Show all previous policy evaluations
- detail: Show detailed policy evaluation report
- tag: Specify which TAG is evaluated for a given image ID or Image Digest
- policy: Specify which POLICY to use for evaluate (defaults currently active policy)
**Success Return Value**
A JSON object representing the evaluation status.
'''
itype, _, image_digest = self._discover_inputimage(image)
if not image_digest:
return [False, "could not get image record from anchore"]
if not tag and itype != 'tag':
return [False, "input image name is not a tag, and no --tag is specified"]
thetag = tag if tag else image
url = "{base_url}/api/scanning/v1/anchore/images/{image_digest}/check?history={history}&detail={detail}&tag={tag}{policy_id}"
url = url.format(
base_url=self.url,
image_digest=image_digest,
history=str(show_history).lower(),
detail=str(detail).lower(),
tag=thetag,
policy_id=("&policyId=%s" % policy) if policy else "")
res = requests.get(url, headers=self.hdrs, verify=self.ssl_verify)
if not self._checkResponse(res):
return [False, self.lasterr]
return [True, res.json()] |
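Usage sketch; client stands for an already-authenticated scanning client object of the class shown above, and the image reference is illustrative:

ok, result = client.check_image_evaluation('docker.io/library/nginx:latest',
                                           detail=True)
if ok:
    print(result)            # JSON policy-evaluation report
else:
    print('error:', result)  # e.g. "could not get image record from anchore"
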
def process_pool(self, limited_run=False):
"""Return a pool for multiprocess operations, sized either to the number of CPUS, or a configured value"""
from multiprocessing import cpu_count
from ambry.bundle.concurrent import Pool, init_library
if self.processes:
cpus = self.processes
else:
cpus = cpu_count()
self.logger.info('Starting MP pool with {} processors'.format(cpus))
return Pool(self, processes=cpus, initializer=init_library,
maxtasksperchild=1,
initargs=[self.database.dsn, self._account_password, limited_run]) | Return a pool for multiprocess operations, sized either to the number of CPUS, or a configured value | Below is the the instruction that describes the task:
### Input:
Return a pool for multiprocess operations, sized either to the number of CPUS, or a configured value
### Response:
def process_pool(self, limited_run=False):
"""Return a pool for multiprocess operations, sized either to the number of CPUS, or a configured value"""
from multiprocessing import cpu_count
from ambry.bundle.concurrent import Pool, init_library
if self.processes:
cpus = self.processes
else:
cpus = cpu_count()
self.logger.info('Starting MP pool with {} processors'.format(cpus))
return Pool(self, processes=cpus, initializer=init_library,
maxtasksperchild=1,
initargs=[self.database.dsn, self._account_password, limited_run]) |
def create_login_manager(self, app) -> LoginManager:
"""
Override to implement your custom login manager instance
:param app: Flask app
"""
lm = LoginManager(app)
lm.login_view = "login"
lm.user_loader(self.load_user)
return lm | Override to implement your custom login manager instance
:param app: Flask app | Below is the the instruction that describes the task:
### Input:
Override to implement your custom login manager instance
:param app: Flask app
### Response:
def create_login_manager(self, app) -> LoginManager:
"""
Override to implement your custom login manager instance
:param app: Flask app
"""
lm = LoginManager(app)
lm.login_view = "login"
lm.user_loader(self.load_user)
return lm |
def _valid_other_type(x, types):
"""
Do all elements of x have a type from types?
"""
return all(any(isinstance(el, t) for t in types) for el in np.ravel(x)) | Do all elements of x have a type from types? | Below is the the instruction that describes the task:
### Input:
Do all elements of x have a type from types?
### Response:
def _valid_other_type(x, types):
"""
Do all elements of x have a type from types?
"""
return all(any(isinstance(el, t) for t in types) for el in np.ravel(x)) |
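A quick illustration (np is numpy, already imported by the module; results follow from isinstance checks on the ravelled elements):

from datetime import datetime

_valid_other_type([datetime(2020, 1, 1), datetime(2020, 6, 1)], (datetime,))  # True
_valid_other_type(np.array([1.5, 2.5]), (float,))   # True: np.float64 subclasses float
_valid_other_type(np.array([1.5, 2.5]), (int,))     # False: no element is an int
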
def _parse_logo(self, parsed_content):
"""
Parses the guild logo and saves it to the instance.
Parameters
----------
parsed_content: :class:`bs4.Tag`
The parsed content of the page.
Returns
-------
:class:`bool`
Whether the logo was found or not.
"""
logo_img = parsed_content.find('img', {'height': '64'})
if logo_img is None:
return False
self.logo_url = logo_img["src"]
return True | Parses the guild logo and saves it to the instance.
Parameters
----------
parsed_content: :class:`bs4.Tag`
The parsed content of the page.
Returns
-------
:class:`bool`
Whether the logo was found or not. | Below is the the instruction that describes the task:
### Input:
Parses the guild logo and saves it to the instance.
Parameters
----------
parsed_content: :class:`bs4.Tag`
The parsed content of the page.
Returns
-------
:class:`bool`
Whether the logo was found or not.
### Response:
def _parse_logo(self, parsed_content):
"""
Parses the guild logo and saves it to the instance.
Parameters
----------
parsed_content: :class:`bs4.Tag`
The parsed content of the page.
Returns
-------
:class:`bool`
Whether the logo was found or not.
"""
logo_img = parsed_content.find('img', {'height': '64'})
if logo_img is None:
return False
self.logo_url = logo_img["src"]
return True |
def observed_vis(self, context):
""" Observed visibility data source """
lrow, urow = MS.row_extents(context)
data = self._manager.ordered_main_table.getcol(
self._vis_column, startrow=lrow, nrow=urow-lrow)
return data.reshape(context.shape).astype(context.dtype) | Observed visibility data source | Below is the the instruction that describes the task:
### Input:
Observed visibility data source
### Response:
def observed_vis(self, context):
""" Observed visibility data source """
lrow, urow = MS.row_extents(context)
data = self._manager.ordered_main_table.getcol(
self._vis_column, startrow=lrow, nrow=urow-lrow)
return data.reshape(context.shape).astype(context.dtype) |
def get_endpoints():
"""
get all endpoints known on the Ariane server
:return:
"""
LOGGER.debug("EndpointService.get_endpoints")
params = SessionService.complete_transactional_req(None)
if params is None:
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params = {'OPERATION': 'getEndpoints'}
args = {'properties': params}
else:
args = {'http_operation': 'GET', 'operation_path': ''}
else:
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params['OPERATION'] = 'getEndpoints'
args = {'properties': params}
else:
args = {'http_operation': 'GET', 'operation_path': '', 'parameters': params}
response = EndpointService.requester.call(args)
if MappingService.driver_type != DriverFactory.DRIVER_REST:
response = response.get()
ret = None
if response.rc == 0:
ret = []
for endpoint in response.response_content['endpoints']:
ret.append(Endpoint.json_2_endpoint(endpoint))
elif response.rc != 404:
err_msg = 'EndpointService.get_endpoints - Problem while getting nodes. ' \
'Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) + \
" (" + str(response.rc) + ")"
LOGGER.warning(err_msg)
if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
raise ArianeMappingOverloadError("EndpointService.get_endpoints",
ArianeMappingOverloadError.ERROR_MSG)
# traceback.print_stack()
return ret | get all endpoints known on the Ariane server
:return: | Below is the the instruction that describes the task:
### Input:
get all endpoints known on the Ariane server
:return:
### Response:
def get_endpoints():
"""
get all endpoints known on the Ariane server
:return:
"""
LOGGER.debug("EndpointService.get_endpoints")
params = SessionService.complete_transactional_req(None)
if params is None:
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params = {'OPERATION': 'getEndpoints'}
args = {'properties': params}
else:
args = {'http_operation': 'GET', 'operation_path': ''}
else:
if MappingService.driver_type != DriverFactory.DRIVER_REST:
params['OPERATION'] = 'getEndpoints'
args = {'properties': params}
else:
args = {'http_operation': 'GET', 'operation_path': '', 'parameters': params}
response = EndpointService.requester.call(args)
if MappingService.driver_type != DriverFactory.DRIVER_REST:
response = response.get()
ret = None
if response.rc == 0:
ret = []
for endpoint in response.response_content['endpoints']:
ret.append(Endpoint.json_2_endpoint(endpoint))
elif response.rc != 404:
err_msg = 'EndpointService.get_endpoints - Problem while getting nodes. ' \
'Reason: ' + str(response.response_content) + ' - ' + str(response.error_message) + \
" (" + str(response.rc) + ")"
LOGGER.warning(err_msg)
if response.rc == 500 and ArianeMappingOverloadError.ERROR_MSG in response.error_message:
raise ArianeMappingOverloadError("EndpointService.get_endpoints",
ArianeMappingOverloadError.ERROR_MSG)
# traceback.print_stack()
return ret |
def cache_call_signatures(source, user_pos, stmt):
"""This function calculates the cache key."""
index = user_pos[0] - 1
lines = source.splitlines() or ['']
if source and source[-1] == '\n':
lines.append('')
before_cursor = lines[index][:user_pos[1]]
other_lines = lines[stmt.start_pos[0]:index]
whole = '\n'.join(other_lines + [before_cursor])
before_bracket = re.match(r'.*\(', whole, re.DOTALL)
module_path = stmt.get_parent_until().path
return None if module_path is None else (module_path, before_bracket, stmt.start_pos) | This function calculates the cache key. | Below is the the instruction that describes the task:
### Input:
This function calculates the cache key.
### Response:
def cache_call_signatures(source, user_pos, stmt):
"""This function calculates the cache key."""
index = user_pos[0] - 1
lines = source.splitlines() or ['']
if source and source[-1] == '\n':
lines.append('')
before_cursor = lines[index][:user_pos[1]]
other_lines = lines[stmt.start_pos[0]:index]
whole = '\n'.join(other_lines + [before_cursor])
before_bracket = re.match(r'.*\(', whole, re.DOTALL)
module_path = stmt.get_parent_until().path
return None if module_path is None else (module_path, before_bracket, stmt.start_pos) |
def parse_variable(lexer: Lexer) -> VariableNode:
"""Variable: $Name"""
start = lexer.token
expect_token(lexer, TokenKind.DOLLAR)
return VariableNode(name=parse_name(lexer), loc=loc(lexer, start)) | Variable: $Name | Below is the the instruction that describes the task:
### Input:
Variable: $Name
### Response:
def parse_variable(lexer: Lexer) -> VariableNode:
"""Variable: $Name"""
start = lexer.token
expect_token(lexer, TokenKind.DOLLAR)
return VariableNode(name=parse_name(lexer), loc=loc(lexer, start)) |
def _create_sequence_maps(self):
'''Get all of the SequenceMaps - Rosetta->ATOM, ATOM->SEQRES/FASTA, SEQRES->UniParc.'''
if self.sifts:
self.sifts_atom_to_seqres_sequence_maps = self.sifts.atom_to_seqres_sequence_maps
self.sifts_seqres_to_uniparc_sequence_maps = self.sifts.seqres_to_uniparc_sequence_maps
self.sifts_atom_to_uniparc_sequence_maps = self.sifts.atom_to_uniparc_sequence_maps
if self.pdb_id in pdbs_with_do_not_use_SIFTS_for_these_chains:
for chain_id in self.sifts_atom_to_seqres_sequence_maps.keys() + self.sifts_seqres_to_uniparc_sequence_maps.keys() + self.sifts_atom_to_uniparc_sequence_maps.keys():
if (self.pdb_id, chain_id) in do_not_use_SIFTS_for_these_chains:
self.sifts_atom_to_seqres_sequence_maps[chain_id] = SequenceMap()
self.sifts_seqres_to_uniparc_sequence_maps = SequenceMap()
self.sifts_atom_to_uniparc_sequence_maps = SequenceMap()
if self.pdb_to_rosetta_residue_map_error:
self.rosetta_to_atom_sequence_maps = {}
for c in self.atom_sequences.keys():
self.rosetta_to_atom_sequence_maps[c] = SequenceMap()
else:
self.rosetta_to_atom_sequence_maps = self.pdb.rosetta_to_atom_sequence_maps
# If we removed atoms from the PDB file, we need to remove them from the maps so that our validations hold later on
self.pdbml_atom_to_seqres_sequence_maps = self.pdbml.atom_to_seqres_sequence_maps
if self.pdb_id in ROSETTA_HACKS_residues_to_remove:
for residue_to_remove in ROSETTA_HACKS_residues_to_remove[self.pdb_id]:
chain_id = residue_to_remove[0]
self.pdbml_atom_to_seqres_sequence_maps[chain_id].remove(residue_to_remove)
#if self.sifts:
# self.sifts_atom_to_seqres_sequence_maps[chain_id].remove(residue_to_remove)
if self.pdb_id not in do_not_use_the_sequence_aligner:
self.clustal_seqres_to_uniparc_sequence_maps = self.PDB_UniParc_SA.seqres_to_uniparc_sequence_maps | Get all of the SequenceMaps - Rosetta->ATOM, ATOM->SEQRES/FASTA, SEQRES->UniParc. | Below is the the instruction that describes the task:
### Input:
Get all of the SequenceMaps - Rosetta->ATOM, ATOM->SEQRES/FASTA, SEQRES->UniParc.
### Response:
def _create_sequence_maps(self):
'''Get all of the SequenceMaps - Rosetta->ATOM, ATOM->SEQRES/FASTA, SEQRES->UniParc.'''
if self.sifts:
self.sifts_atom_to_seqres_sequence_maps = self.sifts.atom_to_seqres_sequence_maps
self.sifts_seqres_to_uniparc_sequence_maps = self.sifts.seqres_to_uniparc_sequence_maps
self.sifts_atom_to_uniparc_sequence_maps = self.sifts.atom_to_uniparc_sequence_maps
if self.pdb_id in pdbs_with_do_not_use_SIFTS_for_these_chains:
for chain_id in self.sifts_atom_to_seqres_sequence_maps.keys() + self.sifts_seqres_to_uniparc_sequence_maps.keys() + self.sifts_atom_to_uniparc_sequence_maps.keys():
if (self.pdb_id, chain_id) in do_not_use_SIFTS_for_these_chains:
self.sifts_atom_to_seqres_sequence_maps[chain_id] = SequenceMap()
self.sifts_seqres_to_uniparc_sequence_maps = SequenceMap()
self.sifts_atom_to_uniparc_sequence_maps = SequenceMap()
if self.pdb_to_rosetta_residue_map_error:
self.rosetta_to_atom_sequence_maps = {}
for c in self.atom_sequences.keys():
self.rosetta_to_atom_sequence_maps[c] = SequenceMap()
else:
self.rosetta_to_atom_sequence_maps = self.pdb.rosetta_to_atom_sequence_maps
# If we removed atoms from the PDB file, we need to remove them from the maps so that our validations hold later on
self.pdbml_atom_to_seqres_sequence_maps = self.pdbml.atom_to_seqres_sequence_maps
if self.pdb_id in ROSETTA_HACKS_residues_to_remove:
for residue_to_remove in ROSETTA_HACKS_residues_to_remove[self.pdb_id]:
chain_id = residue_to_remove[0]
self.pdbml_atom_to_seqres_sequence_maps[chain_id].remove(residue_to_remove)
#if self.sifts:
# self.sifts_atom_to_seqres_sequence_maps[chain_id].remove(residue_to_remove)
if self.pdb_id not in do_not_use_the_sequence_aligner:
self.clustal_seqres_to_uniparc_sequence_maps = self.PDB_UniParc_SA.seqres_to_uniparc_sequence_maps |
def _update_state_from_response(self, response_json):
"""
:param response_json: the json obj returned from query
:return:
"""
if 'data' in response_json and response_json['data']['object_type'] == "cloud_clock":
cloud_clock = response_json.get('data')
if cloud_clock is None:
return False
alarms = cloud_clock.get('alarms')
for alarm in alarms:
if alarm.get('object_id') == self.object_id():
self.json_state = alarm
return True
return False
if 'data' in response_json:
alarm = response_json.get('data')
self.json_state = alarm
return True
self.json_state = response_json
return True | :param response_json: the json obj returned from query
:return: | Below is the the instruction that describes the task:
### Input:
:param response_json: the json obj returned from query
:return:
### Response:
def _update_state_from_response(self, response_json):
"""
:param response_json: the json obj returned from query
:return:
"""
if 'data' in response_json and response_json['data']['object_type'] == "cloud_clock":
cloud_clock = response_json.get('data')
if cloud_clock is None:
return False
alarms = cloud_clock.get('alarms')
for alarm in alarms:
if alarm.get('object_id') == self.object_id():
self.json_state = alarm
return True
return False
if 'data' in response_json:
alarm = response_json.get('data')
self.json_state = alarm
return True
self.json_state = response_json
return True |
def conference_play(self, call_params):
"""REST Conference Play helper
"""
path = '/' + self.api_version + '/ConferencePlay/'
method = 'POST'
return self.request(path, method, call_params) | REST Conference Play helper | Below is the the instruction that describes the task:
### Input:
REST Conference Play helper
### Response:
def conference_play(self, call_params):
"""REST Conference Play helper
"""
path = '/' + self.api_version + '/ConferencePlay/'
method = 'POST'
return self.request(path, method, call_params) |
def detect_extracellular_compartment(model):
"""Detect the identifier for equations with extracellular compartments.
Args:
model: :class:`NativeModel`.
"""
extracellular_key = Counter()
for reaction in model.reactions:
equation = reaction.equation
if equation is None:
continue
if len(equation.compounds) == 1:
compound, _ = equation.compounds[0]
compartment = compound.compartment
extracellular_key[compartment] += 1
if len(extracellular_key) == 0:
return None
else:
best_key, _ = extracellular_key.most_common(1)[0]
logger.info('{} is extracellular compartment'.format(best_key))
return best_key | Detect the identifier for equations with extracellular compartments.
Args:
model: :class:`NativeModel`. | Below is the the instruction that describes the task:
### Input:
Detect the identifier for equations with extracellular compartments.
Args:
model: :class:`NativeModel`.
### Response:
def detect_extracellular_compartment(model):
"""Detect the identifier for equations with extracellular compartments.
Args:
model: :class:`NativeModel`.
"""
extracellular_key = Counter()
for reaction in model.reactions:
equation = reaction.equation
if equation is None:
continue
if len(equation.compounds) == 1:
compound, _ = equation.compounds[0]
compartment = compound.compartment
extracellular_key[compartment] += 1
if len(extracellular_key) == 0:
return None
else:
best_key, _ = extracellular_key.most_common(1)[0]
logger.info('{} is extracellular compartment'.format(best_key))
return best_key |
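The heuristic boils down to a Counter over the compartments of single-compound (exchange-style) reactions; a toy illustration of that counting step, with made-up compartment codes:

from collections import Counter

compartments_of_exchange_reactions = ['e', 'e', 'e', 'c']
best_key, _ = Counter(compartments_of_exchange_reactions).most_common(1)[0]
print(best_key)   # 'e' would be reported as the extracellular compartment
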
def _conv_adr(adr, entry):
"""Converts to Abook address format"""
if adr.value.street:
entry['address'] = adr.value.street
if adr.value.extended:
entry['address2'] = adr.value.extended
if adr.value.city:
entry['city'] = adr.value.city
if adr.value.region:
entry['state'] = adr.value.region
if adr.value.code and adr.value.code != '0':
entry['zip'] = adr.value.code
if adr.value.country:
entry['country'] = adr.value.country | Converts to Abook address format | Below is the the instruction that describes the task:
### Input:
Converts to Abook address format
### Response:
def _conv_adr(adr, entry):
"""Converts to Abook address format"""
if adr.value.street:
entry['address'] = adr.value.street
if adr.value.extended:
entry['address2'] = adr.value.extended
if adr.value.city:
entry['city'] = adr.value.city
if adr.value.region:
entry['state'] = adr.value.region
if adr.value.code and adr.value.code != '0':
entry['zip'] = adr.value.code
if adr.value.country:
entry['country'] = adr.value.country |
def process_files():
"""
Process files with a single progress bar
"""
with enlighten.Counter(total=100, desc='Simple', unit='ticks') as pbar:
for num in range(100): # pylint: disable=unused-variable
time.sleep(0.05)
pbar.update() | Process files with a single progress bar | Below is the the instruction that describes the task:
### Input:
Process files with a single progress bar
### Response:
def process_files():
"""
Process files with a single progress bar
"""
with enlighten.Counter(total=100, desc='Simple', unit='ticks') as pbar:
for num in range(100): # pylint: disable=unused-variable
time.sleep(0.05)
pbar.update() |
def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
"""
key = self.get_cache_key(name, filename)
checksum = self.get_source_checksum(source)
bucket = Bucket(environment, key, checksum)
self.load_bytecode(bucket)
return bucket | Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`. | Below is the the instruction that describes the task:
### Input:
Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
### Response:
def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
"""
key = self.get_cache_key(name, filename)
checksum = self.get_source_checksum(source)
bucket = Bucket(environment, key, checksum)
self.load_bytecode(bucket)
return bucket |
def parse_link(value):
"""Return a list of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list
"""
links = []
replace_chars = ' \'"'
value = value.strip(replace_chars)
if not value:
return links
for val in re.split(', *<', value):
try:
url, params = val.split(';', 1)
except ValueError:
url, params = val, ''
link = {'url': url.strip('<> \'"')}
for param in params.split(';'):
try:
key, value = param.split('=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links | Return a list of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list | Below is the the instruction that describes the task:
### Input:
Return a list of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list
### Response:
def parse_link(value):
"""Return a list of parsed link headers proxies.
i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"
:rtype: list
"""
links = []
replace_chars = ' \'"'
value = value.strip(replace_chars)
if not value:
return links
for val in re.split(', *<', value):
try:
url, params = val.split(';', 1)
except ValueError:
url, params = val, ''
link = {'url': url.strip('<> \'"')}
for param in params.split(';'):
try:
key, value = param.split('=')
except ValueError:
break
link[key.strip(replace_chars)] = value.strip(replace_chars)
links.append(link)
return links |
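A quick usage sketch for the parse_link record above, assuming the function is in scope; the Link header value and URLs are invented for the example.

# Hedged usage sketch for parse_link; the header value below is made up.
sample = ('<http://example.com/front.jpeg>; rel=front; type="image/jpeg", '
          '<http://example.com/back.jpeg>; rel=back; type="image/jpeg"')
for link in parse_link(sample):
    print(link['url'], link.get('rel'), link.get('type'))
# http://example.com/front.jpeg front image/jpeg
# http://example.com/back.jpeg back image/jpeg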
def remove_udp_port(self, port):
"""
Removes an associated UDP port number from this project.
:param port: UDP port number
"""
if port in self._used_udp_ports:
self._used_udp_ports.remove(port) | Removes an associated UDP port number from this project.
:param port: UDP port number | Below is the the instruction that describes the task:
### Input:
Removes an associated UDP port number from this project.
:param port: UDP port number
### Response:
def remove_udp_port(self, port):
"""
Removes an associated UDP port number from this project.
:param port: UDP port number
"""
if port in self._used_udp_ports:
self._used_udp_ports.remove(port) |
def make_prefetchitem_accessedfilelist_accessedfile(accessed_file, condition='contains', negate=False,
preserve_case=False):
"""
Create a node for PrefetchItem/AccessedFileList/AccessedFile
:return: A IndicatorItem represented as an Element node
"""
document = 'PrefetchItem'
search = 'PrefetchItem/AccessedFileList/AccessedFile'
content_type = 'string'
content = accessed_file
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate, preserve_case=preserve_case)
return ii_node | Create a node for PrefetchItem/AccessedFileList/AccessedFile
:return: A IndicatorItem represented as an Element node | Below is the the instruction that describes the task:
### Input:
Create a node for PrefetchItem/AccessedFileList/AccessedFile
:return: A IndicatorItem represented as an Element node
### Response:
def make_prefetchitem_accessedfilelist_accessedfile(accessed_file, condition='contains', negate=False,
preserve_case=False):
"""
Create a node for PrefetchItem/AccessedFileList/AccessedFile
:return: A IndicatorItem represented as an Element node
"""
document = 'PrefetchItem'
search = 'PrefetchItem/AccessedFileList/AccessedFile'
content_type = 'string'
content = accessed_file
ii_node = ioc_api.make_indicatoritem_node(condition, document, search, content_type, content,
negate=negate, preserve_case=preserve_case)
return ii_node |
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_2_0):
"""
Write the Attributes structure encoding to the data stream.
Args:
output_stream (stream): A data stream in which to encode
Attributes structure data, supporting a write method.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 2.0.
Raises:
AttributeNotSupported: Raised if an unsupported attribute is
found in the attribute list while encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the Attributes object.
"""
if kmip_version < enums.KMIPVersion.KMIP_2_0:
raise exceptions.VersionNotSupported(
"KMIP {} does not support the Attributes object.".format(
kmip_version.value
)
)
local_stream = BytearrayStream()
for attribute in self._attributes:
tag = attribute.tag
if not enums.is_attribute(tag, kmip_version=kmip_version):
raise exceptions.AttributeNotSupported(
"Attribute {} is not supported by KMIP {}.".format(
tag.name,
kmip_version.value
)
)
attribute.write(local_stream, kmip_version=kmip_version)
self.length = local_stream.length()
super(Attributes, self).write(output_stream, kmip_version=kmip_version)
output_stream.write(local_stream.buffer) | Write the Attributes structure encoding to the data stream.
Args:
output_stream (stream): A data stream in which to encode
Attributes structure data, supporting a write method.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 2.0.
Raises:
AttributeNotSupported: Raised if an unsupported attribute is
found in the attribute list while encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the Attributes object. | Below is the the instruction that describes the task:
### Input:
Write the Attributes structure encoding to the data stream.
Args:
output_stream (stream): A data stream in which to encode
Attributes structure data, supporting a write method.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 2.0.
Raises:
AttributeNotSupported: Raised if an unsupported attribute is
found in the attribute list while encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the Attributes object.
### Response:
def write(self, output_stream, kmip_version=enums.KMIPVersion.KMIP_2_0):
"""
Write the Attributes structure encoding to the data stream.
Args:
output_stream (stream): A data stream in which to encode
Attributes structure data, supporting a write method.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be encoded. Optional,
defaults to KMIP 2.0.
Raises:
AttributeNotSupported: Raised if an unsupported attribute is
found in the attribute list while encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the Attributes object.
"""
if kmip_version < enums.KMIPVersion.KMIP_2_0:
raise exceptions.VersionNotSupported(
"KMIP {} does not support the Attributes object.".format(
kmip_version.value
)
)
local_stream = BytearrayStream()
for attribute in self._attributes:
tag = attribute.tag
if not enums.is_attribute(tag, kmip_version=kmip_version):
raise exceptions.AttributeNotSupported(
"Attribute {} is not supported by KMIP {}.".format(
tag.name,
kmip_version.value
)
)
attribute.write(local_stream, kmip_version=kmip_version)
self.length = local_stream.length()
super(Attributes, self).write(output_stream, kmip_version=kmip_version)
output_stream.write(local_stream.buffer) |
def _aix_get_machine_id():
'''
Parse the output of lsattr -El sys0 for os_uuid
'''
grains = {}
cmd = salt.utils.path.which('lsattr')
if cmd:
data = __salt__['cmd.run']('{0} -El sys0'.format(cmd)) + os.linesep
uuid_regexes = [re.compile(r'(?im)^\s*os_uuid\s+(\S+)\s+(.*)')]
for regex in uuid_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['machine_id'] = res.group(1).strip()
break
else:
log.error('The \'lsattr\' binary was not found in $PATH.')
return grains | Parse the output of lsattr -El sys0 for os_uuid | Below is the the instruction that describes the task:
### Input:
Parse the output of lsattr -El sys0 for os_uuid
### Response:
def _aix_get_machine_id():
'''
Parse the output of lsattr -El sys0 for os_uuid
'''
grains = {}
cmd = salt.utils.path.which('lsattr')
if cmd:
data = __salt__['cmd.run']('{0} -El sys0'.format(cmd)) + os.linesep
uuid_regexes = [re.compile(r'(?im)^\s*os_uuid\s+(\S+)\s+(.*)')]
for regex in uuid_regexes:
res = regex.search(data)
if res and len(res.groups()) >= 1:
grains['machine_id'] = res.group(1).strip()
break
else:
log.error('The \'lsattr\' binary was not found in $PATH.')
return grains |
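The interesting part of the record above is the os_uuid regex; a standalone sketch with an invented lsattr output line (no Salt required):

import re

sample_output = "os_uuid 6e4c1c2a-1111-2222-3333-444455556666 OS Unique Identifier\n"
match = re.compile(r'(?im)^\s*os_uuid\s+(\S+)\s+(.*)').search(sample_output)
if match:
    print(match.group(1))  # 6e4c1c2a-1111-2222-3333-444455556666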
def t_NUMBER(self, t):
r'(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?(kb|gb|mb|tb|pb|Kb|Gb|Mb|Tb|Pb)?'
if re.match(r'^(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?(kb|gb|mb|tb|pb|Kb|Gb|Mb|Tb|Pb)?$',t.value):
multiplyer = 1
try:
suffix = (t.value[-2:]).lower()
if suffix in ['kb']:
multiplyer = 1024
elif suffix in ['mb']:
multiplyer = 1024*1024
elif suffix in ['gb']:
multiplyer = 1024*1024*1024
elif suffix in ['tb']:
multiplyer = 1024*1024*1024*1024
elif suffix in ['pb']:
multiplyer = 1024*1024*1024*1024*1024
if multiplyer > 1:
t.value = t.value[:-2]
except:
pass
try:
f = float(t.value)
try:
e = int(t.value)
except ValueError:
e = f
if (e == f):
t.value = multiplyer*e
else:
t.value = multiplyer*f
except ValueError:
_LOGGER.error("el valor %s no es un numero valido" % t.value)
t.value = 0
else:
t.type = 'STRING'
return t | r'(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?(kb|gb|mb|tb|pb|Kb|Gb|Mb|Tb|Pb)? | Below is the the instruction that describes the task:
### Input:
r'(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?(kb|gb|mb|tb|pb|Kb|Gb|Mb|Tb|Pb)?
### Response:
def t_NUMBER(self, t):
r'(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?(kb|gb|mb|tb|pb|Kb|Gb|Mb|Tb|Pb)?'
if re.match(r'^(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?(kb|gb|mb|tb|pb|Kb|Gb|Mb|Tb|Pb)?$',t.value):
multiplyer = 1
try:
suffix = (t.value[-2:]).lower()
if suffix in ['kb']:
multiplyer = 1024
elif suffix in ['mb']:
multiplyer = 1024*1024
elif suffix in ['gb']:
multiplyer = 1024*1024*1024
elif suffix in ['tb']:
multiplyer = 1024*1024*1024*1024
elif suffix in ['pb']:
multiplyer = 1024*1024*1024*1024*1024
if multiplyer > 1:
t.value = t.value[:-2]
except:
pass
try:
f = float(t.value)
try:
e = int(t.value)
except ValueError:
e = f
if (e == f):
t.value = multiplyer*e
else:
t.value = multiplyer*f
except ValueError:
_LOGGER.error("el valor %s no es un numero valido" % t.value)
t.value = 0
else:
t.type = 'STRING'
return t |
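The size-suffix handling in t_NUMBER can be illustrated outside a PLY lexer; the sketch below is a simplified version of the same multiplier logic with invented inputs, not the lexer rule itself:

def parse_size(text):
    # kb/mb/gb/tb/pb suffixes scale the numeric part, as in t_NUMBER above
    multipliers = {'kb': 1024, 'mb': 1024**2, 'gb': 1024**3,
                   'tb': 1024**4, 'pb': 1024**5}
    factor = multipliers.get(text[-2:].lower(), 1)
    if factor > 1:
        text = text[:-2]
    number = float(text)
    return factor * (int(number) if number == int(number) else number)

print(parse_size('10kb'))   # 10240
print(parse_size('1.5Mb'))  # 1572864.0
print(parse_size('42'))     # 42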
def _search(self, words, include=None, exclude=None, lookup=None):
'''Full text search. Return a list of queries to intersect.'''
lookup = lookup or 'contains'
query = self.router.worditem.query()
if include:
query = query.filter(model_type__in=include)
if exclude:
query = query.exclude(model_type__in=exclude)
if not words:
return [query]
qs = []
if lookup == 'in':
# we are looking for items with at least one word in it
qs.append(query.filter(word__in=words))
elif lookup == 'contains':
#we want to match every single words
for word in words:
qs.append(query.filter(word=word))
else:
raise ValueError('Unknown lookup "{0}"'.format(lookup))
return qs | Full text search. Return a list of queries to intersect. | Below is the the instruction that describes the task:
### Input:
Full text search. Return a list of queries to intersect.
### Response:
def _search(self, words, include=None, exclude=None, lookup=None):
'''Full text search. Return a list of queries to intersect.'''
lookup = lookup or 'contains'
query = self.router.worditem.query()
if include:
query = query.filter(model_type__in=include)
if exclude:
query = query.exclude(model_type__in=exclude)
if not words:
return [query]
qs = []
if lookup == 'in':
# we are looking for items with at least one word in it
qs.append(query.filter(word__in=words))
elif lookup == 'contains':
#we want to match every single words
for word in words:
qs.append(query.filter(word=word))
else:
raise ValueError('Unknown lookup "{0}"'.format(lookup))
return qs |
def set_size(self, width_in_points, height_in_points):
"""Changes the size of a PostScript surface
for the current (and subsequent) pages.
This method should only be called
before any drawing operations have been performed on the current page.
The simplest way to do this is to call this method
immediately after creating the surface
or immediately after completing a page with either
:meth:`~Surface.show_page` or :meth:`~Surface.copy_page`.
:param width_in_points:
New width of the page, in points (1 point == 1/72.0 inch)
:param height_in_points:
New height of the page, in points (1 point == 1/72.0 inch)
:type width_in_points: float
:type height_in_points: float
"""
cairo.cairo_ps_surface_set_size(
self._pointer, width_in_points, height_in_points)
self._check_status() | Changes the size of a PostScript surface
for the current (and subsequent) pages.
This method should only be called
before any drawing operations have been performed on the current page.
The simplest way to do this is to call this method
immediately after creating the surface
or immediately after completing a page with either
:meth:`~Surface.show_page` or :meth:`~Surface.copy_page`.
:param width_in_points:
New width of the page, in points (1 point == 1/72.0 inch)
:param height_in_points:
New height of the page, in points (1 point == 1/72.0 inch)
:type width_in_points: float
:type height_in_points: float | Below is the the instruction that describes the task:
### Input:
Changes the size of a PostScript surface
for the current (and subsequent) pages.
This method should only be called
before any drawing operations have been performed on the current page.
The simplest way to do this is to call this method
immediately after creating the surface
or immediately after completing a page with either
:meth:`~Surface.show_page` or :meth:`~Surface.copy_page`.
:param width_in_points:
New width of the page, in points (1 point == 1/72.0 inch)
:param height_in_points:
New height of the page, in points (1 point == 1/72.0 inch)
:type width_in_points: float
:type height_in_points: float
### Response:
def set_size(self, width_in_points, height_in_points):
"""Changes the size of a PostScript surface
for the current (and subsequent) pages.
This method should only be called
before any drawing operations have been performed on the current page.
The simplest way to do this is to call this method
immediately after creating the surface
or immediately after completing a page with either
:meth:`~Surface.show_page` or :meth:`~Surface.copy_page`.
:param width_in_points:
New width of the page, in points (1 point == 1/72.0 inch)
:param height_in_points:
New height of the page, in points (1 point == 1/72.0 inch)
:type width_in_points: float
:type height_in_points: float
"""
cairo.cairo_ps_surface_set_size(
self._pointer, width_in_points, height_in_points)
self._check_status() |
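A hedged usage sketch for set_size; it assumes cairocffi is installed and that PSSurface takes a target plus a page size in points, and the output file name is arbitrary:

import cairocffi

surface = cairocffi.PSSurface('example.ps', 612, 792)  # US letter, portrait
surface.set_size(792, 612)       # switch to landscape before any drawing
ctx = cairocffi.Context(surface)
ctx.rectangle(72, 72, 144, 72)
ctx.stroke()
surface.show_page()
surface.finish()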
def is_outlier(df, item_id, segment_id, price):
"""
Verify if a item is an outlier compared to the
other occurrences of the same item, based on his price.
Args:
item_id: idPlanilhaItens
segment_id: idSegmento
price: VlUnitarioAprovado
"""
if (segment_id, item_id) not in df.index:
return False
mean = df.loc[(segment_id, item_id)]['mean']
std = df.loc[(segment_id, item_id)]['std']
return gaussian_outlier.is_outlier(
x=price, mean=mean, standard_deviation=std
) | Verify if a item is an outlier compared to the
other occurrences of the same item, based on his price.
Args:
item_id: idPlanilhaItens
segment_id: idSegmento
price: VlUnitarioAprovado | Below is the the instruction that describes the task:
### Input:
Verify if a item is an outlier compared to the
other occurrences of the same item, based on his price.
Args:
item_id: idPlanilhaItens
segment_id: idSegmento
price: VlUnitarioAprovado
### Response:
def is_outlier(df, item_id, segment_id, price):
"""
Verify if a item is an outlier compared to the
other occurrences of the same item, based on his price.
Args:
item_id: idPlanilhaItens
segment_id: idSegmento
price: VlUnitarioAprovado
"""
if (segment_id, item_id) not in df.index:
return False
mean = df.loc[(segment_id, item_id)]['mean']
std = df.loc[(segment_id, item_id)]['std']
return gaussian_outlier.is_outlier(
x=price, mean=mean, standard_deviation=std
) |
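The record above assumes a DataFrame indexed by (idSegmento, idPlanilhaItens) with per-item 'mean' and 'std' columns; gaussian_outlier.is_outlier is not shown, so the sketch below uses a plain z-score threshold as a stand-in, with invented numbers:

import pandas as pd

df = pd.DataFrame(
    {'mean': [100.0], 'std': [10.0]},
    index=pd.MultiIndex.from_tuples([('seg1', 'itemA')],
                                    names=['idSegmento', 'idPlanilhaItens']),
)

def zscore_outlier(x, mean, standard_deviation, threshold=3.0):
    # stand-in for gaussian_outlier.is_outlier: flag values beyond N sigma
    return abs(x - mean) > threshold * standard_deviation

mean, std = df.loc[('seg1', 'itemA')][['mean', 'std']]
print(zscore_outlier(150.0, mean, std))  # True, 5 sigma above the mean
print(zscore_outlier(105.0, mean, std))  # False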
def redo(self):
"""Called when an image is set in the channel."""
image = self.channel.get_current_image()
if image is None:
return True
path = image.get('path', None)
if path is None:
self.fv.show_error(
"Cannot open image: no value for metadata key 'path'")
return
# TODO: How to properly reset GUI components?
# They are still showing info from prev FITS.
# No-op for ASDF
if path.endswith('asdf'):
return True
if path != self.img_path:
# <-- New file is being looked at
self.img_path = path
# close previous file opener, if any
if self.file_obj is not None:
try:
self.file_obj.close()
except Exception:
pass
self.file_obj = io_fits.get_fitsloader(logger=self.logger)
# TODO: specify 'readonly' somehow?
self.file_obj.open_file(path)
upper = len(self.file_obj) - 1
self.prep_hdu_menu(self.w.hdu, self.file_obj.hdu_info)
self.num_hdu = upper
self.logger.debug("there are %d hdus" % (upper + 1))
self.w.numhdu.set_text("%d" % (upper + 1))
self.w.hdu.set_enabled(len(self.file_obj) > 0)
name = image.get('name', iohelper.name_image_from_path(path))
idx = image.get('idx', None)
# remove index designation from root of name, if any
match = re.match(r'^(.+)\[(.+)\]$', name)
if match:
name = match.group(1)
self.name_pfx = name
htype = None
if idx is not None:
# set the HDU in the drop down if known
info = self.file_obj.hdu_db.get(idx, None)
if info is not None:
htype = info.htype.lower()
toc_ent = self._toc_fmt % info
self.w.hdu.show_text(toc_ent)
# rebuild the NAXIS controls, if necessary
# No two images in the same channel can have the same name.
# Here we keep track of the name to decide if we need to rebuild
if self.img_name != name:
self.img_name = name
dims = [0, 0]
data = image.get_data()
if data is None:
# <- empty data part to this HDU
self.logger.warning("Empty data part in HDU %s" % (str(idx)))
elif htype in ('bintablehdu', 'tablehdu',):
pass
elif htype not in ('imagehdu', 'primaryhdu', 'compimagehdu'):
self.logger.warning("HDU %s is not an image (%s)" % (
str(idx), htype))
else:
mddata = image.get_mddata()
if mddata is not None:
dims = list(mddata.shape)
dims.reverse()
self.build_naxis(dims, image) | Called when an image is set in the channel. | Below is the the instruction that describes the task:
### Input:
Called when an image is set in the channel.
### Response:
def redo(self):
"""Called when an image is set in the channel."""
image = self.channel.get_current_image()
if image is None:
return True
path = image.get('path', None)
if path is None:
self.fv.show_error(
"Cannot open image: no value for metadata key 'path'")
return
# TODO: How to properly reset GUI components?
# They are still showing info from prev FITS.
# No-op for ASDF
if path.endswith('asdf'):
return True
if path != self.img_path:
# <-- New file is being looked at
self.img_path = path
# close previous file opener, if any
if self.file_obj is not None:
try:
self.file_obj.close()
except Exception:
pass
self.file_obj = io_fits.get_fitsloader(logger=self.logger)
# TODO: specify 'readonly' somehow?
self.file_obj.open_file(path)
upper = len(self.file_obj) - 1
self.prep_hdu_menu(self.w.hdu, self.file_obj.hdu_info)
self.num_hdu = upper
self.logger.debug("there are %d hdus" % (upper + 1))
self.w.numhdu.set_text("%d" % (upper + 1))
self.w.hdu.set_enabled(len(self.file_obj) > 0)
name = image.get('name', iohelper.name_image_from_path(path))
idx = image.get('idx', None)
# remove index designation from root of name, if any
match = re.match(r'^(.+)\[(.+)\]$', name)
if match:
name = match.group(1)
self.name_pfx = name
htype = None
if idx is not None:
# set the HDU in the drop down if known
info = self.file_obj.hdu_db.get(idx, None)
if info is not None:
htype = info.htype.lower()
toc_ent = self._toc_fmt % info
self.w.hdu.show_text(toc_ent)
# rebuild the NAXIS controls, if necessary
# No two images in the same channel can have the same name.
# Here we keep track of the name to decide if we need to rebuild
if self.img_name != name:
self.img_name = name
dims = [0, 0]
data = image.get_data()
if data is None:
# <- empty data part to this HDU
self.logger.warning("Empty data part in HDU %s" % (str(idx)))
elif htype in ('bintablehdu', 'tablehdu',):
pass
elif htype not in ('imagehdu', 'primaryhdu', 'compimagehdu'):
self.logger.warning("HDU %s is not an image (%s)" % (
str(idx), htype))
else:
mddata = image.get_mddata()
if mddata is not None:
dims = list(mddata.shape)
dims.reverse()
self.build_naxis(dims, image) |
def Softsign(a):
"""
Softsign op.
"""
return np.divide(a, np.add(np.abs(a), 1)), | Softsign op. | Below is the the instruction that describes the task:
### Input:
Softsign op.
### Response:
def Softsign(a):
"""
Softsign op.
"""
return np.divide(a, np.add(np.abs(a), 1)), |
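Softsign computes x / (1 + |x|); the trailing comma in the return wraps the array in a 1-tuple, which suggests ops in this backend return tuples of outputs. A small numpy check:

import numpy as np

x = np.array([-10.0, -1.0, 0.0, 1.0, 10.0])
print(np.divide(x, np.add(np.abs(x), 1)))
# approximately [-0.909 -0.5 0. 0.5 0.909]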
def send_template_email(recipients, title_template, body_template, context, language):
"""Sends e-mail using templating system"""
send_emails = getattr(settings, 'SEND_PLANS_EMAILS', True)
if not send_emails:
return
site_name = getattr(settings, 'SITE_NAME', 'Please define settings.SITE_NAME')
domain = getattr(settings, 'SITE_URL', None)
if domain is None:
try:
Site = apps.get_model('sites', 'Site')
current_site = Site.objects.get_current()
site_name = current_site.name
domain = current_site.domain
except LookupError:
pass
context.update({'site_name': site_name, 'site_domain': domain})
if language is not None:
translation.activate(language)
mail_title_template = loader.get_template(title_template)
mail_body_template = loader.get_template(body_template)
title = mail_title_template.render(context)
body = mail_body_template.render(context)
try:
email_from = getattr(settings, 'DEFAULT_FROM_EMAIL')
except AttributeError:
raise ImproperlyConfigured('DEFAULT_FROM_EMAIL setting needed for sending e-mails')
mail.send_mail(title, body, email_from, recipients)
if language is not None:
translation.deactivate()
email_logger.info(u"Email (%s) sent to %s\nTitle: %s\n%s\n\n" % (language, recipients, title, body)) | Sends e-mail using templating system | Below is the the instruction that describes the task:
### Input:
Sends e-mail using templating system
### Response:
def send_template_email(recipients, title_template, body_template, context, language):
"""Sends e-mail using templating system"""
send_emails = getattr(settings, 'SEND_PLANS_EMAILS', True)
if not send_emails:
return
site_name = getattr(settings, 'SITE_NAME', 'Please define settings.SITE_NAME')
domain = getattr(settings, 'SITE_URL', None)
if domain is None:
try:
Site = apps.get_model('sites', 'Site')
current_site = Site.objects.get_current()
site_name = current_site.name
domain = current_site.domain
except LookupError:
pass
context.update({'site_name': site_name, 'site_domain': domain})
if language is not None:
translation.activate(language)
mail_title_template = loader.get_template(title_template)
mail_body_template = loader.get_template(body_template)
title = mail_title_template.render(context)
body = mail_body_template.render(context)
try:
email_from = getattr(settings, 'DEFAULT_FROM_EMAIL')
except AttributeError:
raise ImproperlyConfigured('DEFAULT_FROM_EMAIL setting needed for sending e-mails')
mail.send_mail(title, body, email_from, recipients)
if language is not None:
translation.deactivate()
email_logger.info(u"Email (%s) sent to %s\nTitle: %s\n%s\n\n" % (language, recipients, title, body)) |
def uniqued(layer, column=0):
"""Group inputs to a layer, so that the layer only has to compute
for the unique values. The data is transformed back before output, and the same
transformation is applied for the gradient. Effectively, this is a cache
local to each minibatch.
The uniqued wrapper is useful for word inputs, because common words are
seen often, but we may want to compute complicated features for the words,
using e.g. character LSTM.
"""
def uniqued_fwd(X, drop=0.0):
keys = X[:, column]
keys = layer.ops.xp.ascontiguousarray(keys)
if not isinstance(keys, numpy.ndarray):
keys = keys.get()
uniq_keys, ind, inv, counts = numpy.unique(
keys, return_index=True, return_inverse=True, return_counts=True
)
X_uniq = layer.ops.xp.ascontiguousarray(X[ind])
Y_uniq, bp_Y_uniq = layer.begin_update(X_uniq, drop=drop)
Y = Y_uniq[inv].reshape((X.shape[0],) + Y_uniq.shape[1:])
def uniqued_bwd(dY, sgd=None):
dY_uniq = layer.ops.allocate(Y_uniq.shape, dtype="f")
layer.ops.scatter_add(dY_uniq, layer.ops.asarray(inv, dtype="i"), dY)
d_uniques = bp_Y_uniq(dY_uniq, sgd=sgd)
if d_uniques is not None:
dX = (d_uniques / counts)[inv]
return dX
else:
return None
return Y, uniqued_bwd
model = wrap(uniqued_fwd, layer)
return model | Group inputs to a layer, so that the layer only has to compute
for the unique values. The data is transformed back before output, and the same
transformation is applied for the gradient. Effectively, this is a cache
local to each minibatch.
The uniqued wrapper is useful for word inputs, because common words are
seen often, but we may want to compute complicated features for the words,
using e.g. character LSTM. | Below is the the instruction that describes the task:
### Input:
Group inputs to a layer, so that the layer only has to compute
for the unique values. The data is transformed back before output, and the same
transformation is applied for the gradient. Effectively, this is a cache
local to each minibatch.
The uniqued wrapper is useful for word inputs, because common words are
seen often, but we may want to compute complicated features for the words,
using e.g. character LSTM.
### Response:
def uniqued(layer, column=0):
"""Group inputs to a layer, so that the layer only has to compute
for the unique values. The data is transformed back before output, and the same
transformation is applied for the gradient. Effectively, this is a cache
local to each minibatch.
The uniqued wrapper is useful for word inputs, because common words are
seen often, but we may want to compute complicated features for the words,
using e.g. character LSTM.
"""
def uniqued_fwd(X, drop=0.0):
keys = X[:, column]
keys = layer.ops.xp.ascontiguousarray(keys)
if not isinstance(keys, numpy.ndarray):
keys = keys.get()
uniq_keys, ind, inv, counts = numpy.unique(
keys, return_index=True, return_inverse=True, return_counts=True
)
X_uniq = layer.ops.xp.ascontiguousarray(X[ind])
Y_uniq, bp_Y_uniq = layer.begin_update(X_uniq, drop=drop)
Y = Y_uniq[inv].reshape((X.shape[0],) + Y_uniq.shape[1:])
def uniqued_bwd(dY, sgd=None):
dY_uniq = layer.ops.allocate(Y_uniq.shape, dtype="f")
layer.ops.scatter_add(dY_uniq, layer.ops.asarray(inv, dtype="i"), dY)
d_uniques = bp_Y_uniq(dY_uniq, sgd=sgd)
if d_uniques is not None:
dX = (d_uniques / counts)[inv]
return dX
else:
return None
return Y, uniqued_bwd
model = wrap(uniqued_fwd, layer)
return model |
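The caching trick in uniqued rests on numpy's unique with return_index/return_inverse; a standalone illustration with invented keys:

import numpy as np

keys = np.array([3, 7, 3, 3, 9, 7])
uniq, ind, inv, counts = np.unique(keys, return_index=True,
                                   return_inverse=True, return_counts=True)
expensive = uniq * 100      # stand-in for the wrapped layer's computation
restored = expensive[inv]   # back to the original order and length
print(uniq)      # [3 7 9]
print(restored)  # [300 700 300 300 900 700]
print(counts)    # [3 2 1]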
def dict_of(validate_key, validate_item):
"""Returns a validator function that succeeds only if the input is a dict, and each key and value in the dict passes
as input to the provided validators validate_key and validate_item, respectively.
:param callable validate_key: the validator function for keys in the dict
:param callable validate_item: the validator function for values in the list
:returns: a function which returns True its input is an dict of valid items, and raises TypeError otherwise
:rtype: callable
"""
def validate(value, should_raise=True):
validate_type = is_type(dict)
if not validate_type(value, should_raise=should_raise):
return False
for key, item in value.items():
try:
validate_key(key)
except TypeError as e:
if should_raise:
samtranslator.model.exceptions.prepend(e, "dict contained an invalid key")
raise
return False
try:
validate_item(item)
except TypeError as e:
if should_raise:
samtranslator.model.exceptions.prepend(e, "dict contained an invalid value")
raise
return False
return True
return validate | Returns a validator function that succeeds only if the input is a dict, and each key and value in the dict passes
as input to the provided validators validate_key and validate_item, respectively.
:param callable validate_key: the validator function for keys in the dict
:param callable validate_item: the validator function for values in the list
:returns: a function which returns True its input is an dict of valid items, and raises TypeError otherwise
:rtype: callable | Below is the the instruction that describes the task:
### Input:
Returns a validator function that succeeds only if the input is a dict, and each key and value in the dict passes
as input to the provided validators validate_key and validate_item, respectively.
:param callable validate_key: the validator function for keys in the dict
:param callable validate_item: the validator function for values in the list
:returns: a function which returns True its input is an dict of valid items, and raises TypeError otherwise
:rtype: callable
### Response:
def dict_of(validate_key, validate_item):
"""Returns a validator function that succeeds only if the input is a dict, and each key and value in the dict passes
as input to the provided validators validate_key and validate_item, respectively.
:param callable validate_key: the validator function for keys in the dict
:param callable validate_item: the validator function for values in the list
:returns: a function which returns True its input is an dict of valid items, and raises TypeError otherwise
:rtype: callable
"""
def validate(value, should_raise=True):
validate_type = is_type(dict)
if not validate_type(value, should_raise=should_raise):
return False
for key, item in value.items():
try:
validate_key(key)
except TypeError as e:
if should_raise:
samtranslator.model.exceptions.prepend(e, "dict contained an invalid key")
raise
return False
try:
validate_item(item)
except TypeError as e:
if should_raise:
samtranslator.model.exceptions.prepend(e, "dict contained an invalid value")
raise
return False
return True
return validate |
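A hedged usage sketch for dict_of; is_type below is a minimal stand-in for the validator referenced in the record (just enough to exercise dict_of on valid input), and the key/value names are invented:

def is_type(expected):
    def validate(value, should_raise=True):
        if not isinstance(value, expected):
            if should_raise:
                raise TypeError("expected {}".format(expected.__name__))
            return False
        return True
    return validate

validate_settings = dict_of(is_type(str), is_type(int))
print(validate_settings({'retries': 3, 'timeout': 30}))  # True
# a value of the wrong type (e.g. 'three') fails validation; in the original
# module the TypeError is re-raised with added context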
def upvote(self):
"""
Upvote :class:`Issue`.
"""
self.requester.post(
'/{endpoint}/{id}/upvote',
endpoint=self.endpoint, id=self.id
)
return self | Upvote :class:`Issue`. | Below is the the instruction that describes the task:
### Input:
Upvote :class:`Issue`.
### Response:
def upvote(self):
"""
Upvote :class:`Issue`.
"""
self.requester.post(
'/{endpoint}/{id}/upvote',
endpoint=self.endpoint, id=self.id
)
return self |
def plot_neuron_on_density(population, # pylint: disable=too-many-arguments
bins=100, new_fig=True, subplot=111, levels=None, plane='xy',
colorlabel='Nodes per unit area', labelfontsize=16,
color_map='Reds', no_colorbar=False, threshold=0.01,
neurite_type=NeuriteType.basal_dendrite, **kwargs):
'''Plots the 2d histogram of the center
coordinates of segments in the selected plane
and superimposes the view of the first neurite of the collection.
'''
_, ax = common.get_figure(new_fig=new_fig)
view.plot_tree(ax, population.neurites[0])
return plot_density(population, plane=plane, bins=bins, new_fig=False, subplot=subplot,
colorlabel=colorlabel, labelfontsize=labelfontsize, levels=levels,
color_map=color_map, no_colorbar=no_colorbar, threshold=threshold,
neurite_type=neurite_type, **kwargs) | Plots the 2d histogram of the center
coordinates of segments in the selected plane
and superimposes the view of the first neurite of the collection. | Below is the the instruction that describes the task:
### Input:
Plots the 2d histogram of the center
coordinates of segments in the selected plane
and superimposes the view of the first neurite of the collection.
### Response:
def plot_neuron_on_density(population, # pylint: disable=too-many-arguments
bins=100, new_fig=True, subplot=111, levels=None, plane='xy',
colorlabel='Nodes per unit area', labelfontsize=16,
color_map='Reds', no_colorbar=False, threshold=0.01,
neurite_type=NeuriteType.basal_dendrite, **kwargs):
'''Plots the 2d histogram of the center
coordinates of segments in the selected plane
and superimposes the view of the first neurite of the collection.
'''
_, ax = common.get_figure(new_fig=new_fig)
view.plot_tree(ax, population.neurites[0])
return plot_density(population, plane=plane, bins=bins, new_fig=False, subplot=subplot,
colorlabel=colorlabel, labelfontsize=labelfontsize, levels=levels,
color_map=color_map, no_colorbar=no_colorbar, threshold=threshold,
neurite_type=neurite_type, **kwargs) |
def normalize_lons(l1, l2):
"""
An international date line safe way of returning a range of longitudes.
>>> normalize_lons(20, 30) # no IDL within the range
[(20, 30)]
>>> normalize_lons(-17, +17) # no IDL within the range
[(-17, 17)]
>>> normalize_lons(-178, +179)
[(-180, -178), (179, 180)]
>>> normalize_lons(178, -179)
[(-180, -179), (178, 180)]
>>> normalize_lons(179, -179)
[(-180, -179), (179, 180)]
>>> normalize_lons(177, -176)
[(-180, -176), (177, 180)]
"""
if l1 > l2: # exchange lons
l1, l2 = l2, l1
delta = l2 - l1
if l1 < 0 and l2 > 0 and delta > 180:
return [(-180, l1), (l2, 180)]
elif l1 > 0 and l2 > 180 and delta < 180:
return [(l1, 180), (-180, l2 - 360)]
elif l1 < -180 and l2 < 0 and delta < 180:
return [(l1 + 360, 180), (l2, -180)]
return [(l1, l2)] | An international date line safe way of returning a range of longitudes.
>>> normalize_lons(20, 30) # no IDL within the range
[(20, 30)]
>>> normalize_lons(-17, +17) # no IDL within the range
[(-17, 17)]
>>> normalize_lons(-178, +179)
[(-180, -178), (179, 180)]
>>> normalize_lons(178, -179)
[(-180, -179), (178, 180)]
>>> normalize_lons(179, -179)
[(-180, -179), (179, 180)]
>>> normalize_lons(177, -176)
[(-180, -176), (177, 180)] | Below is the the instruction that describes the task:
### Input:
An international date line safe way of returning a range of longitudes.
>>> normalize_lons(20, 30) # no IDL within the range
[(20, 30)]
>>> normalize_lons(-17, +17) # no IDL within the range
[(-17, 17)]
>>> normalize_lons(-178, +179)
[(-180, -178), (179, 180)]
>>> normalize_lons(178, -179)
[(-180, -179), (178, 180)]
>>> normalize_lons(179, -179)
[(-180, -179), (179, 180)]
>>> normalize_lons(177, -176)
[(-180, -176), (177, 180)]
### Response:
def normalize_lons(l1, l2):
"""
An international date line safe way of returning a range of longitudes.
>>> normalize_lons(20, 30) # no IDL within the range
[(20, 30)]
>>> normalize_lons(-17, +17) # no IDL within the range
[(-17, 17)]
>>> normalize_lons(-178, +179)
[(-180, -178), (179, 180)]
>>> normalize_lons(178, -179)
[(-180, -179), (178, 180)]
>>> normalize_lons(179, -179)
[(-180, -179), (179, 180)]
>>> normalize_lons(177, -176)
[(-180, -176), (177, 180)]
"""
if l1 > l2: # exchange lons
l1, l2 = l2, l1
delta = l2 - l1
if l1 < 0 and l2 > 0 and delta > 180:
return [(-180, l1), (l2, 180)]
elif l1 > 0 and l2 > 180 and delta < 180:
return [(l1, 180), (-180, l2 - 360)]
elif l1 < -180 and l2 < 0 and delta < 180:
return [(l1 + 360, 180), (l2, -180)]
return [(l1, l2)] |
def encryptfile(filename, passphrase, algo='srp'):
"""
Encrypt a file and write it with .cryptoshop extension.
:param filename: a string with the path to the file to encrypt.
:param passphrase: a string with the user passphrase.
:param algo: a string with the algorithm. Can be srp, aes, twf. Default is srp.
:return: a string with "successfully encrypted" or error.
"""
try:
if algo == "srp":
header = b"Cryptoshop srp " + b_version
crypto_algo = "Serpent/GCM"
if algo == "aes":
header = b"Cryptoshop aes " + b_version
crypto_algo = "AES-256/GCM"
if algo == "twf":
header = b"Cryptoshop twf " + b_version
crypto_algo = "Twofish/GCM"
if algo != "srp" and algo != "aes" and algo != "twf":
return "No valid algo. Use 'srp' 'aes' or 'twf'"
outname = filename + ".cryptoshop"
internal_key = botan.rng().get(internal_key_length)
# Passphrase derivation...
salt = botan.rng().get(__salt_size__)
masterkey = calc_derivation(passphrase=passphrase, salt=salt)
# Encrypt internal key...
encrypted_key = encry_decry_cascade(data=internal_key, masterkey=masterkey,
bool_encry=True,
assoc_data=header)
with open(filename, 'rb') as filestream:
file_size = os.stat(filename).st_size
if file_size == 0:
raise Exception("Error: You can't encrypt empty file.")
with open(str(outname), 'wb') as filestreamout:
filestreamout.write(header)
filestreamout.write(salt)
filestreamout.write(encrypted_key)
finished = False
# the maximum of the progress bar is the total chunk to process. It's files_size // chunk_size
bar = tqdm(range(file_size // __chunk_size__))
while not finished:
chunk = filestream.read(__chunk_size__)
if len(chunk) == 0 or len(chunk) % __chunk_size__ != 0:
finished = True
# An encrypted-chunk output is nonce, gcmtag, and cipher-chunk concatenation.
encryptedchunk = encry_decry_chunk(chunk=chunk, key=internal_key, bool_encry=True,
algo=crypto_algo, assoc_data=header)
filestreamout.write(encryptedchunk)
bar.update(1)
return "successfully encrypted"
except IOError:
exit("Error: file \"" + filename + "\" was not found.") | Encrypt a file and write it with .cryptoshop extension.
:param filename: a string with the path to the file to encrypt.
:param passphrase: a string with the user passphrase.
:param algo: a string with the algorithm. Can be srp, aes, twf. Default is srp.
:return: a string with "successfully encrypted" or error. | Below is the the instruction that describes the task:
### Input:
Encrypt a file and write it with .cryptoshop extension.
:param filename: a string with the path to the file to encrypt.
:param passphrase: a string with the user passphrase.
:param algo: a string with the algorithm. Can be srp, aes, twf. Default is srp.
:return: a string with "successfully encrypted" or error.
### Response:
def encryptfile(filename, passphrase, algo='srp'):
"""
Encrypt a file and write it with .cryptoshop extension.
:param filename: a string with the path to the file to encrypt.
:param passphrase: a string with the user passphrase.
:param algo: a string with the algorithm. Can be srp, aes, twf. Default is srp.
:return: a string with "successfully encrypted" or error.
"""
try:
if algo == "srp":
header = b"Cryptoshop srp " + b_version
crypto_algo = "Serpent/GCM"
if algo == "aes":
header = b"Cryptoshop aes " + b_version
crypto_algo = "AES-256/GCM"
if algo == "twf":
header = b"Cryptoshop twf " + b_version
crypto_algo = "Twofish/GCM"
if algo != "srp" and algo != "aes" and algo != "twf":
return "No valid algo. Use 'srp' 'aes' or 'twf'"
outname = filename + ".cryptoshop"
internal_key = botan.rng().get(internal_key_length)
# Passphrase derivation...
salt = botan.rng().get(__salt_size__)
masterkey = calc_derivation(passphrase=passphrase, salt=salt)
# Encrypt internal key...
encrypted_key = encry_decry_cascade(data=internal_key, masterkey=masterkey,
bool_encry=True,
assoc_data=header)
with open(filename, 'rb') as filestream:
file_size = os.stat(filename).st_size
if file_size == 0:
raise Exception("Error: You can't encrypt empty file.")
with open(str(outname), 'wb') as filestreamout:
filestreamout.write(header)
filestreamout.write(salt)
filestreamout.write(encrypted_key)
finished = False
# the maximum of the progress bar is the total chunk to process. It's files_size // chunk_size
bar = tqdm(range(file_size // __chunk_size__))
while not finished:
chunk = filestream.read(__chunk_size__)
if len(chunk) == 0 or len(chunk) % __chunk_size__ != 0:
finished = True
# An encrypted-chunk output is nonce, gcmtag, and cipher-chunk concatenation.
encryptedchunk = encry_decry_chunk(chunk=chunk, key=internal_key, bool_encry=True,
algo=crypto_algo, assoc_data=header)
filestreamout.write(encryptedchunk)
bar.update(1)
return "successfully encrypted"
except IOError:
exit("Error: file \"" + filename + "\" was not found.") |
def logItems(self, level=logging.DEBUG):
""" rootItem
"""
rootItem = self.rootItem()
if rootItem is None:
logger.debug("No items in: {}".format(self))
else:
rootItem.logBranch(level=level) | rootItem | Below is the the instruction that describes the task:
### Input:
rootItem
### Response:
def logItems(self, level=logging.DEBUG):
""" rootItem
"""
rootItem = self.rootItem()
if rootItem is None:
logger.debug("No items in: {}".format(self))
else:
rootItem.logBranch(level=level) |
def repo(
state, host, name, baseurl,
present=True, description=None, enabled=True, gpgcheck=True, gpgkey=None,
):
'''
Add/remove/update yum repositories.
+ name: filename for the repo (in ``/etc/yum/repos.d/``)
+ baseurl: the baseurl of the repo
+ present: whether the ``.repo`` file should be present
+ description: optional verbose description
+ gpgcheck: whether set ``gpgcheck=1``
+ gpgkey: the URL to the gpg key for this repo
'''
# Description defaults to name
description = description or name
filename = '/etc/yum.repos.d/{0}.repo'.format(name)
# If we don't want the repo, just remove any existing file
if not present:
yield files.file(state, host, filename, present=False)
return
# Build the repo file from string
repo_lines = [
'[{0}]'.format(name),
'name={0}'.format(description),
'baseurl={0}'.format(baseurl),
'enabled={0}'.format(1 if enabled else 0),
'gpgcheck={0}'.format(1 if gpgcheck else 0),
]
if gpgkey:
repo_lines.append('gpgkey={0}'.format(gpgkey))
repo_lines.append('')
repo = '\n'.join(repo_lines)
repo = StringIO(repo)
# Ensure this is the file on the server
yield files.put(state, host, repo, filename) | Add/remove/update yum repositories.
+ name: filename for the repo (in ``/etc/yum/repos.d/``)
+ baseurl: the baseurl of the repo
+ present: whether the ``.repo`` file should be present
+ description: optional verbose description
+ gpgcheck: whether set ``gpgcheck=1``
+ gpgkey: the URL to the gpg key for this repo | Below is the the instruction that describes the task:
### Input:
Add/remove/update yum repositories.
+ name: filename for the repo (in ``/etc/yum/repos.d/``)
+ baseurl: the baseurl of the repo
+ present: whether the ``.repo`` file should be present
+ description: optional verbose description
+ gpgcheck: whether set ``gpgcheck=1``
+ gpgkey: the URL to the gpg key for this repo
### Response:
def repo(
state, host, name, baseurl,
present=True, description=None, enabled=True, gpgcheck=True, gpgkey=None,
):
'''
Add/remove/update yum repositories.
+ name: filename for the repo (in ``/etc/yum/repos.d/``)
+ baseurl: the baseurl of the repo
+ present: whether the ``.repo`` file should be present
+ description: optional verbose description
+ gpgcheck: whether set ``gpgcheck=1``
+ gpgkey: the URL to the gpg key for this repo
'''
# Description defaults to name
description = description or name
filename = '/etc/yum.repos.d/{0}.repo'.format(name)
# If we don't want the repo, just remove any existing file
if not present:
yield files.file(state, host, filename, present=False)
return
# Build the repo file from string
repo_lines = [
'[{0}]'.format(name),
'name={0}'.format(description),
'baseurl={0}'.format(baseurl),
'enabled={0}'.format(1 if enabled else 0),
'gpgcheck={0}'.format(1 if gpgcheck else 0),
]
if gpgkey:
repo_lines.append('gpgkey={0}'.format(gpgkey))
repo_lines.append('')
repo = '\n'.join(repo_lines)
repo = StringIO(repo)
# Ensure this is the file on the server
yield files.put(state, host, repo, filename) |
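For reference, this is what the operation above renders for an invented example repo, reproducing the same line-building logic outside pyinfra:

name, description = 'example-repo', 'Example repository'
repo_lines = [
    '[{0}]'.format(name),
    'name={0}'.format(description),
    'baseurl=https://mirror.example.com/el7/',
    'enabled=1',
    'gpgcheck=1',
    'gpgkey=https://mirror.example.com/RPM-GPG-KEY-example',
    '',
]
print('\n'.join(repo_lines))
# [example-repo]
# name=Example repository
# baseurl=https://mirror.example.com/el7/
# enabled=1
# gpgcheck=1
# gpgkey=https://mirror.example.com/RPM-GPG-KEY-example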
def prettify_metrics(metrics: List[Tuple[str, float]], precision: int = 4) -> OrderedDict:
"""Prettifies the dictionary of metrics."""
prettified_metrics = OrderedDict()
for key, value in metrics:
value = round(value, precision)
prettified_metrics[key] = value
return prettified_metrics | Prettifies the dictionary of metrics. | Below is the the instruction that describes the task:
### Input:
Prettifies the dictionary of metrics.
### Response:
def prettify_metrics(metrics: List[Tuple[str, float]], precision: int = 4) -> OrderedDict:
"""Prettifies the dictionary of metrics."""
prettified_metrics = OrderedDict()
for key, value in metrics:
value = round(value, precision)
prettified_metrics[key] = value
return prettified_metrics |
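A usage sketch for prettify_metrics, assuming the function above (and its OrderedDict import) is in scope; the metric names and values are invented:

metrics = [('accuracy', 0.912345), ('f1', 0.87654321)]
print(prettify_metrics(metrics))               # accuracy -> 0.9123, f1 -> 0.8765
print(prettify_metrics(metrics, precision=2))  # accuracy -> 0.91,   f1 -> 0.88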
def get_table(self, id_or_name):
"""
Retrieves a Table by id or name
:param str id_or_name: The id or name of the column
:return: a Table instance
"""
url = self.build_url(self._endpoints.get('get_table').format(id=id_or_name))
response = self.session.get(url)
if not response:
return None
return self.table_constructor(parent=self, **{self._cloud_data_key: response.json()}) | Retrieves a Table by id or name
:param str id_or_name: The id or name of the column
:return: a Table instance | Below is the the instruction that describes the task:
### Input:
Retrieves a Table by id or name
:param str id_or_name: The id or name of the column
:return: a Table instance
### Response:
def get_table(self, id_or_name):
"""
Retrieves a Table by id or name
:param str id_or_name: The id or name of the column
:return: a Table instance
"""
url = self.build_url(self._endpoints.get('get_table').format(id=id_or_name))
response = self.session.get(url)
if not response:
return None
return self.table_constructor(parent=self, **{self._cloud_data_key: response.json()}) |
def extract_hosted_zip(data_url, save_dir, exclude_term=None):
"""Downloads, then extracts a zip file."""
zip_name = os.path.join(save_dir, 'temp.zip')
# get the zip file
try:
print('Downloading %r to %r' % (data_url, zip_name))
zip_name, hdrs = urllib.request.urlretrieve(url=data_url, filename=zip_name)
print('Download successfully completed')
except IOError as e:
print("Could not successfully retrieve %r" % data_url)
raise e
# extract, then remove temp file
extract_zip(zip_name=zip_name, exclude_term=exclude_term)
os.unlink(zip_name)
print("Extraction Complete") | Downloads, then extracts a zip file. | Below is the the instruction that describes the task:
### Input:
Downloads, then extracts a zip file.
### Response:
def extract_hosted_zip(data_url, save_dir, exclude_term=None):
"""Downloads, then extracts a zip file."""
zip_name = os.path.join(save_dir, 'temp.zip')
# get the zip file
try:
print('Downloading %r to %r' % (data_url, zip_name))
zip_name, hdrs = urllib.request.urlretrieve(url=data_url, filename=zip_name)
print('Download successfully completed')
except IOError as e:
print("Could not successfully retrieve %r" % data_url)
raise e
# extract, then remove temp file
extract_zip(zip_name=zip_name, exclude_term=exclude_term)
os.unlink(zip_name)
print("Extraction Complete") |
def build(self, response):
"""
Deserialize the returned objects and return either a single Zenpy object, or a ResultGenerator in
the case of multiple results.
:param response: the requests Response object.
"""
response_json = response.json()
# Special case for ticket audits.
if get_endpoint_path(self.api, response).startswith('/ticket_audits.json'):
return TicketAuditGenerator(self, response_json)
zenpy_objects = self.deserialize(response_json)
# Collection of objects (eg, users/tickets)
plural_object_type = as_plural(self.api.object_type)
if plural_object_type in zenpy_objects:
return ZendeskResultGenerator(self, response_json, response_objects=zenpy_objects[plural_object_type])
# Here the response matches the API object_type, seems legit.
if self.api.object_type in zenpy_objects:
return zenpy_objects[self.api.object_type]
# Could be anything, if we know of this object then return it.
for zenpy_object_name in self.object_mapping.class_mapping:
if zenpy_object_name in zenpy_objects:
return zenpy_objects[zenpy_object_name]
# Maybe a collection of known objects?
for zenpy_object_name in self.object_mapping.class_mapping:
plural_zenpy_object_name = as_plural(zenpy_object_name)
if plural_zenpy_object_name in zenpy_objects:
return ZendeskResultGenerator(self, response_json, object_type=plural_zenpy_object_name)
# Bummer, bail out.
raise ZenpyException("Unknown Response: " + str(response_json)) | Deserialize the returned objects and return either a single Zenpy object, or a ResultGenerator in
the case of multiple results.
:param response: the requests Response object. | Below is the the instruction that describes the task:
### Input:
Deserialize the returned objects and return either a single Zenpy object, or a ResultGenerator in
the case of multiple results.
:param response: the requests Response object.
### Response:
def build(self, response):
"""
Deserialize the returned objects and return either a single Zenpy object, or a ResultGenerator in
the case of multiple results.
:param response: the requests Response object.
"""
response_json = response.json()
# Special case for ticket audits.
if get_endpoint_path(self.api, response).startswith('/ticket_audits.json'):
return TicketAuditGenerator(self, response_json)
zenpy_objects = self.deserialize(response_json)
# Collection of objects (eg, users/tickets)
plural_object_type = as_plural(self.api.object_type)
if plural_object_type in zenpy_objects:
return ZendeskResultGenerator(self, response_json, response_objects=zenpy_objects[plural_object_type])
# Here the response matches the API object_type, seems legit.
if self.api.object_type in zenpy_objects:
return zenpy_objects[self.api.object_type]
# Could be anything, if we know of this object then return it.
for zenpy_object_name in self.object_mapping.class_mapping:
if zenpy_object_name in zenpy_objects:
return zenpy_objects[zenpy_object_name]
# Maybe a collection of known objects?
for zenpy_object_name in self.object_mapping.class_mapping:
plural_zenpy_object_name = as_plural(zenpy_object_name)
if plural_zenpy_object_name in zenpy_objects:
return ZendeskResultGenerator(self, response_json, object_type=plural_zenpy_object_name)
# Bummer, bail out.
raise ZenpyException("Unknown Response: " + str(response_json)) |
def playbooks(playbook, rundir=None, check=False, diff=False, extra_vars=None,
flush_cache=False, forks=5, inventory=None, limit=None,
list_hosts=False, list_tags=False, list_tasks=False,
module_path=None, skip_tags=None, start_at_task=None,
syntax_check=False, tags=None, playbook_kwargs=None):
'''
Run Ansible Playbooks
:param playbook: Which playbook to run.
:param rundir: Directory to run `ansible-playbook` in. (Default: None)
:param check: don't make any changes; instead, try to predict some
of the changes that may occur (Default: False)
:param diff: when changing (small) files and templates, show the
differences in those files; works great with --check
(default: False)
:param extra_vars: set additional variables as key=value or YAML/JSON, if
filename prepend with @, (default: None)
:param flush_cache: clear the fact cache for every host in inventory
(default: False)
:param forks: specify number of parallel processes to use
(Default: 5)
:param inventory: specify inventory host path or comma separated host
list. (Default: None) (Ansible's default is /etc/ansible/hosts)
:param limit: further limit selected hosts to an additional pattern (Default: None)
:param list_hosts: outputs a list of matching hosts; does not execute anything else
(Default: False)
:param list_tags: list all available tags (Default: False)
:param list_tasks: list all tasks that would be executed (Default: False)
:param module_path: prepend colon-separated path(s) to module library. (Default: None)
:param skip_tags: only run plays and tasks whose tags do not match these
values (Default: False)
:param start_at_task: start the playbook at the task matching this name (Default: None)
:param: syntax_check: perform a syntax check on the playbook, but do not execute it
(Default: False)
:param tags: only run plays and tasks tagged with these values (Default: None)
:return: Playbook return
CLI Example:
.. code-block:: bash
salt 'ansiblehost' ansible.playbook playbook=/srv/playbooks/play.yml
'''
command = ['ansible-playbook', playbook]
if check:
command.append('--check')
if diff:
command.append('--diff')
if isinstance(extra_vars, dict):
command.append("--extra-vars='{0}'".format(json.dumps(extra_vars)))
elif isinstance(extra_vars, six.text_type) and extra_vars.startswith('@'):
command.append('--extra-vars={0}'.format(extra_vars))
if flush_cache:
command.append('--flush-cache')
if inventory:
command.append('--inventory={0}'.format(inventory))
if limit:
command.append('--limit={0}'.format(limit))
if list_hosts:
command.append('--list-hosts')
if list_tags:
command.append('--list-tags')
if list_tasks:
command.append('--list-tasks')
if module_path:
command.append('--module-path={0}'.format(module_path))
if skip_tags:
command.append('--skip-tags={0}'.format(skip_tags))
if start_at_task:
command.append('--start-at-task={0}'.format(start_at_task))
if syntax_check:
command.append('--syntax-check')
if tags:
command.append('--tags={0}'.format(tags))
if playbook_kwargs:
for key, value in six.iteritems(playbook_kwargs):
key = key.replace('_', '-')
if value is True:
command.append('--{0}'.format(key))
elif isinstance(value, six.text_type):
command.append('--{0}={1}'.format(key, value))
elif isinstance(value, dict):
command.append('--{0}={1}'.format(key, json.dumps(value)))
command.append('--forks={0}'.format(forks))
cmd_kwargs = {
'env': {'ANSIBLE_STDOUT_CALLBACK': 'json', 'ANSIBLE_RETRY_FILES_ENABLED': '0'},
'cwd': rundir,
'cmd': ' '.join(command)
}
ret = __salt__['cmd.run_all'](**cmd_kwargs)
log.debug('Ansible Playbook Return: %s', ret)
retdata = json.loads(ret['stdout'])
if ret['retcode']:
__context__['retcode'] = ret['retcode']
return retdata | Run Ansible Playbooks
:param playbook: Which playbook to run.
:param rundir: Directory to run `ansible-playbook` in. (Default: None)
:param check: don't make any changes; instead, try to predict some
of the changes that may occur (Default: False)
:param diff: when changing (small) files and templates, show the
differences in those files; works great with --check
(default: False)
:param extra_vars: set additional variables as key=value or YAML/JSON, if
filename prepend with @, (default: None)
:param flush_cache: clear the fact cache for every host in inventory
(default: False)
:param forks: specify number of parallel processes to use
(Default: 5)
:param inventory: specify inventory host path or comma separated host
list. (Default: None) (Ansible's default is /etc/ansible/hosts)
:param limit: further limit selected hosts to an additional pattern (Default: None)
:param list_hosts: outputs a list of matching hosts; does not execute anything else
(Default: False)
:param list_tags: list all available tags (Default: False)
:param list_tasks: list all tasks that would be executed (Default: False)
:param module_path: prepend colon-separated path(s) to module library. (Default: None)
:param skip_tags: only run plays and tasks whose tags do not match these
values (Default: False)
:param start_at_task: start the playbook at the task matching this name (Default: None)
:param: syntax_check: perform a syntax check on the playbook, but do not execute it
(Default: False)
:param tags: only run plays and tasks tagged with these values (Default: None)
:return: Playbook return
CLI Example:
.. code-block:: bash
salt 'ansiblehost' ansible.playbook playbook=/srv/playbooks/play.yml | Below is the the instruction that describes the task:
### Input:
Run Ansible Playbooks
:param playbook: Which playbook to run.
:param rundir: Directory to run `ansible-playbook` in. (Default: None)
:param check: don't make any changes; instead, try to predict some
of the changes that may occur (Default: False)
:param diff: when changing (small) files and templates, show the
differences in those files; works great with --check
(default: False)
:param extra_vars: set additional variables as key=value or YAML/JSON, if
filename prepend with @, (default: None)
:param flush_cache: clear the fact cache for every host in inventory
(default: False)
:param forks: specify number of parallel processes to use
(Default: 5)
:param inventory: specify inventory host path or comma separated host
list. (Default: None) (Ansible's default is /etc/ansible/hosts)
:param limit: further limit selected hosts to an additional pattern (Default: None)
:param list_hosts: outputs a list of matching hosts; does not execute anything else
(Default: False)
:param list_tags: list all available tags (Default: False)
:param list_tasks: list all tasks that would be executed (Default: False)
:param module_path: prepend colon-separated path(s) to module library. (Default: None)
:param skip_tags: only run plays and tasks whose tags do not match these
values (Default: False)
:param start_at_task: start the playbook at the task matching this name (Default: None)
:param: syntax_check: perform a syntax check on the playbook, but do not execute it
(Default: False)
:param tags: only run plays and tasks tagged with these values (Default: None)
:return: Playbook return
CLI Example:
.. code-block:: bash
salt 'ansiblehost' ansible.playbook playbook=/srv/playbooks/play.yml
### Response:
def playbooks(playbook, rundir=None, check=False, diff=False, extra_vars=None,
flush_cache=False, forks=5, inventory=None, limit=None,
list_hosts=False, list_tags=False, list_tasks=False,
module_path=None, skip_tags=None, start_at_task=None,
syntax_check=False, tags=None, playbook_kwargs=None):
'''
Run Ansible Playbooks
:param playbook: Which playbook to run.
:param rundir: Directory to run `ansible-playbook` in. (Default: None)
:param check: don't make any changes; instead, try to predict some
of the changes that may occur (Default: False)
:param diff: when changing (small) files and templates, show the
differences in those files; works great with --check
(default: False)
:param extra_vars: set additional variables as key=value or YAML/JSON, if
filename prepend with @, (default: None)
:param flush_cache: clear the fact cache for every host in inventory
(default: False)
:param forks: specify number of parallel processes to use
(Default: 5)
:param inventory: specify inventory host path or comma separated host
list. (Default: None) (Ansible's default is /etc/ansible/hosts)
:param limit: further limit selected hosts to an additional pattern (Default: None)
:param list_hosts: outputs a list of matching hosts; does not execute anything else
(Default: False)
:param list_tags: list all available tags (Default: False)
:param list_tasks: list all tasks that would be executed (Default: False)
:param module_path: prepend colon-separated path(s) to module library. (Default: None)
:param skip_tags: only run plays and tasks whose tags do not match these
values (Default: False)
:param start_at_task: start the playbook at the task matching this name (Default: None)
    :param syntax_check: perform a syntax check on the playbook, but do not execute it
(Default: False)
:param tags: only run plays and tasks tagged with these values (Default: None)
:return: Playbook return
CLI Example:
.. code-block:: bash
salt 'ansiblehost' ansible.playbook playbook=/srv/playbooks/play.yml
'''
command = ['ansible-playbook', playbook]
if check:
command.append('--check')
if diff:
command.append('--diff')
if isinstance(extra_vars, dict):
command.append("--extra-vars='{0}'".format(json.dumps(extra_vars)))
elif isinstance(extra_vars, six.text_type) and extra_vars.startswith('@'):
command.append('--extra-vars={0}'.format(extra_vars))
if flush_cache:
command.append('--flush-cache')
if inventory:
command.append('--inventory={0}'.format(inventory))
if limit:
command.append('--limit={0}'.format(limit))
if list_hosts:
command.append('--list-hosts')
if list_tags:
command.append('--list-tags')
if list_tasks:
command.append('--list-tasks')
if module_path:
command.append('--module-path={0}'.format(module_path))
if skip_tags:
command.append('--skip-tags={0}'.format(skip_tags))
if start_at_task:
command.append('--start-at-task={0}'.format(start_at_task))
if syntax_check:
command.append('--syntax-check')
if tags:
command.append('--tags={0}'.format(tags))
if playbook_kwargs:
for key, value in six.iteritems(playbook_kwargs):
key = key.replace('_', '-')
if value is True:
command.append('--{0}'.format(key))
elif isinstance(value, six.text_type):
command.append('--{0}={1}'.format(key, value))
elif isinstance(value, dict):
command.append('--{0}={1}'.format(key, json.dumps(value)))
command.append('--forks={0}'.format(forks))
cmd_kwargs = {
'env': {'ANSIBLE_STDOUT_CALLBACK': 'json', 'ANSIBLE_RETRY_FILES_ENABLED': '0'},
'cwd': rundir,
'cmd': ' '.join(command)
}
ret = __salt__['cmd.run_all'](**cmd_kwargs)
log.debug('Ansible Playbook Return: %s', ret)
retdata = json.loads(ret['stdout'])
if ret['retcode']:
__context__['retcode'] = ret['retcode']
return retdata |
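A minimal sketch of calling the `playbooks` function above directly from Python rather than through the salt CLI; the playbook path, inventory file, and variable name here are hypothetical, and the comments map each argument to the command-building code above.

# Hypothetical direct call; assumes play.yml and ./hosts exist under /srv/playbooks.
result = playbooks(
    'play.yml',
    rundir='/srv/playbooks',              # ansible-playbook is run from this directory
    inventory='./hosts',                  # --inventory=./hosts
    extra_vars={'app_version': '1.2.3'},  # --extra-vars='{"app_version": "1.2.3"}'
    check=True,                           # dry run: --check
)
# `result` is the parsed JSON produced by Ansible's json stdout callback.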
def _locate(self, name):
"""
Gets a dependency locator by its name.
:param name: the name of the dependency to locate.
:return: the dependency locator or null if locator was not configured.
"""
        if name is None:
raise Exception("Dependency name cannot be null")
        if self._references is None:
raise Exception("References shall be set")
return self._dependencies.get(name) | Gets a dependency locator by its name.
:param name: the name of the dependency to locate.
:return: the dependency locator or null if locator was not configured. | Below is the the instruction that describes the task:
### Input:
Gets a dependency locator by its name.
:param name: the name of the dependency to locate.
:return: the dependency locator or null if locator was not configured.
### Response:
def _locate(self, name):
"""
Gets a dependency locator by its name.
:param name: the name of the dependency to locate.
:return: the dependency locator or null if locator was not configured.
"""
        if name is None:
raise Exception("Dependency name cannot be null")
        if self._references is None:
raise Exception("References shall be set")
return self._dependencies.get(name) |
def blast(request, blast_form, template_init, template_result, blast_commandline, sample_fasta_path,
extra_context=None):
"""
Process blastn/tblastn (blast+) query or set up initial blast form.
"""
if request.method == 'POST':
form = blast_form(request.POST)
if form.is_valid():
query_file_object_tmp = form.cleaned_data['sequence_in_form']
evalue = float(form.cleaned_data['evalue_in_form'])
word_size = int(form.cleaned_data['word_size_in_form'])
database_path = str(form.cleaned_data['blast_db_in_form'])
standard_opt_dic = {'query': query_file_object_tmp, 'evalue': evalue, 'outfmt': 5, 'db': database_path,
'word_size': word_size}
annotated = utils.get_annotation(database_path, BLAST_DB_NUCL_LIST)
# none standard options:
try:
matrix = str(form.cleaned_data['matrix_in_form'])
standard_opt_dic["matrix"] = matrix
except:
pass
sensitivity_opt_dic = ast.literal_eval(str(form.cleaned_data['search_sensitivity_in_form']))
blast_records__file_xml = None
try:
# blast search, parse results from temp file, put them into template for rendering.
blast_records__file_xml, blast_error = utils.run_blast_commands(blast_commandline,
**dict(standard_opt_dic,
**sensitivity_opt_dic))
if len(blast_error) > 0:
return render(request=request, template_name=template_result,
context={"blast_record": '', blast_error: BLAST_CORRECT_PARAMS})
else:
# converts blast results into objects and pack into list
blast_records_in_object_and_list = utils.blast_records_to_object(
list(NCBIXML.parse(blast_records__file_xml)))
# user defined function to modify blast results
# e.g. join blast results with external database in template
if extra_context is not None:
blast_records_in_object_and_list = extra_context(blast_records_in_object_and_list)
return render(request=request, template_name=template_result,
context={'application': blast_records_in_object_and_list[0].application,
'version': blast_records_in_object_and_list[0].version,
'blast_records': blast_records_in_object_and_list,
'annotated': annotated})
finally:
# remove result - temporary file
if blast_records__file_xml is not None:
os.remove(blast_records__file_xml.name)
else:
form = blast_form(initial={'sequence_in_form': '', 'evalue_in_form': EVALUE_BLAST_DEFAULT})
return render(request=request, template_name=template_init,
context={'form': form, 'sequence_sample_in_fasta': utils.get_sample_data(sample_fasta_path),
"blast_max_number_seq_in_input": BLAST_MAX_NUMBER_SEQ_IN_INPUT, }) | Process blastn/tblastn (blast+) query or set up initial blast form. | Below is the the instruction that describes the task:
### Input:
Process blastn/tblastn (blast+) query or set up initial blast form.
### Response:
def blast(request, blast_form, template_init, template_result, blast_commandline, sample_fasta_path,
extra_context=None):
"""
Process blastn/tblastn (blast+) query or set up initial blast form.
"""
if request.method == 'POST':
form = blast_form(request.POST)
if form.is_valid():
query_file_object_tmp = form.cleaned_data['sequence_in_form']
evalue = float(form.cleaned_data['evalue_in_form'])
word_size = int(form.cleaned_data['word_size_in_form'])
database_path = str(form.cleaned_data['blast_db_in_form'])
standard_opt_dic = {'query': query_file_object_tmp, 'evalue': evalue, 'outfmt': 5, 'db': database_path,
'word_size': word_size}
annotated = utils.get_annotation(database_path, BLAST_DB_NUCL_LIST)
# none standard options:
try:
matrix = str(form.cleaned_data['matrix_in_form'])
standard_opt_dic["matrix"] = matrix
except:
pass
sensitivity_opt_dic = ast.literal_eval(str(form.cleaned_data['search_sensitivity_in_form']))
blast_records__file_xml = None
try:
# blast search, parse results from temp file, put them into template for rendering.
blast_records__file_xml, blast_error = utils.run_blast_commands(blast_commandline,
**dict(standard_opt_dic,
**sensitivity_opt_dic))
if len(blast_error) > 0:
return render(request=request, template_name=template_result,
context={"blast_record": '', blast_error: BLAST_CORRECT_PARAMS})
else:
# converts blast results into objects and pack into list
blast_records_in_object_and_list = utils.blast_records_to_object(
list(NCBIXML.parse(blast_records__file_xml)))
# user defined function to modify blast results
# e.g. join blast results with external database in template
if extra_context is not None:
blast_records_in_object_and_list = extra_context(blast_records_in_object_and_list)
return render(request=request, template_name=template_result,
context={'application': blast_records_in_object_and_list[0].application,
'version': blast_records_in_object_and_list[0].version,
'blast_records': blast_records_in_object_and_list,
'annotated': annotated})
finally:
# remove result - temporary file
if blast_records__file_xml is not None:
os.remove(blast_records__file_xml.name)
else:
form = blast_form(initial={'sequence_in_form': '', 'evalue_in_form': EVALUE_BLAST_DEFAULT})
return render(request=request, template_name=template_init,
context={'form': form, 'sequence_sample_in_fasta': utils.get_sample_data(sample_fasta_path),
"blast_max_number_seq_in_input": BLAST_MAX_NUMBER_SEQ_IN_INPUT, }) |
def get_request_kwargs(self):
"""Construct keyword parameters for Session.request() and
Session.resolve_redirects()."""
kwargs = dict(stream=True, timeout=self.aggregate.config["timeout"])
if self.proxy:
kwargs["proxies"] = {self.proxytype: self.proxy}
if self.scheme == u"https" and self.aggregate.config["sslverify"]:
kwargs['verify'] = self.aggregate.config["sslverify"]
else:
kwargs['verify'] = False
return kwargs | Construct keyword parameters for Session.request() and
Session.resolve_redirects(). | Below is the the instruction that describes the task:
### Input:
Construct keyword parameters for Session.request() and
Session.resolve_redirects().
### Response:
def get_request_kwargs(self):
"""Construct keyword parameters for Session.request() and
Session.resolve_redirects()."""
kwargs = dict(stream=True, timeout=self.aggregate.config["timeout"])
if self.proxy:
kwargs["proxies"] = {self.proxytype: self.proxy}
if self.scheme == u"https" and self.aggregate.config["sslverify"]:
kwargs['verify'] = self.aggregate.config["sslverify"]
else:
kwargs['verify'] = False
return kwargs |
def get_es(urls=None, timeout=DEFAULT_TIMEOUT, force_new=False, **settings):
"""Create an elasticsearch `Elasticsearch` object and return it.
This will aggressively re-use `Elasticsearch` objects with the
following rules:
1. if you pass the same argument values to `get_es()`, then it
will return the same `Elasticsearch` object
2. if you pass different argument values to `get_es()`, then it
will return different `Elasticsearch` object
3. it caches each `Elasticsearch` object that gets created
4. if you pass in `force_new=True`, then you are guaranteed to get
a fresh `Elasticsearch` object AND that object will not be
cached
:arg urls: list of uris; Elasticsearch hosts to connect to,
defaults to ``['http://localhost:9200']``
:arg timeout: int; the timeout in seconds, defaults to 5
:arg force_new: Forces get_es() to generate a new Elasticsearch
object rather than pulling it from cache.
:arg settings: other settings to pass into Elasticsearch
constructor; See
`<http://elasticsearch-py.readthedocs.org/>`_ for more details.
Examples::
# Returns cached Elasticsearch object
es = get_es()
# Returns a new Elasticsearch object
es = get_es(force_new=True)
es = get_es(urls=['localhost'])
es = get_es(urls=['localhost:9200'], timeout=10,
max_retries=3)
"""
# Cheap way of de-None-ifying things
urls = urls or DEFAULT_URLS
# v0.7: Check for 'hosts' instead of 'urls'. Take this out in v1.0.
if 'hosts' in settings:
raise DeprecationWarning('"hosts" is deprecated in favor of "urls".')
if not force_new:
key = _build_key(urls, timeout, **settings)
if key in _cached_elasticsearch:
return _cached_elasticsearch[key]
es = Elasticsearch(urls, timeout=timeout, **settings)
if not force_new:
# We don't need to rebuild the key here since we built it in
# the previous if block, so it's in the namespace. Having said
# that, this is a little ew.
_cached_elasticsearch[key] = es
return es | Create an elasticsearch `Elasticsearch` object and return it.
This will aggressively re-use `Elasticsearch` objects with the
following rules:
1. if you pass the same argument values to `get_es()`, then it
will return the same `Elasticsearch` object
2. if you pass different argument values to `get_es()`, then it
will return different `Elasticsearch` object
3. it caches each `Elasticsearch` object that gets created
4. if you pass in `force_new=True`, then you are guaranteed to get
a fresh `Elasticsearch` object AND that object will not be
cached
:arg urls: list of uris; Elasticsearch hosts to connect to,
defaults to ``['http://localhost:9200']``
:arg timeout: int; the timeout in seconds, defaults to 5
:arg force_new: Forces get_es() to generate a new Elasticsearch
object rather than pulling it from cache.
:arg settings: other settings to pass into Elasticsearch
constructor; See
`<http://elasticsearch-py.readthedocs.org/>`_ for more details.
Examples::
# Returns cached Elasticsearch object
es = get_es()
# Returns a new Elasticsearch object
es = get_es(force_new=True)
es = get_es(urls=['localhost'])
es = get_es(urls=['localhost:9200'], timeout=10,
max_retries=3) | Below is the the instruction that describes the task:
### Input:
Create an elasticsearch `Elasticsearch` object and return it.
This will aggressively re-use `Elasticsearch` objects with the
following rules:
1. if you pass the same argument values to `get_es()`, then it
will return the same `Elasticsearch` object
2. if you pass different argument values to `get_es()`, then it
will return different `Elasticsearch` object
3. it caches each `Elasticsearch` object that gets created
4. if you pass in `force_new=True`, then you are guaranteed to get
a fresh `Elasticsearch` object AND that object will not be
cached
:arg urls: list of uris; Elasticsearch hosts to connect to,
defaults to ``['http://localhost:9200']``
:arg timeout: int; the timeout in seconds, defaults to 5
:arg force_new: Forces get_es() to generate a new Elasticsearch
object rather than pulling it from cache.
:arg settings: other settings to pass into Elasticsearch
constructor; See
`<http://elasticsearch-py.readthedocs.org/>`_ for more details.
Examples::
# Returns cached Elasticsearch object
es = get_es()
# Returns a new Elasticsearch object
es = get_es(force_new=True)
es = get_es(urls=['localhost'])
es = get_es(urls=['localhost:9200'], timeout=10,
max_retries=3)
### Response:
def get_es(urls=None, timeout=DEFAULT_TIMEOUT, force_new=False, **settings):
"""Create an elasticsearch `Elasticsearch` object and return it.
This will aggressively re-use `Elasticsearch` objects with the
following rules:
1. if you pass the same argument values to `get_es()`, then it
will return the same `Elasticsearch` object
2. if you pass different argument values to `get_es()`, then it
will return different `Elasticsearch` object
3. it caches each `Elasticsearch` object that gets created
4. if you pass in `force_new=True`, then you are guaranteed to get
a fresh `Elasticsearch` object AND that object will not be
cached
:arg urls: list of uris; Elasticsearch hosts to connect to,
defaults to ``['http://localhost:9200']``
:arg timeout: int; the timeout in seconds, defaults to 5
:arg force_new: Forces get_es() to generate a new Elasticsearch
object rather than pulling it from cache.
:arg settings: other settings to pass into Elasticsearch
constructor; See
`<http://elasticsearch-py.readthedocs.org/>`_ for more details.
Examples::
# Returns cached Elasticsearch object
es = get_es()
# Returns a new Elasticsearch object
es = get_es(force_new=True)
es = get_es(urls=['localhost'])
es = get_es(urls=['localhost:9200'], timeout=10,
max_retries=3)
"""
# Cheap way of de-None-ifying things
urls = urls or DEFAULT_URLS
# v0.7: Check for 'hosts' instead of 'urls'. Take this out in v1.0.
if 'hosts' in settings:
raise DeprecationWarning('"hosts" is deprecated in favor of "urls".')
if not force_new:
key = _build_key(urls, timeout, **settings)
if key in _cached_elasticsearch:
return _cached_elasticsearch[key]
es = Elasticsearch(urls, timeout=timeout, **settings)
if not force_new:
# We don't need to rebuild the key here since we built it in
# the previous if block, so it's in the namespace. Having said
# that, this is a little ew.
_cached_elasticsearch[key] = es
return es |
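A short sketch of the caching rules spelled out in the docstring above, assuming the default local cluster URL; constructing the clients does not normally contact the cluster.

es_a = get_es()                  # builds and caches an Elasticsearch client
es_b = get_es()                  # same arguments -> same cached object (rule 1)
assert es_a is es_b
es_c = get_es(timeout=30)        # different arguments -> different object (rule 2)
assert es_c is not es_a
es_d = get_es(force_new=True)    # fresh object, never cached (rule 4)
assert es_d is not es_a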
def reexport_tf_summary():
"""Re-export all symbols from the original tf.summary.
This function finds the original tf.summary V2 API and re-exports all the
symbols from it within this module as well, so that when this module is
patched into the TF API namespace as the new tf.summary, the effect is an
overlay that just adds TensorBoard-provided symbols to the module.
Finding the original tf.summary V2 API module reliably is a challenge, since
this code runs *during* the overall TF API import process and depending on
the order of imports (which is subject to change), different parts of the API
may or may not be defined at the point in time we attempt to access them. This
code also may be inserted into two places in the API (tf and tf.compat.v2)
and may be re-executed multiple times even for the same place in the API (due
to the TF module import system not populating sys.modules properly), so it
needs to be robust to many different scenarios.
The one constraint we can count on is that everywhere this module is loaded
(via the component_api_helper mechanism in TF), it's going to be the 'summary'
submodule of a larger API package that already has a 'summary' attribute
that contains the TF-only summary API symbols we need to re-export. This
may either be the original TF-only summary module (the first time we load
this module) or a pre-existing copy of this module (if we're re-loading this
module again). We don't actually need to differentiate those two cases,
because it's okay if we re-import our own TensorBoard-provided symbols; they
will just be overwritten later on in this file.
So given that guarantee, the approach we take is to first attempt to locate
a TF V2 API package that already has a 'summary' attribute (most likely this
is the parent package into which we're being imported, but not necessarily),
and then do the dynamic version of "from tf_api_package.summary import *".
Lastly, this logic is encapsulated in a function to avoid symbol leakage.
"""
import sys # pylint: disable=g-import-not-at-top
# API packages to check for the original V2 summary API, in preference order
# to avoid going "under the hood" to the _api packages unless necessary.
packages = [
'tensorflow',
'tensorflow.compat.v2',
'tensorflow._api.v2',
'tensorflow._api.v2.compat.v2',
'tensorflow._api.v1.compat.v2',
]
# If we aren't sure we're on V2, don't use tf.summary since it could be V1.
# Note there may be false positives since the __version__ attribute may not be
# defined at this point in the import process.
if not getattr(tf, '__version__', '').startswith('2.'): # noqa: F821
packages.remove('tensorflow')
def dynamic_wildcard_import(module):
"""Implements the logic of "from module import *" for the given module."""
symbols = getattr(module, '__all__', None)
if symbols is None:
symbols = [k for k in module.__dict__.keys() if not k.startswith('_')]
globals().update({symbol: getattr(module, symbol) for symbol in symbols})
notfound = object() # sentinel value
for package_name in packages:
package = sys.modules.get(package_name, notfound)
if package is notfound:
# Either it isn't in this installation at all (e.g. the _api.vX packages
# are only in API version X), it isn't imported yet, or it was imported
# but not inserted into sys.modules under its user-facing name (for the
# non-'_api' packages), at which point we continue down the list to look
# "under the hood" for it via its '_api' package name.
continue
module = getattr(package, 'summary', None)
if module is None:
# This happens if the package hasn't been fully imported yet. For example,
# the 'tensorflow' package won't yet have 'summary' attribute if we are
# loading this code via the 'tensorflow.compat...' path and 'compat' is
# imported before 'summary' in the 'tensorflow' __init__.py file.
continue
# Success, we hope. Import all the public symbols into this module.
dynamic_wildcard_import(module)
return | Re-export all symbols from the original tf.summary.
This function finds the original tf.summary V2 API and re-exports all the
symbols from it within this module as well, so that when this module is
patched into the TF API namespace as the new tf.summary, the effect is an
overlay that just adds TensorBoard-provided symbols to the module.
Finding the original tf.summary V2 API module reliably is a challenge, since
this code runs *during* the overall TF API import process and depending on
the order of imports (which is subject to change), different parts of the API
may or may not be defined at the point in time we attempt to access them. This
code also may be inserted into two places in the API (tf and tf.compat.v2)
and may be re-executed multiple times even for the same place in the API (due
to the TF module import system not populating sys.modules properly), so it
needs to be robust to many different scenarios.
The one constraint we can count on is that everywhere this module is loaded
(via the component_api_helper mechanism in TF), it's going to be the 'summary'
submodule of a larger API package that already has a 'summary' attribute
that contains the TF-only summary API symbols we need to re-export. This
may either be the original TF-only summary module (the first time we load
this module) or a pre-existing copy of this module (if we're re-loading this
module again). We don't actually need to differentiate those two cases,
because it's okay if we re-import our own TensorBoard-provided symbols; they
will just be overwritten later on in this file.
So given that guarantee, the approach we take is to first attempt to locate
a TF V2 API package that already has a 'summary' attribute (most likely this
is the parent package into which we're being imported, but not necessarily),
and then do the dynamic version of "from tf_api_package.summary import *".
Lastly, this logic is encapsulated in a function to avoid symbol leakage. | Below is the the instruction that describes the task:
### Input:
Re-export all symbols from the original tf.summary.
This function finds the original tf.summary V2 API and re-exports all the
symbols from it within this module as well, so that when this module is
patched into the TF API namespace as the new tf.summary, the effect is an
overlay that just adds TensorBoard-provided symbols to the module.
Finding the original tf.summary V2 API module reliably is a challenge, since
this code runs *during* the overall TF API import process and depending on
the order of imports (which is subject to change), different parts of the API
may or may not be defined at the point in time we attempt to access them. This
code also may be inserted into two places in the API (tf and tf.compat.v2)
and may be re-executed multiple times even for the same place in the API (due
to the TF module import system not populating sys.modules properly), so it
needs to be robust to many different scenarios.
The one constraint we can count on is that everywhere this module is loaded
(via the component_api_helper mechanism in TF), it's going to be the 'summary'
submodule of a larger API package that already has a 'summary' attribute
that contains the TF-only summary API symbols we need to re-export. This
may either be the original TF-only summary module (the first time we load
this module) or a pre-existing copy of this module (if we're re-loading this
module again). We don't actually need to differentiate those two cases,
because it's okay if we re-import our own TensorBoard-provided symbols; they
will just be overwritten later on in this file.
So given that guarantee, the approach we take is to first attempt to locate
a TF V2 API package that already has a 'summary' attribute (most likely this
is the parent package into which we're being imported, but not necessarily),
and then do the dynamic version of "from tf_api_package.summary import *".
Lastly, this logic is encapsulated in a function to avoid symbol leakage.
### Response:
def reexport_tf_summary():
"""Re-export all symbols from the original tf.summary.
This function finds the original tf.summary V2 API and re-exports all the
symbols from it within this module as well, so that when this module is
patched into the TF API namespace as the new tf.summary, the effect is an
overlay that just adds TensorBoard-provided symbols to the module.
Finding the original tf.summary V2 API module reliably is a challenge, since
this code runs *during* the overall TF API import process and depending on
the order of imports (which is subject to change), different parts of the API
may or may not be defined at the point in time we attempt to access them. This
code also may be inserted into two places in the API (tf and tf.compat.v2)
and may be re-executed multiple times even for the same place in the API (due
to the TF module import system not populating sys.modules properly), so it
needs to be robust to many different scenarios.
The one constraint we can count on is that everywhere this module is loaded
(via the component_api_helper mechanism in TF), it's going to be the 'summary'
submodule of a larger API package that already has a 'summary' attribute
that contains the TF-only summary API symbols we need to re-export. This
may either be the original TF-only summary module (the first time we load
this module) or a pre-existing copy of this module (if we're re-loading this
module again). We don't actually need to differentiate those two cases,
because it's okay if we re-import our own TensorBoard-provided symbols; they
will just be overwritten later on in this file.
So given that guarantee, the approach we take is to first attempt to locate
a TF V2 API package that already has a 'summary' attribute (most likely this
is the parent package into which we're being imported, but not necessarily),
and then do the dynamic version of "from tf_api_package.summary import *".
Lastly, this logic is encapsulated in a function to avoid symbol leakage.
"""
import sys # pylint: disable=g-import-not-at-top
# API packages to check for the original V2 summary API, in preference order
# to avoid going "under the hood" to the _api packages unless necessary.
packages = [
'tensorflow',
'tensorflow.compat.v2',
'tensorflow._api.v2',
'tensorflow._api.v2.compat.v2',
'tensorflow._api.v1.compat.v2',
]
# If we aren't sure we're on V2, don't use tf.summary since it could be V1.
# Note there may be false positives since the __version__ attribute may not be
# defined at this point in the import process.
if not getattr(tf, '__version__', '').startswith('2.'): # noqa: F821
packages.remove('tensorflow')
def dynamic_wildcard_import(module):
"""Implements the logic of "from module import *" for the given module."""
symbols = getattr(module, '__all__', None)
if symbols is None:
symbols = [k for k in module.__dict__.keys() if not k.startswith('_')]
globals().update({symbol: getattr(module, symbol) for symbol in symbols})
notfound = object() # sentinel value
for package_name in packages:
package = sys.modules.get(package_name, notfound)
if package is notfound:
# Either it isn't in this installation at all (e.g. the _api.vX packages
# are only in API version X), it isn't imported yet, or it was imported
# but not inserted into sys.modules under its user-facing name (for the
# non-'_api' packages), at which point we continue down the list to look
# "under the hood" for it via its '_api' package name.
continue
module = getattr(package, 'summary', None)
if module is None:
# This happens if the package hasn't been fully imported yet. For example,
# the 'tensorflow' package won't yet have 'summary' attribute if we are
# loading this code via the 'tensorflow.compat...' path and 'compat' is
# imported before 'summary' in the 'tensorflow' __init__.py file.
continue
# Success, we hope. Import all the public symbols into this module.
dynamic_wildcard_import(module)
return |
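The core trick above is a dynamic equivalent of `from module import *`; a standalone sketch of that idea against an ordinary dictionary, using the standard-library `types` module only as a convenient source of public names:

import sys
import types  # imported so that sys.modules['types'] is guaranteed to exist

def wildcard_import_into(namespace, module):
    # Copy the module's public names, honouring __all__ when it is defined.
    symbols = getattr(module, '__all__', None)
    if symbols is None:
        symbols = [k for k in module.__dict__ if not k.startswith('_')]
    namespace.update({name: getattr(module, name) for name in symbols})

target = {}
wildcard_import_into(target, sys.modules['types'])
assert 'ModuleType' in target    # public names copied, underscored names skipped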
def parseStep(self, line):
"""
Parse the line describing the mode.
One of:
variableStep chrom=<reference> [span=<window_size>]
fixedStep chrom=<reference> start=<position> step=<step_interval>
[span=<window_size>]
Span is optional, defaulting to 1. It indicates that each value
applies to region, starting at the given position and extending
<span> positions.
"""
fields = dict([field.split('=') for field in line.split()[1:]])
if 'chrom' in fields:
self._reference = fields['chrom']
else:
raise ValueError("Missing chrom field in %s" % line.strip())
if line.startswith("fixedStep"):
if 'start' in fields:
self._start = int(fields['start']) - 1 # to 0-based
else:
raise ValueError("Missing start field in %s" % line.strip())
if 'span' in fields:
self._span = int(fields['span'])
if 'step' in fields:
self._step = int(fields['step']) | Parse the line describing the mode.
One of:
variableStep chrom=<reference> [span=<window_size>]
fixedStep chrom=<reference> start=<position> step=<step_interval>
[span=<window_size>]
Span is optional, defaulting to 1. It indicates that each value
applies to region, starting at the given position and extending
<span> positions. | Below is the the instruction that describes the task:
### Input:
Parse the line describing the mode.
One of:
variableStep chrom=<reference> [span=<window_size>]
fixedStep chrom=<reference> start=<position> step=<step_interval>
[span=<window_size>]
Span is optional, defaulting to 1. It indicates that each value
applies to region, starting at the given position and extending
<span> positions.
### Response:
def parseStep(self, line):
"""
Parse the line describing the mode.
One of:
variableStep chrom=<reference> [span=<window_size>]
fixedStep chrom=<reference> start=<position> step=<step_interval>
[span=<window_size>]
Span is optional, defaulting to 1. It indicates that each value
applies to region, starting at the given position and extending
<span> positions.
"""
fields = dict([field.split('=') for field in line.split()[1:]])
if 'chrom' in fields:
self._reference = fields['chrom']
else:
raise ValueError("Missing chrom field in %s" % line.strip())
if line.startswith("fixedStep"):
if 'start' in fields:
self._start = int(fields['start']) - 1 # to 0-based
else:
raise ValueError("Missing start field in %s" % line.strip())
if 'span' in fields:
self._span = int(fields['span'])
if 'step' in fields:
self._step = int(fields['step']) |
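A small illustration of what the parser above pulls out of a fixedStep declaration. In its original home `parseStep` is a method on a wiggle reader object; a bare namespace stands in for that object here, and the coordinates are made up.

import types

reader = types.SimpleNamespace(_reference=None, _start=None, _span=1, _step=1)
parseStep(reader, "fixedStep chrom=chr19 start=49304701 step=300 span=200")
assert reader._reference == "chr19"
assert reader._start == 49304700       # start is converted to 0-based
assert reader._step == 300 and reader._span == 200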
def Thome(m, x, D, rhol, rhog, mul, mug, kl, kg, Cpl, Cpg, Hvap, sigma, Psat,
Pc, q=None, Te=None):
r'''Calculates heat transfer coefficient for film boiling of saturated
fluid in any orientation of flow. Correlation
is as developed in [1]_ and [2]_, and also reviewed [3]_. This is a
complicated model, but expected to have more accuracy as a result.
Either the heat flux or excess temperature is required for the calculation
of heat transfer coefficient. The solution for a specified excess
temperature is solved numerically, making it slow.
.. math::
h(z) = \frac{t_l}{\tau} h_l(z) +\frac{t_{film}}{\tau} h_{film}(z)
+ \frac{t_{dry}}{\tau} h_{g}(z)
h_{l/g}(z) = (Nu_{lam}^4 + Nu_{trans}^4)^{1/4} k/D
Nu_{laminar} = 0.91 {Pr}^{1/3} \sqrt{ReD/L(z)}
Nu_{trans} = \frac{ (f/8) (Re-1000)Pr}{1+12.7 (f/8)^{1/2} (Pr^{2/3}-1)}
\left[ 1 + \left( \frac{D}{L(z)}\right)^{2/3}\right]
f = (1.82 \log_{10} Re - 1.64 )^{-2}
L_l = \frac{\tau G_{tp}}{\rho_l}(1-x)
L_{dry} = v_p t_{dry}
t_l = \frac{\tau}{1 + \frac{\rho_l}{\rho_g}\frac{x}{1-x}}
t_v = \frac{\tau}{1 + \frac{\rho_g}{\rho_l}\frac{1-x}{x}}
\tau = \frac{1}{f_{opt}}
f_{opt} = \left(\frac{q}{q_{ref}}\right)^{n_f}
q_{ref} = 3328\left(\frac{P_{sat}}{P_c}\right)^{-0.5}
t_{dry,film} = \frac{\rho_l \Delta H_{vap}}{q}[\delta_0(z) -
\delta_{min}]
\frac{\delta_0}{D} = C_{\delta 0}\left(3\sqrt{\frac{\nu_l}{v_p D}}
\right)^{0.84}\left[(0.07Bo^{0.41})^{-8} + 0.1^{-8}\right]^{-1/8}
Bo = \frac{\rho_l D}{\sigma} v_p^2
v_p = G_{tp} \left[\frac{x}{\rho_g} + \frac{1-x}{\rho_l}\right]
h_{film}(z) = \frac{2 k_l}{\delta_0(z) + \delta_{min}(z)}
\delta_{min} = 0.3\cdot 10^{-6} \text{m}
C_{\delta,0} = 0.29
n_f = 1.74
if t dry film > tv:
.. math::
\delta_{end}(x) = \delta(z, t_v)
t_{film} = t_v
t_{dry} = 0
Otherwise:
.. math::
\delta_{end}(z) = \delta_{min}
t_{film} = t_{dry,film}
t_{dry} = t_v - t_{film}
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific tube interval []
D : float
Diameter of the tube [m]
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
mug : float
Viscosity of gas [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
kg : float
Thermal conductivity of gas [W/m/K]
Cpl : float
Heat capacity of liquid [J/kg/K]
Cpg : float
Heat capacity of gas [J/kg/K]
Hvap : float
Heat of vaporization of liquid [J/kg]
sigma : float
Surface tension of liquid [N/m]
Psat : float
Vapor pressure of fluid, [Pa]
Pc : float
Critical pressure of fluid, [Pa]
q : float, optional
Heat flux to wall [W/m^2]
Te : float, optional
Excess temperature of wall, [K]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
[1]_ and [2]_ have been reviewed, and are accurately reproduced in [3]_.
[1]_ used data from 7 studies, covering 7 fluids and Dh from 0.7-3.1 mm,
heat flux from 0.5-17.8 W/cm^2, x from 0.01-0.99, and G from 50-564
kg/m^2/s.
Liquid and/or gas slugs are both considered, and are hydrodynamically
developing. `Ll` is the calculated length of liquid slugs, and `L_dry`
is the same for vapor slugs.
    Because of the complexity of the model and the logic in this
    function, `Te` as an input may lead to a different solution than the
    calculated `q` will give in return.
Examples
--------
>>> Thome(m=1, x=0.4, D=0.3, rhol=567., rhog=18.09, kl=0.086, kg=0.2,
... mul=156E-6, mug=1E-5, Cpl=2300, Cpg=1400, sigma=0.02, Hvap=9E5,
... Psat=1E5, Pc=22E6, q=1E5)
1633.008836502032
References
----------
.. [1] Thome, J. R., V. Dupont, and A. M. Jacobi. "Heat Transfer Model for
Evaporation in Microchannels. Part I: Presentation of the Model."
International Journal of Heat and Mass Transfer 47, no. 14-16 (July
2004): 3375-85. doi:10.1016/j.ijheatmasstransfer.2004.01.006.
.. [2] Dupont, V., J. R. Thome, and A. M. Jacobi. "Heat Transfer Model for
Evaporation in Microchannels. Part II: Comparison with the Database."
International Journal of Heat and Mass Transfer 47, no. 14-16 (July
2004): 3387-3401. doi:10.1016/j.ijheatmasstransfer.2004.01.007.
.. [3] Bertsch, Stefan S., Eckhard A. Groll, and Suresh V. Garimella.
"Review and Comparative Analysis of Studies on Saturated Flow Boiling in
Small Channels." Nanoscale and Microscale Thermophysical Engineering 12,
no. 3 (September 4, 2008): 187-227. doi:10.1080/15567260802317357.
'''
if q is None and Te:
to_solve = lambda q : q/Thome(m=m, x=x, D=D, rhol=rhol, rhog=rhog, kl=kl, kg=kg, mul=mul, mug=mug, Cpl=Cpl, Cpg=Cpg, sigma=sigma, Hvap=Hvap, Psat=Psat, Pc=Pc, q=q) - Te
q = newton(to_solve, 1E4)
return Thome(m=m, x=x, D=D, rhol=rhol, rhog=rhog, kl=kl, kg=kg, mul=mul, mug=mug, Cpl=Cpl, Cpg=Cpg, sigma=sigma, Hvap=Hvap, Psat=Psat, Pc=Pc, q=q)
elif q is None and Te is None:
raise Exception('Either q or Te is needed for this correlation')
C_delta0 = 0.3E-6
G = m/(pi/4*D**2)
Rel = G*D*(1-x)/mul
Reg = G*D*x/mug
qref = 3328*(Psat/Pc)**-0.5
fopt = (q/qref)**1.74
tau = 1./fopt
vp = G*(x/rhog + (1-x)/rhol)
Bo = rhol*D/sigma*vp**2 # Not standard definition
nul = nu_mu_converter(rho=rhol, mu=mul)
delta0 = D*0.29*(3*(nul/vp/D)**0.5)**0.84*((0.07*Bo**0.41)**-8 + 0.1**-8)**(-1/8.)
tl = tau/(1 + rhol/rhog*(x/(1.-x)))
    tv = tau/(1 + rhog/rhol*((1.-x)/x))
t_dry_film = rhol*Hvap/q*(delta0 - C_delta0)
if t_dry_film > tv:
t_film = tv
delta_end = delta0 - q/rhol/Hvap*tv # what could time possibly be?
t_dry = 0
else:
t_film = t_dry_film
delta_end = C_delta0
t_dry = tv-t_film
Ll = tau*G/rhol*(1-x)
Ldry = t_dry*vp
Prg = Prandtl(Cp=Cpg, k=kg, mu=mug)
Prl = Prandtl(Cp=Cpl, k=kl, mu=mul)
fg = (1.82*log10(Reg) - 1.64)**-2
fl = (1.82*log10(Rel) - 1.64)**-2
Nu_lam_Zl = 2*0.455*(Prl)**(1/3.)*(D*Rel/Ll)**0.5
Nu_trans_Zl = turbulent_Gnielinski(Re=Rel, Pr=Prl, fd=fl)*(1 + (D/Ll)**(2/3.))
if Ldry == 0:
Nu_lam_Zg, Nu_trans_Zg = 0, 0
else:
Nu_lam_Zg = 2*0.455*(Prg)**(1/3.)*(D*Reg/Ldry)**0.5
Nu_trans_Zg = turbulent_Gnielinski(Re=Reg, Pr=Prg, fd=fg)*(1 + (D/Ldry)**(2/3.))
h_Zg = kg/D*(Nu_lam_Zg**4 + Nu_trans_Zg**4)**0.25
h_Zl = kl/D*(Nu_lam_Zl**4 + Nu_trans_Zl**4)**0.25
h_film = 2*kl/(delta0 + C_delta0)
return tl/tau*h_Zl + t_film/tau*h_film + t_dry/tau*h_Zg | r'''Calculates heat transfer coefficient for film boiling of saturated
fluid in any orientation of flow. Correlation
is as developed in [1]_ and [2]_, and also reviewed [3]_. This is a
complicated model, but expected to have more accuracy as a result.
Either the heat flux or excess temperature is required for the calculation
of heat transfer coefficient. The solution for a specified excess
temperature is solved numerically, making it slow.
.. math::
h(z) = \frac{t_l}{\tau} h_l(z) +\frac{t_{film}}{\tau} h_{film}(z)
+ \frac{t_{dry}}{\tau} h_{g}(z)
h_{l/g}(z) = (Nu_{lam}^4 + Nu_{trans}^4)^{1/4} k/D
Nu_{laminar} = 0.91 {Pr}^{1/3} \sqrt{ReD/L(z)}
Nu_{trans} = \frac{ (f/8) (Re-1000)Pr}{1+12.7 (f/8)^{1/2} (Pr^{2/3}-1)}
\left[ 1 + \left( \frac{D}{L(z)}\right)^{2/3}\right]
f = (1.82 \log_{10} Re - 1.64 )^{-2}
L_l = \frac{\tau G_{tp}}{\rho_l}(1-x)
L_{dry} = v_p t_{dry}
t_l = \frac{\tau}{1 + \frac{\rho_l}{\rho_g}\frac{x}{1-x}}
t_v = \frac{\tau}{1 + \frac{\rho_g}{\rho_l}\frac{1-x}{x}}
\tau = \frac{1}{f_{opt}}
f_{opt} = \left(\frac{q}{q_{ref}}\right)^{n_f}
q_{ref} = 3328\left(\frac{P_{sat}}{P_c}\right)^{-0.5}
t_{dry,film} = \frac{\rho_l \Delta H_{vap}}{q}[\delta_0(z) -
\delta_{min}]
\frac{\delta_0}{D} = C_{\delta 0}\left(3\sqrt{\frac{\nu_l}{v_p D}}
\right)^{0.84}\left[(0.07Bo^{0.41})^{-8} + 0.1^{-8}\right]^{-1/8}
Bo = \frac{\rho_l D}{\sigma} v_p^2
v_p = G_{tp} \left[\frac{x}{\rho_g} + \frac{1-x}{\rho_l}\right]
h_{film}(z) = \frac{2 k_l}{\delta_0(z) + \delta_{min}(z)}
\delta_{min} = 0.3\cdot 10^{-6} \text{m}
C_{\delta,0} = 0.29
n_f = 1.74
if t dry film > tv:
.. math::
\delta_{end}(x) = \delta(z, t_v)
t_{film} = t_v
t_{dry} = 0
Otherwise:
.. math::
\delta_{end}(z) = \delta_{min}
t_{film} = t_{dry,film}
t_{dry} = t_v - t_{film}
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific tube interval []
D : float
Diameter of the tube [m]
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
mug : float
Viscosity of gas [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
kg : float
Thermal conductivity of gas [W/m/K]
Cpl : float
Heat capacity of liquid [J/kg/K]
Cpg : float
Heat capacity of gas [J/kg/K]
Hvap : float
Heat of vaporization of liquid [J/kg]
sigma : float
Surface tension of liquid [N/m]
Psat : float
Vapor pressure of fluid, [Pa]
Pc : float
Critical pressure of fluid, [Pa]
q : float, optional
Heat flux to wall [W/m^2]
Te : float, optional
Excess temperature of wall, [K]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
[1]_ and [2]_ have been reviewed, and are accurately reproduced in [3]_.
[1]_ used data from 7 studies, covering 7 fluids and Dh from 0.7-3.1 mm,
heat flux from 0.5-17.8 W/cm^2, x from 0.01-0.99, and G from 50-564
kg/m^2/s.
Liquid and/or gas slugs are both considered, and are hydrodynamically
developing. `Ll` is the calculated length of liquid slugs, and `L_dry`
is the same for vapor slugs.
Because of the complexity of the model and the logic in this
function, `Te` as an input may lead to a different solution than the
calculated `q` will give in return.
Examples
--------
>>> Thome(m=1, x=0.4, D=0.3, rhol=567., rhog=18.09, kl=0.086, kg=0.2,
... mul=156E-6, mug=1E-5, Cpl=2300, Cpg=1400, sigma=0.02, Hvap=9E5,
... Psat=1E5, Pc=22E6, q=1E5)
1633.008836502032
References
----------
.. [1] Thome, J. R., V. Dupont, and A. M. Jacobi. "Heat Transfer Model for
Evaporation in Microchannels. Part I: Presentation of the Model."
International Journal of Heat and Mass Transfer 47, no. 14-16 (July
2004): 3375-85. doi:10.1016/j.ijheatmasstransfer.2004.01.006.
.. [2] Dupont, V., J. R. Thome, and A. M. Jacobi. "Heat Transfer Model for
Evaporation in Microchannels. Part II: Comparison with the Database."
International Journal of Heat and Mass Transfer 47, no. 14-16 (July
2004): 3387-3401. doi:10.1016/j.ijheatmasstransfer.2004.01.007.
.. [3] Bertsch, Stefan S., Eckhard A. Groll, and Suresh V. Garimella.
"Review and Comparative Analysis of Studies on Saturated Flow Boiling in
Small Channels." Nanoscale and Microscale Thermophysical Engineering 12,
no. 3 (September 4, 2008): 187-227. doi:10.1080/15567260802317357. | Below is the the instruction that describes the task:
### Input:
r'''Calculates heat transfer coefficient for film boiling of saturated
fluid in any orientation of flow. Correlation
is as developed in [1]_ and [2]_, and also reviewed [3]_. This is a
complicated model, but expected to have more accuracy as a result.
Either the heat flux or excess temperature is required for the calculation
of heat transfer coefficient. The solution for a specified excess
temperature is solved numerically, making it slow.
.. math::
h(z) = \frac{t_l}{\tau} h_l(z) +\frac{t_{film}}{\tau} h_{film}(z)
+ \frac{t_{dry}}{\tau} h_{g}(z)
h_{l/g}(z) = (Nu_{lam}^4 + Nu_{trans}^4)^{1/4} k/D
Nu_{laminar} = 0.91 {Pr}^{1/3} \sqrt{ReD/L(z)}
Nu_{trans} = \frac{ (f/8) (Re-1000)Pr}{1+12.7 (f/8)^{1/2} (Pr^{2/3}-1)}
\left[ 1 + \left( \frac{D}{L(z)}\right)^{2/3}\right]
f = (1.82 \log_{10} Re - 1.64 )^{-2}
L_l = \frac{\tau G_{tp}}{\rho_l}(1-x)
L_{dry} = v_p t_{dry}
t_l = \frac{\tau}{1 + \frac{\rho_l}{\rho_g}\frac{x}{1-x}}
t_v = \frac{\tau}{1 + \frac{\rho_g}{\rho_l}\frac{1-x}{x}}
\tau = \frac{1}{f_{opt}}
f_{opt} = \left(\frac{q}{q_{ref}}\right)^{n_f}
q_{ref} = 3328\left(\frac{P_{sat}}{P_c}\right)^{-0.5}
t_{dry,film} = \frac{\rho_l \Delta H_{vap}}{q}[\delta_0(z) -
\delta_{min}]
\frac{\delta_0}{D} = C_{\delta 0}\left(3\sqrt{\frac{\nu_l}{v_p D}}
\right)^{0.84}\left[(0.07Bo^{0.41})^{-8} + 0.1^{-8}\right]^{-1/8}
Bo = \frac{\rho_l D}{\sigma} v_p^2
v_p = G_{tp} \left[\frac{x}{\rho_g} + \frac{1-x}{\rho_l}\right]
h_{film}(z) = \frac{2 k_l}{\delta_0(z) + \delta_{min}(z)}
\delta_{min} = 0.3\cdot 10^{-6} \text{m}
C_{\delta,0} = 0.29
n_f = 1.74
if t dry film > tv:
.. math::
\delta_{end}(x) = \delta(z, t_v)
t_{film} = t_v
t_{dry} = 0
Otherwise:
.. math::
\delta_{end}(z) = \delta_{min}
t_{film} = t_{dry,film}
t_{dry} = t_v - t_{film}
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific tube interval []
D : float
Diameter of the tube [m]
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
mug : float
Viscosity of gas [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
kg : float
Thermal conductivity of gas [W/m/K]
Cpl : float
Heat capacity of liquid [J/kg/K]
Cpg : float
Heat capacity of gas [J/kg/K]
Hvap : float
Heat of vaporization of liquid [J/kg]
sigma : float
Surface tension of liquid [N/m]
Psat : float
Vapor pressure of fluid, [Pa]
Pc : float
Critical pressure of fluid, [Pa]
q : float, optional
Heat flux to wall [W/m^2]
Te : float, optional
Excess temperature of wall, [K]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
[1]_ and [2]_ have been reviewed, and are accurately reproduced in [3]_.
[1]_ used data from 7 studies, covering 7 fluids and Dh from 0.7-3.1 mm,
heat flux from 0.5-17.8 W/cm^2, x from 0.01-0.99, and G from 50-564
kg/m^2/s.
Liquid and/or gas slugs are both considered, and are hydrodynamically
developing. `Ll` is the calculated length of liquid slugs, and `L_dry`
is the same for vapor slugs.
Because of the complexity of the model and the logic in this
function, `Te` as an input may lead to a different solution than the
calculated `q` will give in return.
Examples
--------
>>> Thome(m=1, x=0.4, D=0.3, rhol=567., rhog=18.09, kl=0.086, kg=0.2,
... mul=156E-6, mug=1E-5, Cpl=2300, Cpg=1400, sigma=0.02, Hvap=9E5,
... Psat=1E5, Pc=22E6, q=1E5)
1633.008836502032
References
----------
.. [1] Thome, J. R., V. Dupont, and A. M. Jacobi. "Heat Transfer Model for
Evaporation in Microchannels. Part I: Presentation of the Model."
International Journal of Heat and Mass Transfer 47, no. 14-16 (July
2004): 3375-85. doi:10.1016/j.ijheatmasstransfer.2004.01.006.
.. [2] Dupont, V., J. R. Thome, and A. M. Jacobi. "Heat Transfer Model for
Evaporation in Microchannels. Part II: Comparison with the Database."
International Journal of Heat and Mass Transfer 47, no. 14-16 (July
2004): 3387-3401. doi:10.1016/j.ijheatmasstransfer.2004.01.007.
.. [3] Bertsch, Stefan S., Eckhard A. Groll, and Suresh V. Garimella.
"Review and Comparative Analysis of Studies on Saturated Flow Boiling in
Small Channels." Nanoscale and Microscale Thermophysical Engineering 12,
no. 3 (September 4, 2008): 187-227. doi:10.1080/15567260802317357.
### Response:
def Thome(m, x, D, rhol, rhog, mul, mug, kl, kg, Cpl, Cpg, Hvap, sigma, Psat,
Pc, q=None, Te=None):
r'''Calculates heat transfer coefficient for film boiling of saturated
fluid in any orientation of flow. Correlation
is as developed in [1]_ and [2]_, and also reviewed [3]_. This is a
complicated model, but expected to have more accuracy as a result.
Either the heat flux or excess temperature is required for the calculation
of heat transfer coefficient. The solution for a specified excess
temperature is solved numerically, making it slow.
.. math::
h(z) = \frac{t_l}{\tau} h_l(z) +\frac{t_{film}}{\tau} h_{film}(z)
+ \frac{t_{dry}}{\tau} h_{g}(z)
h_{l/g}(z) = (Nu_{lam}^4 + Nu_{trans}^4)^{1/4} k/D
Nu_{laminar} = 0.91 {Pr}^{1/3} \sqrt{ReD/L(z)}
Nu_{trans} = \frac{ (f/8) (Re-1000)Pr}{1+12.7 (f/8)^{1/2} (Pr^{2/3}-1)}
\left[ 1 + \left( \frac{D}{L(z)}\right)^{2/3}\right]
f = (1.82 \log_{10} Re - 1.64 )^{-2}
L_l = \frac{\tau G_{tp}}{\rho_l}(1-x)
L_{dry} = v_p t_{dry}
t_l = \frac{\tau}{1 + \frac{\rho_l}{\rho_g}\frac{x}{1-x}}
t_v = \frac{\tau}{1 + \frac{\rho_g}{\rho_l}\frac{1-x}{x}}
\tau = \frac{1}{f_{opt}}
f_{opt} = \left(\frac{q}{q_{ref}}\right)^{n_f}
q_{ref} = 3328\left(\frac{P_{sat}}{P_c}\right)^{-0.5}
t_{dry,film} = \frac{\rho_l \Delta H_{vap}}{q}[\delta_0(z) -
\delta_{min}]
\frac{\delta_0}{D} = C_{\delta 0}\left(3\sqrt{\frac{\nu_l}{v_p D}}
\right)^{0.84}\left[(0.07Bo^{0.41})^{-8} + 0.1^{-8}\right]^{-1/8}
Bo = \frac{\rho_l D}{\sigma} v_p^2
v_p = G_{tp} \left[\frac{x}{\rho_g} + \frac{1-x}{\rho_l}\right]
h_{film}(z) = \frac{2 k_l}{\delta_0(z) + \delta_{min}(z)}
\delta_{min} = 0.3\cdot 10^{-6} \text{m}
C_{\delta,0} = 0.29
n_f = 1.74
if t dry film > tv:
.. math::
\delta_{end}(x) = \delta(z, t_v)
t_{film} = t_v
t_{dry} = 0
Otherwise:
.. math::
\delta_{end}(z) = \delta_{min}
t_{film} = t_{dry,film}
t_{dry} = t_v - t_{film}
Parameters
----------
m : float
Mass flow rate [kg/s]
x : float
Quality at the specific tube interval []
D : float
Diameter of the tube [m]
rhol : float
Density of the liquid [kg/m^3]
rhog : float
Density of the gas [kg/m^3]
mul : float
Viscosity of liquid [Pa*s]
mug : float
Viscosity of gas [Pa*s]
kl : float
Thermal conductivity of liquid [W/m/K]
kg : float
Thermal conductivity of gas [W/m/K]
Cpl : float
Heat capacity of liquid [J/kg/K]
Cpg : float
Heat capacity of gas [J/kg/K]
Hvap : float
Heat of vaporization of liquid [J/kg]
sigma : float
Surface tension of liquid [N/m]
Psat : float
Vapor pressure of fluid, [Pa]
Pc : float
Critical pressure of fluid, [Pa]
q : float, optional
Heat flux to wall [W/m^2]
Te : float, optional
Excess temperature of wall, [K]
Returns
-------
h : float
Heat transfer coefficient [W/m^2/K]
Notes
-----
[1]_ and [2]_ have been reviewed, and are accurately reproduced in [3]_.
[1]_ used data from 7 studies, covering 7 fluids and Dh from 0.7-3.1 mm,
heat flux from 0.5-17.8 W/cm^2, x from 0.01-0.99, and G from 50-564
kg/m^2/s.
Liquid and/or gas slugs are both considered, and are hydrodynamically
developing. `Ll` is the calculated length of liquid slugs, and `L_dry`
is the same for vapor slugs.
    Because of the complexity of the model and the logic in this
    function, `Te` as an input may lead to a different solution than the
    calculated `q` will give in return.
Examples
--------
>>> Thome(m=1, x=0.4, D=0.3, rhol=567., rhog=18.09, kl=0.086, kg=0.2,
... mul=156E-6, mug=1E-5, Cpl=2300, Cpg=1400, sigma=0.02, Hvap=9E5,
... Psat=1E5, Pc=22E6, q=1E5)
1633.008836502032
References
----------
.. [1] Thome, J. R., V. Dupont, and A. M. Jacobi. "Heat Transfer Model for
Evaporation in Microchannels. Part I: Presentation of the Model."
International Journal of Heat and Mass Transfer 47, no. 14-16 (July
2004): 3375-85. doi:10.1016/j.ijheatmasstransfer.2004.01.006.
.. [2] Dupont, V., J. R. Thome, and A. M. Jacobi. "Heat Transfer Model for
Evaporation in Microchannels. Part II: Comparison with the Database."
International Journal of Heat and Mass Transfer 47, no. 14-16 (July
2004): 3387-3401. doi:10.1016/j.ijheatmasstransfer.2004.01.007.
.. [3] Bertsch, Stefan S., Eckhard A. Groll, and Suresh V. Garimella.
"Review and Comparative Analysis of Studies on Saturated Flow Boiling in
Small Channels." Nanoscale and Microscale Thermophysical Engineering 12,
no. 3 (September 4, 2008): 187-227. doi:10.1080/15567260802317357.
'''
if q is None and Te:
to_solve = lambda q : q/Thome(m=m, x=x, D=D, rhol=rhol, rhog=rhog, kl=kl, kg=kg, mul=mul, mug=mug, Cpl=Cpl, Cpg=Cpg, sigma=sigma, Hvap=Hvap, Psat=Psat, Pc=Pc, q=q) - Te
q = newton(to_solve, 1E4)
return Thome(m=m, x=x, D=D, rhol=rhol, rhog=rhog, kl=kl, kg=kg, mul=mul, mug=mug, Cpl=Cpl, Cpg=Cpg, sigma=sigma, Hvap=Hvap, Psat=Psat, Pc=Pc, q=q)
elif q is None and Te is None:
raise Exception('Either q or Te is needed for this correlation')
C_delta0 = 0.3E-6
G = m/(pi/4*D**2)
Rel = G*D*(1-x)/mul
Reg = G*D*x/mug
qref = 3328*(Psat/Pc)**-0.5
fopt = (q/qref)**1.74
tau = 1./fopt
vp = G*(x/rhog + (1-x)/rhol)
Bo = rhol*D/sigma*vp**2 # Not standard definition
nul = nu_mu_converter(rho=rhol, mu=mul)
delta0 = D*0.29*(3*(nul/vp/D)**0.5)**0.84*((0.07*Bo**0.41)**-8 + 0.1**-8)**(-1/8.)
tl = tau/(1 + rhol/rhog*(x/(1.-x)))
    tv = tau/(1 + rhog/rhol*((1.-x)/x))
t_dry_film = rhol*Hvap/q*(delta0 - C_delta0)
if t_dry_film > tv:
t_film = tv
delta_end = delta0 - q/rhol/Hvap*tv # what could time possibly be?
t_dry = 0
else:
t_film = t_dry_film
delta_end = C_delta0
t_dry = tv-t_film
Ll = tau*G/rhol*(1-x)
Ldry = t_dry*vp
Prg = Prandtl(Cp=Cpg, k=kg, mu=mug)
Prl = Prandtl(Cp=Cpl, k=kl, mu=mul)
fg = (1.82*log10(Reg) - 1.64)**-2
fl = (1.82*log10(Rel) - 1.64)**-2
Nu_lam_Zl = 2*0.455*(Prl)**(1/3.)*(D*Rel/Ll)**0.5
Nu_trans_Zl = turbulent_Gnielinski(Re=Rel, Pr=Prl, fd=fl)*(1 + (D/Ll)**(2/3.))
if Ldry == 0:
Nu_lam_Zg, Nu_trans_Zg = 0, 0
else:
Nu_lam_Zg = 2*0.455*(Prg)**(1/3.)*(D*Reg/Ldry)**0.5
Nu_trans_Zg = turbulent_Gnielinski(Re=Reg, Pr=Prg, fd=fg)*(1 + (D/Ldry)**(2/3.))
h_Zg = kg/D*(Nu_lam_Zg**4 + Nu_trans_Zg**4)**0.25
h_Zl = kl/D*(Nu_lam_Zl**4 + Nu_trans_Zl**4)**0.25
h_film = 2*kl/(delta0 + C_delta0)
return tl/tau*h_Zl + t_film/tau*h_film + t_dry/tau*h_Zg |
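A sketch of the two calling modes described in the docstring above: the direct mode with a known heat flux (the docstring's own numbers), and the inverse mode where an excess wall temperature is supplied and `q` is found by the Newton iteration, which is correspondingly slower. The `Te` value below is illustrative only.

# direct mode: heat flux q specified
h_q = Thome(m=1, x=0.4, D=0.3, rhol=567., rhog=18.09, kl=0.086, kg=0.2,
            mul=156E-6, mug=1E-5, Cpl=2300, Cpg=1400, sigma=0.02, Hvap=9E5,
            Psat=1E5, Pc=22E6, q=1E5)

# inverse mode: wall superheat Te specified; q is solved so that q/h(q) == Te
h_Te = Thome(m=1, x=0.4, D=0.3, rhol=567., rhog=18.09, kl=0.086, kg=0.2,
             mul=156E-6, mug=1E-5, Cpl=2300, Cpg=1400, sigma=0.02, Hvap=9E5,
             Psat=1E5, Pc=22E6, Te=60.0)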
def handle_matches(self, match):
"""
Returns a response statement from a matched input statement.
:param match: It is a valid matched pattern from the input statement
:type: `_sre.SRE_Match`
"""
response = Statement(text='')
from_parsed = match.group("from")
target_parsed = match.group("target")
n_statement = match.group("number")
if n_statement == 'a' or n_statement == 'an':
n_statement = '1.0'
n = mathparse.parse(n_statement, self.language.ISO_639.upper())
ureg = UnitRegistry()
from_parsed, target_parsed = self.get_valid_units(ureg, from_parsed, target_parsed)
if from_parsed is None or target_parsed is None:
response.confidence = 0.0
else:
from_value = ureg.Quantity(float(n), from_parsed)
target_value = from_value.to(target_parsed)
response.confidence = 1.0
response.text = str(target_value.magnitude)
return response | Returns a response statement from a matched input statement.
:param match: It is a valid matched pattern from the input statement
:type: `_sre.SRE_Match` | Below is the the instruction that describes the task:
### Input:
Returns a response statement from a matched input statement.
:param match: It is a valid matched pattern from the input statement
:type: `_sre.SRE_Match`
### Response:
def handle_matches(self, match):
"""
Returns a response statement from a matched input statement.
:param match: It is a valid matched pattern from the input statement
:type: `_sre.SRE_Match`
"""
response = Statement(text='')
from_parsed = match.group("from")
target_parsed = match.group("target")
n_statement = match.group("number")
if n_statement == 'a' or n_statement == 'an':
n_statement = '1.0'
n = mathparse.parse(n_statement, self.language.ISO_639.upper())
ureg = UnitRegistry()
from_parsed, target_parsed = self.get_valid_units(ureg, from_parsed, target_parsed)
if from_parsed is None or target_parsed is None:
response.confidence = 0.0
else:
from_value = ureg.Quantity(float(n), from_parsed)
target_value = from_value.to(target_parsed)
response.confidence = 1.0
response.text = str(target_value.magnitude)
return response |
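The conversion core of the handler above boils down to a few pint calls; a standalone sketch (pint must be installed, and the unit spellings are ones its default registry understands):

from pint import UnitRegistry

ureg = UnitRegistry()
quantity = ureg.Quantity(2.0, 'kilometer')   # parsed amount and source unit
converted = quantity.to('meter')             # target unit from the matched pattern
print(converted.magnitude)                   # 2000.0 -> becomes the response text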
def transformer_base_v2():
"""Set of hyperparameters."""
hparams = transformer_base_v1()
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.layer_prepostprocess_dropout = 0.1
hparams.attention_dropout = 0.1
hparams.relu_dropout = 0.1
hparams.learning_rate_warmup_steps = 8000
hparams.learning_rate = 0.2
return hparams | Set of hyperparameters. | Below is the the instruction that describes the task:
### Input:
Set of hyperparameters.
### Response:
def transformer_base_v2():
"""Set of hyperparameters."""
hparams = transformer_base_v1()
hparams.layer_preprocess_sequence = "n"
hparams.layer_postprocess_sequence = "da"
hparams.layer_prepostprocess_dropout = 0.1
hparams.attention_dropout = 0.1
hparams.relu_dropout = 0.1
hparams.learning_rate_warmup_steps = 8000
hparams.learning_rate = 0.2
return hparams |
def _repack_archive (archive1, archive2, verbosity=0, interactive=True):
"""Repackage an archive to a different format."""
format1, compression1 = get_archive_format(archive1)
format2, compression2 = get_archive_format(archive2)
if format1 == format2 and compression1 == compression2:
# same format and compression allows to copy the file
util.link_or_copy(archive1, archive2, verbosity=verbosity)
return
tmpdir = util.tmpdir()
try:
kwargs = dict(verbosity=verbosity, outdir=tmpdir)
same_format = (format1 == format2 and compression1 and compression2)
if same_format:
# only decompress since the format is the same
kwargs['format'] = compression1
path = _extract_archive(archive1, **kwargs)
archive = os.path.abspath(archive2)
files = tuple(os.listdir(path))
olddir = os.getcwd()
os.chdir(path)
try:
kwargs = dict(verbosity=verbosity, interactive=interactive)
if same_format:
# only compress since the format is the same
kwargs['format'] = compression2
_create_archive(archive, files, **kwargs)
finally:
os.chdir(olddir)
finally:
shutil.rmtree(tmpdir, onerror=rmtree_log_error) | Repackage an archive to a different format. | Below is the the instruction that describes the task:
### Input:
Repackage an archive to a different format.
### Response:
def _repack_archive (archive1, archive2, verbosity=0, interactive=True):
"""Repackage an archive to a different format."""
format1, compression1 = get_archive_format(archive1)
format2, compression2 = get_archive_format(archive2)
if format1 == format2 and compression1 == compression2:
# same format and compression allows to copy the file
util.link_or_copy(archive1, archive2, verbosity=verbosity)
return
tmpdir = util.tmpdir()
try:
kwargs = dict(verbosity=verbosity, outdir=tmpdir)
same_format = (format1 == format2 and compression1 and compression2)
if same_format:
# only decompress since the format is the same
kwargs['format'] = compression1
path = _extract_archive(archive1, **kwargs)
archive = os.path.abspath(archive2)
files = tuple(os.listdir(path))
olddir = os.getcwd()
os.chdir(path)
try:
kwargs = dict(verbosity=verbosity, interactive=interactive)
if same_format:
# only compress since the format is the same
kwargs['format'] = compression2
_create_archive(archive, files, **kwargs)
finally:
os.chdir(olddir)
finally:
shutil.rmtree(tmpdir, onerror=rmtree_log_error) |
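Usage of the repacking helper above is just a source path and a destination path, with formats inferred from the file names; the paths here are hypothetical.

# identical format and compression: the file is simply linked or copied
_repack_archive('site-backup.tar.gz', 'copies/site-backup.tar.gz')

# different target format: extract into a temp dir, then re-create as a zip
_repack_archive('site-backup.tar.gz', 'site-backup.zip')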
def match_rstring(self, tokens, item):
"""Match suffix string."""
name, suffix = tokens
return self.match_mstring((None, name, suffix), item, use_bytes=suffix.startswith("b")) | Match suffix string. | Below is the the instruction that describes the task:
### Input:
Match suffix string.
### Response:
def match_rstring(self, tokens, item):
"""Match suffix string."""
name, suffix = tokens
return self.match_mstring((None, name, suffix), item, use_bytes=suffix.startswith("b")) |
def compute_bin_edges(features, num_bins, edge_range, trim_outliers, trim_percentile, use_orig_distr=False):
"Compute the edges for the histogram bins to keep it the same for all nodes."
if use_orig_distr:
print('Using original distribution (without histogram) to compute edge weights!')
edges=None
return edges
if edge_range is None:
if trim_outliers:
# percentiles_to_keep = [ trim_percentile, 1.0-trim_percentile] # [0.05, 0.95]
edges_of_edges = np.array([np.percentile(features, trim_percentile),
np.percentile(features, 100 - trim_percentile)])
else:
edges_of_edges = np.array([np.min(features), np.max(features)])
else:
edges_of_edges = edge_range
# Edges computed using data from all nodes, in order to establish correspondence
edges = np.linspace(edges_of_edges[0], edges_of_edges[1], num=num_bins, endpoint=True)
    return edges | Compute the edges for the histogram bins to keep it the same for all nodes. | Below is the instruction that describes the task:
### Input:
Compute the edges for the histogram bins to keep it the same for all nodes.
### Response:
def compute_bin_edges(features, num_bins, edge_range, trim_outliers, trim_percentile, use_orig_distr=False):
"Compute the edges for the histogram bins to keep it the same for all nodes."
if use_orig_distr:
print('Using original distribution (without histogram) to compute edge weights!')
edges=None
return edges
if edge_range is None:
if trim_outliers:
# percentiles_to_keep = [ trim_percentile, 1.0-trim_percentile] # [0.05, 0.95]
edges_of_edges = np.array([np.percentile(features, trim_percentile),
np.percentile(features, 100 - trim_percentile)])
else:
edges_of_edges = np.array([np.min(features), np.max(features)])
else:
edges_of_edges = edge_range
# Edges computed using data from all nodes, in order to establish correspondence
edges = np.linspace(edges_of_edges[0], edges_of_edges[1], num=num_bins, endpoint=True)
return edges |
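
An illustrative call, assuming numpy is imported as np (as the function body already requires); the data and parameter values are made up. Note that np.linspace(..., num=num_bins) yields num_bins edge values, i.e. num_bins - 1 bins:

import numpy as np

features = np.random.RandomState(0).normal(size=1000)
edges = compute_bin_edges(features, num_bins=20, edge_range=None,
                          trim_outliers=True, trim_percentile=5)
# 20 evenly spaced edges between the 5th and 95th percentile of `features`
print(edges.shape)  # (20,)
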
def incr(self, name, amount=1):
"""
Increase the value at key ``name`` by ``amount``. If no key exists, the value
will be initialized as ``amount`` .
Like **Redis.INCR**
:param string name: the key name
:param int amount: increments
:return: the integer value at key ``name``
:rtype: int
>>> ssdb.incr('set_count', 3)
13
>>> ssdb.incr('set_count', 1)
14
>>> ssdb.incr('set_count', -2)
12
>>> ssdb.incr('temp_count', 42)
42
"""
amount = get_integer('amount', amount)
return self.execute_command('incr', name, amount) | Increase the value at key ``name`` by ``amount``. If no key exists, the value
will be initialized as ``amount`` .
Like **Redis.INCR**
:param string name: the key name
:param int amount: increments
:return: the integer value at key ``name``
:rtype: int
>>> ssdb.incr('set_count', 3)
13
>>> ssdb.incr('set_count', 1)
14
>>> ssdb.incr('set_count', -2)
12
>>> ssdb.incr('temp_count', 42)
        42 | Below is the instruction that describes the task:
### Input:
Increase the value at key ``name`` by ``amount``. If no key exists, the value
will be initialized as ``amount`` .
Like **Redis.INCR**
:param string name: the key name
:param int amount: increments
:return: the integer value at key ``name``
:rtype: int
>>> ssdb.incr('set_count', 3)
13
>>> ssdb.incr('set_count', 1)
14
>>> ssdb.incr('set_count', -2)
12
>>> ssdb.incr('temp_count', 42)
42
### Response:
def incr(self, name, amount=1):
"""
Increase the value at key ``name`` by ``amount``. If no key exists, the value
will be initialized as ``amount`` .
Like **Redis.INCR**
:param string name: the key name
:param int amount: increments
:return: the integer value at key ``name``
:rtype: int
>>> ssdb.incr('set_count', 3)
13
>>> ssdb.incr('set_count', 1)
14
>>> ssdb.incr('set_count', -2)
12
>>> ssdb.incr('temp_count', 42)
42
"""
amount = get_integer('amount', amount)
return self.execute_command('incr', name, amount) |
def _is_bright(rgb):
"""Return whether a RGB color is bright or not."""
r, g, b = rgb
gray = 0.299 * r + 0.587 * g + 0.114 * b
    return gray >= .5 | Return whether a RGB color is bright or not. | Below is the instruction that describes the task:
### Input:
Return whether a RGB color is bright or not.
### Response:
def _is_bright(rgb):
"""Return whether a RGB color is bright or not."""
r, g, b = rgb
gray = 0.299 * r + 0.587 * g + 0.114 * b
return gray >= .5 |
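
A few quick checks; the weights are the standard Rec. 601 luma coefficients, and the 0.5 threshold suggests the channels are expected as floats in [0, 1] (an assumption, since the callers are not shown here):

print(_is_bright((1.0, 1.0, 1.0)))  # True  -- white, gray = 1.0
print(_is_bright((0.0, 0.0, 0.0)))  # False -- black, gray = 0.0
print(_is_bright((1.0, 1.0, 0.0)))  # True  -- yellow, gray = 0.299 + 0.587 = 0.886
print(_is_bright((0.0, 0.0, 1.0)))  # False -- pure blue, gray = 0.114
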
def CMP(self, params):
"""
CMP Rm, Rn
CMP Rm, #imm8
Subtract Rn or imm8 from Rm, set the NZCV flags, and discard the result
Rm and Rn can be R0-R14
"""
Rm, Rn = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
if self.is_register(Rn):
# CMP Rm, Rn
self.check_arguments(R0_thru_R14=(Rm, Rn))
def CMP_func():
self.set_NZCV_flags(self.register[Rm], self.register[Rn],
self.register[Rm] - self.register[Rn], 'sub')
else:
# CMP Rm, #imm8
self.check_arguments(R0_thru_R14=(Rm,), imm8=(Rn,))
def CMP_func():
tmp = self.convert_to_integer(Rn[1:])
self.set_NZCV_flags(self.register[Rm], tmp,
self.register[Rm] - tmp, 'sub')
return CMP_func | CMP Rm, Rn
CMP Rm, #imm8
Subtract Rn or imm8 from Rm, set the NZCV flags, and discard the result
        Rm and Rn can be R0-R14 | Below is the instruction that describes the task:
### Input:
CMP Rm, Rn
CMP Rm, #imm8
Subtract Rn or imm8 from Rm, set the NZCV flags, and discard the result
Rm and Rn can be R0-R14
### Response:
def CMP(self, params):
"""
CMP Rm, Rn
CMP Rm, #imm8
Subtract Rn or imm8 from Rm, set the NZCV flags, and discard the result
Rm and Rn can be R0-R14
"""
Rm, Rn = self.get_two_parameters(self.TWO_PARAMETER_COMMA_SEPARATED, params)
if self.is_register(Rn):
# CMP Rm, Rn
self.check_arguments(R0_thru_R14=(Rm, Rn))
def CMP_func():
self.set_NZCV_flags(self.register[Rm], self.register[Rn],
self.register[Rm] - self.register[Rn], 'sub')
else:
# CMP Rm, #imm8
self.check_arguments(R0_thru_R14=(Rm,), imm8=(Rn,))
def CMP_func():
tmp = self.convert_to_integer(Rn[1:])
self.set_NZCV_flags(self.register[Rm], tmp,
self.register[Rm] - tmp, 'sub')
return CMP_func |
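
CMP here is a two-stage builder: the method parses and validates the operands once, then returns a closure (CMP_func) that performs the actual flag update when the instruction is executed. A stripped-down illustration of that pattern, not the simulator's real API:

def build_cmp(registers, flags, rm, imm):
    def cmp_func():
        result = registers[rm] - imm
        flags['N'] = result < 0   # negative
        flags['Z'] = result == 0  # zero
    return cmp_func

regs, flags = {'R0': 7}, {}
step = build_cmp(regs, flags, 'R0', 7)  # "assemble" CMP R0, #7
step()                                  # "execute" it later
print(flags)                            # {'N': False, 'Z': True}
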
def touchPoint(self, x, y):
'''
Touches a point in the device screen.
The generated operation will use the units specified in L{coordinatesUnit} and the
orientation in L{vc.display['orientation']}.
'''
if DEBUG:
print >> sys.stderr, 'touchPoint(%d, %d)' % (x, y)
print >> sys.stderr, 'touchPoint:', type(x), type(y)
if self.areEventsDisabled:
if DEBUG:
print >> sys.stderr, "Ignoring event"
self.canvas.update_idletasks()
return
if DEBUG:
print >> sys.stderr, "Is touching point:", self.isTouchingPoint
if self.isTouchingPoint:
self.showVignette()
if self.vc:
self.vc.touch(x, y)
if self.coordinatesUnit == Unit.DIP:
x = round(x / self.device.display['density'], 2)
y = round(y / self.device.display['density'], 2)
self.printOperation(None, Operation.TOUCH_POINT, x, y, self.coordinatesUnit,
self.device.display['orientation'])
self.printOperation(None, Operation.SLEEP, Operation.DEFAULT)
# FIXME: can we reduce this sleep? (was 5)
time.sleep(1)
self.isTouchingPoint = self.vc is None
self.takeScreenshotAndShowItOnWindow()
# self.hideVignette()
self.statusBar.clear()
return | Touches a point in the device screen.
The generated operation will use the units specified in L{coordinatesUnit} and the
        orientation in L{vc.display['orientation']}. | Below is the instruction that describes the task:
### Input:
Touches a point in the device screen.
The generated operation will use the units specified in L{coordinatesUnit} and the
orientation in L{vc.display['orientation']}.
### Response:
def touchPoint(self, x, y):
'''
Touches a point in the device screen.
The generated operation will use the units specified in L{coordinatesUnit} and the
orientation in L{vc.display['orientation']}.
'''
if DEBUG:
print >> sys.stderr, 'touchPoint(%d, %d)' % (x, y)
print >> sys.stderr, 'touchPoint:', type(x), type(y)
if self.areEventsDisabled:
if DEBUG:
print >> sys.stderr, "Ignoring event"
self.canvas.update_idletasks()
return
if DEBUG:
print >> sys.stderr, "Is touching point:", self.isTouchingPoint
if self.isTouchingPoint:
self.showVignette()
if self.vc:
self.vc.touch(x, y)
if self.coordinatesUnit == Unit.DIP:
x = round(x / self.device.display['density'], 2)
y = round(y / self.device.display['density'], 2)
self.printOperation(None, Operation.TOUCH_POINT, x, y, self.coordinatesUnit,
self.device.display['orientation'])
self.printOperation(None, Operation.SLEEP, Operation.DEFAULT)
# FIXME: can we reduce this sleep? (was 5)
time.sleep(1)
self.isTouchingPoint = self.vc is None
self.takeScreenshotAndShowItOnWindow()
# self.hideVignette()
self.statusBar.clear()
return |
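
The only arithmetic in the method is the pixel-to-DIP conversion; shown in isolation below, with density 2.0 as an assumed example value (an xhdpi screen):

density = 2.0          # device.display['density'] in the code above
x_px, y_px = 540, 960  # touched point in pixels
x_dip = round(x_px / density, 2)
y_dip = round(y_px / density, 2)
print(x_dip, y_dip)    # 270.0 480.0
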
def get_app_list(region_name=None,filter_name=None):
"""
get local app list
"""
try:
conn = get_conn()
c = conn.cursor()
cond = []
where_clause = ""
if region_name:
cond.append( "region='{0}' ".format(region_name) )
if filter_name:
cond.append( "lower(name) like '%{0}%' ".format(filter_name.lower()) )
if len(cond) > 0:
where_clause = 'and ' + 'and '.join( cond )
sqlcmd = "SELECT name,source_id,region,state,create_at,change_at FROM app where state<>'Terminated' %s " % where_clause
log = logging.getLogger(__name__)
log.debug('> sql : %s' % sqlcmd)
c.execute(sqlcmd)
rlt = c.fetchall()
conn.commit()
conn.close()
return rlt
except Exception,e:
        raise RuntimeError('list app failed! %s ' % e) | get local app list | Below is the instruction that describes the task:
### Input:
get local app list
### Response:
def get_app_list(region_name=None,filter_name=None):
"""
get local app list
"""
try:
conn = get_conn()
c = conn.cursor()
cond = []
where_clause = ""
if region_name:
cond.append( "region='{0}' ".format(region_name) )
if filter_name:
cond.append( "lower(name) like '%{0}%' ".format(filter_name.lower()) )
if len(cond) > 0:
where_clause = 'and ' + 'and '.join( cond )
sqlcmd = "SELECT name,source_id,region,state,create_at,change_at FROM app where state<>'Terminated' %s " % where_clause
log = logging.getLogger(__name__)
log.debug('> sql : %s' % sqlcmd)
c.execute(sqlcmd)
rlt = c.fetchall()
conn.commit()
conn.close()
return rlt
except Exception,e:
raise RuntimeError('list app failed! %s ' % e) |
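
The WHERE clause above is assembled by string interpolation, which is fragile when filter_name contains quotes. A hedged sketch of the same query using DB-API parameter binding, assuming get_conn() returns a connection whose driver uses the qmark paramstyle (e.g. sqlite3):

def get_app_list_parameterized(region_name=None, filter_name=None):
    conn = get_conn()
    c = conn.cursor()
    sql = ("SELECT name, source_id, region, state, create_at, change_at "
           "FROM app WHERE state <> 'Terminated'")
    params = []
    if region_name:
        sql += " AND region = ?"
        params.append(region_name)
    if filter_name:
        sql += " AND lower(name) LIKE ?"
        params.append('%' + filter_name.lower() + '%')
    c.execute(sql, params)
    rows = c.fetchall()
    conn.close()
    return rows
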
def visit_invenio_keyword_query(self, node):
"""Transform an :class:`InvenioKeywordQuery` into a :class:`KeywordOp`.
Notes:
In case the value being a :class:`SimpleValueBooleanQuery`, the subtree is transformed to chained
:class:`AndOp` queries containing :class:`KeywordOp`, whose keyword is the keyword of the current node and
values, all the :class:`SimpleValueBooleanQuery` values (either :class:`SimpleValues` or
:class:`SimpleValueNegation`.)
"""
try:
keyword = node.left.accept(self)
except AttributeError:
# The keywords whose values aren't an InspireKeyword are simple strings.
keyword = Keyword(node.left)
value = node.right.accept(self)
if isinstance(value, SimpleValueBooleanQuery):
return _convert_simple_value_boolean_query_to_and_boolean_queries(value, keyword)
return KeywordOp(keyword, value) | Transform an :class:`InvenioKeywordQuery` into a :class:`KeywordOp`.
Notes:
In case the value being a :class:`SimpleValueBooleanQuery`, the subtree is transformed to chained
:class:`AndOp` queries containing :class:`KeywordOp`, whose keyword is the keyword of the current node and
values, all the :class:`SimpleValueBooleanQuery` values (either :class:`SimpleValues` or
            :class:`SimpleValueNegation`.) | Below is the instruction that describes the task:
### Input:
Transform an :class:`InvenioKeywordQuery` into a :class:`KeywordOp`.
Notes:
In case the value being a :class:`SimpleValueBooleanQuery`, the subtree is transformed to chained
:class:`AndOp` queries containing :class:`KeywordOp`, whose keyword is the keyword of the current node and
values, all the :class:`SimpleValueBooleanQuery` values (either :class:`SimpleValues` or
:class:`SimpleValueNegation`.)
### Response:
def visit_invenio_keyword_query(self, node):
"""Transform an :class:`InvenioKeywordQuery` into a :class:`KeywordOp`.
Notes:
In case the value being a :class:`SimpleValueBooleanQuery`, the subtree is transformed to chained
:class:`AndOp` queries containing :class:`KeywordOp`, whose keyword is the keyword of the current node and
values, all the :class:`SimpleValueBooleanQuery` values (either :class:`SimpleValues` or
:class:`SimpleValueNegation`.)
"""
try:
keyword = node.left.accept(self)
except AttributeError:
# The keywords whose values aren't an InspireKeyword are simple strings.
keyword = Keyword(node.left)
value = node.right.accept(self)
if isinstance(value, SimpleValueBooleanQuery):
return _convert_simple_value_boolean_query_to_and_boolean_queries(value, keyword)
return KeywordOp(keyword, value) |
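
The node.accept(self) calls rely on visitor-style double dispatch: the node picks the visit_* method to run based on its own type. Reduced to a toy example that is independent of the parser's real AST classes:

class Keyword(object):
    def __init__(self, value):
        self.value = value
    def accept(self, visitor):
        return visitor.visit_keyword(self)

class UpperCaseVisitor(object):
    def visit_keyword(self, node):
        return node.value.upper()

print(Keyword("title").accept(UpperCaseVisitor()))  # TITLE
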
def is_time_variable(varname, var):
"""
    Identifies if a variable represents time
"""
satisfied = varname.lower() == 'time'
satisfied |= getattr(var, 'standard_name', '') == 'time'
satisfied |= getattr(var, 'axis', '') == 'T'
satisfied |= units_convertible('seconds since 1900-01-01', getattr(var, 'units', ''))
    return satisfied | Identifies if a variable represents time | Below is the instruction that describes the task:
### Input:
Identifies if a variable represents time
### Response:
def is_time_variable(varname, var):
"""
    Identifies if a variable represents time
"""
satisfied = varname.lower() == 'time'
satisfied |= getattr(var, 'standard_name', '') == 'time'
satisfied |= getattr(var, 'axis', '') == 'T'
satisfied |= units_convertible('seconds since 1900-01-01', getattr(var, 'units', ''))
return satisfied |
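
Any object exposing the right attributes is enough to exercise the checks; below is a stand-in for a netCDF variable (units_convertible is assumed to be the module's own udunits-style helper, which this excerpt does not show):

class FakeTimeVar(object):
    standard_name = 'time'
    axis = 'T'
    units = 'hours since 1970-01-01 00:00:00'

print(is_time_variable('time_counter', FakeTimeVar()))  # True
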
def get_dev_start_config(devId):
"""
function takes the devId of a specific device and issues a RESTFUL call to get the most current startup config
file as known by the HP IMC Base Platform ICC module for the target device.
:param devId: int or str value of the target device
:return: str which contains the entire content of the target device startup configuration. If the device is not
currently supported in the HP IMC Base Platform ICC module, this call returns a string of "This feature is not
supported on this device"
"""
# checks to see if the imc credentials are already available
if auth is None or url is None:
set_imc_creds()
global r
get_dev_run_url = "/imcrs/icc/deviceCfg/" + str(devId) + "/currentStart"
f_url = url + get_dev_run_url
payload = None
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=headers)
if r.status_code == 200:
start_conf = (json.loads(r.text))['content']
return start_conf
else:
# print (r.status_code)
return "This feature is not supported on this device" | function takes the devId of a specific device and issues a RESTFUL call to get the most current startup config
file as known by the HP IMC Base Platform ICC module for the target device.
:param devId: int or str value of the target device
:return: str which contains the entire content of the target device startup configuration. If the device is not
currently supported in the HP IMC Base Platform ICC module, this call returns a string of "This feature is not
supported on this device" | Below is the the instruction that describes the task:
### Input:
function takes the devId of a specific device and issues a RESTFUL call to get the most current startup config
file as known by the HP IMC Base Platform ICC module for the target device.
:param devId: int or str value of the target device
:return: str which contains the entire content of the target device startup configuration. If the device is not
currently supported in the HP IMC Base Platform ICC module, this call returns a string of "This feature is not
supported on this device"
### Response:
def get_dev_start_config(devId):
"""
function takes the devId of a specific device and issues a RESTFUL call to get the most current startup config
file as known by the HP IMC Base Platform ICC module for the target device.
:param devId: int or str value of the target device
:return: str which contains the entire content of the target device startup configuration. If the device is not
currently supported in the HP IMC Base Platform ICC module, this call returns a string of "This feature is not
supported on this device"
"""
# checks to see if the imc credentials are already available
if auth is None or url is None:
set_imc_creds()
global r
get_dev_run_url = "/imcrs/icc/deviceCfg/" + str(devId) + "/currentStart"
f_url = url + get_dev_run_url
payload = None
# creates the URL using the payload variable as the contents
r = requests.get(f_url, auth=auth, headers=headers)
if r.status_code == 200:
start_conf = (json.loads(r.text))['content']
return start_conf
else:
# print (r.status_code)
return "This feature is not supported on this device" |