def assertNotCookie(self, name, value=None, attrs={}, *args, **kwargs):
""" Negation of :meth:`assertCookie`. """
        return not self.assertCookie(name, value, attrs)
def main(
title,
authors,
year,
email,
journal='',
volume='',
number='',
pages='',
publisher='',
doi='',
tags=[],
DFT_code='Quantum ESPRESSO',
DFT_functionals=['BEEF-vdW'],
reactions=[
{'reactants': ['2.0H2Ogas', '-1.5H2gas', 'star'],
'products': ['OOHstar@ontop']}],
energy_corrections={},
bulk_compositions=['Pt', 'Ag'],
crystal_structures=['fcc', 'hcp'],
facets=['111'],
custom_base=None):
"""Automatically generate an organized folder structure for a DFT
calculation.
Start by copying the script to a folder in your username
and assign the right information to the arguments in the function.
You can change the parameters and run the script several times if you,
for example, are using different functionals or are doing different
reactions on different surfaces.
Remember to include the reaction that gives the adsorption energy of
reaction intermediates, taking gas phase molecules as references
    (preferably H2O, H2, CH4, CO, NH3).
Parameters
----------
title : str
Publication or working title if not yet published.
authors : list
Author names, e.g. ['Doe, John', 'Einstein, Albert']
year : str
        Year of publication (or submission, if not yet published).
email : str
email address of the person responsible for uploading.
Login at catalysis-hub.org currently only supports @gmail or
Slack login email addresses.
journal : str
        Journal name of the publication.
    volume : str
        Volume number of the publication.
    number : str
        Issue number of the publication.
pages : str
Publication page numbers
publisher : str
Publisher name
doi : str, optional
DOI of publication
tags : list, optional
        User-defined tags.
DFT_code : str
e.g. 'Quantum ESPRESSO'
DFT_functionals : list of str
Calculator functional used, e.g. 'BEEF-vdW'
reactions : list of dict
A new dictionary is required for each reaction, and should include two
lists, 'reactants' and 'products'. Remember to include a minus sign and
prefactor in the name when relevant. If your reaction is not balanced,
you will receive an error when running the script.
Include the phase if mixing gas phase and surface phase.
e.g. 'star' for empty site or adsorbed phase, 'gas' if in gas phase.
Include the adsorption site if relevant.
e.g. star@top or star@bridge.
For example, we can write an entry for the adsorption of CH2:
CH4(g) - H2(g) + * -> CH2*
as:
        {'reactants': ['CH4gas', '-H2gas', 'star'],
'products': ['CH2star@bridge']}
A complete entry could read:
reactions = [
{'reactants': ['CH4gas', '-H2gas', 'star'],
'products': ['CH2star@bridge']},
{'reactants': ['CH4gas', '-0.5H2gas', 'star'],
'products': ['CH3star@top']}]
energy_corrections : dict, optional
e.g. {'H2gas': 0.1}
bulk_compositions : list of str
e.g. ['Pt', 'Ag']
crystal_structures : list of str
e.g. ['fcc', 'hcp']
facets : list
        For complicated structures, use the term you would use in the publication.
e.g. ['111']
custom_base : str
TODO
"""
for reaction in reactions:
check_reaction(reaction['reactants'], reaction['products'])
# Set up directories
if custom_base is not None:
base = custom_base + '/'
else:
catbase = os.path.abspath(os.path.curdir)
base = '%s/%s/' % (catbase, username)
if not os.path.exists(base):
os.mkdir(base)
publication_shortname = get_pub_id(title, authors, year)
publication_base = base + publication_shortname + '/'
if not os.path.exists(publication_base):
os.mkdir(publication_base)
# save publication info to publications.txt
publication_dict = {'title': title,
'authors': authors,
'journal': journal,
'volume': volume,
'number': number,
'pages': pages,
'year': year,
'email': email,
'publisher': publisher,
'doi': doi,
'tags': tags
}
pub_txt = publication_base + 'publication.txt'
with open(pub_txt, 'w') as f:
yaml.dump(publication_dict, f)
    if energy_corrections:
energy_txt = publication_base + 'energy_corrections.txt'
with open(energy_txt, 'w') as fe:
yaml.dump(energy_corrections, fe)
def create(path):
if not os.path.exists(path):
os.mkdir(path)
return path
base = create(publication_base + DFT_code + '/')
bulk_bases = []
gas_bases = []
for DFT_functional in DFT_functionals:
bulk_bases += [create(base + DFT_functional + '/')]
gas_bases += [create(base + DFT_functional + '/gas/')]
gas_names = []
ads_names = []
for i in range(len(reactions)):
rnames = [r.split('@')[0] for r in reactions[i]['reactants'] +
reactions[i]['products']]
states = [get_state(r) for r in rnames]
gas_names += [clear_state(clear_prefactor(rnames[i]))
for i in range(len(states)) if states[i] == 'gas']
for gas_base in gas_bases:
for name in set(gas_names):
with open(gas_base + 'MISSING:{}_gas'.format(name), 'w'):
pass
for bulk_base in bulk_bases:
for bulk in bulk_compositions:
for crystal_structure in crystal_structures:
bulk_name = bulk + '_' + crystal_structure
facet_base = create(bulk_base + bulk_name + '/')
with open(facet_base + 'MISSING:{}_bulk'.format(bulk_name),
'w'):
pass
for facet in facets:
reaction_base = create(facet_base + facet + '/')
with open(reaction_base + 'MISSING:empty_slab'
.format(bulk_name), 'w'):
pass
for i in range(len(reactions)):
rname = '_'.join(reactions[i]['reactants'])
pname = '_'.join(reactions[i]['products'])
reaction_name = '__'.join([rname, pname])
base = create(reaction_base + reaction_name + '/')
rnames = [r.split('@')[0] for r in
reactions[i]['reactants'] +
reactions[i]['products']]
states = [get_state(r) for r in rnames]
ads_names = [clear_prefactor(clear_state(rnames[i]))
for i in range(len(states))
if states[i] == 'star']
for ads in ads_names:
if ads == '':
continue
with open(base + 'MISSING:{}_slab'.format(ads),
'w'):
pass
with open(base + 'MISSING:TS?'.format(ads),
'w'):
pass
    print('Folders were successfully created under {}'.format(publication_base))
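
A minimal call sketch for the function above. It assumes the module's helpers (check_reaction, get_pub_id, get_state, clear_state, clear_prefactor) and the module-level `username` are available, and every argument value here is illustrative only:

main(title='CO adsorption on transition metals',
     authors=['Doe, John', 'Einstein, Albert'],
     year='2019',
     email='john.doe@gmail.com',
     journal='ACS Catalysis',
     DFT_code='Quantum ESPRESSO',
     DFT_functionals=['BEEF-vdW'],
     reactions=[
         {'reactants': ['CH4gas', '-H2gas', 'star'],
          'products': ['CH2star@bridge']}],
     bulk_compositions=['Pt'],
     crystal_structures=['fcc'],
     facets=['111'])
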
def fetch_state_data(self, states):
"""
        Fetch census estimates for the given states from each census table.
"""
print("Fetching census data")
for table in CensusTable.objects.all():
api = self.get_series(table.series)
for variable in table.variables.all():
estimate = "{}_{}".format(table.code, variable.code)
print(
">> Fetching {} {} {}".format(
table.year, table.series, estimate
)
)
for state in tqdm(states):
self.get_state_estimates_by_state(
api=api,
table=table,
variable=variable,
estimate=estimate,
state=state,
)
self.get_county_estimates_by_state(
api=api,
table=table,
variable=variable,
estimate=estimate,
state=state,
)
self.get_district_estimates_by_state(
api=api,
table=table,
variable=variable,
estimate=estimate,
state=state,
                    )
def run(self, host, port, **options):
"""For debugging purposes, you can run this as a standalone server.
.. WARNING:: **Security vulnerability**
This uses :class:`DebuggedJsonRpcApplication` to assist debugging. If you want to use
this in production, you should run :class:`Server` as a standard WSGI app with
`uWSGI <https://uwsgi-docs.readthedocs.org/en/latest/>`_ or another similar WSGI server.
.. versionadded:: 0.1.0
"""
self.registry.debug = True
debugged = DebuggedJsonRpcApplication(self, evalex=True)
        run_simple(host, port, debugged, use_reloader=True, **options)
def normal_mean(data, variance):
""" Creates a segment cost function for a time series with a
Normal distribution with changing mean
Args:
data (:obj:`list` of float): 1D time series data
variance (float): variance
Returns:
function: Function with signature
(int, int) -> float
where the first arg is the starting index, and the second
is the last arg. Returns the cost of that segment
"""
if not isinstance(data, np.ndarray):
data = np.array(data)
i_variance_2 = 1 / (variance ** 2)
cmm = [0.0]
cmm.extend(np.cumsum(data))
cmm2 = [0.0]
cmm2.extend(np.cumsum(np.abs(data)))
def cost(start, end):
""" Cost function for normal distribution with variable mean
Args:
start (int): start index
end (int): end index
Returns:
float: Cost, from start to end
"""
cmm2_diff = cmm2[end] - cmm2[start]
cmm_diff = pow(cmm[end] - cmm[start], 2)
i_diff = end - start
diff = cmm2_diff - cmm_diff
return (diff/i_diff) * i_variance_2
    return cost
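
A short usage sketch for normal_mean; the data is synthetic and only illustrates the (start, end) -> float signature of the returned cost closure:

import numpy as np

np.random.seed(0)
data = np.concatenate([np.random.normal(0.0, 1.0, 100),
                       np.random.normal(5.0, 1.0, 100)])  # mean shift at index 100
cost = normal_mean(data, variance=1.0)
print(cost(0, 100))   # cost of the first, constant-mean segment
print(cost(0, 200))   # cost of the full series, which mixes both means
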
def finalize(self):
"""
Prepare the figure for rendering by setting the title and adjusting
the limits on the axes, adding labels and a legend.
"""
# Set the title
self.set_title((
"Silhouette Plot of {} Clustering for {} Samples in {} Centers"
).format(
self.name, self.n_samples_, self.n_clusters_
))
# Set the X and Y limits
# The silhouette coefficient can range from -1, 1;
# but here we scale the plot according to our visualizations
# l_xlim and u_xlim are lower and upper limits of the x-axis,
# set according to our calculated maximum and minimum silhouette score along with necessary padding
l_xlim = max(-1, min(-0.1, round(min(self.silhouette_samples_) - 0.1, 1)))
u_xlim = min(1, round(max(self.silhouette_samples_) + 0.1, 1))
self.ax.set_xlim([l_xlim, u_xlim])
# The (n_clusters_+1)*10 is for inserting blank space between
# silhouette plots of individual clusters, to demarcate them clearly.
self.ax.set_ylim([0, self.n_samples_ + (self.n_clusters_ + 1) * 10])
# Set the x and y labels
self.ax.set_xlabel("silhouette coefficient values")
self.ax.set_ylabel("cluster label")
# Set the ticks on the axis object.
self.ax.set_yticks([]) # Clear the yaxis labels / ticks
        self.ax.xaxis.set_major_locator(ticker.MultipleLocator(0.1))
def methods(self):
"""
Returns all documented methods as `pydoc.Function` objects in
the class, sorted alphabetically with `__init__` always coming
first.
Unfortunately, this also includes class methods.
"""
p = lambda o: (isinstance(o, Function)
and o.method
and self.module._docfilter(o))
        return filter(p, self.doc.values())
def assign_load_feedin_case(network):
"""
For each time step evaluate whether it is a feed-in or a load case.
Feed-in and load case are identified based on the
generation and load time series and defined as follows:
1. Load case: positive (load - generation) at HV/MV substation
2. Feed-in case: negative (load - generation) at HV/MV substation
Output of this function is written to `timesteps_load_feedin_case`
attribute of the network.timeseries (see
:class:`~.grid.network.TimeSeries`).
Parameters
----------
network : :class:`~.grid.network.Network`
Network for which worst-case snapshots are identified.
Returns
--------
:pandas:`pandas.DataFrame<dataframe>`
Dataframe with information on whether time step is handled as load case
('load_case') or feed-in case ('feedin_case') for each time step in
`timeindex` attribute of network.timeseries.
Index of the dataframe is network.timeseries.timeindex. Columns of the
dataframe are 'residual_load' with (load - generation) in kW at HV/MV
substation and 'case' with 'load_case' for positive residual load and
'feedin_case' for negative residual load.
"""
if network.pypsa is not None:
residual_load = get_residual_load_from_pypsa_network(network.pypsa) * \
1e3
else:
grids = [network.mv_grid] + list(network.mv_grid.lv_grids)
gens = []
loads = []
for grid in grids:
gens.extend(grid.generators)
gens.extend(list(grid.graph.nodes_by_attribute('storage')))
loads.extend(list(grid.graph.nodes_by_attribute('load')))
generation_timeseries = pd.Series(
0, index=network.timeseries.timeindex)
for gen in gens:
generation_timeseries += gen.timeseries.p
load_timeseries = pd.Series(0, index=network.timeseries.timeindex)
for load in loads:
load_timeseries += load.timeseries.p
residual_load = load_timeseries - generation_timeseries
timeseries_load_feedin_case = residual_load.rename(
'residual_load').to_frame()
timeseries_load_feedin_case['case'] = \
timeseries_load_feedin_case.residual_load.apply(
lambda _: 'feedin_case' if _ < 0 else 'load_case')
    return timeseries_load_feedin_case
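
The final classification step can be illustrated on its own with a plain pandas Series, without constructing a network object; the values and index below are made up:

import pandas as pd

residual_load = pd.Series([120.0, -35.0, 0.0, -410.0],
                          index=pd.date_range('2019-01-01', periods=4, freq='H'),
                          name='residual_load')
cases = residual_load.to_frame()
cases['case'] = cases.residual_load.apply(
    lambda _: 'feedin_case' if _ < 0 else 'load_case')
print(cases)   # rows with negative residual load are labelled 'feedin_case'
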
def punctuate_authorname(an):
"""Punctuate author names properly.
Expects input in the form 'Bloggs, J K' and will return 'Bloggs, J. K.'.
"""
name = an.strip()
parts = [x for x in name.split(',') if x != '']
ret_str = ''
for idx, part in enumerate(parts):
subparts = part.strip().split(' ')
for sidx, substr in enumerate(subparts):
ret_str += substr
if len(substr) == 1:
ret_str += '.'
if sidx < (len(subparts) - 1):
ret_str += ' '
if idx < (len(parts) - 1):
ret_str += ', '
    return ret_str.strip()
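
Two hedged examples of the behaviour described in the docstring (the second input is only there to show a multi-word surname passing through unchanged):

print(punctuate_authorname('Bloggs, J K'))   # -> 'Bloggs, J. K.'
print(punctuate_authorname('van Gogh, V'))   # -> 'van Gogh, V.'
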
def _exec_process(cmd_list, base_context, instance=0, log=None):
"""
Process execution tool.
The forks and execs a process with args formatted according to a context.
This is implemented as a module function to make it available to
event_targets, legion and tasks.
The args are:
cmd_list - The path and arg vector
context - Task's context
instance - An integer instance number used with multi-process tasks
log - Logging object (default is nothing logged).
The context is used to format command args. In addition, these values will
be used to change the process execution environment:
procname - Changes the process name of the executed command (but not the path executed).
user - Does a setuid for the process
group - Does a setgid for the process
cwd - Does a chdir before executing
The passed context is extended to include these specific runtime values which
are only available for cmd_list substitution.
context_prefix+'pid' - The process ID of the child process
context_prefix+'instance' - The instance number (0 if not provided)
context_prefix+'uid' - The numeric uid (based on 'user' if set, getuid() otherwise)
context_prefix+'gid' - The numeric gid (based on 'group' if set, getgid() otherwise)
"""
if not log: # pragma: no cover
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
# Get a copy of the context so changes here will not affect the
# task's base context.
#
context = base_context.copy()
# Make sure we have a normalized clone of the cmd_list
#
cmd_list = list(cmd_list)
name = context.get(context_prefix+'name', cmd_list[0])
log.debug("Starting %s instance %d", name, instance)
procname = _fmt_context(context.get(context_prefix+'procname'), context)
user = _fmt_context(context.get(context_prefix+'user'), context)
group = _fmt_context(context.get(context_prefix+'group'), context)
cwd = _fmt_context(context.get(context_prefix+'cwd'), context)
# Do the user setup early so we can throw an Exception on failure.
# Identity errors are considered fatal as we do not want to run
# a process at a higher priv if it was explicitly set to something
# else.
#
proc_uid = os.geteuid()
proc_gid = os.getegid()
do_setuid = (proc_uid != os.getuid())
do_setgid = (proc_gid != os.getgid())
if user is not None:
pw = None
try:
uid = int(user)
try: pw = pwd.getpwuid(uid)
except: pass # pragma: no cover
except: pass
if pw is None:
try:
pw = pwd.getpwnam(user)
except Exception as e:
raise TaskError(name, "Bad user %r -- %s" % (user, e))
if proc_uid != pw.pw_uid:
proc_uid = pw.pw_uid
do_setuid = True
if proc_gid != pw.pw_gid:
proc_gid = pw.pw_gid
do_setgid = True
if group is not None:
gr = None
try:
gid = int(group)
try: gr = grp.getgrgid(gid)
except: pass
except: pass
if gr is None:
try:
gr = grp.getgrnam(group)
except Exception as e:
raise TaskError(name, "Bad group %r -- %s" % (group, e))
if proc_uid is not None and proc_gid != gr.gr_gid:
log.info("gid for user %r (%d) overridden by group %r (%d)", user, proc_gid, group, gr.gr_gid)
proc_gid = gr.gr_gid
do_setgid = True
if cwd is not None and not os.path.isdir(cwd):
raise TaskError(name, "Directory for cwd setting '%s' does not exist" % (cwd,))
# Add in per-process context
#
context[context_prefix+'instance'] = instance
context[context_prefix+'started'] = time.time()
context[context_prefix+'uid'] = proc_uid
context[context_prefix+'gid'] = proc_gid
pid = os.fork()
# Parent just returns pid
if pid > 0:
return pid
# This section is processing the child. Exceptions from this point must
# never escape to outside handlers or we might create zombie init tasks.
#
try:
# Add the pid to the context now that we have it.
#
context[context_prefix+'pid'] = os.getpid()
# Set up the requested process environment
#
if do_setgid:
try:
os.setgid(proc_gid)
log.debug("Setgid to %d succeeded in child '%s', instance %d", proc_gid, name, instance)
except Exception as e:
log.error("Setgid to %d failed in child %r, instance %d -- %s",
proc_gid, name, instance, e, exc_info=log.isEnabledFor(logging.DEBUG))
os._exit(81)
if do_setuid:
try:
os.setuid(proc_uid)
log.debug("Setuid to %d succeeded in child '%s', instance %d", proc_uid, name, instance)
except Exception as e:
log.error("Setuid to %d failed in child %r, instance %d -- %s",
proc_uid, name, instance, e, exc_info=log.isEnabledFor(logging.DEBUG))
os._exit(82)
if cwd is not None:
try:
os.chdir(cwd)
log.debug("Chdir to '%s' succeeded in child '%s', instance %d", cwd, name, instance)
except Exception as e:
log.error("Chdir to '%s' failed in child %r, instance %d -- %s",
cwd, name, instance, e, exc_info=log.isEnabledFor(logging.DEBUG))
os._exit(83)
# Build formatted command
#
prog = _fmt_context(cmd_list[0], context)
cmd = []
if procname:
cmd_list.pop(0)
cmd.append(_fmt_context(context['procname'], context))
for a in cmd_list:
cmd.append(_fmt_context(a, context))
log.info("child, Execing: %s <%s>", prog, utils.format_cmd(cmd))
except Exception as e:
# Log any exceptions here while we still can. After the closeall,
# bets are off.
#
log.error("Child processing failed for task %r, instance %d -- %s",
name, instance, e, exc_info=log.isEnabledFor(logging.DEBUG))
os._exit(84)
try:
retain_fds = [0,1,2]
for log_fd in utils.log_filenos(log):
if log_fd not in retain_fds:
retain_fds.append(log_fd)
utils.closeall(exclude=retain_fds)
fd = None
try: os.close(0)
except: pass
try:
fd = os.open(std_process_dest, os.O_RDONLY)
except Exception as e:
log.error("child read open of %s failed -- %s", std_process_dest, e)
if fd != 0:
log.error("child failed to redirect stdin to %s", std_process_dest)
try: os.close(1)
except: pass
try:
fd = os.open('/dev/null', os.O_WRONLY)
except Exception as e:
log.error("child write open of %s failed -- %s", std_process_dest, e)
if fd != 1:
log.error("child failed to redirect stdout to %s", std_process_dest)
# Build a fresh environment based on context, with None values excluded and
# all other values as strings, formatted where appropriate:
#
env = {}
for tag, val in context.items():
if val is None:
continue
val = _fmt_context(str(val), context)
if val is not None:
env[tag] = val
except Exception as e:
# At this point we can still send logs to stderr, so log these
# too, just in case.
#
log.error("Child processing failed for task %r, instance %d -- %s",
name, instance, e, exc_info=log.isEnabledFor(logging.DEBUG))
os._exit(85)
try:
try: os.close(2)
except: pass
try: os.dup(1)
except: pass
os.execvpe(prog, cmd, env)
except:
pass
# There is no way to report an exception here, so hopefully the exit code will
# be evidence enough. When child output logging is supported, this can be reworked.
#
    os._exit(86)
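
A minimal parent-side sketch, assuming the surrounding module's names (context_prefix, std_process_dest, utils, _fmt_context) are available as in that module; the command and the empty context are purely illustrative:

import logging
import os

log = logging.getLogger('exec_demo')
logging.basicConfig(level=logging.DEBUG)

context = {}   # normally a copy of the task's context values
pid = _exec_process(['/bin/echo', 'hello world'], context, instance=0, log=log)

# The parent receives the child's pid and is responsible for reaping it.
_, status = os.waitpid(pid, 0)
log.info('child exited with status %d', os.WEXITSTATUS(status))
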
def nii_gzip(imfile, outpath=''):
    '''Compress a file to *.gz.'''
import gzip
with open(imfile, 'rb') as f:
d = f.read()
# Now store the compressed data
if outpath=='':
fout = imfile+'.gz'
else:
fout = os.path.join(outpath, os.path.basename(imfile)+'.gz')
# store compressed file data from 'd' variable
with gzip.open(fout, 'wb') as f:
f.write(d)
    return fout
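
A small usage sketch; the file name is hypothetical and `os` is assumed to be imported at module level, as the function itself relies on it:

compressed = nii_gzip('subject01_T1.nii', outpath='/tmp')
print(compressed)   # -> '/tmp/subject01_T1.nii.gz'
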
def already_pending_upgrade(self, upgrade_type: UpgradeId) -> Union[int, float]:
""" Check if an upgrade is being researched
Return values:
0: not started
0 < x < 1: researching
1: finished
"""
assert isinstance(upgrade_type, UpgradeId)
if upgrade_type in self.state.upgrades:
return 1
level = None
if "LEVEL" in upgrade_type.name:
level = upgrade_type.name[-1]
creationAbilityID = self._game_data.upgrades[upgrade_type.value].research_ability.id
for structure in self.units.structure.ready:
for order in structure.orders:
if order.ability.id is creationAbilityID:
if level and order.ability.button_name[-1] != level:
return 0
return order.progress
        return 0
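
A fragment showing how the three return ranges would typically be handled inside a bot's on_step; the UpgradeId import path and the ZERGLINGMOVEMENTSPEED member are assumptions based on python-sc2 conventions:

from sc2.ids.upgrade_id import UpgradeId

async def on_step(self, iteration):
    progress = self.already_pending_upgrade(UpgradeId.ZERGLINGMOVEMENTSPEED)
    if progress == 0:
        pass      # research not started yet
    elif progress < 1:
        pass      # research underway; progress is the fraction completed
    else:
        pass      # already finished (present in self.state.upgrades)
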
def down(job, input_file_id, n, down_checkpoints):
"""Input is a file and a range into that file to sort and an output location in which
to write the sorted file.
If the range is larger than a threshold N the range is divided recursively and
a follow on job is then created which merges back the results. Otherwise,
the file is sorted and placed in the output.
"""
# Read the file
input_file = job.fileStore.readGlobalFile(input_file_id, cache=False)
length = os.path.getsize(input_file)
if length > n:
# We will subdivide the file
job.fileStore.logToMaster("Splitting file: %s of size: %s"
% (input_file_id, length), level=logging.CRITICAL)
# Split the file into two copies
mid_point = get_midpoint(input_file, 0, length)
t1 = job.fileStore.getLocalTempFile()
with open(t1, 'w') as fH:
copy_subrange_of_file(input_file, 0, mid_point + 1, fH)
t2 = job.fileStore.getLocalTempFile()
with open(t2, 'w') as fH:
copy_subrange_of_file(input_file, mid_point + 1, length, fH)
# Call the down function recursively
return job.addFollowOnJobFn(up, job.addChildJobFn(down, job.fileStore.writeGlobalFile(t1), n,
down_checkpoints=down_checkpoints, memory='600M').rv(),
job.addChildJobFn(down, job.fileStore.writeGlobalFile(t2), n,
down_checkpoints=down_checkpoints,
memory='600M').rv()).rv()
else:
# We can sort this bit of the file
job.fileStore.logToMaster("Sorting file: %s of size: %s"
% (input_file_id, length), level=logging.CRITICAL)
# Sort the copy and write back to the fileStore
output_file = job.fileStore.getLocalTempFile()
sort(input_file, output_file)
        return job.fileStore.writeGlobalFile(output_file)
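
Toil aside, the divide-below-threshold strategy is easy to see on a plain list of lines; this standalone sketch mirrors the same recursion, with n as the size threshold and heapq.merge standing in for the follow-on merge job:

import heapq

def sort_lines(lines, n=4):
    # Split recursively until a chunk holds at most n lines, then sort and merge.
    if len(lines) <= n:
        return sorted(lines)
    mid = len(lines) // 2
    left = sort_lines(lines[:mid], n)
    right = sort_lines(lines[mid:], n)
    return list(heapq.merge(left, right))

print(sort_lines(['pear\n', 'apple\n', 'kiwi\n', 'fig\n', 'plum\n', 'date\n'], n=2))
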
def check_new_round(hours=24, tournament=1):
"""Check if a new round has started within the last `hours`."""
    click.echo(int(napi.check_new_round(hours=hours, tournament=tournament)))
def close(self):
"""Close a port on dummy_serial."""
if VERBOSE:
_print_out('\nDummy_serial: Closing port\n')
if not self._isOpen:
raise IOError('Dummy_serial: The port is already closed')
self._isOpen = False
self.port = None | Close a port on dummy_serial. | Below is the the instruction that describes the task:
### Input:
Close a port on dummy_serial.
### Response:
def close(self):
"""Close a port on dummy_serial."""
if VERBOSE:
_print_out('\nDummy_serial: Closing port\n')
if not self._isOpen:
raise IOError('Dummy_serial: The port is already closed')
self._isOpen = False
self.port = None |
def readQword(self):
"""
Reads a qword value from the L{ReadData} stream object.
@rtype: int
@return: The qword value read from the L{ReadData} stream.
"""
        qword = unpack(self.endianness + ('Q' if not self.signed else 'q'), self.readAt(self.offset, 8))[0]
self.offset += 8
return qword | Reads a qword value from the L{ReadData} stream object.
@rtype: int
@return: The qword value read from the L{ReadData} stream. | Below is the the instruction that describes the task:
### Input:
Reads a qword value from the L{ReadData} stream object.
@rtype: int
@return: The qword value read from the L{ReadData} stream.
### Response:
def readQword(self):
"""
Reads a qword value from the L{ReadData} stream object.
@rtype: int
@return: The qword value read from the L{ReadData} stream.
"""
        qword = unpack(self.endianness + ('Q' if not self.signed else 'q'), self.readAt(self.offset, 8))[0]
self.offset += 8
return qword |
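As a side note on the struct format characters used above: 'Q' decodes eight bytes as an unsigned 64-bit integer and 'q' as a signed one. A tiny standalone illustration, independent of the ReadData class:

from struct import unpack
raw = b'\xff' * 8
print(unpack('<Q', raw)[0])   # 18446744073709551615 (unsigned 64-bit)
print(unpack('<q', raw)[0])   # -1 (signed 64-bit)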
def single(fun, name, test=None, **kwargs):
'''
.. versionadded:: 2015.5.0
Execute a single state function with the named kwargs, returns False if
insufficient data is sent to the command
By default, the values of the kwargs will be parsed as YAML. So, you can
specify lists values, or lists of single entry key-value maps, as you
would in a YAML salt file. Alternatively, JSON format of keyword values
is also supported.
CLI Example:
.. code-block:: bash
salt '*' state.single pkg.installed name=vim
'''
st_kwargs = __salt__.kwargs
__opts__['grains'] = __grains__
# state.fun -> [state, fun]
comps = fun.split('.')
if len(comps) < 2:
__context__['retcode'] = 1
return 'Invalid function passed'
# Create the low chunk, using kwargs as a base
kwargs.update({'state': comps[0],
'fun': comps[1],
'__id__': name,
'name': name})
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
# Set test mode
if salt.utils.args.test_mode(test=test, **kwargs):
opts['test'] = True
else:
opts['test'] = __opts__.get('test', None)
# Get the override pillar data
__pillar__.update(kwargs.get('pillar', {}))
# Create the State environment
st_ = salt.client.ssh.state.SSHState(opts, __pillar__)
# Verify the low chunk
err = st_.verify_data(kwargs)
if err:
__context__['retcode'] = 1
return err
# Must be a list of low-chunks
chunks = [kwargs]
# Retrieve file refs for the state run, so we can copy relevant files down
# to the minion before executing the state
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
opts.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(opts, opts.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'],
roster_grains)
# Create a hash so we can verify the tar on the target system
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, opts['hash_type'])
# We use state.pkg to execute the "state package"
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
opts['thin_dir'],
test,
trans_tar_sum,
opts['hash_type'])
# Create a salt-ssh Single object to actually do the ssh work
single = salt.client.ssh.Single(
opts,
cmd,
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
# Copy the tar down
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(opts['thin_dir']))
# Run the state.pkg command on the target
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
try:
os.remove(trans_tar)
except (OSError, IOError):
pass
# Read in the JSON data and return the data structure
try:
return salt.utils.json.loads(stdout)
except Exception as e:
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(six.text_type(e))
# If for some reason the json load fails, return the stdout
return stdout | .. versionadded:: 2015.5.0
Execute a single state function with the named kwargs, returns False if
insufficient data is sent to the command
By default, the values of the kwargs will be parsed as YAML. So, you can
specify lists values, or lists of single entry key-value maps, as you
would in a YAML salt file. Alternatively, JSON format of keyword values
is also supported.
CLI Example:
.. code-block:: bash
salt '*' state.single pkg.installed name=vim | Below is the the instruction that describes the task:
### Input:
.. versionadded:: 2015.5.0
Execute a single state function with the named kwargs, returns False if
insufficient data is sent to the command
By default, the values of the kwargs will be parsed as YAML. So, you can
specify lists values, or lists of single entry key-value maps, as you
would in a YAML salt file. Alternatively, JSON format of keyword values
is also supported.
CLI Example:
.. code-block:: bash
salt '*' state.single pkg.installed name=vim
### Response:
def single(fun, name, test=None, **kwargs):
'''
.. versionadded:: 2015.5.0
Execute a single state function with the named kwargs, returns False if
insufficient data is sent to the command
By default, the values of the kwargs will be parsed as YAML. So, you can
specify lists values, or lists of single entry key-value maps, as you
would in a YAML salt file. Alternatively, JSON format of keyword values
is also supported.
CLI Example:
.. code-block:: bash
salt '*' state.single pkg.installed name=vim
'''
st_kwargs = __salt__.kwargs
__opts__['grains'] = __grains__
# state.fun -> [state, fun]
comps = fun.split('.')
if len(comps) < 2:
__context__['retcode'] = 1
return 'Invalid function passed'
# Create the low chunk, using kwargs as a base
kwargs.update({'state': comps[0],
'fun': comps[1],
'__id__': name,
'name': name})
opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
# Set test mode
if salt.utils.args.test_mode(test=test, **kwargs):
opts['test'] = True
else:
opts['test'] = __opts__.get('test', None)
# Get the override pillar data
__pillar__.update(kwargs.get('pillar', {}))
# Create the State environment
st_ = salt.client.ssh.state.SSHState(opts, __pillar__)
# Verify the low chunk
err = st_.verify_data(kwargs)
if err:
__context__['retcode'] = 1
return err
# Must be a list of low-chunks
chunks = [kwargs]
# Retrieve file refs for the state run, so we can copy relevant files down
# to the minion before executing the state
file_refs = salt.client.ssh.state.lowstate_file_refs(
chunks,
_merge_extra_filerefs(
kwargs.get('extra_filerefs', ''),
opts.get('extra_filerefs', '')
)
)
roster = salt.roster.Roster(opts, opts.get('roster', 'flat'))
roster_grains = roster.opts['grains']
# Create the tar containing the state pkg and relevant files.
trans_tar = salt.client.ssh.state.prep_trans_tar(
__context__['fileclient'],
chunks,
file_refs,
__pillar__,
st_kwargs['id_'],
roster_grains)
# Create a hash so we can verify the tar on the target system
trans_tar_sum = salt.utils.hashutils.get_hash(trans_tar, opts['hash_type'])
# We use state.pkg to execute the "state package"
cmd = 'state.pkg {0}/salt_state.tgz test={1} pkg_sum={2} hash_type={3}'.format(
opts['thin_dir'],
test,
trans_tar_sum,
opts['hash_type'])
# Create a salt-ssh Single object to actually do the ssh work
single = salt.client.ssh.Single(
opts,
cmd,
fsclient=__context__['fileclient'],
minion_opts=__salt__.minion_opts,
**st_kwargs)
# Copy the tar down
single.shell.send(
trans_tar,
'{0}/salt_state.tgz'.format(opts['thin_dir']))
# Run the state.pkg command on the target
stdout, stderr, _ = single.cmd_block()
# Clean up our tar
try:
os.remove(trans_tar)
except (OSError, IOError):
pass
# Read in the JSON data and return the data structure
try:
return salt.utils.json.loads(stdout)
except Exception as e:
log.error("JSON Render failed for: %s\n%s", stdout, stderr)
log.error(six.text_type(e))
# If for some reason the json load fails, return the stdout
return stdout |
def delete_item(self, item):
''' removes an item from the db '''
for relation in self.relations_of(item):
self.delete_relation(item, relation)
for origin, relation in self.relations_to(item, True):
self.delete_relation(origin, relation, item)
with self._write_lock:
self._execute('''
DELETE from objects where code=?
''', (self.serialize(item),))
self.autocommit() | removes an item from the db | Below is the the instruction that describes the task:
### Input:
removes an item from the db
### Response:
def delete_item(self, item):
''' removes an item from the db '''
for relation in self.relations_of(item):
self.delete_relation(item, relation)
for origin, relation in self.relations_to(item, True):
self.delete_relation(origin, relation, item)
with self._write_lock:
self._execute('''
DELETE from objects where code=?
''', (self.serialize(item),))
self.autocommit() |
def GetAll(alias=None,location=None,session=None):
"""Gets a list of anti-affinity policies within a given account.
https://t3n.zendesk.com/entries/44657214-Get-Anti-Affinity-Policies
>>> clc.v2.AntiAffinity.GetAll()
[<clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65e910>, <clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65ec90>]
"""
if not alias: alias = clc.v2.Account.GetAlias(session=session)
policies = []
policy_resp = clc.v2.API.Call('GET','antiAffinityPolicies/%s' % alias,{},session=session)
for k in policy_resp:
r_val = policy_resp[k]
for r in r_val:
if r.get('location'):
if location and r['location'].lower()!=location.lower(): continue
servers = [obj['id'] for obj in r['links'] if obj['rel'] == "server"]
policies.append(AntiAffinity(id=r['id'],name=r['name'],location=r['location'],servers=servers,session=session))
return(policies) | Gets a list of anti-affinity policies within a given account.
https://t3n.zendesk.com/entries/44657214-Get-Anti-Affinity-Policies
>>> clc.v2.AntiAffinity.GetAll()
[<clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65e910>, <clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65ec90>] | Below is the the instruction that describes the task:
### Input:
Gets a list of anti-affinity policies within a given account.
https://t3n.zendesk.com/entries/44657214-Get-Anti-Affinity-Policies
>>> clc.v2.AntiAffinity.GetAll()
[<clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65e910>, <clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65ec90>]
### Response:
def GetAll(alias=None,location=None,session=None):
"""Gets a list of anti-affinity policies within a given account.
https://t3n.zendesk.com/entries/44657214-Get-Anti-Affinity-Policies
>>> clc.v2.AntiAffinity.GetAll()
[<clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65e910>, <clc.APIv2.anti_affinity.AntiAffinity object at 0x10c65ec90>]
"""
if not alias: alias = clc.v2.Account.GetAlias(session=session)
policies = []
policy_resp = clc.v2.API.Call('GET','antiAffinityPolicies/%s' % alias,{},session=session)
for k in policy_resp:
r_val = policy_resp[k]
for r in r_val:
if r.get('location'):
if location and r['location'].lower()!=location.lower(): continue
servers = [obj['id'] for obj in r['links'] if obj['rel'] == "server"]
policies.append(AntiAffinity(id=r['id'],name=r['name'],location=r['location'],servers=servers,session=session))
return(policies) |
def perez(surface_tilt, surface_azimuth, dhi, dni, dni_extra,
solar_zenith, solar_azimuth, airmass,
model='allsitescomposite1990', return_components=False):
'''
Determine diffuse irradiance from the sky on a tilted surface using
one of the Perez models.
Perez models determine the diffuse irradiance from the sky (ground
reflected irradiance is not included in this algorithm) on a tilted
surface using the surface tilt angle, surface azimuth angle, diffuse
horizontal irradiance, direct normal irradiance, extraterrestrial
irradiance, sun zenith angle, sun azimuth angle, and relative (not
pressure-corrected) airmass. Optionally a selector may be used to
use any of Perez's model coefficient sets.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. surface_tilt must be >=0
and <=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth : numeric
Surface azimuth angles in decimal degrees. surface_azimuth must
be >=0 and <=360. The azimuth convention is defined as degrees
east of north (e.g. North = 0, South=180 East = 90, West = 270).
dhi : numeric
Diffuse horizontal irradiance in W/m^2. DHI must be >=0.
dni : numeric
Direct normal irradiance in W/m^2. DNI must be >=0.
dni_extra : numeric
Extraterrestrial normal irradiance in W/m^2.
solar_zenith : numeric
apparent (refraction-corrected) zenith angles in decimal
degrees. solar_zenith must be >=0 and <=180.
solar_azimuth : numeric
Sun azimuth angles in decimal degrees. solar_azimuth must be >=0
and <=360. The azimuth convention is defined as degrees east of
north (e.g. North = 0, East = 90, West = 270).
airmass : numeric
Relative (not pressure-corrected) airmass values. If AM is a
DataFrame it must be of the same size as all other DataFrame
inputs. AM must be >=0 (careful using the 1/sec(z) model of AM
generation)
model : string (optional, default='allsitescomposite1990')
A string which selects the desired set of Perez coefficients. If
model is not provided as an input, the default, '1990' will be
used. All possible model selections are:
* '1990'
* 'allsitescomposite1990' (same as '1990')
* 'allsitescomposite1988'
* 'sandiacomposite1988'
* 'usacomposite1988'
* 'france1988'
* 'phoenix1988'
* 'elmonte1988'
* 'osage1988'
* 'albuquerque1988'
* 'capecanaveral1988'
* 'albany1988'
return_components: bool (optional, default=False)
Flag used to decide whether to return the calculated diffuse components
or not.
Returns
--------
numeric, OrderedDict, or DataFrame
Return type controlled by `return_components` argument.
If ``return_components=False``, `sky_diffuse` is returned.
If ``return_components=True``, `diffuse_components` is returned.
sky_diffuse : numeric
The sky diffuse component of the solar radiation on a tilted
surface.
diffuse_components : OrderedDict (array input) or DataFrame (Series input)
Keys/columns are:
* sky_diffuse: Total sky diffuse
* isotropic
* circumsolar
* horizon
References
----------
[1] Loutzenhiser P.G. et. al. "Empirical validation of models to
compute solar irradiance on inclined surfaces for building energy
simulation" 2007, Solar Energy vol. 81. pp. 254-267
[2] Perez, R., Seals, R., Ineichen, P., Stewart, R., Menicucci, D.,
1987. A new simplified version of the Perez diffuse irradiance model
for tilted surfaces. Solar Energy 39(3), 221-232.
[3] Perez, R., Ineichen, P., Seals, R., Michalsky, J., Stewart, R.,
1990. Modeling daylight availability and irradiance components from
direct and global irradiance. Solar Energy 44 (5), 271-289.
[4] Perez, R. et. al 1988. "The Development and Verification of the
Perez Diffuse Radiation Model". SAND88-7030
'''
kappa = 1.041 # for solar_zenith in radians
z = np.radians(solar_zenith) # convert to radians
# delta is the sky's "brightness"
delta = dhi * airmass / dni_extra
# epsilon is the sky's "clearness"
with np.errstate(invalid='ignore'):
eps = ((dhi + dni) / dhi + kappa * (z ** 3)) / (1 + kappa * (z ** 3))
# numpy indexing below will not work with a Series
if isinstance(eps, pd.Series):
eps = eps.values
# Perez et al define clearness bins according to the following
# rules. 1 = overcast ... 8 = clear (these names really only make
# sense for small zenith angles, but...) these values will
    # eventually be used as indices for coefficient lookups
ebin = np.digitize(eps, (0., 1.065, 1.23, 1.5, 1.95, 2.8, 4.5, 6.2))
ebin = np.array(ebin) # GH 642
ebin[np.isnan(eps)] = 0
    # correct for 0 indexing in coefficient lookup
# later, ebin = -1 will yield nan coefficients
ebin -= 1
# The various possible sets of Perez coefficients are contained
# in a subfunction to clean up the code.
F1c, F2c = _get_perez_coefficients(model)
# results in invalid eps (ebin = -1) being mapped to nans
nans = np.array([np.nan, np.nan, np.nan])
F1c = np.vstack((F1c, nans))
F2c = np.vstack((F2c, nans))
F1 = (F1c[ebin, 0] + F1c[ebin, 1] * delta + F1c[ebin, 2] * z)
F1 = np.maximum(F1, 0)
F2 = (F2c[ebin, 0] + F2c[ebin, 1] * delta + F2c[ebin, 2] * z)
F2 = np.maximum(F2, 0)
A = aoi_projection(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
A = np.maximum(A, 0)
B = tools.cosd(solar_zenith)
B = np.maximum(B, tools.cosd(85))
# Calculate Diffuse POA from sky dome
term1 = 0.5 * (1 - F1) * (1 + tools.cosd(surface_tilt))
term2 = F1 * A / B
term3 = F2 * tools.sind(surface_tilt)
sky_diffuse = np.maximum(dhi * (term1 + term2 + term3), 0)
# we've preserved the input type until now, so don't ruin it!
if isinstance(sky_diffuse, pd.Series):
sky_diffuse[np.isnan(airmass)] = 0
else:
sky_diffuse = np.where(np.isnan(airmass), 0, sky_diffuse)
if return_components:
diffuse_components = OrderedDict()
diffuse_components['sky_diffuse'] = sky_diffuse
# Calculate the different components
diffuse_components['isotropic'] = dhi * term1
diffuse_components['circumsolar'] = dhi * term2
diffuse_components['horizon'] = dhi * term3
# Set values of components to 0 when sky_diffuse is 0
mask = sky_diffuse == 0
if isinstance(sky_diffuse, pd.Series):
diffuse_components = pd.DataFrame(diffuse_components)
diffuse_components.loc[mask] = 0
else:
diffuse_components = {k: np.where(mask, 0, v) for k, v in
diffuse_components.items()}
return diffuse_components
else:
return sky_diffuse | Determine diffuse irradiance from the sky on a tilted surface using
one of the Perez models.
Perez models determine the diffuse irradiance from the sky (ground
reflected irradiance is not included in this algorithm) on a tilted
surface using the surface tilt angle, surface azimuth angle, diffuse
horizontal irradiance, direct normal irradiance, extraterrestrial
irradiance, sun zenith angle, sun azimuth angle, and relative (not
pressure-corrected) airmass. Optionally a selector may be used to
use any of Perez's model coefficient sets.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. surface_tilt must be >=0
and <=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth : numeric
Surface azimuth angles in decimal degrees. surface_azimuth must
be >=0 and <=360. The azimuth convention is defined as degrees
east of north (e.g. North = 0, South=180 East = 90, West = 270).
dhi : numeric
Diffuse horizontal irradiance in W/m^2. DHI must be >=0.
dni : numeric
Direct normal irradiance in W/m^2. DNI must be >=0.
dni_extra : numeric
Extraterrestrial normal irradiance in W/m^2.
solar_zenith : numeric
apparent (refraction-corrected) zenith angles in decimal
degrees. solar_zenith must be >=0 and <=180.
solar_azimuth : numeric
Sun azimuth angles in decimal degrees. solar_azimuth must be >=0
and <=360. The azimuth convention is defined as degrees east of
north (e.g. North = 0, East = 90, West = 270).
airmass : numeric
Relative (not pressure-corrected) airmass values. If AM is a
DataFrame it must be of the same size as all other DataFrame
inputs. AM must be >=0 (careful using the 1/sec(z) model of AM
generation)
model : string (optional, default='allsitescomposite1990')
A string which selects the desired set of Perez coefficients. If
model is not provided as an input, the default, '1990' will be
used. All possible model selections are:
* '1990'
* 'allsitescomposite1990' (same as '1990')
* 'allsitescomposite1988'
* 'sandiacomposite1988'
* 'usacomposite1988'
* 'france1988'
* 'phoenix1988'
* 'elmonte1988'
* 'osage1988'
* 'albuquerque1988'
* 'capecanaveral1988'
* 'albany1988'
return_components: bool (optional, default=False)
Flag used to decide whether to return the calculated diffuse components
or not.
Returns
--------
numeric, OrderedDict, or DataFrame
Return type controlled by `return_components` argument.
If ``return_components=False``, `sky_diffuse` is returned.
If ``return_components=True``, `diffuse_components` is returned.
sky_diffuse : numeric
The sky diffuse component of the solar radiation on a tilted
surface.
diffuse_components : OrderedDict (array input) or DataFrame (Series input)
Keys/columns are:
* sky_diffuse: Total sky diffuse
* isotropic
* circumsolar
* horizon
References
----------
[1] Loutzenhiser P.G. et. al. "Empirical validation of models to
compute solar irradiance on inclined surfaces for building energy
simulation" 2007, Solar Energy vol. 81. pp. 254-267
[2] Perez, R., Seals, R., Ineichen, P., Stewart, R., Menicucci, D.,
1987. A new simplified version of the Perez diffuse irradiance model
for tilted surfaces. Solar Energy 39(3), 221-232.
[3] Perez, R., Ineichen, P., Seals, R., Michalsky, J., Stewart, R.,
1990. Modeling daylight availability and irradiance components from
direct and global irradiance. Solar Energy 44 (5), 271-289.
[4] Perez, R. et. al 1988. "The Development and Verification of the
Perez Diffuse Radiation Model". SAND88-7030 | Below is the the instruction that describes the task:
### Input:
Determine diffuse irradiance from the sky on a tilted surface using
one of the Perez models.
Perez models determine the diffuse irradiance from the sky (ground
reflected irradiance is not included in this algorithm) on a tilted
surface using the surface tilt angle, surface azimuth angle, diffuse
horizontal irradiance, direct normal irradiance, extraterrestrial
irradiance, sun zenith angle, sun azimuth angle, and relative (not
pressure-corrected) airmass. Optionally a selector may be used to
use any of Perez's model coefficient sets.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. surface_tilt must be >=0
and <=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth : numeric
Surface azimuth angles in decimal degrees. surface_azimuth must
be >=0 and <=360. The azimuth convention is defined as degrees
east of north (e.g. North = 0, South=180 East = 90, West = 270).
dhi : numeric
Diffuse horizontal irradiance in W/m^2. DHI must be >=0.
dni : numeric
Direct normal irradiance in W/m^2. DNI must be >=0.
dni_extra : numeric
Extraterrestrial normal irradiance in W/m^2.
solar_zenith : numeric
apparent (refraction-corrected) zenith angles in decimal
degrees. solar_zenith must be >=0 and <=180.
solar_azimuth : numeric
Sun azimuth angles in decimal degrees. solar_azimuth must be >=0
and <=360. The azimuth convention is defined as degrees east of
north (e.g. North = 0, East = 90, West = 270).
airmass : numeric
Relative (not pressure-corrected) airmass values. If AM is a
DataFrame it must be of the same size as all other DataFrame
inputs. AM must be >=0 (careful using the 1/sec(z) model of AM
generation)
model : string (optional, default='allsitescomposite1990')
A string which selects the desired set of Perez coefficients. If
model is not provided as an input, the default, '1990' will be
used. All possible model selections are:
* '1990'
* 'allsitescomposite1990' (same as '1990')
* 'allsitescomposite1988'
* 'sandiacomposite1988'
* 'usacomposite1988'
* 'france1988'
* 'phoenix1988'
* 'elmonte1988'
* 'osage1988'
* 'albuquerque1988'
* 'capecanaveral1988'
* 'albany1988'
return_components: bool (optional, default=False)
Flag used to decide whether to return the calculated diffuse components
or not.
Returns
--------
numeric, OrderedDict, or DataFrame
Return type controlled by `return_components` argument.
If ``return_components=False``, `sky_diffuse` is returned.
If ``return_components=True``, `diffuse_components` is returned.
sky_diffuse : numeric
The sky diffuse component of the solar radiation on a tilted
surface.
diffuse_components : OrderedDict (array input) or DataFrame (Series input)
Keys/columns are:
* sky_diffuse: Total sky diffuse
* isotropic
* circumsolar
* horizon
References
----------
[1] Loutzenhiser P.G. et. al. "Empirical validation of models to
compute solar irradiance on inclined surfaces for building energy
simulation" 2007, Solar Energy vol. 81. pp. 254-267
[2] Perez, R., Seals, R., Ineichen, P., Stewart, R., Menicucci, D.,
1987. A new simplified version of the Perez diffuse irradiance model
for tilted surfaces. Solar Energy 39(3), 221-232.
[3] Perez, R., Ineichen, P., Seals, R., Michalsky, J., Stewart, R.,
1990. Modeling daylight availability and irradiance components from
direct and global irradiance. Solar Energy 44 (5), 271-289.
[4] Perez, R. et. al 1988. "The Development and Verification of the
Perez Diffuse Radiation Model". SAND88-7030
### Response:
def perez(surface_tilt, surface_azimuth, dhi, dni, dni_extra,
solar_zenith, solar_azimuth, airmass,
model='allsitescomposite1990', return_components=False):
'''
Determine diffuse irradiance from the sky on a tilted surface using
one of the Perez models.
Perez models determine the diffuse irradiance from the sky (ground
reflected irradiance is not included in this algorithm) on a tilted
surface using the surface tilt angle, surface azimuth angle, diffuse
horizontal irradiance, direct normal irradiance, extraterrestrial
irradiance, sun zenith angle, sun azimuth angle, and relative (not
pressure-corrected) airmass. Optionally a selector may be used to
use any of Perez's model coefficient sets.
Parameters
----------
surface_tilt : numeric
Surface tilt angles in decimal degrees. surface_tilt must be >=0
and <=180. The tilt angle is defined as degrees from horizontal
(e.g. surface facing up = 0, surface facing horizon = 90)
surface_azimuth : numeric
Surface azimuth angles in decimal degrees. surface_azimuth must
be >=0 and <=360. The azimuth convention is defined as degrees
east of north (e.g. North = 0, South=180 East = 90, West = 270).
dhi : numeric
Diffuse horizontal irradiance in W/m^2. DHI must be >=0.
dni : numeric
Direct normal irradiance in W/m^2. DNI must be >=0.
dni_extra : numeric
Extraterrestrial normal irradiance in W/m^2.
solar_zenith : numeric
apparent (refraction-corrected) zenith angles in decimal
degrees. solar_zenith must be >=0 and <=180.
solar_azimuth : numeric
Sun azimuth angles in decimal degrees. solar_azimuth must be >=0
and <=360. The azimuth convention is defined as degrees east of
north (e.g. North = 0, East = 90, West = 270).
airmass : numeric
Relative (not pressure-corrected) airmass values. If AM is a
DataFrame it must be of the same size as all other DataFrame
inputs. AM must be >=0 (careful using the 1/sec(z) model of AM
generation)
model : string (optional, default='allsitescomposite1990')
A string which selects the desired set of Perez coefficients. If
model is not provided as an input, the default, '1990' will be
used. All possible model selections are:
* '1990'
* 'allsitescomposite1990' (same as '1990')
* 'allsitescomposite1988'
* 'sandiacomposite1988'
* 'usacomposite1988'
* 'france1988'
* 'phoenix1988'
* 'elmonte1988'
* 'osage1988'
* 'albuquerque1988'
* 'capecanaveral1988'
* 'albany1988'
return_components: bool (optional, default=False)
Flag used to decide whether to return the calculated diffuse components
or not.
Returns
--------
numeric, OrderedDict, or DataFrame
Return type controlled by `return_components` argument.
If ``return_components=False``, `sky_diffuse` is returned.
If ``return_components=True``, `diffuse_components` is returned.
sky_diffuse : numeric
The sky diffuse component of the solar radiation on a tilted
surface.
diffuse_components : OrderedDict (array input) or DataFrame (Series input)
Keys/columns are:
* sky_diffuse: Total sky diffuse
* isotropic
* circumsolar
* horizon
References
----------
[1] Loutzenhiser P.G. et. al. "Empirical validation of models to
compute solar irradiance on inclined surfaces for building energy
simulation" 2007, Solar Energy vol. 81. pp. 254-267
[2] Perez, R., Seals, R., Ineichen, P., Stewart, R., Menicucci, D.,
1987. A new simplified version of the Perez diffuse irradiance model
for tilted surfaces. Solar Energy 39(3), 221-232.
[3] Perez, R., Ineichen, P., Seals, R., Michalsky, J., Stewart, R.,
1990. Modeling daylight availability and irradiance components from
direct and global irradiance. Solar Energy 44 (5), 271-289.
[4] Perez, R. et. al 1988. "The Development and Verification of the
Perez Diffuse Radiation Model". SAND88-7030
'''
kappa = 1.041 # for solar_zenith in radians
z = np.radians(solar_zenith) # convert to radians
# delta is the sky's "brightness"
delta = dhi * airmass / dni_extra
# epsilon is the sky's "clearness"
with np.errstate(invalid='ignore'):
eps = ((dhi + dni) / dhi + kappa * (z ** 3)) / (1 + kappa * (z ** 3))
# numpy indexing below will not work with a Series
if isinstance(eps, pd.Series):
eps = eps.values
# Perez et al define clearness bins according to the following
# rules. 1 = overcast ... 8 = clear (these names really only make
# sense for small zenith angles, but...) these values will
    # eventually be used as indices for coefficient lookups
ebin = np.digitize(eps, (0., 1.065, 1.23, 1.5, 1.95, 2.8, 4.5, 6.2))
ebin = np.array(ebin) # GH 642
ebin[np.isnan(eps)] = 0
    # correct for 0 indexing in coefficient lookup
# later, ebin = -1 will yield nan coefficients
ebin -= 1
# The various possible sets of Perez coefficients are contained
# in a subfunction to clean up the code.
F1c, F2c = _get_perez_coefficients(model)
# results in invalid eps (ebin = -1) being mapped to nans
nans = np.array([np.nan, np.nan, np.nan])
F1c = np.vstack((F1c, nans))
F2c = np.vstack((F2c, nans))
F1 = (F1c[ebin, 0] + F1c[ebin, 1] * delta + F1c[ebin, 2] * z)
F1 = np.maximum(F1, 0)
F2 = (F2c[ebin, 0] + F2c[ebin, 1] * delta + F2c[ebin, 2] * z)
F2 = np.maximum(F2, 0)
A = aoi_projection(surface_tilt, surface_azimuth,
solar_zenith, solar_azimuth)
A = np.maximum(A, 0)
B = tools.cosd(solar_zenith)
B = np.maximum(B, tools.cosd(85))
# Calculate Diffuse POA from sky dome
term1 = 0.5 * (1 - F1) * (1 + tools.cosd(surface_tilt))
term2 = F1 * A / B
term3 = F2 * tools.sind(surface_tilt)
sky_diffuse = np.maximum(dhi * (term1 + term2 + term3), 0)
# we've preserved the input type until now, so don't ruin it!
if isinstance(sky_diffuse, pd.Series):
sky_diffuse[np.isnan(airmass)] = 0
else:
sky_diffuse = np.where(np.isnan(airmass), 0, sky_diffuse)
if return_components:
diffuse_components = OrderedDict()
diffuse_components['sky_diffuse'] = sky_diffuse
# Calculate the different components
diffuse_components['isotropic'] = dhi * term1
diffuse_components['circumsolar'] = dhi * term2
diffuse_components['horizon'] = dhi * term3
# Set values of components to 0 when sky_diffuse is 0
mask = sky_diffuse == 0
if isinstance(sky_diffuse, pd.Series):
diffuse_components = pd.DataFrame(diffuse_components)
diffuse_components.loc[mask] = 0
else:
diffuse_components = {k: np.where(mask, 0, v) for k, v in
diffuse_components.items()}
return diffuse_components
else:
return sky_diffuse |
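A hedged usage sketch of the perez() function defined above, with invented single-element inputs; the irradiance and geometry numbers are illustrative only, the relative airmass is assumed to have been computed elsewhere, and the module-level helpers it relies on (aoi_projection, tools, _get_perez_coefficients) are assumed to be available as in its home module.

import numpy as np
sky_diffuse = perez(surface_tilt=30, surface_azimuth=180,
                    dhi=np.array([100.0]), dni=np.array([700.0]),
                    dni_extra=np.array([1367.0]),
                    solar_zenith=np.array([45.0]), solar_azimuth=np.array([170.0]),
                    airmass=np.array([1.5]))
components = perez(30, 180, np.array([100.0]), np.array([700.0]), np.array([1367.0]),
                   np.array([45.0]), np.array([170.0]), np.array([1.5]),
                   return_components=True)
# components is an OrderedDict with 'sky_diffuse', 'isotropic', 'circumsolar', 'horizon'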
def run_ding0(self, session, mv_grid_districts_no=None, debug=False, export_figures=False):
""" Let DING0 run by shouting at this method (or just call
it from NetworkDing0 instance). This method is a wrapper
for the main functionality of DING0.
Parameters
----------
session : sqlalchemy.orm.session.Session
Database session
mv_grid_districts_no : List of Integers
List of MV grid_districts/stations to be imported (if empty,
all grid_districts & stations are imported)
debug : bool, defaults to False
If True, information is printed during process
export_figures : bool, defaults to False
If True, figures are shown or exported (default path: ~/.ding0/) during run.
Returns
-------
msg : str
Message of invalidity of a grid district
Notes
-----
The steps performed in this method are to be kept in the given order
since there are hard dependencies between them. Short description of
all steps performed:
* STEP 1: Import MV Grid Districts and subjacent objects
Imports MV Grid Districts, HV-MV stations, Load Areas, LV Grid Districts
and MV-LV stations, instantiates and initiates objects.
* STEP 2: Import generators
Conventional and renewable generators of voltage levels 4..7 are imported
and added to corresponding grid.
* STEP 3: Parametrize grid
Parameters of MV grid are set such as voltage level and cable/line types
according to MV Grid District's characteristics.
* STEP 4: Validate MV Grid Districts
Tests MV grid districts for validity concerning imported data such as
count of Load Areas.
* STEP 5: Build LV grids
Builds LV grids for every non-aggregated LA in every MV Grid District
using model grids.
* STEP 6: Build MV grids
Builds MV grid by performing a routing on Load Area centres to build
ring topology.
* STEP 7: Connect MV and LV generators
Generators are connected to grids, used approach depends on voltage
level.
* STEP 8: Set IDs for all branches in MV and LV grids
While IDs of imported objects can be derived from dataset's ID, branches
are created in steps 5+6 and need unique IDs (e.g. for PF calculation).
* STEP 9: Relocate switch disconnectors in MV grid
Switch disconnectors are set during routing process (step 6) according
to the load distribution within a ring. After further modifications of
the grid within step 6+7 they have to be relocated (note: switch
disconnectors are called circuit breakers in DING0 for historical reasons).
* STEP 10: Open all switch disconnectors in MV grid
Under normal conditions, rings are operated in open state (half-rings).
Furthermore, this is required to allow powerflow for MV grid.
* STEP 11: Do power flow analysis of MV grid
The technically working MV grid created in step 6 was extended by satellite
loads and generators. It is finally tested again using powerflow calculation.
* STEP 12: Reinforce MV grid
MV grid is eventually reinforced persuant to results from step 11.
STEP 13: Close all switch disconnectors in MV grid
The rings are finally closed to hold a complete graph (if the SDs are open,
the edges adjacent to a SD will not be exported!)
"""
if debug:
start = time.time()
# STEP 1: Import MV Grid Districts and subjacent objects
self.import_mv_grid_districts(session,
mv_grid_districts_no=mv_grid_districts_no)
# STEP 2: Import generators
self.import_generators(session, debug=debug)
# STEP 3: Parametrize MV grid
self.mv_parametrize_grid(debug=debug)
# STEP 4: Validate MV Grid Districts
msg = self.validate_grid_districts()
# STEP 5: Build LV grids
self.build_lv_grids()
# STEP 6: Build MV grids
self.mv_routing(debug=False)
if export_figures:
grid = self._mv_grid_districts[0].mv_grid
plot_mv_topology(grid, subtitle='Routing completed', filename='1_routing_completed.png')
# STEP 7: Connect MV and LV generators
self.connect_generators(debug=False)
if export_figures:
plot_mv_topology(grid, subtitle='Generators connected', filename='2_generators_connected.png')
# STEP 8: Set IDs for all branches in MV and LV grids
self.set_branch_ids()
# STEP 9: Relocate switch disconnectors in MV grid
self.set_circuit_breakers(debug=debug)
if export_figures:
plot_mv_topology(grid, subtitle='Circuit breakers relocated', filename='3_circuit_breakers_relocated.png')
# STEP 10: Open all switch disconnectors in MV grid
self.control_circuit_breakers(mode='open')
# STEP 11: Do power flow analysis of MV grid
self.run_powerflow(session, method='onthefly', export_pypsa=False, debug=debug)
if export_figures:
plot_mv_topology(grid, subtitle='PF result (load case)',
filename='4_PF_result_load.png',
line_color='loading', node_color='voltage', testcase='load')
plot_mv_topology(grid, subtitle='PF result (feedin case)',
filename='5_PF_result_feedin.png',
line_color='loading', node_color='voltage', testcase='feedin')
# STEP 12: Reinforce MV grid
self.reinforce_grid()
# STEP 13: Close all switch disconnectors in MV grid
self.control_circuit_breakers(mode='close')
if export_figures:
plot_mv_topology(grid, subtitle='Final grid PF result (load case)',
filename='6_final_grid_PF_result_load.png',
line_color='loading', node_color='voltage', testcase='load')
plot_mv_topology(grid, subtitle='Final grid PF result (feedin case)',
filename='7_final_grid_PF_result_feedin.png',
line_color='loading', node_color='voltage', testcase='feedin')
if debug:
logger.info('Elapsed time for {0} MV Grid Districts (seconds): {1}'.format(
str(len(mv_grid_districts_no)), time.time() - start))
return msg | Let DING0 run by shouting at this method (or just call
it from NetworkDing0 instance). This method is a wrapper
for the main functionality of DING0.
Parameters
----------
session : sqlalchemy.orm.session.Session
Database session
mv_grid_districts_no : List of Integers
List of MV grid_districts/stations to be imported (if empty,
all grid_districts & stations are imported)
debug : bool, defaults to False
If True, information is printed during process
export_figures : bool, defaults to False
If True, figures are shown or exported (default path: ~/.ding0/) during run.
Returns
-------
msg : str
Message of invalidity of a grid district
Notes
-----
The steps performed in this method are to be kept in the given order
since there are hard dependencies between them. Short description of
all steps performed:
* STEP 1: Import MV Grid Districts and subjacent objects
Imports MV Grid Districts, HV-MV stations, Load Areas, LV Grid Districts
and MV-LV stations, instantiates and initiates objects.
* STEP 2: Import generators
Conventional and renewable generators of voltage levels 4..7 are imported
and added to corresponding grid.
* STEP 3: Parametrize grid
Parameters of MV grid are set such as voltage level and cable/line types
according to MV Grid District's characteristics.
* STEP 4: Validate MV Grid Districts
Tests MV grid districts for validity concerning imported data such as
count of Load Areas.
* STEP 5: Build LV grids
Builds LV grids for every non-aggregated LA in every MV Grid District
using model grids.
* STEP 6: Build MV grids
Builds MV grid by performing a routing on Load Area centres to build
ring topology.
* STEP 7: Connect MV and LV generators
Generators are connected to grids, used approach depends on voltage
level.
* STEP 8: Set IDs for all branches in MV and LV grids
While IDs of imported objects can be derived from dataset's ID, branches
are created in steps 5+6 and need unique IDs (e.g. for PF calculation).
* STEP 9: Relocate switch disconnectors in MV grid
Switch disconnectors are set during routing process (step 6) according
to the load distribution within a ring. After further modifications of
the grid within step 6+7 they have to be relocated (note: switch
disconnectors are called circuit breakers in DING0 for historical reasons).
* STEP 10: Open all switch disconnectors in MV grid
Under normal conditions, rings are operated in open state (half-rings).
Furthermore, this is required to allow powerflow for MV grid.
* STEP 11: Do power flow analysis of MV grid
The technically working MV grid created in step 6 was extended by satellite
loads and generators. It is finally tested again using powerflow calculation.
* STEP 12: Reinforce MV grid
MV grid is eventually reinforced persuant to results from step 11.
STEP 13: Close all switch disconnectors in MV grid
The rings are finally closed to hold a complete graph (if the SDs are open,
the edges adjacent to a SD will not be exported!) | Below is the the instruction that describes the task:
### Input:
Let DING0 run by shouting at this method (or just call
it from NetworkDing0 instance). This method is a wrapper
for the main functionality of DING0.
Parameters
----------
session : sqlalchemy.orm.session.Session
Database session
mv_grid_districts_no : List of Integers
List of MV grid_districts/stations to be imported (if empty,
all grid_districts & stations are imported)
debug : bool, defaults to False
If True, information is printed during process
export_figures : bool, defaults to False
If True, figures are shown or exported (default path: ~/.ding0/) during run.
Returns
-------
msg : str
Message of invalidity of a grid district
Notes
-----
The steps performed in this method are to be kept in the given order
since there are hard dependencies between them. Short description of
all steps performed:
* STEP 1: Import MV Grid Districts and subjacent objects
Imports MV Grid Districts, HV-MV stations, Load Areas, LV Grid Districts
and MV-LV stations, instantiates and initiates objects.
* STEP 2: Import generators
Conventional and renewable generators of voltage levels 4..7 are imported
and added to corresponding grid.
* STEP 3: Parametrize grid
Parameters of MV grid are set such as voltage level and cable/line types
according to MV Grid District's characteristics.
* STEP 4: Validate MV Grid Districts
Tests MV grid districts for validity concerning imported data such as
count of Load Areas.
* STEP 5: Build LV grids
Builds LV grids for every non-aggregated LA in every MV Grid District
using model grids.
* STEP 6: Build MV grids
Builds MV grid by performing a routing on Load Area centres to build
ring topology.
* STEP 7: Connect MV and LV generators
Generators are connected to grids, used approach depends on voltage
level.
* STEP 8: Set IDs for all branches in MV and LV grids
While IDs of imported objects can be derived from dataset's ID, branches
are created in steps 5+6 and need unique IDs (e.g. for PF calculation).
* STEP 9: Relocate switch disconnectors in MV grid
Switch disconnectors are set during routing process (step 6) according
to the load distribution within a ring. After further modifications of
the grid within step 6+7 they have to be relocated (note: switch
disconnectors are called circuit breakers in DING0 for historical reasons).
* STEP 10: Open all switch disconnectors in MV grid
Under normal conditions, rings are operated in open state (half-rings).
Furthermore, this is required to allow powerflow for MV grid.
* STEP 11: Do power flow analysis of MV grid
The technically working MV grid created in step 6 was extended by satellite
loads and generators. It is finally tested again using powerflow calculation.
* STEP 12: Reinforce MV grid
MV grid is eventually reinforced persuant to results from step 11.
STEP 13: Close all switch disconnectors in MV grid
The rings are finally closed to hold a complete graph (if the SDs are open,
the edges adjacent to a SD will not be exported!)
### Response:
def run_ding0(self, session, mv_grid_districts_no=None, debug=False, export_figures=False):
""" Let DING0 run by shouting at this method (or just call
it from NetworkDing0 instance). This method is a wrapper
for the main functionality of DING0.
Parameters
----------
session : sqlalchemy.orm.session.Session
Database session
mv_grid_districts_no : List of Integers
List of MV grid_districts/stations to be imported (if empty,
all grid_districts & stations are imported)
debug : bool, defaults to False
If True, information is printed during process
export_figures : bool, defaults to False
If True, figures are shown or exported (default path: ~/.ding0/) during run.
Returns
-------
msg : str
Message of invalidity of a grid district
Notes
-----
The steps performed in this method are to be kept in the given order
since there are hard dependencies between them. Short description of
all steps performed:
* STEP 1: Import MV Grid Districts and subjacent objects
Imports MV Grid Districts, HV-MV stations, Load Areas, LV Grid Districts
and MV-LV stations, instantiates and initiates objects.
* STEP 2: Import generators
Conventional and renewable generators of voltage levels 4..7 are imported
and added to corresponding grid.
* STEP 3: Parametrize grid
Parameters of MV grid are set such as voltage level and cable/line types
according to MV Grid District's characteristics.
* STEP 4: Validate MV Grid Districts
Tests MV grid districts for validity concerning imported data such as
count of Load Areas.
* STEP 5: Build LV grids
Builds LV grids for every non-aggregated LA in every MV Grid District
using model grids.
* STEP 6: Build MV grids
Builds MV grid by performing a routing on Load Area centres to build
ring topology.
* STEP 7: Connect MV and LV generators
Generators are connected to grids, used approach depends on voltage
level.
* STEP 8: Set IDs for all branches in MV and LV grids
While IDs of imported objects can be derived from dataset's ID, branches
are created in steps 5+6 and need unique IDs (e.g. for PF calculation).
* STEP 9: Relocate switch disconnectors in MV grid
Switch disconnectors are set during routing process (step 6) according
to the load distribution within a ring. After further modifications of
the grid within step 6+7 they have to be relocated (note: switch
disconnectors are called circuit breakers in DING0 for historical reasons).
* STEP 10: Open all switch disconnectors in MV grid
Under normal conditions, rings are operated in open state (half-rings).
Furthermore, this is required to allow powerflow for MV grid.
* STEP 11: Do power flow analysis of MV grid
The technically working MV grid created in step 6 was extended by satellite
loads and generators. It is finally tested again using powerflow calculation.
* STEP 12: Reinforce MV grid
MV grid is eventually reinforced persuant to results from step 11.
STEP 13: Close all switch disconnectors in MV grid
The rings are finally closed to hold a complete graph (if the SDs are open,
the edges adjacent to a SD will not be exported!)
"""
if debug:
start = time.time()
# STEP 1: Import MV Grid Districts and subjacent objects
self.import_mv_grid_districts(session,
mv_grid_districts_no=mv_grid_districts_no)
# STEP 2: Import generators
self.import_generators(session, debug=debug)
# STEP 3: Parametrize MV grid
self.mv_parametrize_grid(debug=debug)
# STEP 4: Validate MV Grid Districts
msg = self.validate_grid_districts()
# STEP 5: Build LV grids
self.build_lv_grids()
# STEP 6: Build MV grids
self.mv_routing(debug=False)
if export_figures:
grid = self._mv_grid_districts[0].mv_grid
plot_mv_topology(grid, subtitle='Routing completed', filename='1_routing_completed.png')
# STEP 7: Connect MV and LV generators
self.connect_generators(debug=False)
if export_figures:
plot_mv_topology(grid, subtitle='Generators connected', filename='2_generators_connected.png')
# STEP 8: Set IDs for all branches in MV and LV grids
self.set_branch_ids()
# STEP 9: Relocate switch disconnectors in MV grid
self.set_circuit_breakers(debug=debug)
if export_figures:
plot_mv_topology(grid, subtitle='Circuit breakers relocated', filename='3_circuit_breakers_relocated.png')
# STEP 10: Open all switch disconnectors in MV grid
self.control_circuit_breakers(mode='open')
# STEP 11: Do power flow analysis of MV grid
self.run_powerflow(session, method='onthefly', export_pypsa=False, debug=debug)
if export_figures:
plot_mv_topology(grid, subtitle='PF result (load case)',
filename='4_PF_result_load.png',
line_color='loading', node_color='voltage', testcase='load')
plot_mv_topology(grid, subtitle='PF result (feedin case)',
filename='5_PF_result_feedin.png',
line_color='loading', node_color='voltage', testcase='feedin')
# STEP 12: Reinforce MV grid
self.reinforce_grid()
# STEP 13: Close all switch disconnectors in MV grid
self.control_circuit_breakers(mode='close')
if export_figures:
plot_mv_topology(grid, subtitle='Final grid PF result (load case)',
filename='6_final_grid_PF_result_load.png',
line_color='loading', node_color='voltage', testcase='load')
plot_mv_topology(grid, subtitle='Final grid PF result (feedin case)',
filename='7_final_grid_PF_result_feedin.png',
line_color='loading', node_color='voltage', testcase='feedin')
if debug:
logger.info('Elapsed time for {0} MV Grid Districts (seconds): {1}'.format(
str(len(mv_grid_districts_no)), time.time() - start))
return msg |
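A hedged driver sketch for the method above. It assumes `session` is an already-open SQLAlchemy session to the open_eGo database and that NetworkDing0 is importable from ding0.core; both the import path and the district number are assumptions made for illustration.

from ding0.core import NetworkDing0          # assumed import path
nd = NetworkDing0(name='network')            # container object exposing run_ding0
msg = nd.run_ding0(session=session,          # session assumed to be created elsewhere
                   mv_grid_districts_no=[3545],   # hypothetical MV grid district id
                   export_figures=True)
if msg:
    print('Grid district reported as invalid:', msg)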
def get_pandas_series(self):
"""The function creates pandas series based on index and values"""
return pandas.Series(self.values, self.index, name=self.name) | The function creates pandas series based on index and values | Below is the the instruction that describes the task:
### Input:
The function creates pandas series based on index and values
### Response:
def get_pandas_series(self):
"""The function creates pandas series based on index and values"""
return pandas.Series(self.values, self.index, name=self.name) |
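For illustration, the equivalent standalone construction with made-up stand-ins for self.values, self.index and self.name:

import pandas
values, index, name = [1.0, 2.5, 3.0], ['a', 'b', 'c'], 'example'
series = pandas.Series(values, index, name=name)
print(series.loc['b'])   # 2.5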
def get_subjects(self):
"""
Returns the list of subject names present in the schema registry.
"""
res = requests.get(self._url('/subjects'))
raise_if_failed(res)
return res.json() | Returns the list of subject names present in the schema registry. | Below is the the instruction that describes the task:
### Input:
Returns the list of subject names present in the schema registry.
### Response:
def get_subjects(self):
"""
Returns the list of subject names present in the schema registry.
"""
res = requests.get(self._url('/subjects'))
raise_if_failed(res)
return res.json() |
def get_feature_variable_string(self, feature_key, variable_key, user_id, attributes=None):
""" Returns value for a certain string variable attached to a feature.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
String value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
"""
variable_type = entities.Variable.Type.STRING
return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes) | Returns value for a certain string variable attached to a feature.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
String value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable. | Below is the the instruction that describes the task:
### Input:
Returns value for a certain string variable attached to a feature.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
String value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
### Response:
def get_feature_variable_string(self, feature_key, variable_key, user_id, attributes=None):
""" Returns value for a certain string variable attached to a feature.
Args:
feature_key: Key of the feature whose variable's value is being accessed.
variable_key: Key of the variable whose value is to be accessed.
user_id: ID for user.
attributes: Dict representing user attributes.
Returns:
String value of the variable. None if:
- Feature key is invalid.
- Variable key is invalid.
- Mismatch with type of variable.
"""
variable_type = entities.Variable.Type.STRING
return self._get_feature_variable_for_type(feature_key, variable_key, variable_type, user_id, attributes) |
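A hedged usage sketch: `optimizely_client` stands for an already-configured SDK client instance, and the feature key, variable key, user id and attributes are invented for illustration.

value = optimizely_client.get_feature_variable_string(
    'checkout_flow',                    # hypothetical feature key
    'button_text',                      # hypothetical string variable key
    'user_123',
    attributes={'plan': 'premium'})
if value is None:
    # invalid keys, a type mismatch, or the user did not receive the feature
    value = 'Buy now'                   # fall back to a sensible default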
def _load_fits(self, h5file):
""" Loads fits from h5file and returns a dictionary of fits. """
fits = {}
for key in ['mf', 'chifz', 'vfx', 'vfy']:
fits[key] = self._load_scalar_fit(fit_key=key, h5file=h5file)
return fits | Loads fits from h5file and returns a dictionary of fits. | Below is the the instruction that describes the task:
### Input:
Loads fits from h5file and returns a dictionary of fits.
### Response:
def _load_fits(self, h5file):
""" Loads fits from h5file and returns a dictionary of fits. """
fits = {}
for key in ['mf', 'chifz', 'vfx', 'vfy']:
fits[key] = self._load_scalar_fit(fit_key=key, h5file=h5file)
return fits |
def parse_insert_size(self, f):
""" Parse the contents of the Qualimap BamQC Insert Size Histogram file """
# Get the sample name from the parent parent directory
# Typical path: <sample name>/raw_data_qualimapReport/insert_size_histogram.txt
s_name = self.get_s_name(f)
d = dict()
zero_insertsize = 0
for l in f['f']:
if l.startswith('#'):
continue
insertsize, count = l.split(None, 1)
insertsize = int(round(float(insertsize)))
count = float(count) / 1000000
if(insertsize == 0):
zero_insertsize = count
else:
d[insertsize] = count
# Find median without importing anything to do it for us
num_counts = sum(d.values())
cum_counts = 0
median_insert_size = None
for thisins, thiscount in d.items():
cum_counts += thiscount
if cum_counts >= num_counts/2:
median_insert_size = thisins
break
# Add the median insert size to the general stats table
self.general_stats_data[s_name]['median_insert_size'] = median_insert_size
# Save results
if s_name in self.qualimap_bamqc_insert_size_hist:
log.debug("Duplicate insert size histogram sample name found! Overwriting: {}".format(s_name))
self.qualimap_bamqc_insert_size_hist[s_name] = d
self.add_data_source(f, s_name=s_name, section='insert_size_histogram') | Parse the contents of the Qualimap BamQC Insert Size Histogram file | Below is the the instruction that describes the task:
### Input:
Parse the contents of the Qualimap BamQC Insert Size Histogram file
### Response:
def parse_insert_size(self, f):
""" Parse the contents of the Qualimap BamQC Insert Size Histogram file """
# Get the sample name from the parent parent directory
# Typical path: <sample name>/raw_data_qualimapReport/insert_size_histogram.txt
s_name = self.get_s_name(f)
d = dict()
zero_insertsize = 0
for l in f['f']:
if l.startswith('#'):
continue
insertsize, count = l.split(None, 1)
insertsize = int(round(float(insertsize)))
count = float(count) / 1000000
if(insertsize == 0):
zero_insertsize = count
else:
d[insertsize] = count
# Find median without importing anything to do it for us
num_counts = sum(d.values())
cum_counts = 0
median_insert_size = None
for thisins, thiscount in d.items():
cum_counts += thiscount
if cum_counts >= num_counts/2:
median_insert_size = thisins
break
# Add the median insert size to the general stats table
self.general_stats_data[s_name]['median_insert_size'] = median_insert_size
# Save results
if s_name in self.qualimap_bamqc_insert_size_hist:
log.debug("Duplicate insert size histogram sample name found! Overwriting: {}".format(s_name))
self.qualimap_bamqc_insert_size_hist[s_name] = d
self.add_data_source(f, s_name=s_name, section='insert_size_histogram') |
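The median computation above can be read in isolation: d maps insert size to a (scaled) count, and the median is the first size whose cumulative count reaches half the total. A standalone sketch with invented numbers:

d = {150: 2.0, 200: 5.0, 250: 1.0}        # insert size -> counts (millions)
num_counts = sum(d.values())
cum_counts = 0
median_insert_size = None
for size in sorted(d):                    # iterate sizes in ascending order
    cum_counts += d[size]
    if cum_counts >= num_counts / 2:
        median_insert_size = size
        break
print(median_insert_size)                 # 200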
def _get_wcs_request(self, bbox, time_interval, size_x, size_y, maxcc, time_difference, custom_url_params):
"""
Returns WCS request.
"""
return WcsRequest(layer=self.data_feature,
bbox=bbox,
time=time_interval,
resx=size_x, resy=size_y,
maxcc=maxcc,
custom_url_params=custom_url_params,
time_difference=time_difference,
image_format=self.image_format,
data_source=self.data_source,
instance_id=self.instance_id) | Returns WCS request. | Below is the the instruction that describes the task:
### Input:
Returns WCS request.
### Response:
def _get_wcs_request(self, bbox, time_interval, size_x, size_y, maxcc, time_difference, custom_url_params):
"""
Returns WCS request.
"""
return WcsRequest(layer=self.data_feature,
bbox=bbox,
time=time_interval,
resx=size_x, resy=size_y,
maxcc=maxcc,
custom_url_params=custom_url_params,
time_difference=time_difference,
image_format=self.image_format,
data_source=self.data_source,
instance_id=self.instance_id) |
def remove_accent_marks(text, excluded=None):
"""Remove accent marks from input text.
This function removes accent marks in the text, but leaves
unicode characters defined in the 'excluded' parameter.
Args:
text: The text to be processed.
excluded: Set of unicode characters to exclude.
Returns:
The text without accent marks.
"""
if excluded is None:
excluded = set()
return unicodedata.normalize(
'NFKC', ''.join(
c for c in unicodedata.normalize(
'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded)) | Remove accent marks from input text.
This function removes accent marks in the text, but leaves
unicode characters defined in the 'excluded' parameter.
Args:
text: The text to be processed.
excluded: Set of unicode characters to exclude.
Returns:
The text without accent marks. | Below is the the instruction that describes the task:
### Input:
Remove accent marks from input text.
This function removes accent marks in the text, but leaves
unicode characters defined in the 'excluded' parameter.
Args:
text: The text to be processed.
excluded: Set of unicode characters to exclude.
Returns:
The text without accent marks.
### Response:
def remove_accent_marks(text, excluded=None):
"""Remove accent marks from input text.
This function removes accent marks in the text, but leaves
unicode characters defined in the 'excluded' parameter.
Args:
text: The text to be processed.
excluded: Set of unicode characters to exclude.
Returns:
The text without accent marks.
"""
if excluded is None:
excluded = set()
return unicodedata.normalize(
'NFKC', ''.join(
c for c in unicodedata.normalize(
'NFKD', text) if unicodedata.category(c) != 'Mn' or c in excluded)) |
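A self-contained usage sketch of the accent-stripping approach above; the helper name strip_accents and the sample strings are illustrative, not part of the original module:

import unicodedata

def strip_accents(text, excluded=frozenset()):
    # Decompose, drop combining marks (category 'Mn') unless excluded, then recompose.
    return unicodedata.normalize(
        'NFKC',
        ''.join(c for c in unicodedata.normalize('NFKD', text)
                if unicodedata.category(c) != 'Mn' or c in excluded))

strip_accents('Café crème brûlée')          # 'Cafe creme brulee'
strip_accents('Café', excluded={'\u0301'})  # 'Café' (combining acute is kept)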
def _get_validate_plotdata_yaml(grading_file, data):
"""Retrieve validation plot data from grading YAML file (old style).
"""
with open(grading_file) as in_handle:
grade_stats = yaml.safe_load(in_handle)
for sample_stats in grade_stats:
sample = sample_stats["sample"]
for vtype, cat, val in _flatten_grading(sample_stats):
yield [sample, variant.get("variantcaller", ""),
vtype, cat, val] | Retrieve validation plot data from grading YAML file (old style). | Below is the instruction that describes the task:
### Input:
Retrieve validation plot data from grading YAML file (old style).
### Response:
def _get_validate_plotdata_yaml(grading_file, data):
"""Retrieve validation plot data from grading YAML file (old style).
"""
with open(grading_file) as in_handle:
grade_stats = yaml.safe_load(in_handle)
for sample_stats in grade_stats:
sample = sample_stats["sample"]
for vtype, cat, val in _flatten_grading(sample_stats):
yield [sample, variant.get("variantcaller", ""),
vtype, cat, val] |
def rmvsuffix(subject):
"""
Remove the suffix from *subject*.
"""
index = subject.rfind('.')
if index > subject.replace('\\', '/').rfind('/'):
subject = subject[:index]
return subject | Remove the suffix from *subject*. | Below is the instruction that describes the task:
### Input:
Remove the suffix from *subject*.
### Response:
def rmvsuffix(subject):
"""
Remove the suffix from *subject*.
"""
index = subject.rfind('.')
if index > subject.replace('\\', '/').rfind('/'):
subject = subject[:index]
return subject |
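Assuming rmvsuffix above is in scope, a few illustrative calls show that only the final extension is removed and that dots inside directory names are ignored; the standard library's os.path.splitext gives similar behaviour:

rmvsuffix('build/output.tar.gz')   # 'build/output.tar' (only the last suffix is dropped)
rmvsuffix('build.v2/README')       # 'build.v2/README'  (the dot sits in a directory name)
rmvsuffix('archive\\data.bin')     # 'archive\\data'    (backslash-separated paths work too)

import os.path
os.path.splitext('build/output.tar.gz')    # ('build/output.tar', '.gz')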
def _close(self, fd):
"""
Close the descriptor used for a path regardless
of mode.
"""
if self._mode == WF_INOTIFYX:
try: pynotifyx.rm_watch(self._inx_fd, fd)
except: pass
else:
try: os.close(fd)
except: pass | Close the descriptor used for a path regardless
of mode. | Below is the instruction that describes the task:
### Input:
Close the descriptor used for a path regardless
of mode.
### Response:
def _close(self, fd):
"""
Close the descriptor used for a path regardless
of mode.
"""
if self._mode == WF_INOTIFYX:
try: pynotifyx.rm_watch(self._inx_fd, fd)
except: pass
else:
try: os.close(fd)
except: pass |
def mean_per_class_error(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: mean per class error.
"""
return [[x[0], 1 - x[1]] for x in self.metric("mean_per_class_accuracy", thresholds=thresholds)] | :param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: mean per class error. | Below is the instruction that describes the task:
### Input:
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: mean per class error.
### Response:
def mean_per_class_error(self, thresholds=None):
"""
:param thresholds: thresholds parameter must be a list (i.e. [0.01, 0.5, 0.99]). If None, then the
thresholds in this set of metrics will be used.
:returns: mean per class error.
"""
return [[x[0], 1 - x[1]] for x in self.metric("mean_per_class_accuracy", thresholds=thresholds)] |
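The method above only flips per-threshold mean-per-class accuracy into error; a plain-Python illustration with made-up numbers (this is not the H2O API itself):

accuracy_by_threshold = [[0.01, 0.91], [0.50, 0.88], [0.99, 0.67]]
error_by_threshold = [[t, 1 - acc] for t, acc in accuracy_by_threshold]
# -> [[0.01, 0.09], [0.50, 0.12], [0.99, 0.33]] up to float rounding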
def load(fname):
"""Loads an array from file.
See more details in ``save``.
Parameters
----------
fname : str
The filename.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data.
"""
if not isinstance(fname, string_types):
raise TypeError('fname required to be a string')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoad(c_str(fname),
ctypes.byref(out_size),
ctypes.byref(handles),
ctypes.byref(out_name_size),
ctypes.byref(names)))
if out_name_size.value == 0:
return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
assert out_name_size.value == out_size.value
return dict(
(py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
for i in range(out_size.value)) | Loads an array from file.
See more details in ``save``.
Parameters
----------
fname : str
The filename.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data. | Below is the instruction that describes the task:
### Input:
Loads an array from file.
See more details in ``save``.
Parameters
----------
fname : str
The filename.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data.
### Response:
def load(fname):
"""Loads an array from file.
See more details in ``save``.
Parameters
----------
fname : str
The filename.
Returns
-------
list of NDArray, RowSparseNDArray or CSRNDArray, or \
dict of str to NDArray, RowSparseNDArray or CSRNDArray
Loaded data.
"""
if not isinstance(fname, string_types):
raise TypeError('fname required to be a string')
out_size = mx_uint()
out_name_size = mx_uint()
handles = ctypes.POINTER(NDArrayHandle)()
names = ctypes.POINTER(ctypes.c_char_p)()
check_call(_LIB.MXNDArrayLoad(c_str(fname),
ctypes.byref(out_size),
ctypes.byref(handles),
ctypes.byref(out_name_size),
ctypes.byref(names)))
if out_name_size.value == 0:
return [_ndarray_cls(NDArrayHandle(handles[i])) for i in range(out_size.value)]
else:
assert out_name_size.value == out_size.value
return dict(
(py_str(names[i]), _ndarray_cls(NDArrayHandle(handles[i])))
for i in range(out_size.value)) |
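A hedged round-trip sketch, assuming MXNet is installed; save is the counterpart mentioned in the docstring above, and the file name is arbitrary:

import mxnet as mx

arrays = {'weight': mx.nd.ones((2, 3)), 'bias': mx.nd.zeros((3,))}
mx.nd.save('params.ndarray', arrays)    # names are preserved because a dict was passed
loaded = mx.nd.load('params.ndarray')   # returns a dict of str -> NDArray
sorted(loaded.keys())                   # ['bias', 'weight']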
def toplevelTryFunc(func, *args, **kwargs):
'Thread entry-point for `func(*args, **kwargs)` with try/except wrapper'
t = threading.current_thread()
t.name = func.__name__
ret = None
try:
ret = func(*args, **kwargs)
except EscapeException as e: # user aborted
t.status += 'aborted by user'
status('%s aborted' % t.name, priority=2)
except Exception as e:
t.exception = e
exceptionCaught(e)
if t.sheet:
t.sheet.currentThreads.remove(t)
return ret | Thread entry-point for `func(*args, **kwargs)` with try/except wrapper | Below is the instruction that describes the task:
### Input:
Thread entry-point for `func(*args, **kwargs)` with try/except wrapper
### Response:
def toplevelTryFunc(func, *args, **kwargs):
'Thread entry-point for `func(*args, **kwargs)` with try/except wrapper'
t = threading.current_thread()
t.name = func.__name__
ret = None
try:
ret = func(*args, **kwargs)
except EscapeException as e: # user aborted
t.status += 'aborted by user'
status('%s aborted' % t.name, priority=2)
except Exception as e:
t.exception = e
exceptionCaught(e)
if t.sheet:
t.sheet.currentThreads.remove(t)
return ret |
def _RepackTemplates(self):
"""Repack templates with a dummy config."""
dummy_config = os.path.join(
args.grr_src, "grr/test/grr_response_test/test_data/dummyconfig.yaml")
if args.build_32:
template_i386 = glob.glob(os.path.join(args.output_dir,
"*_i386*.zip")).pop()
template_amd64 = glob.glob(os.path.join(args.output_dir,
"*_amd64*.zip")).pop()
# We put the installers in the output dir so they get stored as build
# artifacts and we can test the 32bit build manually.
subprocess.check_call([
self.grr_client_build64, "--verbose", "--secondary_configs",
dummy_config, "repack", "--template", template_amd64, "--output_dir",
args.output_dir
])
subprocess.check_call([
self.grr_client_build64, "--verbose", "--context",
"DebugClientBuild Context", "--secondary_configs", dummy_config,
"repack", "--template", template_amd64, "--output_dir", args.output_dir
])
if args.build_32:
subprocess.check_call([
self.grr_client_build32, "--verbose", "--secondary_configs",
dummy_config, "repack", "--template", template_i386, "--output_dir",
args.output_dir
])
subprocess.check_call([
self.grr_client_build32, "--verbose", "--context",
"DebugClientBuild Context", "--secondary_configs", dummy_config,
"repack", "--template", template_i386, "--output_dir", args.output_dir
]) | Repack templates with a dummy config. | Below is the instruction that describes the task:
### Input:
Repack templates with a dummy config.
### Response:
def _RepackTemplates(self):
"""Repack templates with a dummy config."""
dummy_config = os.path.join(
args.grr_src, "grr/test/grr_response_test/test_data/dummyconfig.yaml")
if args.build_32:
template_i386 = glob.glob(os.path.join(args.output_dir,
"*_i386*.zip")).pop()
template_amd64 = glob.glob(os.path.join(args.output_dir,
"*_amd64*.zip")).pop()
# We put the installers in the output dir so they get stored as build
# artifacts and we can test the 32bit build manually.
subprocess.check_call([
self.grr_client_build64, "--verbose", "--secondary_configs",
dummy_config, "repack", "--template", template_amd64, "--output_dir",
args.output_dir
])
subprocess.check_call([
self.grr_client_build64, "--verbose", "--context",
"DebugClientBuild Context", "--secondary_configs", dummy_config,
"repack", "--template", template_amd64, "--output_dir", args.output_dir
])
if args.build_32:
subprocess.check_call([
self.grr_client_build32, "--verbose", "--secondary_configs",
dummy_config, "repack", "--template", template_i386, "--output_dir",
args.output_dir
])
subprocess.check_call([
self.grr_client_build32, "--verbose", "--context",
"DebugClientBuild Context", "--secondary_configs", dummy_config,
"repack", "--template", template_i386, "--output_dir", args.output_dir
]) |
def middleware(self, *args, **kwargs):
"""
A decorator that can be used to implement a Middleware plugin to
all of the Blueprints that belongs to this specific Blueprint Group.
In case of nested Blueprint Groups, the same middleware is applied
across each of the Blueprints recursively.
:param args: Optional positional Parameters to be use middleware
:param kwargs: Optional Keyword arg to use with Middleware
:return: Partial function to apply the middleware
"""
kwargs["bp_group"] = True
def register_middleware_for_blueprints(fn):
for blueprint in self.blueprints:
blueprint.middleware(fn, *args, **kwargs)
return register_middleware_for_blueprints | A decorator that can be used to implement a Middleware plugin to
all of the Blueprints that belongs to this specific Blueprint Group.
In case of nested Blueprint Groups, the same middleware is applied
across each of the Blueprints recursively.
:param args: Optional positional Parameters to be use middleware
:param kwargs: Optional Keyword arg to use with Middleware
:return: Partial function to apply the middleware | Below is the instruction that describes the task:
### Input:
A decorator that can be used to implement a Middleware plugin to
all of the Blueprints that belongs to this specific Blueprint Group.
In case of nested Blueprint Groups, the same middleware is applied
across each of the Blueprints recursively.
:param args: Optional positional Parameters to be use middleware
:param kwargs: Optional Keyword arg to use with Middleware
:return: Partial function to apply the middleware
### Response:
def middleware(self, *args, **kwargs):
"""
A decorator that can be used to implement a Middleware plugin to
all of the Blueprints that belongs to this specific Blueprint Group.
In case of nested Blueprint Groups, the same middleware is applied
across each of the Blueprints recursively.
:param args: Optional positional Parameters to be use middleware
:param kwargs: Optional Keyword arg to use with Middleware
:return: Partial function to apply the middleware
"""
kwargs["bp_group"] = True
def register_middleware_for_blueprints(fn):
for blueprint in self.blueprints:
blueprint.middleware(fn, *args, **kwargs)
return register_middleware_for_blueprints |
def _populate_worksheet(self, workbook, worksheet):
"""
Write the chart data contents to *worksheet* in category chart
layout. Write categories starting in the first column starting in
the second row, and proceeding one column per category level (for
charts having multi-level categories). Write series as columns
starting in the next following column, placing the series title in
the first cell.
"""
self._write_categories(workbook, worksheet)
self._write_series(workbook, worksheet) | Write the chart data contents to *worksheet* in category chart
layout. Write categories starting in the first column starting in
the second row, and proceeding one column per category level (for
charts having multi-level categories). Write series as columns
starting in the next following column, placing the series title in
the first cell. | Below is the instruction that describes the task:
### Input:
Write the chart data contents to *worksheet* in category chart
layout. Write categories starting in the first column starting in
the second row, and proceeding one column per category level (for
charts having multi-level categories). Write series as columns
starting in the next following column, placing the series title in
the first cell.
### Response:
def _populate_worksheet(self, workbook, worksheet):
"""
Write the chart data contents to *worksheet* in category chart
layout. Write categories starting in the first column starting in
the second row, and proceeding one column per category level (for
charts having multi-level categories). Write series as columns
starting in the next following column, placing the series title in
the first cell.
"""
self._write_categories(workbook, worksheet)
self._write_series(workbook, worksheet) |
def get_value(self, constraints, expression):
"""
Ask the solver for one possible result of given expression using given set of constraints.
"""
if not issymbolic(expression):
return expression
assert isinstance(expression, (Bool, BitVec, Array))
with constraints as temp_cs:
if isinstance(expression, Bool):
var = temp_cs.new_bool()
elif isinstance(expression, BitVec):
var = temp_cs.new_bitvec(expression.size)
elif isinstance(expression, Array):
var = []
result = []
for i in range(expression.index_max):
subvar = temp_cs.new_bitvec(expression.value_bits)
var.append(subvar)
temp_cs.add(subvar == simplify(expression[i]))
self._reset(temp_cs)
if not self._is_sat():
raise SolverError('Model is not available')
for i in range(expression.index_max):
self._send('(get-value (%s))' % var[i].name)
ret = self._recv()
assert ret.startswith('((') and ret.endswith('))')
pattern, base = self._get_value_fmt
m = pattern.match(ret)
expr, value = m.group('expr'), m.group('value')
result.append(int(value, base))
return bytes(result)
temp_cs.add(var == expression)
self._reset(temp_cs)
if not self._is_sat():
raise SolverError('Model is not available')
self._send('(get-value (%s))' % var.name)
ret = self._recv()
if not (ret.startswith('((') and ret.endswith('))')):
raise SolverError('SMTLIB error parsing response: %s' % ret)
if isinstance(expression, Bool):
return {'true': True, 'false': False}[ret[2:-2].split(' ')[1]]
if isinstance(expression, BitVec):
pattern, base = self._get_value_fmt
m = pattern.match(ret)
expr, value = m.group('expr'), m.group('value')
return int(value, base)
raise NotImplementedError("get_value only implemented for Bool and BitVec") | Ask the solver for one possible result of given expression using given set of constraints. | Below is the the instruction that describes the task:
### Input:
Ask the solver for one possible result of given expression using given set of constraints.
### Response:
def get_value(self, constraints, expression):
"""
Ask the solver for one possible result of given expression using given set of constraints.
"""
if not issymbolic(expression):
return expression
assert isinstance(expression, (Bool, BitVec, Array))
with constraints as temp_cs:
if isinstance(expression, Bool):
var = temp_cs.new_bool()
elif isinstance(expression, BitVec):
var = temp_cs.new_bitvec(expression.size)
elif isinstance(expression, Array):
var = []
result = []
for i in range(expression.index_max):
subvar = temp_cs.new_bitvec(expression.value_bits)
var.append(subvar)
temp_cs.add(subvar == simplify(expression[i]))
self._reset(temp_cs)
if not self._is_sat():
raise SolverError('Model is not available')
for i in range(expression.index_max):
self._send('(get-value (%s))' % var[i].name)
ret = self._recv()
assert ret.startswith('((') and ret.endswith('))')
pattern, base = self._get_value_fmt
m = pattern.match(ret)
expr, value = m.group('expr'), m.group('value')
result.append(int(value, base))
return bytes(result)
temp_cs.add(var == expression)
self._reset(temp_cs)
if not self._is_sat():
raise SolverError('Model is not available')
self._send('(get-value (%s))' % var.name)
ret = self._recv()
if not (ret.startswith('((') and ret.endswith('))')):
raise SolverError('SMTLIB error parsing response: %s' % ret)
if isinstance(expression, Bool):
return {'true': True, 'false': False}[ret[2:-2].split(' ')[1]]
if isinstance(expression, BitVec):
pattern, base = self._get_value_fmt
m = pattern.match(ret)
expr, value = m.group('expr'), m.group('value')
return int(value, base)
raise NotImplementedError("get_value only implemented for Bool and BitVec") |
def read_element_tag(fd, endian):
"""Read data element tag: type and number of bytes.
If tag is of the Small Data Element (SDE) type the element data
is also returned.
"""
data = fd.read(8)
mtpn = unpack(endian, 'I', data[:4])
# The most significant two bytes of mtpn will always be 0,
# if they are not, this must be SDE format
num_bytes = mtpn >> 16
if num_bytes > 0:
# small data element format
mtpn = mtpn & 0xFFFF
if num_bytes > 4:
raise ParseError('Error parsing Small Data Element (SDE) '
'formatted data')
data = data[4:4 + num_bytes]
else:
# regular element
num_bytes = unpack(endian, 'I', data[4:])
data = None
return (mtpn, num_bytes, data) | Read data element tag: type and number of bytes.
If tag is of the Small Data Element (SDE) type the element data
is also returned. | Below is the instruction that describes the task:
### Input:
Read data element tag: type and number of bytes.
If tag is of the Small Data Element (SDE) type the element data
is also returned.
### Response:
def read_element_tag(fd, endian):
"""Read data element tag: type and number of bytes.
If tag is of the Small Data Element (SDE) type the element data
is also returned.
"""
data = fd.read(8)
mtpn = unpack(endian, 'I', data[:4])
# The most significant two bytes of mtpn will always be 0,
# if they are not, this must be SDE format
num_bytes = mtpn >> 16
if num_bytes > 0:
# small data element format
mtpn = mtpn & 0xFFFF
if num_bytes > 4:
raise ParseError('Error parsing Small Data Element (SDE) '
'formatted data')
data = data[4:4 + num_bytes]
else:
# regular element
num_bytes = unpack(endian, 'I', data[4:])
data = None
return (mtpn, num_bytes, data) |
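A standalone sketch of the two tag layouts the parser above distinguishes, using struct directly rather than the module's unpack helper and assuming little-endian data:

import struct

# Regular tag: 4 bytes of type, 4 bytes of byte count; the upper 16 bits of the type stay zero.
regular = struct.pack('<II', 9, 40)
mtpn, num_bytes = struct.unpack('<II', regular)
assert mtpn >> 16 == 0 and num_bytes == 40

# Small Data Element: the byte count is packed into the upper 16 bits and the
# data itself fills the rest of the same 8-byte tag.
sde = struct.pack('<HH4s', 3, 4, b'\x01\x00\x02\x00')
(packed,) = struct.unpack('<I', sde[:4])
assert packed >> 16 == 4 and packed & 0xFFFF == 3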
def get_decorators(func):
"""
Return a list of decorator names for this function.
"""
decorators = []
# Parse the source code of the function with ast to find the names of
# all of its decorators.
tree = ast.parse(inspect.getsource(func))
for node in ast.iter_child_nodes(tree):
for dnode in node.decorator_list:
if isinstance(dnode, ast.Name):
decorator = func.func_globals[dnode.id]
elif isinstance(dnode, ast.Attribute):
module = func.func_globals[dnode.value.id]
decorator = getattr(module, dnode.attr)
else:
raise Exception("Unable to handle decorator node: %s" % dnode)
decorators.append(decorator)
return decorators | Return a list of decorator names for this function. | Below is the instruction that describes the task:
### Input:
Return a list of decorator names for this function.
### Response:
def get_decorators(func):
"""
Return a list of decorator names for this function.
"""
decorators = []
# Parse the source code of the function with ast to find the names of
# all of its decorators.
tree = ast.parse(inspect.getsource(func))
for node in ast.iter_child_nodes(tree):
for dnode in node.decorator_list:
if isinstance(dnode, ast.Name):
decorator = func.func_globals[dnode.id]
elif isinstance(dnode, ast.Attribute):
module = func.func_globals[dnode.value.id]
decorator = getattr(module, dnode.attr)
else:
raise Exception("Unable to handle decorator node: %s" % dnode)
decorators.append(decorator)
return decorators |
def write_to(f, mode):
"""Flexible writing, where f can be a filename or f object, if filename, closed after writing"""
if hasattr(f, 'write'):
yield f
else:
f = open(f, mode)
yield f
f.close() | Flexible writing, where f can be a filename or f object, if filename, closed after writing | Below is the instruction that describes the task:
### Input:
Flexible writing, where f can be a filename or f object, if filename, closed after writing
### Response:
def write_to(f, mode):
"""Flexible writing, where f can be a filename or f object, if filename, closed after writing"""
if hasattr(f, 'write'):
yield f
else:
f = open(f, mode)
yield f
f.close() |
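An illustrative way to drive the generator above; in its home project it may well be wrapped with contextlib.contextmanager, in which case the with-statement form applies instead. Note that f.close() is skipped if the loop body raises, which is exactly why the contextmanager form is the safer one:

import io

for fh in write_to('notes.txt', 'w'):   # filename: opened, yielded once, then closed
    fh.write('hello\n')

buf = io.StringIO()
for fh in write_to(buf, 'w'):           # existing file object: yielded as-is, left open
    fh.write('hello\n')
buf.getvalue()                          # 'hello\n'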
def get_projects():
"""
Send a dictionary of projects that are available on the database.
Usage description:
This function is usually called to get and display the list of projects available in the database.
:return: JSON, {<int_keys>: <project_name>}
"""
assert request.method == "GET", "GET request expected received {}".format(request.method)
try:
if request.method == 'GET':
projects = utils.get_projects()
return jsonify(projects)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"}) | Send a dictionary of projects that are available on the database.
Usage description:
This function is usually called to get and display the list of projects available in the database.
:return: JSON, {<int_keys>: <project_name>} | Below is the instruction that describes the task:
### Input:
Send a dictionary of projects that are available on the database.
Usage description:
This function is usually called to get and display the list of projects available in the database.
:return: JSON, {<int_keys>: <project_name>}
### Response:
def get_projects():
"""
Send a dictionary of projects that are available on the database.
Usage description:
This function is usually called to get and display the list of projects available in the database.
:return: JSON, {<int_keys>: <project_name>}
"""
assert request.method == "GET", "GET request expected received {}".format(request.method)
try:
if request.method == 'GET':
projects = utils.get_projects()
return jsonify(projects)
except Exception as e:
logging.error(e)
return jsonify({"0": "__EMPTY"}) |
def errored_tasks(self):
"""List of errored tasks."""
etasks = []
for status in [self.S_ERROR, self.S_QCRITICAL, self.S_ABICRITICAL]:
etasks.extend(list(self.iflat_tasks(status=status)))
return set(etasks) | List of errored tasks. | Below is the instruction that describes the task:
### Input:
List of errored tasks.
### Response:
def errored_tasks(self):
"""List of errored tasks."""
etasks = []
for status in [self.S_ERROR, self.S_QCRITICAL, self.S_ABICRITICAL]:
etasks.extend(list(self.iflat_tasks(status=status)))
return set(etasks) |
def security_rule_create_or_update(name, access, direction, priority, protocol, security_group, resource_group,
source_address_prefix=None, destination_address_prefix=None, source_port_range=None,
destination_port_range=None, source_address_prefixes=None,
destination_address_prefixes=None, source_port_ranges=None,
destination_port_ranges=None, **kwargs):
'''
.. versionadded:: 2019.2.0
Create or update a security rule within a specified network security group.
:param name: The name of the security rule to create.
:param access:
'allow' or 'deny'
:param direction:
'inbound' or 'outbound'
:param priority:
Integer between 100 and 4096 used for ordering rule application.
:param protocol:
'tcp', 'udp', or '*'
:param destination_address_prefix:
The CIDR or destination IP range. Asterix '*' can also be used to match all destination IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param destination_port_range:
The destination port or range. Integer or range between 0 and 65535. Asterix '*'
can also be used to match all ports.
:param source_address_prefix:
The CIDR or source IP range. Asterix '*' can also be used to match all source IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param source_port_range:
The source port or range. Integer or range between 0 and 65535. Asterix '*'
can also be used to match all ports.
:param destination_address_prefixes:
A list of destination_address_prefix values. This parameter overrides destination_address_prefix
and will cause any value entered there to be ignored.
:param destination_port_ranges:
A list of destination_port_range values. This parameter overrides destination_port_range
and will cause any value entered there to be ignored.
:param source_address_prefixes:
A list of source_address_prefix values. This parameter overrides source_address_prefix
and will cause any value entered there to be ignored.
:param source_port_ranges:
A list of source_port_range values. This parameter overrides source_port_range
and will cause any value entered there to be ignored.
:param security_group: The network security group containing the
security rule.
:param resource_group: The resource group name assigned to the
network security group.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.security_rule_create_or_update testrule1 allow outbound 101 tcp testnsg testgroup \
source_address_prefix='*' destination_address_prefix=internet source_port_range='*' \
destination_port_range='1-1024'
'''
exclusive_params = [
('source_port_ranges', 'source_port_range'),
('source_address_prefixes', 'source_address_prefix'),
('destination_port_ranges', 'destination_port_range'),
('destination_address_prefixes', 'destination_address_prefix'),
]
for params in exclusive_params:
# pylint: disable=eval-used
if not eval(params[0]) and not eval(params[1]):
log.error(
'Either the %s or %s parameter must be provided!',
params[0], params[1]
)
return False
# pylint: disable=eval-used
if eval(params[0]):
# pylint: disable=exec-used
exec('{0} = None'.format(params[1]))
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
rulemodel = __utils__['azurearm.create_object_model'](
'network',
'SecurityRule',
name=name,
access=access,
direction=direction,
priority=priority,
protocol=protocol,
source_port_ranges=source_port_ranges,
source_port_range=source_port_range,
source_address_prefixes=source_address_prefixes,
source_address_prefix=source_address_prefix,
destination_port_ranges=destination_port_ranges,
destination_port_range=destination_port_range,
destination_address_prefixes=destination_address_prefixes,
destination_address_prefix=destination_address_prefix,
**kwargs
)
except TypeError as exc:
result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
return result
try:
secrule = netconn.security_rules.create_or_update(
resource_group_name=resource_group,
network_security_group_name=security_group,
security_rule_name=name,
security_rule_parameters=rulemodel
)
secrule.wait()
secrule_result = secrule.result()
result = secrule_result.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
except SerializationError as exc:
result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
return result | .. versionadded:: 2019.2.0
Create or update a security rule within a specified network security group.
:param name: The name of the security rule to create.
:param access:
'allow' or 'deny'
:param direction:
'inbound' or 'outbound'
:param priority:
Integer between 100 and 4096 used for ordering rule application.
:param protocol:
'tcp', 'udp', or '*'
:param destination_address_prefix:
The CIDR or destination IP range. Asterix '*' can also be used to match all destination IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param destination_port_range:
The destination port or range. Integer or range between 0 and 65535. Asterix '*'
can also be used to match all ports.
:param source_address_prefix:
The CIDR or source IP range. Asterix '*' can also be used to match all source IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param source_port_range:
The source port or range. Integer or range between 0 and 65535. Asterix '*'
can also be used to match all ports.
:param destination_address_prefixes:
A list of destination_address_prefix values. This parameter overrides destination_address_prefix
and will cause any value entered there to be ignored.
:param destination_port_ranges:
A list of destination_port_range values. This parameter overrides destination_port_range
and will cause any value entered there to be ignored.
:param source_address_prefixes:
A list of source_address_prefix values. This parameter overrides source_address_prefix
and will cause any value entered there to be ignored.
:param source_port_ranges:
A list of source_port_range values. This parameter overrides source_port_range
and will cause any value entered there to be ignored.
:param security_group: The network security group containing the
security rule.
:param resource_group: The resource group name assigned to the
network security group.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.security_rule_create_or_update testrule1 allow outbound 101 tcp testnsg testgroup \
source_address_prefix='*' destination_address_prefix=internet source_port_range='*' \
destination_port_range='1-1024' | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
Create or update a security rule within a specified network security group.
:param name: The name of the security rule to create.
:param access:
'allow' or 'deny'
:param direction:
'inbound' or 'outbound'
:param priority:
Integer between 100 and 4096 used for ordering rule application.
:param protocol:
'tcp', 'udp', or '*'
:param destination_address_prefix:
The CIDR or destination IP range. Asterix '*' can also be used to match all destination IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param destination_port_range:
The destination port or range. Integer or range between 0 and 65535. Asterix '*'
can also be used to match all ports.
:param source_address_prefix:
The CIDR or source IP range. Asterix '*' can also be used to match all source IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param source_port_range:
The source port or range. Integer or range between 0 and 65535. Asterix '*'
can also be used to match all ports.
:param destination_address_prefixes:
A list of destination_address_prefix values. This parameter overrides destination_address_prefix
and will cause any value entered there to be ignored.
:param destination_port_ranges:
A list of destination_port_range values. This parameter overrides destination_port_range
and will cause any value entered there to be ignored.
:param source_address_prefixes:
A list of source_address_prefix values. This parameter overrides source_address_prefix
and will cause any value entered there to be ignored.
:param source_port_ranges:
A list of source_port_range values. This parameter overrides source_port_range
and will cause any value entered there to be ignored.
:param security_group: The network security group containing the
security rule.
:param resource_group: The resource group name assigned to the
network security group.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.security_rule_create_or_update testrule1 allow outbound 101 tcp testnsg testgroup \
source_address_prefix='*' destination_address_prefix=internet source_port_range='*' \
destination_port_range='1-1024'
### Response:
def security_rule_create_or_update(name, access, direction, priority, protocol, security_group, resource_group,
source_address_prefix=None, destination_address_prefix=None, source_port_range=None,
destination_port_range=None, source_address_prefixes=None,
destination_address_prefixes=None, source_port_ranges=None,
destination_port_ranges=None, **kwargs):
'''
.. versionadded:: 2019.2.0
Create or update a security rule within a specified network security group.
:param name: The name of the security rule to create.
:param access:
'allow' or 'deny'
:param direction:
'inbound' or 'outbound'
:param priority:
Integer between 100 and 4096 used for ordering rule application.
:param protocol:
'tcp', 'udp', or '*'
:param destination_address_prefix:
The CIDR or destination IP range. Asterix '*' can also be used to match all destination IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param destination_port_range:
The destination port or range. Integer or range between 0 and 65535. Asterix '*'
can also be used to match all ports.
:param source_address_prefix:
The CIDR or source IP range. Asterix '*' can also be used to match all source IPs.
Default tags such as 'VirtualNetwork', 'AzureLoadBalancer' and 'Internet' can also be used.
If this is an ingress rule, specifies where network traffic originates from.
:param source_port_range:
The source port or range. Integer or range between 0 and 65535. Asterix '*'
can also be used to match all ports.
:param destination_address_prefixes:
A list of destination_address_prefix values. This parameter overrides destination_address_prefix
and will cause any value entered there to be ignored.
:param destination_port_ranges:
A list of destination_port_range values. This parameter overrides destination_port_range
and will cause any value entered there to be ignored.
:param source_address_prefixes:
A list of source_address_prefix values. This parameter overrides source_address_prefix
and will cause any value entered there to be ignored.
:param source_port_ranges:
A list of source_port_range values. This parameter overrides source_port_range
and will cause any value entered there to be ignored.
:param security_group: The network security group containing the
security rule.
:param resource_group: The resource group name assigned to the
network security group.
CLI Example:
.. code-block:: bash
salt-call azurearm_network.security_rule_create_or_update testrule1 allow outbound 101 tcp testnsg testgroup \
source_address_prefix='*' destination_address_prefix=internet source_port_range='*' \
destination_port_range='1-1024'
'''
exclusive_params = [
('source_port_ranges', 'source_port_range'),
('source_address_prefixes', 'source_address_prefix'),
('destination_port_ranges', 'destination_port_range'),
('destination_address_prefixes', 'destination_address_prefix'),
]
for params in exclusive_params:
# pylint: disable=eval-used
if not eval(params[0]) and not eval(params[1]):
log.error(
'Either the %s or %s parameter must be provided!',
params[0], params[1]
)
return False
# pylint: disable=eval-used
if eval(params[0]):
# pylint: disable=exec-used
exec('{0} = None'.format(params[1]))
netconn = __utils__['azurearm.get_client']('network', **kwargs)
try:
rulemodel = __utils__['azurearm.create_object_model'](
'network',
'SecurityRule',
name=name,
access=access,
direction=direction,
priority=priority,
protocol=protocol,
source_port_ranges=source_port_ranges,
source_port_range=source_port_range,
source_address_prefixes=source_address_prefixes,
source_address_prefix=source_address_prefix,
destination_port_ranges=destination_port_ranges,
destination_port_range=destination_port_range,
destination_address_prefixes=destination_address_prefixes,
destination_address_prefix=destination_address_prefix,
**kwargs
)
except TypeError as exc:
result = {'error': 'The object model could not be built. ({0})'.format(str(exc))}
return result
try:
secrule = netconn.security_rules.create_or_update(
resource_group_name=resource_group,
network_security_group_name=security_group,
security_rule_name=name,
security_rule_parameters=rulemodel
)
secrule.wait()
secrule_result = secrule.result()
result = secrule_result.as_dict()
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('network', str(exc), **kwargs)
result = {'error': str(exc)}
except SerializationError as exc:
result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))}
return result |
def check_standard_name(self, ds):
'''
Check a variables's standard_name attribute to ensure that it meets CF
compliance.
CF §3.3 A standard name is associated with a variable via the attribute
standard_name which takes a string value comprised of a standard name
optionally followed by one or more blanks and a standard name modifier
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
'''
ret_val = []
coord_vars = self._find_coord_vars(ds)
aux_coord_vars = self._find_aux_coord_vars(ds)
axis_vars = cfutil.get_axis_variables(ds)
flag_vars = cfutil.get_flag_variables(ds)
geophysical_vars = self._find_geophysical_vars(ds)
variables_requiring_standard_names = coord_vars + aux_coord_vars + axis_vars + flag_vars + geophysical_vars
for name in set(variables_requiring_standard_names):
# Compression indices used in reduced horizontal grids or
# compression schemes do not require attributes other than compress
if cfutil.is_compression_coordinate(ds, name):
continue
ncvar = ds.variables[name]
# §9 doesn't explicitly allow instance variables as coordinates but
# it's loosely implied. Just in case, skip it.
if hasattr(ncvar, 'cf_role'):
continue
# Unfortunately, §6.1 allows for string types to be listed as
# coordinates.
if ncvar.dtype.char == 'S':
continue
standard_name = getattr(ncvar, 'standard_name', None)
standard_name, standard_name_modifier = self._split_standard_name(standard_name)
long_name = getattr(ncvar, 'long_name', None)
long_or_std_name = TestCtx(BaseCheck.HIGH, self.section_titles['3.3'])
if long_name is not None:
long_name_present = True
long_or_std_name.assert_true(isinstance(long_name, basestring),
"Attribute long_name for variable {} must be a string".format(name))
else:
long_name_present = False
# §1.3 The long_name and standard_name attributes are used to
# describe the content of each variable. For backwards
# compatibility with COARDS neither is required, but use of at
# least one of them is strongly recommended.
# If standard_name is not defined but long_name is, don't continue
# the check for this variable
if standard_name is not None:
standard_name_present = True
valid_std_name = TestCtx(BaseCheck.HIGH, self.section_titles['3.3'])
valid_std_name.assert_true(isinstance(standard_name, basestring),
"Attribute standard_name for variable {} must be a string".format(name))
if isinstance(standard_name, basestring):
valid_std_name.assert_true(standard_name in self._std_names,
"standard_name {} is not defined in Standard Name Table v{}".format(
standard_name or 'undefined',
self._std_names._version))
ret_val.append(valid_std_name.to_result())
# 2) optional - if modifiers, should be in table
if standard_name_modifier is not None:
valid_modifier = TestCtx(BaseCheck.HIGH, self.section_titles["3.3"])
allowed = ['detection_minimum',
'number_of_observations',
'standard_error',
'status_flag']
valid_modifier.assert_true(standard_name_modifier in allowed,
"standard_name modifier {} for variable {} is not a valid modifier "
"according to appendix C".format(standard_name_modifier, name))
ret_val.append(valid_modifier.to_result())
else:
standard_name_present = False
long_or_std_name.assert_true(long_name_present or
standard_name_present,
"Attribute long_name or/and standard_name is highly recommended for variable {}".format(name))
ret_val.append(long_or_std_name.to_result())
return ret_val | Check a variables's standard_name attribute to ensure that it meets CF
compliance.
CF §3.3 A standard name is associated with a variable via the attribute
standard_name which takes a string value comprised of a standard name
optionally followed by one or more blanks and a standard name modifier
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results | Below is the instruction that describes the task:
### Input:
Check a variables's standard_name attribute to ensure that it meets CF
compliance.
CF §3.3 A standard name is associated with a variable via the attribute
standard_name which takes a string value comprised of a standard name
optionally followed by one or more blanks and a standard name modifier
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
### Response:
def check_standard_name(self, ds):
'''
Check a variables's standard_name attribute to ensure that it meets CF
compliance.
CF §3.3 A standard name is associated with a variable via the attribute
standard_name which takes a string value comprised of a standard name
optionally followed by one or more blanks and a standard name modifier
:param netCDF4.Dataset ds: An open netCDF dataset
:rtype: list
:return: List of results
'''
ret_val = []
coord_vars = self._find_coord_vars(ds)
aux_coord_vars = self._find_aux_coord_vars(ds)
axis_vars = cfutil.get_axis_variables(ds)
flag_vars = cfutil.get_flag_variables(ds)
geophysical_vars = self._find_geophysical_vars(ds)
variables_requiring_standard_names = coord_vars + aux_coord_vars + axis_vars + flag_vars + geophysical_vars
for name in set(variables_requiring_standard_names):
# Compression indices used in reduced horizontal grids or
# compression schemes do not require attributes other than compress
if cfutil.is_compression_coordinate(ds, name):
continue
ncvar = ds.variables[name]
# §9 doesn't explicitly allow instance variables as coordinates but
# it's loosely implied. Just in case, skip it.
if hasattr(ncvar, 'cf_role'):
continue
# Unfortunately, §6.1 allows for string types to be listed as
# coordinates.
if ncvar.dtype.char == 'S':
continue
standard_name = getattr(ncvar, 'standard_name', None)
standard_name, standard_name_modifier = self._split_standard_name(standard_name)
long_name = getattr(ncvar, 'long_name', None)
long_or_std_name = TestCtx(BaseCheck.HIGH, self.section_titles['3.3'])
if long_name is not None:
long_name_present = True
long_or_std_name.assert_true(isinstance(long_name, basestring),
"Attribute long_name for variable {} must be a string".format(name))
else:
long_name_present = False
# §1.3 The long_name and standard_name attributes are used to
# describe the content of each variable. For backwards
# compatibility with COARDS neither is required, but use of at
# least one of them is strongly recommended.
# If standard_name is not defined but long_name is, don't continue
# the check for this variable
if standard_name is not None:
standard_name_present = True
valid_std_name = TestCtx(BaseCheck.HIGH, self.section_titles['3.3'])
valid_std_name.assert_true(isinstance(standard_name, basestring),
"Attribute standard_name for variable {} must be a string".format(name))
if isinstance(standard_name, basestring):
valid_std_name.assert_true(standard_name in self._std_names,
"standard_name {} is not defined in Standard Name Table v{}".format(
standard_name or 'undefined',
self._std_names._version))
ret_val.append(valid_std_name.to_result())
# 2) optional - if modifiers, should be in table
if standard_name_modifier is not None:
valid_modifier = TestCtx(BaseCheck.HIGH, self.section_titles["3.3"])
allowed = ['detection_minimum',
'number_of_observations',
'standard_error',
'status_flag']
valid_modifier.assert_true(standard_name_modifier in allowed,
"standard_name modifier {} for variable {} is not a valid modifier "
"according to appendix C".format(standard_name_modifier, name))
ret_val.append(valid_modifier.to_result())
else:
standard_name_present = False
long_or_std_name.assert_true(long_name_present or
standard_name_present,
"Attribute long_name or/and standard_name is highly recommended for variable {}".format(name))
ret_val.append(long_or_std_name.to_result())
return ret_val |
def from_urlsafe(cls, urlsafe):
"""
Returns an instance of the model from a urlsafe string.
:param urlsafe: urlsafe key
:return: Instance of cls
"""
try:
key = ndb.Key(urlsafe=urlsafe)
except:
return None
obj = key.get()
if obj and isinstance(obj, cls):
return obj | Returns an instance of the model from a urlsafe string.
:param urlsafe: urlsafe key
:return: Instance of cls | Below is the instruction that describes the task:
### Input:
Returns an instance of the model from a urlsafe string.
:param urlsafe: urlsafe key
:return: Instance of cls
### Response:
def from_urlsafe(cls, urlsafe):
"""
Returns an instance of the model from a urlsafe string.
:param urlsafe: urlsafe key
:return: Instance of cls
"""
try:
key = ndb.Key(urlsafe=urlsafe)
except:
return None
obj = key.get()
if obj and isinstance(obj, cls):
return obj |
def verify_signature(key_dict, signature, data):
"""
<Purpose>
Determine whether the private key belonging to 'key_dict' produced
'signature'. verify_signature() will use the public key found in
'key_dict', the 'sig' objects contained in 'signature', and 'data' to
complete the verification.
>>> ed25519_key = generate_ed25519_key()
>>> data = 'The quick brown fox jumps over the lazy dog'
>>> signature = create_signature(ed25519_key, data)
>>> verify_signature(ed25519_key, signature, data)
True
>>> verify_signature(ed25519_key, signature, 'bad_data')
False
>>> rsa_key = generate_rsa_key()
>>> signature = create_signature(rsa_key, data)
>>> verify_signature(rsa_key, signature, data)
True
>>> verify_signature(rsa_key, signature, 'bad_data')
False
>>> ecdsa_key = generate_ecdsa_key()
>>> signature = create_signature(ecdsa_key, data)
>>> verify_signature(ecdsa_key, signature, data)
True
>>> verify_signature(ecdsa_key, signature, 'bad_data')
False
<Arguments>
key_dict:
A dictionary containing the keys and other identifying information.
If 'key_dict' is an RSA key, it has the form:
{'keytype': 'rsa',
'scheme': 'rsassa-pss-sha256',
'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
The public and private keys are strings in PEM format.
signature:
The signature dictionary produced by one of the key generation functions.
'signature' has the form:
{'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'sig': sig}.
Conformant to 'securesystemslib.formats.SIGNATURE_SCHEMA'.
data:
Data that the signature is expected to be over. This should be a bytes
object; data should be encoded/serialized before it is passed here.)
This is the same value that can be passed into
securesystemslib.create_signature() in order to create the signature.
<Exceptions>
securesystemslib.exceptions.FormatError, raised if either 'key_dict' or
'signature' are improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'key_dict' or
'signature' specifies an unsupported algorithm.
securesystemslib.exceptions.CryptoError, if the KEYID in the given
'key_dict' does not match the KEYID in 'signature'.
<Side Effects>
The cryptography library specified in 'settings' called to do the actual
verification.
<Returns>
Boolean. True if the signature is valid, False otherwise.
"""
# Does 'key_dict' have the correct format?
# This check will ensure 'key_dict' has the appropriate number
# of objects and object types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.ANYKEY_SCHEMA.check_match(key_dict)
# Does 'signature' have the correct format?
securesystemslib.formats.SIGNATURE_SCHEMA.check_match(signature)
# Verify that the KEYID in 'key_dict' matches the KEYID listed in the
# 'signature'.
if key_dict['keyid'] != signature['keyid']:
raise securesystemslib.exceptions.CryptoError('The KEYID ('
' ' + repr(key_dict['keyid']) + ' ) in the given key does not match'
' the KEYID ( ' + repr(signature['keyid']) + ' ) in the signature.')
else:
logger.debug('The KEYIDs of key_dict and the signature match.')
# Using the public key belonging to 'key_dict'
# (i.e., rsakey_dict['keyval']['public']), verify whether 'signature'
# was produced by key_dict's corresponding private key
# key_dict['keyval']['private'].
sig = signature['sig']
sig = binascii.unhexlify(sig.encode('utf-8'))
public = key_dict['keyval']['public']
keytype = key_dict['keytype']
scheme = key_dict['scheme']
valid_signature = False
if keytype == 'rsa':
if scheme == 'rsassa-pss-sha256':
valid_signature = securesystemslib.pyca_crypto_keys.verify_rsa_signature(sig,
scheme, public, data)
else:
raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
' signature scheme is specified: ' + repr(scheme))
elif keytype == 'ed25519':
if scheme == 'ed25519':
public = binascii.unhexlify(public.encode('utf-8'))
valid_signature = securesystemslib.ed25519_keys.verify_signature(public,
scheme, sig, data, use_pynacl=USE_PYNACL)
else:
raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
' signature scheme is specified: ' + repr(scheme))
elif keytype == 'ecdsa-sha2-nistp256':
if scheme == 'ecdsa-sha2-nistp256':
valid_signature = securesystemslib.ecdsa_keys.verify_signature(public,
scheme, sig, data)
else:
raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
' signature scheme is specified: ' + repr(scheme))
# 'securesystemslib.formats.ANYKEY_SCHEMA' should have detected invalid key
# types. This is a defensive check against an invalid key type.
else: # pragma: no cover
raise TypeError('Unsupported key type.')
return valid_signature | <Purpose>
Determine whether the private key belonging to 'key_dict' produced
'signature'. verify_signature() will use the public key found in
'key_dict', the 'sig' objects contained in 'signature', and 'data' to
complete the verification.
>>> ed25519_key = generate_ed25519_key()
>>> data = 'The quick brown fox jumps over the lazy dog'
>>> signature = create_signature(ed25519_key, data)
>>> verify_signature(ed25519_key, signature, data)
True
>>> verify_signature(ed25519_key, signature, 'bad_data')
False
>>> rsa_key = generate_rsa_key()
>>> signature = create_signature(rsa_key, data)
>>> verify_signature(rsa_key, signature, data)
True
>>> verify_signature(rsa_key, signature, 'bad_data')
False
>>> ecdsa_key = generate_ecdsa_key()
>>> signature = create_signature(ecdsa_key, data)
>>> verify_signature(ecdsa_key, signature, data)
True
>>> verify_signature(ecdsa_key, signature, 'bad_data')
False
<Arguments>
key_dict:
A dictionary containing the keys and other identifying information.
If 'key_dict' is an RSA key, it has the form:
{'keytype': 'rsa',
'scheme': 'rsassa-pss-sha256',
'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
The public and private keys are strings in PEM format.
signature:
The signature dictionary produced by one of the key generation functions.
'signature' has the form:
{'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'sig': sig}.
Conformant to 'securesystemslib.formats.SIGNATURE_SCHEMA'.
data:
Data that the signature is expected to be over. This should be a bytes
object; data should be encoded/serialized before it is passed here.)
This is the same value that can be passed into
securesystemslib.create_signature() in order to create the signature.
<Exceptions>
securesystemslib.exceptions.FormatError, raised if either 'key_dict' or
'signature' are improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'key_dict' or
'signature' specifies an unsupported algorithm.
securesystemslib.exceptions.CryptoError, if the KEYID in the given
'key_dict' does not match the KEYID in 'signature'.
<Side Effects>
The cryptography library specified in 'settings' called to do the actual
verification.
<Returns>
Boolean. True if the signature is valid, False otherwise. | Below is the instruction that describes the task:
### Input:
<Purpose>
Determine whether the private key belonging to 'key_dict' produced
'signature'. verify_signature() will use the public key found in
'key_dict', the 'sig' objects contained in 'signature', and 'data' to
complete the verification.
>>> ed25519_key = generate_ed25519_key()
>>> data = 'The quick brown fox jumps over the lazy dog'
>>> signature = create_signature(ed25519_key, data)
>>> verify_signature(ed25519_key, signature, data)
True
>>> verify_signature(ed25519_key, signature, 'bad_data')
False
>>> rsa_key = generate_rsa_key()
>>> signature = create_signature(rsa_key, data)
>>> verify_signature(rsa_key, signature, data)
True
>>> verify_signature(rsa_key, signature, 'bad_data')
False
>>> ecdsa_key = generate_ecdsa_key()
>>> signature = create_signature(ecdsa_key, data)
>>> verify_signature(ecdsa_key, signature, data)
True
>>> verify_signature(ecdsa_key, signature, 'bad_data')
False
<Arguments>
key_dict:
A dictionary containing the keys and other identifying information.
If 'key_dict' is an RSA key, it has the form:
{'keytype': 'rsa',
'scheme': 'rsassa-pss-sha256',
'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
The public and private keys are strings in PEM format.
signature:
The signature dictionary produced by one of the key generation functions.
'signature' has the form:
{'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'sig': sig}.
Conformant to 'securesystemslib.formats.SIGNATURE_SCHEMA'.
data:
Data that the signature is expected to be over. This should be a bytes
object; data should be encoded/serialized before it is passed here.)
This is the same value that can be passed into
securesystemslib.create_signature() in order to create the signature.
<Exceptions>
securesystemslib.exceptions.FormatError, raised if either 'key_dict' or
'signature' are improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'key_dict' or
'signature' specifies an unsupported algorithm.
securesystemslib.exceptions.CryptoError, if the KEYID in the given
'key_dict' does not match the KEYID in 'signature'.
<Side Effects>
The cryptography library specified in 'settings' called to do the actual
verification.
<Returns>
Boolean. True if the signature is valid, False otherwise.
### Response:
def verify_signature(key_dict, signature, data):
"""
<Purpose>
Determine whether the private key belonging to 'key_dict' produced
'signature'. verify_signature() will use the public key found in
'key_dict', the 'sig' objects contained in 'signature', and 'data' to
complete the verification.
>>> ed25519_key = generate_ed25519_key()
>>> data = 'The quick brown fox jumps over the lazy dog'
>>> signature = create_signature(ed25519_key, data)
>>> verify_signature(ed25519_key, signature, data)
True
>>> verify_signature(ed25519_key, signature, 'bad_data')
False
>>> rsa_key = generate_rsa_key()
>>> signature = create_signature(rsa_key, data)
>>> verify_signature(rsa_key, signature, data)
True
>>> verify_signature(rsa_key, signature, 'bad_data')
False
>>> ecdsa_key = generate_ecdsa_key()
>>> signature = create_signature(ecdsa_key, data)
>>> verify_signature(ecdsa_key, signature, data)
True
>>> verify_signature(ecdsa_key, signature, 'bad_data')
False
<Arguments>
key_dict:
A dictionary containing the keys and other identifying information.
If 'key_dict' is an RSA key, it has the form:
{'keytype': 'rsa',
'scheme': 'rsassa-pss-sha256',
'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}
The public and private keys are strings in PEM format.
signature:
The signature dictionary produced by one of the key generation functions.
'signature' has the form:
{'keyid': 'f30a0870d026980100c0573bd557394f8c1bbd6...',
'sig': sig}.
Conformant to 'securesystemslib.formats.SIGNATURE_SCHEMA'.
data:
Data that the signature is expected to be over. This should be a bytes
object; data should be encoded/serialized before it is passed here.)
This is the same value that can be passed into
securesystemslib.create_signature() in order to create the signature.
<Exceptions>
securesystemslib.exceptions.FormatError, raised if either 'key_dict' or
'signature' are improperly formatted.
securesystemslib.exceptions.UnsupportedAlgorithmError, if 'key_dict' or
'signature' specifies an unsupported algorithm.
securesystemslib.exceptions.CryptoError, if the KEYID in the given
'key_dict' does not match the KEYID in 'signature'.
<Side Effects>
The cryptography library specified in 'settings' is called to do the actual
verification.
<Returns>
Boolean. True if the signature is valid, False otherwise.
"""
# Does 'key_dict' have the correct format?
# This check will ensure 'key_dict' has the appropriate number
# of objects and object types, and that all dict keys are properly named.
# Raise 'securesystemslib.exceptions.FormatError' if the check fails.
securesystemslib.formats.ANYKEY_SCHEMA.check_match(key_dict)
# Does 'signature' have the correct format?
securesystemslib.formats.SIGNATURE_SCHEMA.check_match(signature)
# Verify that the KEYID in 'key_dict' matches the KEYID listed in the
# 'signature'.
if key_dict['keyid'] != signature['keyid']:
raise securesystemslib.exceptions.CryptoError('The KEYID ('
' ' + repr(key_dict['keyid']) + ' ) in the given key does not match'
' the KEYID ( ' + repr(signature['keyid']) + ' ) in the signature.')
else:
logger.debug('The KEYIDs of key_dict and the signature match.')
# Using the public key belonging to 'key_dict'
# (i.e., rsakey_dict['keyval']['public']), verify whether 'signature'
# was produced by key_dict's corresponding private key
# key_dict['keyval']['private'].
sig = signature['sig']
sig = binascii.unhexlify(sig.encode('utf-8'))
public = key_dict['keyval']['public']
keytype = key_dict['keytype']
scheme = key_dict['scheme']
valid_signature = False
if keytype == 'rsa':
if scheme == 'rsassa-pss-sha256':
valid_signature = securesystemslib.pyca_crypto_keys.verify_rsa_signature(sig,
scheme, public, data)
else:
raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
' signature scheme is specified: ' + repr(scheme))
elif keytype == 'ed25519':
if scheme == 'ed25519':
public = binascii.unhexlify(public.encode('utf-8'))
valid_signature = securesystemslib.ed25519_keys.verify_signature(public,
scheme, sig, data, use_pynacl=USE_PYNACL)
else:
raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
' signature scheme is specified: ' + repr(scheme))
elif keytype == 'ecdsa-sha2-nistp256':
if scheme == 'ecdsa-sha2-nistp256':
valid_signature = securesystemslib.ecdsa_keys.verify_signature(public,
scheme, sig, data)
else:
raise securesystemslib.exceptions.UnsupportedAlgorithmError('Unsupported'
' signature scheme is specified: ' + repr(scheme))
# 'securesystemslib.formats.ANYKEY_SCHEMA' should have detected invalid key
# types. This is a defensive check against an invalid key type.
else: # pragma: no cover
raise TypeError('Unsupported key type.')
return valid_signature |
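A hedged usage sketch for the entry above, assuming these helpers live in securesystemslib.keys (as the doctests suggest) and that an ed25519 backend is installed; the keyid-mismatch case is the documented CryptoError path.
# Hedged sketch: exercises verify_signature() as documented in the entry above.
import securesystemslib.keys
import securesystemslib.exceptions

key = securesystemslib.keys.generate_ed25519_key()
data = b'The quick brown fox jumps over the lazy dog'
signature = securesystemslib.keys.create_signature(key, data)

print(securesystemslib.keys.verify_signature(key, signature, data))         # True
print(securesystemslib.keys.verify_signature(key, signature, b'bad_data'))  # False

# Verifying against a key whose keyid does not match the signature raises CryptoError.
other_key = securesystemslib.keys.generate_ed25519_key()
try:
    securesystemslib.keys.verify_signature(other_key, signature, data)
except securesystemslib.exceptions.CryptoError as exc:
    print('keyid mismatch:', exc)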
def exists(path, profile=None, hosts=None, scheme=None, username=None, password=None, default_acl=None):
'''
Check if path exists
path
path to check
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.exists /test/name profile=prod
'''
conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
username=username, password=password, default_acl=default_acl)
return bool(conn.exists(path)) | Check if path exists
path
path to check
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.exists /test/name profile=prod | Below is the instruction that describes the task:
### Input:
Check if path exists
path
path to check
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.exists /test/name profile=prod
### Response:
def exists(path, profile=None, hosts=None, scheme=None, username=None, password=None, default_acl=None):
'''
Check if path exists
path
path to check
profile
Configured Zookeeper profile to authenticate with (Default: None)
hosts
Lists of Zookeeper Hosts (Default: '127.0.0.1:2181')
scheme
Scheme to authenticate with (Default: 'digest')
username
Username to authenticate (Default: None)
password
Password to authenticate (Default: None)
default_acl
Default acls to assign if a node is created in this connection (Default: None)
CLI Example:
.. code-block:: bash
salt minion1 zookeeper.exists /test/name profile=prod
'''
conn = _get_zk_conn(profile=profile, hosts=hosts, scheme=scheme,
username=username, password=password, default_acl=default_acl)
return bool(conn.exists(path)) |
def generate_slug(value):
"""
Generates a slug using a Hashid of `value`.
COPIED from spectator.core.models.SluggedModelMixin() because migrations
don't make this happen automatically and perhaps the least bad thing is
to copy the method here, ugh.
"""
alphabet = app_settings.SLUG_ALPHABET
salt = app_settings.SLUG_SALT
hashids = Hashids(alphabet=alphabet, salt=salt, min_length=5)
return hashids.encode(value) | Generates a slug using a Hashid of `value`.
COPIED from spectator.core.models.SluggedModelMixin() because migrations
don't make this happen automatically and perhaps the least bad thing is
to copy the method here, ugh. | Below is the instruction that describes the task:
### Input:
Generates a slug using a Hashid of `value`.
COPIED from spectator.core.models.SluggedModelMixin() because migrations
don't make this happen automatically and perhaps the least bad thing is
to copy the method here, ugh.
### Response:
def generate_slug(value):
"""
Generates a slug using a Hashid of `value`.
COPIED from spectator.core.models.SluggedModelMixin() because migrations
don't make this happen automatically and perhaps the least bad thing is
to copy the method here, ugh.
"""
alphabet = app_settings.SLUG_ALPHABET
salt = app_settings.SLUG_SALT
hashids = Hashids(alphabet=alphabet, salt=salt, min_length=5)
return hashids.encode(value) |
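The helper above is a thin wrapper around the hashids package; a standalone sketch of the same call, with placeholder values standing in for the app_settings alphabet and salt, looks like this.
# Hedged sketch of the Hashids call above; ALPHABET and SALT are placeholders
# for the app_settings values referenced in the entry.
from hashids import Hashids

ALPHABET = 'abcdefghijkmnopqrstuvwxyz23456789'
SALT = 'example-salt'

hashids = Hashids(alphabet=ALPHABET, salt=SALT, min_length=5)
slug = hashids.encode(42)           # a short opaque string, at least 5 chars
print(slug, hashids.decode(slug))   # decode() round-trips back to (42,)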
def get_content_macro_by_macro_id(self, content_id, version, macro_id, callback=None):
"""
Returns the body of a macro (in storage format) with the given id.
This resource is primarily used by connect applications that require the body of macro to perform their work.
When content is created, if no macroId is specified, then Confluence will generate a random id.
The id is persisted as the content is saved and only modified by Confluence if there are conflicting IDs.
To preserve backwards compatibility this resource will also match on the hash of the macro body, even if a
macroId is found. This check will become redundant as pages get macroId's generated for them and transparently
propagate out to all instances.
:param content_id (string): A string containing the id of the content.
:param version (int): The version of the content to search.
:param macro_id (string): The macroID to find the corresponding macro.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the endpoint, or the results of the callback.
Will raise requests.HTTPError on bad input, potentially.
"""
return self._service_get_request("rest/api/content/{id}/history/{version}/macro/id/{macro_id}"
"".format(id=content_id, version=int(version), macro_id=macro_id),
callback=callback) | Returns the body of a macro (in storage format) with the given id.
This resource is primarily used by connect applications that require the body of macro to perform their work.
When content is created, if no macroId is specified, then Confluence will generate a random id.
The id is persisted as the content is saved and only modified by Confluence if there are conflicting IDs.
To preserve backwards compatibility this resource will also match on the hash of the macro body, even if a
macroId is found. This check will become redundant as pages get macroId's generated for them and transparently
propagate out to all instances.
:param content_id (string): A string containing the id of the content.
:param version (int): The version of the content to search.
:param macro_id (string): The macroID to find the corresponding macro.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the endpoint, or the results of the callback.
Will raise requests.HTTPError on bad input, potentially. | Below is the instruction that describes the task:
### Input:
Returns the body of a macro (in storage format) with the given id.
This resource is primarily used by connect applications that require the body of macro to perform their work.
When content is created, if no macroId is specified, then Confluence will generate a random id.
The id is persisted as the content is saved and only modified by Confluence if there are conflicting IDs.
To preserve backwards compatibility this resource will also match on the hash of the macro body, even if a
macroId is found. This check will become redundant as pages get macroId's generated for them and transparently
propagate out to all instances.
:param content_id (string): A string containing the id of the content.
:param version (int): The version of the content to search.
:param macro_id (string): The macroID to find the corresponding macro.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the endpoint, or the results of the callback.
Will raise requests.HTTPError on bad input, potentially.
### Response:
def get_content_macro_by_macro_id(self, content_id, version, macro_id, callback=None):
"""
Returns the body of a macro (in storage format) with the given id.
This resource is primarily used by connect applications that require the body of macro to perform their work.
When content is created, if no macroId is specified, then Confluence will generate a random id.
The id is persisted as the content is saved and only modified by Confluence if there are conflicting IDs.
To preserve backwards compatibility this resource will also match on the hash of the macro body, even if a
macroId is found. This check will become redundant as pages get macroId's generated for them and transparently
propagate out to all instances.
:param content_id (string): A string containing the id of the content.
:param version (int): The version of the content to search.
:param macro_id (string): The macroID to find the corresponding macro.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the endpoint, or the results of the callback.
Will raise requests.HTTPError on bad input, potentially.
"""
return self._service_get_request("rest/api/content/{id}/history/{version}/macro/id/{macro_id}"
"".format(id=content_id, version=int(version), macro_id=macro_id),
callback=callback) |
def list_disks(kwargs=None, conn=None, call=None):
'''
.. versionadded:: 2015.8.0
List disks associated with the account
CLI Example:
.. code-block:: bash
salt-cloud -f list_disks my-azure
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_disks function must be called with -f or --function.'
)
if not conn:
conn = get_conn()
data = conn.list_disks()
ret = {}
for item in data.disks:
ret[item.name] = object_to_dict(item)
return ret | .. versionadded:: 2015.8.0
List disks associated with the account
CLI Example:
.. code-block:: bash
salt-cloud -f list_disks my-azure | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2015.8.0
List disks associated with the account
CLI Example:
.. code-block:: bash
salt-cloud -f list_disks my-azure
### Response:
def list_disks(kwargs=None, conn=None, call=None):
'''
.. versionadded:: 2015.8.0
List disks associated with the account
CLI Example:
.. code-block:: bash
salt-cloud -f list_disks my-azure
'''
if call != 'function':
raise SaltCloudSystemExit(
'The list_disks function must be called with -f or --function.'
)
if not conn:
conn = get_conn()
data = conn.list_disks()
ret = {}
for item in data.disks:
ret[item.name] = object_to_dict(item)
return ret |
def add_round_key(state, rkey):
"""
Transformation in the Cipher and Inverse Cipher in which a Round Key is
added to the State using an XOR operation. The length of a Round Key equals
the size of the State (i.e., for Nb = 4, the Round Key length equals 128
bits/16 bytes).
"""
state = state.reshape(4, 32)
rkey = rkey.reshape(4, 32)
return fcat(
state[0] ^ rkey[0],
state[1] ^ rkey[1],
state[2] ^ rkey[2],
state[3] ^ rkey[3],
) | Transformation in the Cipher and Inverse Cipher in which a Round Key is
added to the State using an XOR operation. The length of a Round Key equals
the size of the State (i.e., for Nb = 4, the Round Key length equals 128
bits/16 bytes). | Below is the instruction that describes the task:
### Input:
Transformation in the Cipher and Inverse Cipher in which a Round Key is
added to the State using an XOR operation. The length of a Round Key equals
the size of the State (i.e., for Nb = 4, the Round Key length equals 128
bits/16 bytes).
### Response:
def add_round_key(state, rkey):
"""
Transformation in the Cipher and Inverse Cipher in which a Round Key is
added to the State using an XOR operation. The length of a Round Key equals
the size of the State (i.e., for Nb = 4, the Round Key length equals 128
bits/16 bytes).
"""
state = state.reshape(4, 32)
rkey = rkey.reshape(4, 32)
return fcat(
state[0] ^ rkey[0],
state[1] ^ rkey[1],
state[2] ^ rkey[2],
state[3] ^ rkey[3],
) |
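The entry above operates on 128-element bit arrays through a custom reshape/fcat helper; for comparison, a hedged byte-level sketch of the same AddRoundKey step (16 state bytes XORed with 16 round-key bytes) follows.
# Byte-oriented AddRoundKey sketch: XOR each state byte with the round-key byte.
import numpy as np

def add_round_key_bytes(state: np.ndarray, rkey: np.ndarray) -> np.ndarray:
    """state and rkey are length-16 uint8 arrays (the 4x4 AES State)."""
    assert state.shape == (16,) and rkey.shape == (16,)
    return np.bitwise_xor(state, rkey)

state = np.arange(16, dtype=np.uint8)
rkey = np.full(16, 0x0F, dtype=np.uint8)
print(add_round_key_bytes(state, rkey))   # every byte XORed with 0x0F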
def disable_key(self):
"""Disable an existing API Key."""
print("This command will disable a enabled key.")
apiKeyID = input("API Key ID: ")
try:
key = self._curl_bitmex("/apiKey/disable",
postdict={"apiKeyID": apiKeyID})
print("Key with ID %s disabled." % key["id"])
except:
print("Unable to disable key, please try again.")
self.disable_key() | Disable an existing API Key. | Below is the the instruction that describes the task:
### Input:
Disable an existing API Key.
### Response:
def disable_key(self):
"""Disable an existing API Key."""
print("This command will disable a enabled key.")
apiKeyID = input("API Key ID: ")
try:
key = self._curl_bitmex("/apiKey/disable",
postdict={"apiKeyID": apiKeyID})
print("Key with ID %s disabled." % key["id"])
except:
print("Unable to disable key, please try again.")
self.disable_key() |
def check_required_fields(method, uri, body, field_names):
"""
Check required fields in the request body.
Raises:
BadRequestError with reason 3: Missing request body
BadRequestError with reason 5: Missing required field in request body
"""
# Check presence of request body
if body is None:
raise BadRequestError(method, uri, reason=3,
message="Missing request body")
# Check required input fields
for field_name in field_names:
if field_name not in body:
raise BadRequestError(method, uri, reason=5,
message="Missing required field in request "
"body: {}".format(field_name)) | Check required fields in the request body.
Raises:
BadRequestError with reason 3: Missing request body
BadRequestError with reason 5: Missing required field in request body | Below is the instruction that describes the task:
### Input:
Check required fields in the request body.
Raises:
BadRequestError with reason 3: Missing request body
BadRequestError with reason 5: Missing required field in request body
### Response:
def check_required_fields(method, uri, body, field_names):
"""
Check required fields in the request body.
Raises:
BadRequestError with reason 3: Missing request body
BadRequestError with reason 5: Missing required field in request body
"""
# Check presence of request body
if body is None:
raise BadRequestError(method, uri, reason=3,
message="Missing request body")
# Check required input fields
for field_name in field_names:
if field_name not in body:
raise BadRequestError(method, uri, reason=5,
message="Missing required field in request "
"body: {}".format(field_name)) |
def _raw_split(itxt):
"""
Parse HTML from text into an array filled with tags and text.
The source code is a little bit unintuitive, because it is a state machine parser.
For better understanding, look at http://bit.ly/1rXRcJj
Example::
>>> dhtmlparser._raw_split('<html><tag params="true"></html>')
['<html>', '<tag params="true">', '</html>']
Args:
itxt (str): Input HTML text, which will be parsed.
Returns:
list: List of strings (input split into tags and text).
"""
echr = ""
buff = ["", "", "", ""]
content = ""
array = []
next_state = 0
inside_tag = False
escaped = False
COMMENT_START = ["-", "!", "<"]
COMMENT_END = ["-", "-"]
gc.disable()
for c in itxt:
# content
if next_state == StateEnum.content:
if c == "<":
if content:
array.append(content)
content = c
next_state = StateEnum.tag
inside_tag = False
else:
content += c
# html tag
elif next_state == StateEnum.tag:
if c == ">":
array.append(content + c)
content = ""
next_state = StateEnum.content
elif c == "'" or c == '"':
echr = c
content += c
next_state = StateEnum.parameter
elif c == "-" and buff[:3] == COMMENT_START:
if content[:-3]:
array.append(content[:-3])
content = content[-3:] + c
next_state = StateEnum.comment
else:
if c == "<": # jump back into tag instead of content
array.append(content)
inside_tag = True
content = ""
content += c
# quotes "" / ''
elif next_state == StateEnum.parameter:
if c == echr and not escaped: # end of quotes
next_state = StateEnum.tag
# unescaped end of line - this is good for invalid HTML like
# <a href=something">..., because it allows recovery
if c == "\n" and not escaped and buff[0] == ">":
next_state = StateEnum.content
inside_tag = False
content += c
escaped = not escaped if c == "\\" else False
# html comments
elif next_state == StateEnum.comment:
if c == ">" and buff[:2] == COMMENT_END:
next_state = StateEnum.tag if inside_tag else StateEnum.content
inside_tag = False
array.append(content + c)
content = ""
else:
content += c
# rotate buffer
buff = _rotate_buff(buff)
buff[0] = c
gc.enable()
if content:
array.append(content)
return array | Parse HTML from text into an array filled with tags and text.
The source code is a little bit unintuitive, because it is a state machine parser.
For better understanding, look at http://bit.ly/1rXRcJj
Example::
>>> dhtmlparser._raw_split('<html><tag params="true"></html>')
['<html>', '<tag params="true">', '</html>']
Args:
itxt (str): Input HTML text, which will be parsed.
Returns:
list: List of strings (input split into tags and text). | Below is the instruction that describes the task:
### Input:
Parse HTML from text into an array filled with tags and text.
The source code is a little bit unintuitive, because it is a state machine parser.
For better understanding, look at http://bit.ly/1rXRcJj
Example::
>>> dhtmlparser._raw_split('<html><tag params="true"></html>')
['<html>', '<tag params="true">', '</html>']
Args:
itxt (str): Input HTML text, which will be parsed.
Returns:
list: List of strings (input split into tags and text).
### Response:
def _raw_split(itxt):
"""
Parse HTML from text into an array filled with tags and text.
The source code is a little bit unintuitive, because it is a state machine parser.
For better understanding, look at http://bit.ly/1rXRcJj
Example::
>>> dhtmlparser._raw_split('<html><tag params="true"></html>')
['<html>', '<tag params="true">', '</html>']
Args:
itxt (str): Input HTML text, which will be parsed.
Returns:
list: List of strings (input split into tags and text).
"""
echr = ""
buff = ["", "", "", ""]
content = ""
array = []
next_state = 0
inside_tag = False
escaped = False
COMMENT_START = ["-", "!", "<"]
COMMENT_END = ["-", "-"]
gc.disable()
for c in itxt:
# content
if next_state == StateEnum.content:
if c == "<":
if content:
array.append(content)
content = c
next_state = StateEnum.tag
inside_tag = False
else:
content += c
# html tag
elif next_state == StateEnum.tag:
if c == ">":
array.append(content + c)
content = ""
next_state = StateEnum.content
elif c == "'" or c == '"':
echr = c
content += c
next_state = StateEnum.parameter
elif c == "-" and buff[:3] == COMMENT_START:
if content[:-3]:
array.append(content[:-3])
content = content[-3:] + c
next_state = StateEnum.comment
else:
if c == "<": # jump back into tag instead of content
array.append(content)
inside_tag = True
content = ""
content += c
# quotes "" / ''
elif next_state == StateEnum.parameter:
if c == echr and not escaped: # end of quotes
next_state = StateEnum.tag
# unescaped end of line - this is good for invalid HTML like
# <a href=something">..., because it allows recovery
if c == "\n" and not escaped and buff[0] == ">":
next_state = StateEnum.content
inside_tag = False
content += c
escaped = not escaped if c == "\\" else False
# html comments
elif next_state == StateEnum.comment:
if c == ">" and buff[:2] == COMMENT_END:
next_state = StateEnum.tag if inside_tag else StateEnum.content
inside_tag = False
array.append(content + c)
content = ""
else:
content += c
# rotate buffer
buff = _rotate_buff(buff)
buff[0] = c
gc.enable()
if content:
array.append(content)
return array |
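One more worked call for the tokenizer above, in the style of its own doctest; it shows that an HTML comment comes back as a single token (hedged, based on tracing the comment branch of the state machine).
# Comments are emitted as one token; text between tags is kept as-is.
import dhtmlparser
print(dhtmlparser._raw_split('<p>hi<!-- note --></p>'))
# ['<p>', 'hi', '<!-- note -->', '</p>']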
def _set_host(self, v, load=False):
"""
Setter method for host, mapped from YANG variable /snmp_server/host (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_host is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_host() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("ip community",host.host, yang_name="host", rest_name="host", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip community', extensions={u'tailf-common': {u'info': u'Holds IP Address, community string, version\n(v1 | v2c), port number used to send traps\nand severity level', u'sort-priority': u'23', u'callpoint': u'snmphost', u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name="host", rest_name="host", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Holds IP Address, community string, version\n(v1 | v2c), port number used to send traps\nand severity level', u'sort-priority': u'23', u'callpoint': u'snmphost', u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """host must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("ip community",host.host, yang_name="host", rest_name="host", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip community', extensions={u'tailf-common': {u'info': u'Holds IP Address, community string, version\n(v1 | v2c), port number used to send traps\nand severity level', u'sort-priority': u'23', u'callpoint': u'snmphost', u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name="host", rest_name="host", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Holds IP Address, community string, version\n(v1 | v2c), port number used to send traps\nand severity level', u'sort-priority': u'23', u'callpoint': u'snmphost', u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='list', is_config=True)""",
})
self.__host = t
if hasattr(self, '_set'):
self._set() | Setter method for host, mapped from YANG variable /snmp_server/host (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_host is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_host() directly. | Below is the instruction that describes the task:
### Input:
Setter method for host, mapped from YANG variable /snmp_server/host (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_host is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_host() directly.
### Response:
def _set_host(self, v, load=False):
"""
Setter method for host, mapped from YANG variable /snmp_server/host (list)
If this variable is read-only (config: false) in the
source YANG file, then _set_host is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_host() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=YANGListType("ip community",host.host, yang_name="host", rest_name="host", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip community', extensions={u'tailf-common': {u'info': u'Holds IP Address, community string, version\n(v1 | v2c), port number used to send traps\nand severity level', u'sort-priority': u'23', u'callpoint': u'snmphost', u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name="host", rest_name="host", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Holds IP Address, community string, version\n(v1 | v2c), port number used to send traps\nand severity level', u'sort-priority': u'23', u'callpoint': u'snmphost', u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='list', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """host must be of a type compatible with list""",
'defined-type': "list",
'generated-type': """YANGDynClass(base=YANGListType("ip community",host.host, yang_name="host", rest_name="host", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='ip community', extensions={u'tailf-common': {u'info': u'Holds IP Address, community string, version\n(v1 | v2c), port number used to send traps\nand severity level', u'sort-priority': u'23', u'callpoint': u'snmphost', u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}), is_container='list', yang_name="host", rest_name="host", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Holds IP Address, community string, version\n(v1 | v2c), port number used to send traps\nand severity level', u'sort-priority': u'23', u'callpoint': u'snmphost', u'cli-suppress-key-abbreviation': None, u'cli-suppress-list-no': None}}, namespace='urn:brocade.com:mgmt:brocade-snmp', defining_module='brocade-snmp', yang_type='list', is_config=True)""",
})
self.__host = t
if hasattr(self, '_set'):
self._set() |
def fetch(self, is_dl_forced=False):
"""
Here we connect to the coriell sftp server using private connection
details. They dump bi-weekly files with a timestamp in the filename.
For each catalog, we ping the remote site and pull the most-recently
updated file, renaming it to our local latest.csv.
Be sure to have pg user/password connection details in your conf.yaml
file, like:
dbauth : {"coriell" : {
"user" : "<username>", "password" : "<password>",
"host" : <host>, "private_key"=path/to/rsa_key}
}
:param is_dl_forced:
:return:
"""
host = config.get_config()['dbauth']['coriell']['host']
key = config.get_config()['dbauth']['coriell']['private_key']
user = config.get_config()['user']['coriell']
passwd = config.get_config()['keys'][user]
with pysftp.Connection(
host, username=user, password=passwd, private_key=key) as sftp:
# check to make sure each file is in there
# get the remote files
remote_files = sftp.listdir_attr()
files_by_repo = {}
for attr in remote_files:
# for each catalog, get the most-recent filename
mch = re.match('(NIGMS|NIA|NHGRI|NINDS)', attr.filename)
if mch is not None and len(mch.groups()) > 0:
# there should just be one now
files_by_repo[mch.group(1)] = attr
# sort each array in hash,
# & get the name and time of the most-recent file for each catalog
for rmt in self.files:
LOG.info("Checking on %s catalog file", rmt)
fname = self.files[rmt]['file']
remotef = files_by_repo[rmt]
target_name = '/'.join((self.rawdir, fname))
# check if the local file is out of date, if so, download.
# otherwise, skip.
# we rename (for simplicity) the original file
fstat = None
if os.path.exists(target_name):
fstat = os.stat(target_name)
LOG.info(
"Local file date: %s",
datetime.utcfromtimestamp(fstat[stat.ST_CTIME]))
if fstat is None or remotef.st_mtime > fstat[stat.ST_CTIME]:
if fstat is None:
LOG.info("File does not exist locally; downloading...")
else:
LOG.info(
"New version of %s catalog available; downloading...", rmt)
sftp.get(remotef.filename, target_name)
LOG.info(
"Fetched remote %s -> %s", remotef.filename, target_name)
fstat = os.stat(target_name)
filedate = datetime.utcfromtimestamp(
remotef.st_mtime).strftime("%Y-%m-%d")
LOG.info(
"New file date: %s",
datetime.utcfromtimestamp(fstat[stat.ST_CTIME]))
else:
LOG.info("File %s exists; using local copy", fname)
filedate = datetime.utcfromtimestamp(
fstat[stat.ST_CTIME]).strftime("%Y-%m-%d")
self.dataset.setFileAccessUrl(remotef.filename, True)
self.dataset.setVersion(filedate)
return | Here we connect to the coriell sftp server using private connection
details. They dump bi-weekly files with a timestamp in the filename.
For each catalog, we ping the remote site and pull the most-recently
updated file, renaming it to our local latest.csv.
Be sure to have pg user/password connection details in your conf.yaml
file, like:
dbauth : {"coriell" : {
"user" : "<username>", "password" : "<password>",
"host" : <host>, "private_key"=path/to/rsa_key}
}
:param is_dl_forced:
:return: | Below is the instruction that describes the task:
### Input:
Here we connect to the coriell sftp server using private connection
details. They dump bi-weekly files with a timestamp in the filename.
For each catalog, we ping the remote site and pull the most-recently
updated file, renaming it to our local latest.csv.
Be sure to have pg user/password connection details in your conf.yaml
file, like:
dbauth : {"coriell" : {
"user" : "<username>", "password" : "<password>",
"host" : <host>, "private_key"=path/to/rsa_key}
}
:param is_dl_forced:
:return:
### Response:
def fetch(self, is_dl_forced=False):
"""
Here we connect to the coriell sftp server using private connection
details. They dump bi-weekly files with a timestamp in the filename.
For each catalog, we ping the remote site and pull the most-recently
updated file, renaming it to our local latest.csv.
Be sure to have pg user/password connection details in your conf.yaml
file, like:
dbauth : {"coriell" : {
"user" : "<username>", "password" : "<password>",
"host" : <host>, "private_key"=path/to/rsa_key}
}
:param is_dl_forced:
:return:
"""
host = config.get_config()['dbauth']['coriell']['host']
key = config.get_config()['dbauth']['coriell']['private_key']
user = config.get_config()['user']['coriell']
passwd = config.get_config()['keys'][user]
with pysftp.Connection(
host, username=user, password=passwd, private_key=key) as sftp:
# check to make sure each file is in there
# get the remote files
remote_files = sftp.listdir_attr()
files_by_repo = {}
for attr in remote_files:
# for each catalog, get the most-recent filename
mch = re.match('(NIGMS|NIA|NHGRI|NINDS)', attr.filename)
if mch is not None and len(mch.groups()) > 0:
# there should just be one now
files_by_repo[mch.group(1)] = attr
# sort each array in hash,
# & get the name and time of the most-recent file for each catalog
for rmt in self.files:
LOG.info("Checking on %s catalog file", rmt)
fname = self.files[rmt]['file']
remotef = files_by_repo[rmt]
target_name = '/'.join((self.rawdir, fname))
# check if the local file is out of date, if so, download.
# otherwise, skip.
# we rename (for simplicity) the original file
fstat = None
if os.path.exists(target_name):
fstat = os.stat(target_name)
LOG.info(
"Local file date: %s",
datetime.utcfromtimestamp(fstat[stat.ST_CTIME]))
if fstat is None or remotef.st_mtime > fstat[stat.ST_CTIME]:
if fstat is None:
LOG.info("File does not exist locally; downloading...")
else:
LOG.info(
"New version of %s catalog available; downloading...", rmt)
sftp.get(remotef.filename, target_name)
LOG.info(
"Fetched remote %s -> %s", remotef.filename, target_name)
fstat = os.stat(target_name)
filedate = datetime.utcfromtimestamp(
remotef.st_mtime).strftime("%Y-%m-%d")
LOG.info(
"New file date: %s",
datetime.utcfromtimestamp(fstat[stat.ST_CTIME]))
else:
LOG.info("File %s exists; using local copy", fname)
filedate = datetime.utcfromtimestamp(
fstat[stat.ST_CTIME]).strftime("%Y-%m-%d")
self.dataset.setFileAccessUrl(remotef.filename, True)
self.dataset.setVersion(filedate)
return |
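The download logic above reduces to "fetch the remote file only when it is newer than the local copy"; a trimmed, hedged sketch of that pattern with pysftp (host, credentials and filenames are placeholders) is shown below.
# Hedged sketch of the newer-than-local download pattern used in fetch() above.
import os
import stat
import pysftp

HOST, USER, PASSWD, KEY = 'sftp.example.org', 'user', 'secret', '/path/to/rsa_key'

def fetch_if_newer(remote_name, local_name):
    with pysftp.Connection(HOST, username=USER, password=PASSWD,
                           private_key=KEY) as sftp:
        remote_attr = next(a for a in sftp.listdir_attr()
                           if a.filename == remote_name)
        local_mtime = (os.stat(local_name)[stat.ST_MTIME]
                       if os.path.exists(local_name) else 0)
        if remote_attr.st_mtime > local_mtime:
            sftp.get(remote_name, local_name)   # remote copy is newer: download
            return True
    return False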
def update_loadbalancer(self, lbaas_loadbalancer, body=None):
"""Updates a load balancer."""
return self.put(self.lbaas_loadbalancer_path % (lbaas_loadbalancer),
body=body) | Updates a load balancer. | Below is the the instruction that describes the task:
### Input:
Updates a load balancer.
### Response:
def update_loadbalancer(self, lbaas_loadbalancer, body=None):
"""Updates a load balancer."""
return self.put(self.lbaas_loadbalancer_path % (lbaas_loadbalancer),
body=body) |
def delete_source(self, photo, **kwds):
"""
Endpoint: /photo/<id>/source/delete.json
Delete the source files of a photo.
Returns True if successful.
Raises a TroveboxError if not.
"""
return self._client.post("/photo/%s/source/delete.json" %
self._extract_id(photo),
**kwds)["result"] | Endpoint: /photo/<id>/source/delete.json
Delete the source files of a photo.
Returns True if successful.
Raises a TroveboxError if not. | Below is the instruction that describes the task:
### Input:
Endpoint: /photo/<id>/source/delete.json
Delete the source files of a photo.
Returns True if successful.
Raises a TroveboxError if not.
### Response:
def delete_source(self, photo, **kwds):
"""
Endpoint: /photo/<id>/source/delete.json
Delete the source files of a photo.
Returns True if successful.
Raises a TroveboxError if not.
"""
return self._client.post("/photo/%s/source/delete.json" %
self._extract_id(photo),
**kwds)["result"] |
def _get_named_graph(context):
"""
Returns the named graph for this context.
"""
if context is None:
return None
return models.NamedGraph.objects.get_or_create(identifier=context.identifier)[0] | Returns the named graph for this context. | Below is the the instruction that describes the task:
### Input:
Returns the named graph for this context.
### Response:
def _get_named_graph(context):
"""
Returns the named graph for this context.
"""
if context is None:
return None
return models.NamedGraph.objects.get_or_create(identifier=context.identifier)[0] |
def modify_log_flags(self, settings):
"""Modifies the debug or release logger flags.
in settings of type str
The flags settings string. See iprt/log.h for details. To target the
release logger, prefix the string with "release:".
"""
if not isinstance(settings, basestring):
raise TypeError("settings can only be an instance of type basestring")
self._call("modifyLogFlags",
in_p=[settings]) | Modifies the debug or release logger flags.
in settings of type str
The flags settings string. See iprt/log.h for details. To target the
release logger, prefix the string with "release:". | Below is the instruction that describes the task:
### Input:
Modifies the debug or release logger flags.
in settings of type str
The flags settings string. See iprt/log.h for details. To target the
release logger, prefix the string with "release:".
### Response:
def modify_log_flags(self, settings):
"""Modifies the debug or release logger flags.
in settings of type str
The flags settings string. See iprt/log.h for details. To target the
release logger, prefix the string with "release:".
"""
if not isinstance(settings, basestring):
raise TypeError("settings can only be an instance of type basestring")
self._call("modifyLogFlags",
in_p=[settings]) |
def relabeled(self, label, new_label):
"""Return a new table with ``label`` specifying column label(s)
replaced by corresponding ``new_label``.
Args:
``label`` -- (str or array of str) The label(s) of
columns to be changed.
``new_label`` -- (str or array of str): The new label(s) of
columns to be changed. Same number of elements as label.
Raises:
``ValueError`` -- if ``label`` does not exist in
table, or if the ``label`` and ``new_label`` are not of
equal length. Also, raised if ``label`` and/or ``new_label``
are not ``str``.
Returns:
New table with ``new_label`` in place of ``label``.
>>> tiles = Table().with_columns('letter', make_array('c', 'd'),
... 'count', make_array(2, 4))
>>> tiles
letter | count
c | 2
d | 4
>>> tiles.relabeled('count', 'number')
letter | number
c | 2
d | 4
>>> tiles # original table unmodified
letter | count
c | 2
d | 4
>>> tiles.relabeled(make_array('letter', 'count'),
... make_array('column1', 'column2'))
column1 | column2
c | 2
d | 4
>>> tiles.relabeled(make_array('letter', 'number'),
... make_array('column1', 'column2'))
Traceback (most recent call last):
...
ValueError: Invalid labels. Column labels must already exist in table in order to be replaced.
"""
copy = self.copy()
copy.relabel(label, new_label)
return copy | Return a new table with ``label`` specifying column label(s)
replaced by corresponding ``new_label``.
Args:
``label`` -- (str or array of str) The label(s) of
columns to be changed.
``new_label`` -- (str or array of str): The new label(s) of
columns to be changed. Same number of elements as label.
Raises:
``ValueError`` -- if ``label`` does not exist in
table, or if the ``label`` and ``new_label`` are not of
equal length. Also, raised if ``label`` and/or ``new_label``
are not ``str``.
Returns:
New table with ``new_label`` in place of ``label``.
>>> tiles = Table().with_columns('letter', make_array('c', 'd'),
... 'count', make_array(2, 4))
>>> tiles
letter | count
c | 2
d | 4
>>> tiles.relabeled('count', 'number')
letter | number
c | 2
d | 4
>>> tiles # original table unmodified
letter | count
c | 2
d | 4
>>> tiles.relabeled(make_array('letter', 'count'),
... make_array('column1', 'column2'))
column1 | column2
c | 2
d | 4
>>> tiles.relabeled(make_array('letter', 'number'),
... make_array('column1', 'column2'))
Traceback (most recent call last):
...
ValueError: Invalid labels. Column labels must already exist in table in order to be replaced. | Below is the instruction that describes the task:
### Input:
Return a new table with ``label`` specifying column label(s)
replaced by corresponding ``new_label``.
Args:
``label`` -- (str or array of str) The label(s) of
columns to be changed.
``new_label`` -- (str or array of str): The new label(s) of
columns to be changed. Same number of elements as label.
Raises:
``ValueError`` -- if ``label`` does not exist in
table, or if the ``label`` and ``new_label`` are not of
equal length. Also, raised if ``label`` and/or ``new_label``
are not ``str``.
Returns:
New table with ``new_label`` in place of ``label``.
>>> tiles = Table().with_columns('letter', make_array('c', 'd'),
... 'count', make_array(2, 4))
>>> tiles
letter | count
c | 2
d | 4
>>> tiles.relabeled('count', 'number')
letter | number
c | 2
d | 4
>>> tiles # original table unmodified
letter | count
c | 2
d | 4
>>> tiles.relabeled(make_array('letter', 'count'),
... make_array('column1', 'column2'))
column1 | column2
c | 2
d | 4
>>> tiles.relabeled(make_array('letter', 'number'),
... make_array('column1', 'column2'))
Traceback (most recent call last):
...
ValueError: Invalid labels. Column labels must already exist in table in order to be replaced.
### Response:
def relabeled(self, label, new_label):
"""Return a new table with ``label`` specifying column label(s)
replaced by corresponding ``new_label``.
Args:
``label`` -- (str or array of str) The label(s) of
columns to be changed.
``new_label`` -- (str or array of str): The new label(s) of
columns to be changed. Same number of elements as label.
Raises:
``ValueError`` -- if ``label`` does not exist in
table, or if the ``label`` and ``new_label`` are not of
equal length. Also, raised if ``label`` and/or ``new_label``
are not ``str``.
Returns:
New table with ``new_label`` in place of ``label``.
>>> tiles = Table().with_columns('letter', make_array('c', 'd'),
... 'count', make_array(2, 4))
>>> tiles
letter | count
c | 2
d | 4
>>> tiles.relabeled('count', 'number')
letter | number
c | 2
d | 4
>>> tiles # original table unmodified
letter | count
c | 2
d | 4
>>> tiles.relabeled(make_array('letter', 'count'),
... make_array('column1', 'column2'))
column1 | column2
c | 2
d | 4
>>> tiles.relabeled(make_array('letter', 'number'),
... make_array('column1', 'column2'))
Traceback (most recent call last):
...
ValueError: Invalid labels. Column labels must already exist in table in order to be replaced.
"""
copy = self.copy()
copy.relabel(label, new_label)
return copy |
def parse_resource_extended(library, session, resource_name):
"""Parse a resource string to get extended interface information.
Corresponds to viParseRsrcEx function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode`
"""
interface_type = ViUInt16()
interface_board_number = ViUInt16()
resource_class = create_string_buffer(constants.VI_FIND_BUFLEN)
unaliased_expanded_resource_name = create_string_buffer(constants.VI_FIND_BUFLEN)
alias_if_exists = create_string_buffer(constants.VI_FIND_BUFLEN)
# [ViSession, ViRsrc, ViPUInt16, ViPUInt16, ViAChar, ViAChar, ViAChar]
# ViRsrc converts from (str, unicode, bytes) to bytes
ret = library.viParseRsrcEx(session, resource_name, byref(interface_type),
byref(interface_board_number), resource_class,
unaliased_expanded_resource_name,
alias_if_exists)
res = [buffer_to_text(val)
for val in (resource_class,
unaliased_expanded_resource_name,
alias_if_exists)]
if res[-1] == '':
res[-1] = None
return ResourceInfo(constants.InterfaceType(interface_type.value),
interface_board_number.value, *res), ret | Parse a resource string to get extended interface information.
Corresponds to viParseRsrcEx function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode` | Below is the instruction that describes the task:
### Input:
Parse a resource string to get extended interface information.
Corresponds to viParseRsrcEx function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode`
### Response:
def parse_resource_extended(library, session, resource_name):
"""Parse a resource string to get extended interface information.
Corresponds to viParseRsrcEx function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Resource Manager session (should always be the Default Resource Manager for VISA
returned from open_default_resource_manager()).
:param resource_name: Unique symbolic name of a resource.
:return: Resource information, return value of the library call.
:rtype: :class:`pyvisa.highlevel.ResourceInfo`, :class:`pyvisa.constants.StatusCode`
"""
interface_type = ViUInt16()
interface_board_number = ViUInt16()
resource_class = create_string_buffer(constants.VI_FIND_BUFLEN)
unaliased_expanded_resource_name = create_string_buffer(constants.VI_FIND_BUFLEN)
alias_if_exists = create_string_buffer(constants.VI_FIND_BUFLEN)
# [ViSession, ViRsrc, ViPUInt16, ViPUInt16, ViAChar, ViAChar, ViAChar]
# ViRsrc converts from (str, unicode, bytes) to bytes
ret = library.viParseRsrcEx(session, resource_name, byref(interface_type),
byref(interface_board_number), resource_class,
unaliased_expanded_resource_name,
alias_if_exists)
res = [buffer_to_text(val)
for val in (resource_class,
unaliased_expanded_resource_name,
alias_if_exists)]
if res[-1] == '':
res[-1] = None
return ResourceInfo(constants.InterfaceType(interface_type.value),
interface_board_number.value, *res), ret |
def _calculate_areas(self, label):
"""Calculate areas for multiple labels"""
heights = np.maximum(0, label[:, 3] - label[:, 1])
widths = np.maximum(0, label[:, 2] - label[:, 0])
return heights * widths | Calculate areas for multiple labels | Below is the the instruction that describes the task:
### Input:
Calculate areas for multiple labels
### Response:
def _calculate_areas(self, label):
"""Calculate areas for multiple labels"""
heights = np.maximum(0, label[:, 3] - label[:, 1])
widths = np.maximum(0, label[:, 2] - label[:, 0])
return heights * widths |
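For reference, the same area computation run standalone on two [xmin, ymin, xmax, ymax] boxes:
# Areas of axis-aligned boxes; degenerate boxes clamp to zero via np.maximum.
import numpy as np

label = np.array([[0.0, 0.0, 2.0, 3.0],    # 2 x 3 box  -> area 6
                  [1.0, 1.0, 1.0, 5.0]])   # zero width -> area 0
heights = np.maximum(0, label[:, 3] - label[:, 1])
widths = np.maximum(0, label[:, 2] - label[:, 0])
print(heights * widths)                    # [6. 0.]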
def dump_rawdata(self):
"""Return contents of transport object for debugging forensics."""
if hasattr(self, 'transport'):
attrs = vars(self.transport)
return ', '.join("%s: %s" % item for item in attrs.items()) | Return contents of transport object for debugging forensics. | Below is the the instruction that describes the task:
### Input:
Return contents of transport object for debugging forensics.
### Response:
def dump_rawdata(self):
"""Return contents of transport object for debugging forensics."""
if hasattr(self, 'transport'):
attrs = vars(self.transport)
return ', '.join("%s: %s" % item for item in attrs.items()) |
def get_indexer_for(self, target, **kwargs):
"""
Guaranteed return of an indexer even when non-unique.
This dispatches to get_indexer or get_indexer_nonunique
as appropriate.
"""
if self.is_unique:
return self.get_indexer(target, **kwargs)
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer | Guaranteed return of an indexer even when non-unique.
This dispatches to get_indexer or get_indexer_nonunique
as appropriate. | Below is the instruction that describes the task:
### Input:
Guaranteed return of an indexer even when non-unique.
This dispatches to get_indexer or get_indexer_nonunique
as appropriate.
### Response:
def get_indexer_for(self, target, **kwargs):
"""
Guaranteed return of an indexer even when non-unique.
This dispatches to get_indexer or get_indexer_nonunique
as appropriate.
"""
if self.is_unique:
return self.get_indexer(target, **kwargs)
indexer, _ = self.get_indexer_non_unique(target, **kwargs)
return indexer |
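A small pandas example of the dispatch described above: unique indexes go through get_indexer, non-unique ones through get_indexer_non_unique, and the caller always gets a plain indexer back.
# get_indexer_for() hides the unique / non-unique distinction from the caller.
import pandas as pd

unique_idx = pd.Index(['a', 'b', 'c'])
print(unique_idx.get_indexer_for(['c', 'a']))   # [2 0]

dup_idx = pd.Index(['a', 'b', 'b'])
print(dup_idx.get_indexer_for(['b']))           # [1 2]  (every position of 'b')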
def listeI(l):
""" Fonction importante permettant de renvoyer
une liste d'entiers à partir d'une chaîne.
La chaîne est une liste de sections séparées
par des virgules. Une section peut être soit
un entier, soit un intervalle d'entiers. On
donne alors les limites inférieure et supérieure
de l'intervale, séparées par le caractère '-'.
Nombreux exemples d'intervalles dans le fichier
data/modeles.la.
:param l: Chaîne à transformer
:type l: str
:return: Liste des sections étendues
:rtype: list of int
"""
result = []
lvirg = l.split(',')
for virg in lvirg:
if "-" in virg:
deb, fin = tuple(virg.split("-"))
result += [i for i in range(int(deb), int(fin) + 1)]
else:
result.append(int(virg))
return result | Important function that returns a list
of integers from a string.
The string is a list of sections separated
by commas. A section can be either a single
integer or a range of integers, given as the
lower and upper bounds of the range,
separated by the '-' character.
Many examples of ranges can be found in the
file data/modeles.la.
:param l: String to transform
:type l: str
:return: List of expanded sections
:rtype: list of int | Below is the instruction that describes the task:
### Input:
Important function that returns a list
of integers from a string.
The string is a list of sections separated
by commas. A section can be either a single
integer or a range of integers, given as the
lower and upper bounds of the range,
separated by the '-' character.
Many examples of ranges can be found in the
file data/modeles.la.
:param l: String to transform
:type l: str
:return: List of expanded sections
:rtype: list of int
### Response:
def listeI(l):
""" Fonction importante permettant de renvoyer
une liste d'entiers à partir d'une chaîne.
La chaîne est une liste de sections séparées
par des virgules. Une section peut être soit
un entier, soit un intervalle d'entiers. On
donne alors les limites inférieure et supérieure
de l'intervale, séparées par le caractère '-'.
Nombreux exemples d'intervalles dans le fichier
data/modeles.la.
:param l: Chaîne à transformer
:type l: str
:return: Liste des sections étendues
:rtype: list of int
"""
result = []
lvirg = l.split(',')
for virg in lvirg:
if "-" in virg:
deb, fin = tuple(virg.split("-"))
result += [i for i in range(int(deb), int(fin) + 1)]
else:
result.append(int(virg))
return result |
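A quick worked call for the section-expanding helper above (assuming listeI is in scope):
# "3-5" expands to 3, 4, 5; plain integers pass through unchanged.
print(listeI("1,3-5,7"))   # [1, 3, 4, 5, 7]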
def ancestor(self, value):
"""Set the ancestor for the query
:type value: :class:`~google.cloud.datastore.key.Key`
:param value: the new ancestor key
"""
if not isinstance(value, Key):
raise TypeError("Ancestor must be a Key")
self._ancestor = value | Set the ancestor for the query
:type value: :class:`~google.cloud.datastore.key.Key`
:param value: the new ancestor key | Below is the instruction that describes the task:
### Input:
Set the ancestor for the query
:type value: :class:`~google.cloud.datastore.key.Key`
:param value: the new ancestor key
### Response:
def ancestor(self, value):
"""Set the ancestor for the query
:type value: :class:`~google.cloud.datastore.key.Key`
:param value: the new ancestor key
"""
if not isinstance(value, Key):
raise TypeError("Ancestor must be a Key")
self._ancestor = value |
def urlopen(self, method, url, body=None, headers=None, **kwargs):
"""Implementation of urllib3's urlopen."""
# pylint: disable=arguments-differ
# We use kwargs to collect additional args that we don't need to
# introspect here. However, we do explicitly collect the two
# positional arguments.
# Use a kwarg for this instead of an attribute to maintain
# thread-safety.
_credential_refresh_attempt = kwargs.pop(
'_credential_refresh_attempt', 0)
if headers is None:
headers = self.headers
# Make a copy of the headers. They will be modified by the credentials
# and we want to pass the original headers if we recurse.
request_headers = headers.copy()
self.credentials.before_request(
self._request, method, url, request_headers)
response = self.http.urlopen(
method, url, body=body, headers=request_headers, **kwargs)
# If the response indicated that the credentials needed to be
# refreshed, then refresh the credentials and re-attempt the
# request.
# A stored token may expire between the time it is retrieved and
# the time the request is made, so we may need to try twice.
# The reason urllib3's retries aren't used is because they
# don't allow you to modify the request headers. :/
if (response.status in self._refresh_status_codes
and _credential_refresh_attempt < self._max_refresh_attempts):
_LOGGER.info(
'Refreshing credentials due to a %s response. Attempt %s/%s.',
response.status, _credential_refresh_attempt + 1,
self._max_refresh_attempts)
self.credentials.refresh(self._request)
# Recurse. Pass in the original headers, not our modified set.
return self.urlopen(
method, url, body=body, headers=headers,
_credential_refresh_attempt=_credential_refresh_attempt + 1,
**kwargs)
return response | Implementation of urllib3's urlopen. | Below is the the instruction that describes the task:
### Input:
Implementation of urllib3's urlopen.
### Response:
def urlopen(self, method, url, body=None, headers=None, **kwargs):
"""Implementation of urllib3's urlopen."""
# pylint: disable=arguments-differ
# We use kwargs to collect additional args that we don't need to
# introspect here. However, we do explicitly collect the two
# positional arguments.
# Use a kwarg for this instead of an attribute to maintain
# thread-safety.
_credential_refresh_attempt = kwargs.pop(
'_credential_refresh_attempt', 0)
if headers is None:
headers = self.headers
# Make a copy of the headers. They will be modified by the credentials
# and we want to pass the original headers if we recurse.
request_headers = headers.copy()
self.credentials.before_request(
self._request, method, url, request_headers)
response = self.http.urlopen(
method, url, body=body, headers=request_headers, **kwargs)
# If the response indicated that the credentials needed to be
# refreshed, then refresh the credentials and re-attempt the
# request.
# A stored token may expire between the time it is retrieved and
# the time the request is made, so we may need to try twice.
# The reason urllib3's retries aren't used is because they
# don't allow you to modify the request headers. :/
if (response.status in self._refresh_status_codes
and _credential_refresh_attempt < self._max_refresh_attempts):
_LOGGER.info(
'Refreshing credentials due to a %s response. Attempt %s/%s.',
response.status, _credential_refresh_attempt + 1,
self._max_refresh_attempts)
self.credentials.refresh(self._request)
# Recurse. Pass in the original headers, not our modified set.
return self.urlopen(
method, url, body=body, headers=headers,
_credential_refresh_attempt=_credential_refresh_attempt + 1,
**kwargs)
return response |
def stations_iter(self, *, page_size=250):
"""Get a paged iterator of library stations.
Parameters:
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Station dicts.
"""
start_token = None
while True:
response = self._call(
mc_calls.RadioStation,
max_results=page_size,
start_token=start_token
)
yield response.body.get('data', {}).get('items', [])
start_token = response.body.get('nextPageToken')
if start_token is None:
break | Get a paged iterator of library stations.
Parameters:
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Station dicts. | Below is the instruction that describes the task:
### Input:
Get a paged iterator of library stations.
Parameters:
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Station dicts.
### Response:
def stations_iter(self, *, page_size=250):
"""Get a paged iterator of library stations.
Parameters:
page_size (int, Optional): The maximum number of results per returned page.
Max allowed is ``49995``.
Default: ``250``
Yields:
list: Station dicts.
"""
start_token = None
while True:
response = self._call(
mc_calls.RadioStation,
max_results=page_size,
start_token=start_token
)
yield response.body.get('data', {}).get('items', [])
start_token = response.body.get('nextPageToken')
if start_token is None:
break |
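A hedged consumer sketch for the paged iterator above; mc is a placeholder for whatever client object exposes stations_iter, and each yielded item is a page (a list of station dicts), not a single station.
# Hedged sketch: 'mc' is assumed to be a client exposing stations_iter().
def collect_station_names(mc, page_size=100):
    names = []
    for page in mc.stations_iter(page_size=page_size):   # each page is a list
        names.extend(station.get('name') for station in page)
    return names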
def define_contributor(self, request):
"""Define contributor by adding it to request.data."""
request.data['contributor'] = self.resolve_user(request.user).pk | Define contributor by adding it to request.data. | Below is the the instruction that describes the task:
### Input:
Define contributor by adding it to request.data.
### Response:
def define_contributor(self, request):
"""Define contributor by adding it to request.data."""
request.data['contributor'] = self.resolve_user(request.user).pk |
def update_pointed(self):
'''
Grabs the latest file contents based on the pointer uri
'''
# only grab file if our pointer is still good (not None)
if not self.pointed_at_expired:
try:
conf_string, stat2 = self.zoo_client.get(self.point_path,
watch=self.watch_pointed)
except ZookeeperError:
self.old_data = ''
self.set_valid(False)
self.pointed_at_expired = True
self.call_error(self.INVALID_PATH)
return
if self.compare_data(conf_string):
self.call_config(conf_string)
self.set_valid(True) | Grabs the latest file contents based on the pointer uri | Below is the instruction that describes the task:
### Input:
Grabs the latest file contents based on the pointer uri
### Response:
def update_pointed(self):
'''
Grabs the latest file contents based on the pointer uri
'''
# only grab file if our pointer is still good (not None)
if not self.pointed_at_expired:
try:
conf_string, stat2 = self.zoo_client.get(self.point_path,
watch=self.watch_pointed)
except ZookeeperError:
self.old_data = ''
self.set_valid(False)
self.pointed_at_expired = True
self.call_error(self.INVALID_PATH)
return
if self.compare_data(conf_string):
self.call_config(conf_string)
self.set_valid(True) |
def create_char_dataframe(words):
"""
Give list of input tokenized words,
create dataframe of characters where first character of
the word is tagged as 1, otherwise 0
Example
=======
['กิน', 'หมด'] to dataframe of
[{'char': 'ก', 'type': ..., 'target': 1}, ...,
{'char': 'ด', 'type': ..., 'target': 0}]
"""
char_dict = []
for word in words:
for i, char in enumerate(word):
if i == 0:
char_dict.append({'char': char,
'type': CHAR_TYPE_FLATTEN.get(char, 'o'),
'target': True})
else:
char_dict.append({'char': char,
'type': CHAR_TYPE_FLATTEN.get(char, 'o'),
'target': False})
return pd.DataFrame(char_dict) | Give list of input tokenized words,
create dataframe of characters where first character of
the word is tagged as 1, otherwise 0
Example
=======
['กิน', 'หมด'] to dataframe of
[{'char': 'ก', 'type': ..., 'target': 1}, ...,
{'char': 'ด', 'type': ..., 'target': 0}] | Below is the instruction that describes the task:
### Input:
Give list of input tokenized words,
create dataframe of characters where first character of
the word is tagged as 1, otherwise 0
Example
=======
['กิน', 'หมด'] to dataframe of
[{'char': 'ก', 'type': ..., 'target': 1}, ...,
{'char': 'ด', 'type': ..., 'target': 0}]
### Response:
def create_char_dataframe(words):
"""
Give list of input tokenized words,
create dataframe of characters where first character of
the word is tagged as 1, otherwise 0
Example
=======
['กิน', 'หมด'] to dataframe of
[{'char': 'ก', 'type': ..., 'target': 1}, ...,
{'char': 'ด', 'type': ..., 'target': 0}]
"""
char_dict = []
for word in words:
for i, char in enumerate(word):
if i == 0:
char_dict.append({'char': char,
'type': CHAR_TYPE_FLATTEN.get(char, 'o'),
'target': True})
else:
char_dict.append({'char': char,
'type': CHAR_TYPE_FLATTEN.get(char, 'o'),
'target': False})
return pd.DataFrame(char_dict) |
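A small usage sketch for the record above, from the editor. The toy CHAR_TYPE_FLATTEN mapping is an assumption made for illustration — the real mapping comes from the surrounding project — and pandas must be installed.
import pandas as pd

CHAR_TYPE_FLATTEN = {'ก': 'c', 'ิ': 'v', 'น': 'c', 'ห': 'c', 'ม': 'c', 'ด': 'c'}  # toy mapping

def create_char_dataframe(words):
    # The first character of each word is the positive target.
    char_dict = []
    for word in words:
        for i, char in enumerate(word):
            char_dict.append({'char': char,
                              'type': CHAR_TYPE_FLATTEN.get(char, 'o'),
                              'target': i == 0})
    return pd.DataFrame(char_dict)

df = create_char_dataframe(['กิน', 'หมด'])
print(df)   # one row per character, 'target' True only on ก and ห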
def add_output_path(path: str = None) -> str:
"""
Adds the specified path to the output logging paths if it is not
already in the listed paths.
:param path:
The path to add to the logging output paths. If the path is empty
or no path is given, the current working directory will be used
instead.
"""
cleaned = paths.clean(path or os.getcwd())
if cleaned not in _logging_paths:
_logging_paths.append(cleaned)
return cleaned | Adds the specified path to the output logging paths if it is not
already in the listed paths.
:param path:
The path to add to the logging output paths. If the path is empty
or no path is given, the current working directory will be used
instead. | Below is the instruction that describes the task:
### Input:
Adds the specified path to the output logging paths if it is not
already in the listed paths.
:param path:
The path to add to the logging output paths. If the path is empty
or no path is given, the current working directory will be used
instead.
### Response:
def add_output_path(path: str = None) -> str:
"""
Adds the specified path to the output logging paths if it is not
already in the listed paths.
:param path:
The path to add to the logging output paths. If the path is empty
or no path is given, the current working directory will be used
instead.
"""
cleaned = paths.clean(path or os.getcwd())
if cleaned not in _logging_paths:
_logging_paths.append(cleaned)
return cleaned |
def shape(self):
"""Number of grid points per axis."""
try:
return self.__shape
except AttributeError:
shape = tuple(len(vec) for vec in self.coord_vectors)
self.__shape = shape
return shape | Number of grid points per axis. | Below is the instruction that describes the task:
### Input:
Number of grid points per axis.
### Response:
def shape(self):
"""Number of grid points per axis."""
try:
return self.__shape
except AttributeError:
shape = tuple(len(vec) for vec in self.coord_vectors)
self.__shape = shape
return shape |
def datacite_to_pif_reference(dc):
"""
Parse a top-level datacite dictionary into a Reference
:param dc: dictionary containing datacite metadata
:return: Reference corresponding to that datacite entry
"""
ref = Reference()
if dc.get('identifier', {}).get('identifierType') == "DOI":
ref.doi = dc.get('identifier', {}).get('identifier')
ref.title = dc.get('title')
ref.publisher = dc.get('publisher')
ref.year = dc.get('publicationYear')
ref.authors = [creator_to_person(x).name for x in dc.get('creators', [])] or None
return ref | Parse a top-level datacite dictionary into a Reference
:param dc: dictionary containing datacite metadata
:return: Reference corresponding to that datacite entry | Below is the instruction that describes the task:
### Input:
Parse a top-level datacite dictionary into a Reference
:param dc: dictionary containing datacite metadata
:return: Reference corresponding to that datacite entry
### Response:
def datacite_to_pif_reference(dc):
"""
Parse a top-level datacite dictionary into a Reference
:param dc: dictionary containing datacite metadata
:return: Reference corresponding to that datacite entry
"""
ref = Reference()
if dc.get('identifier', {}).get('identifierType') == "DOI":
ref.doi = dc.get('identifier', {}).get('identifier')
ref.title = dc.get('title')
ref.publisher = dc.get('publisher')
ref.year = dc.get('publicationYear')
ref.authors = [creator_to_person(x).name for x in dc.get('creators', [])] or None
return ref |
def reshape_range(tensor, i, j, shape):
"""Reshapes a tensor between dimensions i and j."""
t_shape = common_layers.shape_list(tensor)
target_shape = t_shape[:i] + shape + t_shape[j:]
return tf.reshape(tensor, target_shape) | Reshapes a tensor between dimensions i and j. | Below is the instruction that describes the task:
### Input:
Reshapes a tensor between dimensions i and j.
### Response:
def reshape_range(tensor, i, j, shape):
"""Reshapes a tensor between dimensions i and j."""
t_shape = common_layers.shape_list(tensor)
target_shape = t_shape[:i] + shape + t_shape[j:]
return tf.reshape(tensor, target_shape) |
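The record above depends on TensorFlow and tensor2tensor's common_layers.shape_list. As an editor's sketch, the same reshape-between-dimensions idea in plain NumPy — an equivalent illustration, not the library code:
import numpy as np

def reshape_range_np(arr, i, j, shape):
    """Reshape dimensions i..j-1 of `arr` into `shape`, keeping the rest."""
    target_shape = list(arr.shape[:i]) + list(shape) + list(arr.shape[j:])
    return arr.reshape(target_shape)

x = np.zeros((2, 3, 4, 5))
y = reshape_range_np(x, 1, 3, [12])   # merge dims 1 and 2
print(y.shape)                        # (2, 12, 5)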
def pointAt(self, **axis_values):
"""
Returns the point on the chart where the inputed values are located.
:return <QPointF>
"""
scene_point = self.renderer().pointAt(self.axes(), axis_values)
chart_point = self.uiChartVIEW.mapFromScene(scene_point)
return self.uiChartVIEW.mapToParent(chart_point) | Returns the point on the chart where the inputed values are located.
:return <QPointF> | Below is the instruction that describes the task:
### Input:
Returns the point on the chart where the inputed values are located.
:return <QPointF>
### Response:
def pointAt(self, **axis_values):
"""
Returns the point on the chart where the inputed values are located.
:return <QPointF>
"""
scene_point = self.renderer().pointAt(self.axes(), axis_values)
chart_point = self.uiChartVIEW.mapFromScene(scene_point)
return self.uiChartVIEW.mapToParent(chart_point) |
def _encode_config(conf_dict):
"""Encode `conf_dict` to string."""
out = []
# get variables in order defined in settings._ALLOWED_MERGES
for var in settings._ALLOWED_MERGES:
out.append(conf_dict[var])
# convert bools to chars
out = map(
lambda x: "t" if x else "f",
out
)
return "".join(out) | Encode `conf_dict` to string. | Below is the the instruction that describes the task:
### Input:
Encode `conf_dict` to string.
### Response:
def _encode_config(conf_dict):
"""Encode `conf_dict` to string."""
out = []
# get variables in order defined in settings._ALLOWED_MERGES
for var in settings._ALLOWED_MERGES:
out.append(conf_dict[var])
# convert bools to chars
out = map(
lambda x: "t" if x else "f",
out
)
return "".join(out) |
def impact_rating(self):
'''
Returns the impact rating for this node. Impact rating is a measure
of how powerful this moment in the story is by evaluting how many simultaneous
arc elements are associated with it. There is also a generational bleed element,
where the impact score creates shockwaves throughout their direct ancestor and
descendant nodes. This echo fades fast, but the bigger the impact, the farther
it goes.
Currently, the impact bleed does not extend to sibling nodes.
WARNING: Here be dragons.
'''
if self.depth == 1:
logger.debug('Root node. Skipping.')
return 0 # pragma: no cover
impact_bleed = {
'mile': 0.5, # A milestone extends it's influence by 50% per generation
'tf_beat': 0.25,
}
inherited_impact = 0
base_impact, add_impact, mile_impact = self._local_impact_rating()
local_impact = base_impact + add_impact + mile_impact
logger.debug("Local impact is %f" % local_impact)
parents = self.get_ancestors().filter(depth__gt=1)
children = self.get_descendants()
logger.debug('Found %d parents and %d children' % (parents.count(), children.count()))
for node in parents | children:
if node.depth == 1:
logger.debug("Skipping root node...")
else:
logger.debug('Checking a related node...')
b, a, m = node._local_impact_rating()
logger.debug('Related node has %f of additional impact and %f of milestone impact.' % (a, m))
if (a + m) > 0:
if node.depth > self.depth:
depth_diff = node.depth - self.depth
else:
depth_diff = self.depth - node.depth
logger.debug('There is a generational difference of %f. Adjusting impact bleed.' % depth_diff)
for x in range(depth_diff):
a = a * impact_bleed['tf_beat']
m = m * impact_bleed['mile']
logger.debug('Additional impact bleed of %f. Milestone impact bleed of %f' % (a, m))
inherited_impact += a + m
logger.debug('Final impact bleed of %f. Adding to inherited impact.' % inherited_impact)
else:
logger.debug('Node had 0 bleedworthy impact. Skipping...')
logger.debug('Inherited impact of %f. Adding to local impact of %f' % (inherited_impact, local_impact))
return local_impact + inherited_impact | Returns the impact rating for this node. Impact rating is a measure
of how powerful this moment in the story is by evaluting how many simultaneous
arc elements are associated with it. There is also a generational bleed element,
where the impact score creates shockwaves throughout their direct ancestor and
descendant nodes. This echo fades fast, but the bigger the impact, the farther
it goes.
Currently, the impact bleed does not extend to sibling nodes.
WARNING: Here be dragons. | Below is the instruction that describes the task:
### Input:
Returns the impact rating for this node. Impact rating is a measure
of how powerful this moment in the story is by evaluting how many simultaneous
arc elements are associated with it. There is also a generational bleed element,
where the impact score creates shockwaves throughout their direct ancestor and
descendant nodes. This echo fades fast, but the bigger the impact, the farther
it goes.
Currently, the impact bleed does not extend to sibling nodes.
WARNING: Here be dragons.
### Response:
def impact_rating(self):
'''
Returns the impact rating for this node. Impact rating is a measure
of how powerful this moment in the story is by evaluting how many simultaneous
arc elements are associated with it. There is also a generational bleed element,
where the impact score creates shockwaves throughout their direct ancestor and
descendant nodes. This echo fades fast, but the bigger the impact, the farther
it goes.
Currently, the impact bleed does not extend to sibling nodes.
WARNING: Here be dragons.
'''
if self.depth == 1:
logger.debug('Root node. Skipping.')
return 0 # pragma: no cover
impact_bleed = {
'mile': 0.5, # A milestone extends it's influence by 50% per generation
'tf_beat': 0.25,
}
inherited_impact = 0
base_impact, add_impact, mile_impact = self._local_impact_rating()
local_impact = base_impact + add_impact + mile_impact
logger.debug("Local impact is %f" % local_impact)
parents = self.get_ancestors().filter(depth__gt=1)
children = self.get_descendants()
logger.debug('Found %d parents and %d children' % (parents.count(), children.count()))
for node in parents | children:
if node.depth == 1:
logger.debug("Skipping root node...")
else:
logger.debug('Checking a related node...')
b, a, m = node._local_impact_rating()
logger.debug('Related node has %f of additional impact and %f of milestone impact.' % (a, m))
if (a + m) > 0:
if node.depth > self.depth:
depth_diff = node.depth - self.depth
else:
depth_diff = self.depth - node.depth
logger.debug('There is a generational difference of %f. Adjusting impact bleed.' % depth_diff)
for x in range(depth_diff):
a = a * impact_bleed['tf_beat']
m = m * impact_bleed['mile']
logger.debug('Additional impact bleed of %f. Milestone impact bleed of %f' % (a, m))
inherited_impact += a + m
logger.debug('Final impact bleed of %f. Adding to inherited impact.' % inherited_impact)
else:
logger.debug('Node had 0 bleedworthy impact. Skipping...')
logger.debug('Inherited impact of %f. Adding to local impact of %f' % (inherited_impact, local_impact))
return local_impact + inherited_impact |
def _create_variables_pretrain(self):
"""Create model variables (previous unsupervised pretraining).
:return: self
"""
for l, layer in enumerate(self.layers):
self.encoding_w_[l] = tf.Variable(
self.encoding_w_[l], name='enc-w-{}'.format(l))
self.encoding_b_[l] = tf.Variable(
self.encoding_b_[l], name='enc-b-{}'.format(l)) | Create model variables (previous unsupervised pretraining).
:return: self | Below is the instruction that describes the task:
### Input:
Create model variables (previous unsupervised pretraining).
:return: self
### Response:
def _create_variables_pretrain(self):
"""Create model variables (previous unsupervised pretraining).
:return: self
"""
for l, layer in enumerate(self.layers):
self.encoding_w_[l] = tf.Variable(
self.encoding_w_[l], name='enc-w-{}'.format(l))
self.encoding_b_[l] = tf.Variable(
self.encoding_b_[l], name='enc-b-{}'.format(l)) |
def main_photo(self):
"""Return user's main photo."""
if not self._main_photo:
self._main_photo = self.photos_factory()
return self._main_photo | Return user's main photo. | Below is the instruction that describes the task:
### Input:
Return user's main photo.
### Response:
def main_photo(self):
"""Return user's main photo."""
if not self._main_photo:
self._main_photo = self.photos_factory()
return self._main_photo |
def get_len(self, key):
"""Return sequence length"""
data = self.model.get_data()
return len(data[key]) | Return sequence length | Below is the instruction that describes the task:
### Input:
Return sequence length
### Response:
def get_len(self, key):
"""Return sequence length"""
data = self.model.get_data()
return len(data[key]) |
def compile_dependencies(self, sourcepath, include_self=True):
"""
Same as inherit method but the default value for keyword argument
``ìnclude_self`` is ``True``.
"""
return super(SassProjectEventHandler, self).compile_dependencies(
sourcepath,
include_self=include_self
) | Same as inherit method but the default value for keyword argument
``ìnclude_self`` is ``True``. | Below is the instruction that describes the task:
### Input:
Same as inherit method but the default value for keyword argument
``ìnclude_self`` is ``True``.
### Response:
def compile_dependencies(self, sourcepath, include_self=True):
"""
Same as inherit method but the default value for keyword argument
``ìnclude_self`` is ``True``.
"""
return super(SassProjectEventHandler, self).compile_dependencies(
sourcepath,
include_self=include_self
) |
def header(self):
'''
Displays the scan header, as defined by self.HEADER and self.HEADER_FORMAT.
Returns None.
'''
self.config.display.format_strings(self.HEADER_FORMAT, self.RESULT_FORMAT)
self.config.display.add_custom_header(self.VERBOSE_FORMAT, self.VERBOSE)
if type(self.HEADER) == type([]):
self.config.display.header(*self.HEADER, file_name=self.current_target_file_name)
elif self.HEADER:
self.config.display.header(self.HEADER, file_name=self.current_target_file_name) | Displays the scan header, as defined by self.HEADER and self.HEADER_FORMAT.
Returns None. | Below is the instruction that describes the task:
### Input:
Displays the scan header, as defined by self.HEADER and self.HEADER_FORMAT.
Returns None.
### Response:
def header(self):
'''
Displays the scan header, as defined by self.HEADER and self.HEADER_FORMAT.
Returns None.
'''
self.config.display.format_strings(self.HEADER_FORMAT, self.RESULT_FORMAT)
self.config.display.add_custom_header(self.VERBOSE_FORMAT, self.VERBOSE)
if type(self.HEADER) == type([]):
self.config.display.header(*self.HEADER, file_name=self.current_target_file_name)
elif self.HEADER:
self.config.display.header(self.HEADER, file_name=self.current_target_file_name) |
def get_access_token(self):
"""
判断现有的token是否过期。
用户需要多进程或者多机部署可以手动重写这个函数
来自定义token的存储,刷新策略。
:return: 返回token
"""
if self._token:
now = time.time()
if self.token_expires_at - now > 60:
return self._token
json = self.grant_token()
self._token = json["access_token"]
self.token_expires_at = int(time.time()) + json["expires_in"]
return self._token | 判断现有的token是否过期。
用户需要多进程或者多机部署可以手动重写这个函数
来自定义token的存储,刷新策略。
:return: 返回token | Below is the instruction that describes the task:
### Input:
判断现有的token是否过期。
用户需要多进程或者多机部署可以手动重写这个函数
来自定义token的存储,刷新策略。
:return: 返回token
### Response:
def get_access_token(self):
"""
判断现有的token是否过期。
用户需要多进程或者多机部署可以手动重写这个函数
来自定义token的存储,刷新策略。
:return: 返回token
"""
if self._token:
now = time.time()
if self.token_expires_at - now > 60:
return self._token
json = self.grant_token()
self._token = json["access_token"]
self.token_expires_at = int(time.time()) + json["expires_in"]
return self._token |
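The caching rule in the record above — reuse the token while more than 60 seconds of validity remain, otherwise refresh — can be sketched standalone. In this editor's sketch, grant_token is a fake stand-in for the real token endpoint.
import time

class TokenCache:
    def __init__(self):
        self._token = None
        self.token_expires_at = 0
        self.grants = 0                      # how many times we hit the "server"

    def grant_token(self):
        # Stand-in for the real token endpoint.
        self.grants += 1
        return {"access_token": "token-%d" % self.grants, "expires_in": 7200}

    def get_access_token(self):
        # Reuse the cached token while it still has more than 60s of life left.
        if self._token and self.token_expires_at - time.time() > 60:
            return self._token
        payload = self.grant_token()
        self._token = payload["access_token"]
        self.token_expires_at = int(time.time()) + payload["expires_in"]
        return self._token

cache = TokenCache()
cache.get_access_token()
cache.get_access_token()
print(cache.grants)   # 1 -- the second call was served from the cache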
def bind(self):
"""Binds reference objects to the proper parse actions."""
self.endline <<= attach(self.endline_ref, self.endline_handle)
self.moduledoc_item <<= trace(attach(self.moduledoc, self.set_docstring))
self.name <<= trace(attach(self.base_name, self.name_check))
# comments are evaluated greedily because we need to know about them even if we're going to suppress them
self.comment <<= trace(attach(self.comment_ref, self.comment_handle, greedy=True))
self.set_literal <<= trace(attach(self.set_literal_ref, self.set_literal_handle))
self.set_letter_literal <<= trace(attach(self.set_letter_literal_ref, self.set_letter_literal_handle))
self.classlist <<= trace(attach(self.classlist_ref, self.classlist_handle))
self.import_stmt <<= trace(attach(self.import_stmt_ref, self.import_handle))
self.complex_raise_stmt <<= trace(attach(self.complex_raise_stmt_ref, self.complex_raise_stmt_handle))
self.augassign_stmt <<= trace(attach(self.augassign_stmt_ref, self.augassign_handle))
self.dict_comp <<= trace(attach(self.dict_comp_ref, self.dict_comp_handle))
self.destructuring_stmt <<= trace(attach(self.destructuring_stmt_ref, self.destructuring_stmt_handle))
self.name_match_funcdef <<= trace(attach(self.name_match_funcdef_ref, self.name_match_funcdef_handle))
self.op_match_funcdef <<= trace(attach(self.op_match_funcdef_ref, self.op_match_funcdef_handle))
self.yield_from <<= trace(attach(self.yield_from_ref, self.yield_from_handle))
self.exec_stmt <<= trace(attach(self.exec_stmt_ref, self.exec_stmt_handle))
self.stmt_lambdef <<= trace(attach(self.stmt_lambdef_ref, self.stmt_lambdef_handle))
self.typedef <<= trace(attach(self.typedef_ref, self.typedef_handle))
self.typedef_default <<= trace(attach(self.typedef_default_ref, self.typedef_handle))
self.unsafe_typedef_default <<= trace(attach(self.unsafe_typedef_default_ref, self.unsafe_typedef_handle))
self.return_typedef <<= trace(attach(self.return_typedef_ref, self.typedef_handle))
self.typed_assign_stmt <<= trace(attach(self.typed_assign_stmt_ref, self.typed_assign_stmt_handle))
self.datadef <<= trace(attach(self.datadef_ref, self.data_handle))
self.with_stmt <<= trace(attach(self.with_stmt_ref, self.with_stmt_handle))
self.await_item <<= trace(attach(self.await_item_ref, self.await_item_handle))
self.ellipsis <<= trace(attach(self.ellipsis_ref, self.ellipsis_handle))
self.case_stmt <<= trace(attach(self.case_stmt_ref, self.case_stmt_handle))
self.decoratable_normal_funcdef_stmt <<= trace(attach(
self.decoratable_normal_funcdef_stmt_ref,
self.decoratable_funcdef_stmt_handle,
))
self.decoratable_async_funcdef_stmt <<= trace(attach(
self.decoratable_async_funcdef_stmt_ref,
partial(self.decoratable_funcdef_stmt_handle, is_async=True),
))
self.u_string <<= attach(self.u_string_ref, self.u_string_check)
self.matrix_at <<= attach(self.matrix_at_ref, self.matrix_at_check)
self.nonlocal_stmt <<= attach(self.nonlocal_stmt_ref, self.nonlocal_check)
self.star_assign_item <<= attach(self.star_assign_item_ref, self.star_assign_item_check)
self.classic_lambdef <<= attach(self.classic_lambdef_ref, self.lambdef_check)
self.star_expr <<= attach(self.star_expr_ref, self.star_expr_check)
self.dubstar_expr <<= attach(self.dubstar_expr_ref, self.star_expr_check)
self.star_sep_arg <<= attach(self.star_sep_arg_ref, self.star_sep_check)
self.star_sep_vararg <<= attach(self.star_sep_vararg_ref, self.star_sep_check)
self.endline_semicolon <<= attach(self.endline_semicolon_ref, self.endline_semicolon_check)
self.async_stmt <<= attach(self.async_stmt_ref, self.async_stmt_check)
self.async_comp_for <<= attach(self.async_comp_for_ref, self.async_comp_check)
self.f_string <<= attach(self.f_string_ref, self.f_string_check) | Binds reference objects to the proper parse actions. | Below is the instruction that describes the task:
### Input:
Binds reference objects to the proper parse actions.
### Response:
def bind(self):
"""Binds reference objects to the proper parse actions."""
self.endline <<= attach(self.endline_ref, self.endline_handle)
self.moduledoc_item <<= trace(attach(self.moduledoc, self.set_docstring))
self.name <<= trace(attach(self.base_name, self.name_check))
# comments are evaluated greedily because we need to know about them even if we're going to suppress them
self.comment <<= trace(attach(self.comment_ref, self.comment_handle, greedy=True))
self.set_literal <<= trace(attach(self.set_literal_ref, self.set_literal_handle))
self.set_letter_literal <<= trace(attach(self.set_letter_literal_ref, self.set_letter_literal_handle))
self.classlist <<= trace(attach(self.classlist_ref, self.classlist_handle))
self.import_stmt <<= trace(attach(self.import_stmt_ref, self.import_handle))
self.complex_raise_stmt <<= trace(attach(self.complex_raise_stmt_ref, self.complex_raise_stmt_handle))
self.augassign_stmt <<= trace(attach(self.augassign_stmt_ref, self.augassign_handle))
self.dict_comp <<= trace(attach(self.dict_comp_ref, self.dict_comp_handle))
self.destructuring_stmt <<= trace(attach(self.destructuring_stmt_ref, self.destructuring_stmt_handle))
self.name_match_funcdef <<= trace(attach(self.name_match_funcdef_ref, self.name_match_funcdef_handle))
self.op_match_funcdef <<= trace(attach(self.op_match_funcdef_ref, self.op_match_funcdef_handle))
self.yield_from <<= trace(attach(self.yield_from_ref, self.yield_from_handle))
self.exec_stmt <<= trace(attach(self.exec_stmt_ref, self.exec_stmt_handle))
self.stmt_lambdef <<= trace(attach(self.stmt_lambdef_ref, self.stmt_lambdef_handle))
self.typedef <<= trace(attach(self.typedef_ref, self.typedef_handle))
self.typedef_default <<= trace(attach(self.typedef_default_ref, self.typedef_handle))
self.unsafe_typedef_default <<= trace(attach(self.unsafe_typedef_default_ref, self.unsafe_typedef_handle))
self.return_typedef <<= trace(attach(self.return_typedef_ref, self.typedef_handle))
self.typed_assign_stmt <<= trace(attach(self.typed_assign_stmt_ref, self.typed_assign_stmt_handle))
self.datadef <<= trace(attach(self.datadef_ref, self.data_handle))
self.with_stmt <<= trace(attach(self.with_stmt_ref, self.with_stmt_handle))
self.await_item <<= trace(attach(self.await_item_ref, self.await_item_handle))
self.ellipsis <<= trace(attach(self.ellipsis_ref, self.ellipsis_handle))
self.case_stmt <<= trace(attach(self.case_stmt_ref, self.case_stmt_handle))
self.decoratable_normal_funcdef_stmt <<= trace(attach(
self.decoratable_normal_funcdef_stmt_ref,
self.decoratable_funcdef_stmt_handle,
))
self.decoratable_async_funcdef_stmt <<= trace(attach(
self.decoratable_async_funcdef_stmt_ref,
partial(self.decoratable_funcdef_stmt_handle, is_async=True),
))
self.u_string <<= attach(self.u_string_ref, self.u_string_check)
self.matrix_at <<= attach(self.matrix_at_ref, self.matrix_at_check)
self.nonlocal_stmt <<= attach(self.nonlocal_stmt_ref, self.nonlocal_check)
self.star_assign_item <<= attach(self.star_assign_item_ref, self.star_assign_item_check)
self.classic_lambdef <<= attach(self.classic_lambdef_ref, self.lambdef_check)
self.star_expr <<= attach(self.star_expr_ref, self.star_expr_check)
self.dubstar_expr <<= attach(self.dubstar_expr_ref, self.star_expr_check)
self.star_sep_arg <<= attach(self.star_sep_arg_ref, self.star_sep_check)
self.star_sep_vararg <<= attach(self.star_sep_vararg_ref, self.star_sep_check)
self.endline_semicolon <<= attach(self.endline_semicolon_ref, self.endline_semicolon_check)
self.async_stmt <<= attach(self.async_stmt_ref, self.async_stmt_check)
self.async_comp_for <<= attach(self.async_comp_for_ref, self.async_comp_check)
self.f_string <<= attach(self.f_string_ref, self.f_string_check) |
def lock(self, id=str(uuid.uuid4()), ttl=DEFAULT_TIMEOUT):
"""Create a Lock object given an ID and timeout
:param id: ID for the lock, creates a new uuid if not provided
:param ttl: timeout
:return: Lock object
"""
return Lock(id, ttl=ttl, client=self) | Create a Lock object given an ID and timeout
:param id: ID for the lock, creates a new uuid if not provided
:param ttl: timeout
:return: Lock object | Below is the instruction that describes the task:
### Input:
Create a Lock object given an ID and timeout
:param id: ID for the lock, creates a new uuid if not provided
:param ttl: timeout
:return: Lock object
### Response:
def lock(self, id=str(uuid.uuid4()), ttl=DEFAULT_TIMEOUT):
"""Create a Lock object given an ID and timeout
:param id: ID for the lock, creates a new uuid if not provided
:param ttl: timeout
:return: Lock object
"""
return Lock(id, ttl=ttl, client=self) |
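One caveat about the signature in the record above: id=str(uuid.uuid4()) is evaluated once at function-definition time, so every call that omits id reuses the same UUID. A common fix, shown here as an editor's sketch with a minimal stand-in Lock class, is to default to None and generate the id inside the body.
import uuid

DEFAULT_TIMEOUT = 60

class Lock:
    def __init__(self, id, ttl, client=None):
        self.id, self.ttl, self.client = id, ttl, client

def make_lock(id=None, ttl=DEFAULT_TIMEOUT, client=None):
    # Generate a fresh UUID per call instead of reusing the default from import time.
    return Lock(id or str(uuid.uuid4()), ttl=ttl, client=client)

a, b = make_lock(), make_lock()
print(a.id != b.id)   # True: each lock gets its own id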
def text_cleanup(data, key, last_type):
""" I strip extra whitespace off multi-line strings if they are ready to be stripped!"""
if key in data and last_type == STRING_TYPE:
data[key] = data[key].strip()
return data | I strip extra whitespace off multi-line strings if they are ready to be stripped! | Below is the instruction that describes the task:
### Input:
I strip extra whitespace off multi-line strings if they are ready to be stripped!
### Response:
def text_cleanup(data, key, last_type):
""" I strip extra whitespace off multi-line strings if they are ready to be stripped!"""
if key in data and last_type == STRING_TYPE:
data[key] = data[key].strip()
return data |
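A quick usage sketch for the record above; STRING_TYPE is assumed here to be a sentinel constant used by the surrounding parser.
STRING_TYPE = 'string'   # assumed sentinel used by the surrounding parser

def text_cleanup(data, key, last_type):
    # Strip surrounding whitespace once a multi-line string value is complete.
    if key in data and last_type == STRING_TYPE:
        data[key] = data[key].strip()
    return data

record = {'description': '  multi-line value \n'}
print(text_cleanup(record, 'description', STRING_TYPE))   # {'description': 'multi-line value'}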
def fetch_batch(self):
""" Fetch a batch of data without waiting"""
inp, f = self.queue.get()
nr_input_var = len(inp)
batched, futures = [[] for _ in range(nr_input_var)], []
for k in range(nr_input_var):
batched[k].append(inp[k])
futures.append(f)
while len(futures) < self.batch_size:
try:
inp, f = self.queue.get_nowait()
for k in range(nr_input_var):
batched[k].append(inp[k])
futures.append(f)
except queue.Empty:
break # do not wait
for k in range(nr_input_var):
batched[k] = np.asarray(batched[k])
return batched, futures | Fetch a batch of data without waiting | Below is the instruction that describes the task:
### Input:
Fetch a batch of data without waiting
### Response:
def fetch_batch(self):
""" Fetch a batch of data without waiting"""
inp, f = self.queue.get()
nr_input_var = len(inp)
batched, futures = [[] for _ in range(nr_input_var)], []
for k in range(nr_input_var):
batched[k].append(inp[k])
futures.append(f)
while len(futures) < self.batch_size:
try:
inp, f = self.queue.get_nowait()
for k in range(nr_input_var):
batched[k].append(inp[k])
futures.append(f)
except queue.Empty:
break # do not wait
for k in range(nr_input_var):
batched[k] = np.asarray(batched[k])
return batched, futures |
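The batching idea in the record above — block for the first item, then drain the queue without waiting until the batch is full or the queue is empty — in a self-contained editor's sketch over a plain queue of (inputs, future) pairs:
import queue
import numpy as np

def fetch_batch(q, batch_size, nr_input_var):
    """Block for one item, then greedily drain up to batch_size items."""
    inp, f = q.get()
    batched = [[inp[k]] for k in range(nr_input_var)]
    futures = [f]
    while len(futures) < batch_size:
        try:
            inp, f = q.get_nowait()
        except queue.Empty:
            break                      # do not wait for a full batch
        for k in range(nr_input_var):
            batched[k].append(inp[k])
        futures.append(f)
    return [np.asarray(col) for col in batched], futures

q = queue.Queue()
for i in range(3):
    q.put((([i], [i * 10]), 'future-%d' % i))
batch, futs = fetch_batch(q, batch_size=8, nr_input_var=2)
print(batch[0].shape, futs)   # (3, 1) ['future-0', 'future-1', 'future-2']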
def _make_params_pb(params, param_types):
"""Helper for :meth:`execute_update`.
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``dml``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:rtype: Union[None, :class:`Struct`]
:returns: a struct message for the passed params, or None
:raises ValueError:
If ``param_types`` is None but ``params`` is not None.
:raises ValueError:
If ``params`` is None but ``param_types`` is not None.
"""
if params is not None:
if param_types is None:
raise ValueError("Specify 'param_types' when passing 'params'.")
return Struct(
fields={key: _make_value_pb(value) for key, value in params.items()}
)
else:
if param_types is not None:
raise ValueError("Specify 'params' when passing 'param_types'.")
return None | Helper for :meth:`execute_update`.
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``dml``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:rtype: Union[None, :class:`Struct`]
:returns: a struct message for the passed params, or None
:raises ValueError:
If ``param_types`` is None but ``params`` is not None.
:raises ValueError:
If ``params`` is None but ``param_types`` is not None. | Below is the instruction that describes the task:
### Input:
Helper for :meth:`execute_update`.
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``dml``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:rtype: Union[None, :class:`Struct`]
:returns: a struct message for the passed params, or None
:raises ValueError:
If ``param_types`` is None but ``params`` is not None.
:raises ValueError:
If ``params`` is None but ``param_types`` is not None.
### Response:
def _make_params_pb(params, param_types):
"""Helper for :meth:`execute_update`.
:type params: dict, {str -> column value}
:param params: values for parameter replacement. Keys must match
the names used in ``dml``.
:type param_types: dict[str -> Union[dict, .types.Type]]
:param param_types:
(Optional) maps explicit types for one or more param values;
required if parameters are passed.
:rtype: Union[None, :class:`Struct`]
:returns: a struct message for the passed params, or None
:raises ValueError:
If ``param_types`` is None but ``params`` is not None.
:raises ValueError:
If ``params`` is None but ``param_types`` is not None.
"""
if params is not None:
if param_types is None:
raise ValueError("Specify 'param_types' when passing 'params'.")
return Struct(
fields={key: _make_value_pb(value) for key, value in params.items()}
)
else:
if param_types is not None:
raise ValueError("Specify 'params' when passing 'param_types'.")
return None |
def _validate_user_class(cls, user_class):
"""
Validates the supplied user_class to make sure that it has the
class methods necessary to function correctly.
Requirements:
- ``lookup`` method. Accepts a string parameter, returns instance
- ``identify`` method. Accepts an identity parameter, returns instance
"""
PraetorianError.require_condition(
getattr(user_class, 'lookup', None) is not None,
textwrap.dedent("""
The user_class must have a lookup class method:
user_class.lookup(<str>) -> <user instance>
"""),
)
PraetorianError.require_condition(
getattr(user_class, 'identify', None) is not None,
textwrap.dedent("""
The user_class must have an identify class method:
user_class.identify(<identity>) -> <user instance>
"""),
)
# TODO: Figure out how to check for an identity property
return user_class | Validates the supplied user_class to make sure that it has the
class methods necessary to function correctly.
Requirements:
- ``lookup`` method. Accepts a string parameter, returns instance
- ``identify`` method. Accepts an identity parameter, returns instance | Below is the instruction that describes the task:
### Input:
Validates the supplied user_class to make sure that it has the
class methods necessary to function correctly.
Requirements:
- ``lookup`` method. Accepts a string parameter, returns instance
- ``identify`` method. Accepts an identity parameter, returns instance
### Response:
def _validate_user_class(cls, user_class):
"""
Validates the supplied user_class to make sure that it has the
class methods necessary to function correctly.
Requirements:
- ``lookup`` method. Accepts a string parameter, returns instance
- ``identify`` method. Accepts an identity parameter, returns instance
"""
PraetorianError.require_condition(
getattr(user_class, 'lookup', None) is not None,
textwrap.dedent("""
The user_class must have a lookup class method:
user_class.lookup(<str>) -> <user instance>
"""),
)
PraetorianError.require_condition(
getattr(user_class, 'identify', None) is not None,
textwrap.dedent("""
The user_class must have an identify class method:
user_class.identify(<identity>) -> <user instance>
"""),
)
# TODO: Figure out how to check for an identity property
return user_class |
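An editor's sketch of the kind of user class that would pass the validation above, exposing lookup and identify class methods. The in-memory storage dicts and the identity property are illustrative assumptions, not requirements of the original code beyond the two methods it checks for.
class User:
    _by_name = {}
    _by_id = {}

    def __init__(self, id, username):
        self.id, self.username = id, username
        User._by_name[username] = self
        User._by_id[id] = self

    @classmethod
    def lookup(cls, username):
        # username string -> instance (or None)
        return cls._by_name.get(username)

    @classmethod
    def identify(cls, id):
        # identity value -> instance (or None)
        return cls._by_id.get(id)

    @property
    def identity(self):
        return self.id

alice = User(1, 'alice')
print(User.lookup('alice') is alice, User.identify(1) is alice)   # True True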
def urlopen(link):
"""Return urllib2 urlopen
"""
try:
return urllib2.urlopen(link)
except urllib2.URLError:
pass
except ValueError:
return ""
except KeyboardInterrupt:
print("")
raise SystemExit() | Return urllib2 urlopen | Below is the instruction that describes the task:
### Input:
Return urllib2 urlopen
### Response:
def urlopen(link):
"""Return urllib2 urlopen
"""
try:
return urllib2.urlopen(link)
except urllib2.URLError:
pass
except ValueError:
return ""
except KeyboardInterrupt:
print("")
raise SystemExit() |
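The record above targets Python 2's urllib2. A rough Python 3 equivalent using urllib.request — an editor's sketch, not the project's code; the bare pass on URLError becomes an explicit None return.
import urllib.request
import urllib.error

def urlopen(link):
    """Return a response object for `link`, or ''/None on failure."""
    try:
        return urllib.request.urlopen(link)
    except urllib.error.URLError:
        return None            # unreachable host, HTTP errors, bad scheme, ...
    except ValueError:
        return ""              # malformed URL
    except KeyboardInterrupt:
        print("")
        raise SystemExit()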
def set_providers(self, *providers):
"""Replace current providers with given ones"""
if self.providers:
self.clear()
for provider in providers:
self.add(provider) | Replace current providers with given ones | Below is the instruction that describes the task:
### Input:
Replace current providers with given ones
### Response:
def set_providers(self, *providers):
"""Replace current providers with given ones"""
if self.providers:
self.clear()
for provider in providers:
self.add(provider) |
def collect_string_fields(format_string) -> Iterable[Optional[str]]:
""" Given a format string, return an iterator
of all the valid format fields. It handles nested fields
as well.
"""
formatter = string.Formatter()
try:
parseiterator = formatter.parse(format_string)
for result in parseiterator:
if all(item is None for item in result[1:]):
# not a replacement format
continue
name = result[1]
nested = result[2]
yield name
if nested:
for field in collect_string_fields(nested):
yield field
except ValueError as exc:
# Probably the format string is invalid.
if exc.args[0].startswith("cannot switch from manual"):
# On Jython, parsing a string with both manual
# and automatic positions will fail with a ValueError,
# while on CPython it will simply return the fields,
# the validation being done in the interpreter (?).
# We're just returning two mixed fields in order
# to trigger the format-combined-specification check.
yield ""
yield "1"
return
raise IncompleteFormatString(format_string) | Given a format string, return an iterator
of all the valid format fields. It handles nested fields
as well. | Below is the instruction that describes the task:
### Input:
Given a format string, return an iterator
of all the valid format fields. It handles nested fields
as well.
### Response:
def collect_string_fields(format_string) -> Iterable[Optional[str]]:
""" Given a format string, return an iterator
of all the valid format fields. It handles nested fields
as well.
"""
formatter = string.Formatter()
try:
parseiterator = formatter.parse(format_string)
for result in parseiterator:
if all(item is None for item in result[1:]):
# not a replacement format
continue
name = result[1]
nested = result[2]
yield name
if nested:
for field in collect_string_fields(nested):
yield field
except ValueError as exc:
# Probably the format string is invalid.
if exc.args[0].startswith("cannot switch from manual"):
# On Jython, parsing a string with both manual
# and automatic positions will fail with a ValueError,
# while on CPython it will simply return the fields,
# the validation being done in the interpreter (?).
# We're just returning two mixed fields in order
# to trigger the format-combined-specification check.
yield ""
yield "1"
return
raise IncompleteFormatString(format_string) |
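A standalone re-implementation of the core loop above (without the pylint-specific IncompleteFormatString handling), added by the editor to show what gets yielded; the printed outputs assume CPython, where Formatter.parse accepts mixed manual and automatic fields.
import string

def collect_string_fields(format_string):
    # Yield the name of every replacement field, recursing into nested specs.
    for literal, name, spec, conversion in string.Formatter().parse(format_string):
        if name is None and spec is None and conversion is None:
            continue                       # plain literal text, not a field
        yield name
        if spec:
            yield from collect_string_fields(spec)

print(list(collect_string_fields("{a} and {b:>{width}}")))   # ['a', 'b', 'width']
print(list(collect_string_fields("{} vs {0}")))              # ['', '0']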
def run_mainloop_with(self, target):
"""Start the OS's main loop to process asyncronous BLE events and then
run the specified target function in a background thread. Target
function should be a function that takes no parameters and optionally
return an integer response code. When the target function stops
executing or returns with value then the main loop will be stopped and
the program will exit with the returned code.
Note that an OS main loop is required to process asyncronous BLE events
and this function is provided as a convenience for writing simple tools
and scripts that don't need to be full-blown GUI applications. If you
are writing a GUI application that has a main loop (a GTK glib main loop
on Linux, or a Cocoa main loop on OSX) then you don't need to call this
function.
"""
# Create background thread to run user code.
self._user_thread = threading.Thread(target=self._user_thread_main,
args=(target,))
self._user_thread.daemon = True
self._user_thread.start()
# Run main loop. This call will never return!
try:
AppHelper.runConsoleEventLoop(installInterrupt=True)
except KeyboardInterrupt:
AppHelper.stopEventLoop()
sys.exit(0) | Start the OS's main loop to process asyncronous BLE events and then
run the specified target function in a background thread. Target
function should be a function that takes no parameters and optionally
return an integer response code. When the target function stops
executing or returns with value then the main loop will be stopped and
the program will exit with the returned code.
Note that an OS main loop is required to process asyncronous BLE events
and this function is provided as a convenience for writing simple tools
and scripts that don't need to be full-blown GUI applications. If you
are writing a GUI application that has a main loop (a GTK glib main loop
on Linux, or a Cocoa main loop on OSX) then you don't need to call this
function. | Below is the instruction that describes the task:
### Input:
Start the OS's main loop to process asyncronous BLE events and then
run the specified target function in a background thread. Target
function should be a function that takes no parameters and optionally
return an integer response code. When the target function stops
executing or returns with value then the main loop will be stopped and
the program will exit with the returned code.
Note that an OS main loop is required to process asyncronous BLE events
and this function is provided as a convenience for writing simple tools
and scripts that don't need to be full-blown GUI applications. If you
are writing a GUI application that has a main loop (a GTK glib main loop
on Linux, or a Cocoa main loop on OSX) then you don't need to call this
function.
### Response:
def run_mainloop_with(self, target):
"""Start the OS's main loop to process asyncronous BLE events and then
run the specified target function in a background thread. Target
function should be a function that takes no parameters and optionally
return an integer response code. When the target function stops
executing or returns with value then the main loop will be stopped and
the program will exit with the returned code.
Note that an OS main loop is required to process asyncronous BLE events
and this function is provided as a convenience for writing simple tools
and scripts that don't need to be full-blown GUI applications. If you
are writing a GUI application that has a main loop (a GTK glib main loop
on Linux, or a Cocoa main loop on OSX) then you don't need to call this
function.
"""
# Create background thread to run user code.
self._user_thread = threading.Thread(target=self._user_thread_main,
args=(target,))
self._user_thread.daemon = True
self._user_thread.start()
# Run main loop. This call will never return!
try:
AppHelper.runConsoleEventLoop(installInterrupt=True)
except KeyboardInterrupt:
AppHelper.stopEventLoop()
sys.exit(0) |
def currentRegion(self):
"""
Returns the current region based on the current cursor position.
:return <XDropZoneWidget>
"""
pos = QtGui.QCursor.pos()
pos = self.mapFromGlobal(pos)
for region in self.regions():
if region.testHovered(pos):
return region
return None | Returns the current region based on the current cursor position.
:return <XDropZoneWidget> | Below is the instruction that describes the task:
### Input:
Returns the current region based on the current cursor position.
:return <XDropZoneWidget>
### Response:
def currentRegion(self):
"""
Returns the current region based on the current cursor position.
:return <XDropZoneWidget>
"""
pos = QtGui.QCursor.pos()
pos = self.mapFromGlobal(pos)
for region in self.regions():
if region.testHovered(pos):
return region
return None |
def decode_b64(data):
'''Wrapper for b64decode, without having to struggle with bytestrings.'''
byte_string = data.encode('utf-8')
decoded = base64.b64decode(byte_string)
return decoded.decode('utf-8') | Wrapper for b64decode, without having to struggle with bytestrings. | Below is the instruction that describes the task:
### Input:
Wrapper for b64decode, without having to struggle with bytestrings.
### Response:
def decode_b64(data):
'''Wrapper for b64decode, without having to struggle with bytestrings.'''
byte_string = data.encode('utf-8')
decoded = base64.b64decode(byte_string)
return decoded.decode('utf-8') |
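A round-trip usage sketch for the record above; encode_b64 is an illustrative inverse helper added by the editor, not part of the original module.
import base64

def encode_b64(text):
    # Inverse helper for the round trip below.
    return base64.b64encode(text.encode('utf-8')).decode('utf-8')

def decode_b64(data):
    byte_string = data.encode('utf-8')
    return base64.b64decode(byte_string).decode('utf-8')

token = encode_b64('héllo world')
print(token)               # base64 of the UTF-8 bytes
print(decode_b64(token))   # héllo world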
def POST_AUTH(self, courseid): # pylint: disable=arguments-differ
""" POST request """
course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=False)
errors = []
course_content = {}
try:
data = web.input()
course_content = self.course_factory.get_course_descriptor_content(courseid)
course_content['name'] = data['name']
if course_content['name'] == "":
errors.append(_('Invalid name'))
course_content['description'] = data['description']
course_content['admins'] = list(map(str.strip, data['admins'].split(',')))
if not self.user_manager.user_is_superadmin() and self.user_manager.session_username() not in course_content['admins']:
errors.append(_('You cannot remove yourself from the administrators of this course'))
course_content['tutors'] = list(map(str.strip, data['tutors'].split(',')))
if len(course_content['tutors']) == 1 and course_content['tutors'][0].strip() == "":
course_content['tutors'] = []
course_content['groups_student_choice'] = True if data["groups_student_choice"] == "true" else False
if course_content.get('use_classrooms', True) != (data['use_classrooms'] == "true"):
self.database.aggregations.delete_many({"courseid": course.get_id()})
course_content['use_classrooms'] = True if data["use_classrooms"] == "true" else False
if data["accessible"] == "custom":
course_content['accessible'] = "{}/{}".format(data["accessible_start"], data["accessible_end"])
elif data["accessible"] == "true":
course_content['accessible'] = True
else:
course_content['accessible'] = False
try:
AccessibleTime(course_content['accessible'])
except:
errors.append(_('Invalid accessibility dates'))
course_content['allow_unregister'] = True if data["allow_unregister"] == "true" else False
course_content['allow_preview'] = True if data["allow_preview"] == "true" else False
if data["registration"] == "custom":
course_content['registration'] = "{}/{}".format(data["registration_start"], data["registration_end"])
elif data["registration"] == "true":
course_content['registration'] = True
else:
course_content['registration'] = False
try:
AccessibleTime(course_content['registration'])
except:
errors.append(_('Invalid registration dates'))
course_content['registration_password'] = data['registration_password']
if course_content['registration_password'] == "":
course_content['registration_password'] = None
course_content['registration_ac'] = data['registration_ac']
if course_content['registration_ac'] not in ["None", "username", "binding", "email"]:
errors.append(_('Invalid ACL value'))
if course_content['registration_ac'] == "None":
course_content['registration_ac'] = None
course_content['registration_ac_list'] = data['registration_ac_list'].splitlines()
course_content['is_lti'] = 'lti' in data and data['lti'] == "true"
course_content['lti_keys'] = dict([x.split(":") for x in data['lti_keys'].splitlines() if x])
for lti_key in course_content['lti_keys'].keys():
if not re.match("^[a-zA-Z0-9]*$", lti_key):
errors.append(_("LTI keys must be alphanumerical."))
course_content['lti_send_back_grade'] = 'lti_send_back_grade' in data and data['lti_send_back_grade'] == "true"
except:
errors.append(_('User returned an invalid form.'))
if len(errors) == 0:
self.course_factory.update_course_descriptor_content(courseid, course_content)
errors = None
course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=False) # don't forget to reload the modified course
return self.page(course, errors, errors is None) | POST request | Below is the instruction that describes the task:
### Input:
POST request
### Response:
def POST_AUTH(self, courseid): # pylint: disable=arguments-differ
""" POST request """
course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=False)
errors = []
course_content = {}
try:
data = web.input()
course_content = self.course_factory.get_course_descriptor_content(courseid)
course_content['name'] = data['name']
if course_content['name'] == "":
errors.append(_('Invalid name'))
course_content['description'] = data['description']
course_content['admins'] = list(map(str.strip, data['admins'].split(',')))
if not self.user_manager.user_is_superadmin() and self.user_manager.session_username() not in course_content['admins']:
errors.append(_('You cannot remove yourself from the administrators of this course'))
course_content['tutors'] = list(map(str.strip, data['tutors'].split(',')))
if len(course_content['tutors']) == 1 and course_content['tutors'][0].strip() == "":
course_content['tutors'] = []
course_content['groups_student_choice'] = True if data["groups_student_choice"] == "true" else False
if course_content.get('use_classrooms', True) != (data['use_classrooms'] == "true"):
self.database.aggregations.delete_many({"courseid": course.get_id()})
course_content['use_classrooms'] = True if data["use_classrooms"] == "true" else False
if data["accessible"] == "custom":
course_content['accessible'] = "{}/{}".format(data["accessible_start"], data["accessible_end"])
elif data["accessible"] == "true":
course_content['accessible'] = True
else:
course_content['accessible'] = False
try:
AccessibleTime(course_content['accessible'])
except:
errors.append(_('Invalid accessibility dates'))
course_content['allow_unregister'] = True if data["allow_unregister"] == "true" else False
course_content['allow_preview'] = True if data["allow_preview"] == "true" else False
if data["registration"] == "custom":
course_content['registration'] = "{}/{}".format(data["registration_start"], data["registration_end"])
elif data["registration"] == "true":
course_content['registration'] = True
else:
course_content['registration'] = False
try:
AccessibleTime(course_content['registration'])
except:
errors.append(_('Invalid registration dates'))
course_content['registration_password'] = data['registration_password']
if course_content['registration_password'] == "":
course_content['registration_password'] = None
course_content['registration_ac'] = data['registration_ac']
if course_content['registration_ac'] not in ["None", "username", "binding", "email"]:
errors.append(_('Invalid ACL value'))
if course_content['registration_ac'] == "None":
course_content['registration_ac'] = None
course_content['registration_ac_list'] = data['registration_ac_list'].splitlines()
course_content['is_lti'] = 'lti' in data and data['lti'] == "true"
course_content['lti_keys'] = dict([x.split(":") for x in data['lti_keys'].splitlines() if x])
for lti_key in course_content['lti_keys'].keys():
if not re.match("^[a-zA-Z0-9]*$", lti_key):
errors.append(_("LTI keys must be alphanumerical."))
course_content['lti_send_back_grade'] = 'lti_send_back_grade' in data and data['lti_send_back_grade'] == "true"
except:
errors.append(_('User returned an invalid form.'))
if len(errors) == 0:
self.course_factory.update_course_descriptor_content(courseid, course_content)
errors = None
course, __ = self.get_course_and_check_rights(courseid, allow_all_staff=False) # don't forget to reload the modified course
return self.page(course, errors, errors is None) |
def fpy_interface(fpy, static, interface, typedict):
"""Splices the full list of subroutines and the module procedure list
into the static.f90 file.
:arg static: the string contents of the static.f90 file.
:arg interface: the name of the interface *field* being replaced.
:arg typedict: the dictionary of dtypes and their kind and suffix combos.
"""
modprocs = []
subtext = []
for dtype, combos in list(typedict.items()):
for tcombo in combos:
kind, suffix = tcombo
xnames, sub = fpy_interface_sub(fpy, dtype, kind, suffix)
modprocs.extend(xnames)
subtext.append(sub)
subtext.append("\n")
#Next, chunk the names of the module procedures into blocks of five
#so that they display nicely for human readability.
from fortpy.printing.formatting import present_params
splice = static.replace(interface, present_params(modprocs, 21))
return splice.replace(interface.replace("py", "xpy"), ''.join(subtext)) | Splices the full list of subroutines and the module procedure list
into the static.f90 file.
:arg static: the string contents of the static.f90 file.
:arg interface: the name of the interface *field* being replaced.
:arg typedict: the dictionary of dtypes and their kind and suffix combos. | Below is the instruction that describes the task:
### Input:
Splices the full list of subroutines and the module procedure list
into the static.f90 file.
:arg static: the string contents of the static.f90 file.
:arg interface: the name of the interface *field* being replaced.
:arg typedict: the dictionary of dtypes and their kind and suffix combos.
### Response:
def fpy_interface(fpy, static, interface, typedict):
"""Splices the full list of subroutines and the module procedure list
into the static.f90 file.
:arg static: the string contents of the static.f90 file.
:arg interface: the name of the interface *field* being replaced.
:arg typedict: the dictionary of dtypes and their kind and suffix combos.
"""
modprocs = []
subtext = []
for dtype, combos in list(typedict.items()):
for tcombo in combos:
kind, suffix = tcombo
xnames, sub = fpy_interface_sub(fpy, dtype, kind, suffix)
modprocs.extend(xnames)
subtext.append(sub)
subtext.append("\n")
#Next, chunk the names of the module procedures into blocks of five
#so that they display nicely for human readability.
from fortpy.printing.formatting import present_params
splice = static.replace(interface, present_params(modprocs, 21))
return splice.replace(interface.replace("py", "xpy"), ''.join(subtext)) |