code (stringlengths 75 – 104k) | docstring (stringlengths 1 – 46.9k) | text (stringlengths 164 – 112k) |
---|---|---|
def OnOpen(self, event):
"""File open event handler"""
# If changes have taken place, save the old grid
if undo.stack().haschanged():
save_choice = self.interfaces.get_save_request_from_user()
if save_choice is None:
# Cancelled close operation
return
elif save_choice:
# User wants to save content
post_command_event(self.main_window, self.main_window.SaveMsg)
# Get filepath from user
f2w = get_filetypes2wildcards(
["pys", "pysu", "xls", "xlsx", "ods", "all"])
filetypes = f2w.keys()
wildcards = f2w.values()
wildcard = "|".join(wildcards)
message = _("Choose file to open.")
style = wx.OPEN
default_filetype = config["default_open_filetype"]
try:
default_filterindex = filetypes.index(default_filetype)
except ValueError:
# Be graceful if the user has entered an unknown filetype
default_filterindex = 0
get_fp_fidx = self.interfaces.get_filepath_findex_from_user
filepath, filterindex = get_fp_fidx(wildcard, message, style,
filterindex=default_filterindex)
if filepath is None:
return
filetype = filetypes[filterindex]
# Change the main window filepath state
self.main_window.filepath = filepath
# Load file into grid
post_command_event(self.main_window,
self.main_window.GridActionOpenMsg,
attr={"filepath": filepath, "filetype": filetype})
# Set Window title to new filepath
title_text = filepath.split("/")[-1] + " - pyspread"
post_command_event(self.main_window,
self.main_window.TitleMsg, text=title_text)
self.main_window.grid.ForceRefresh()
if is_gtk():
try:
wx.Yield()
except:
pass
# Update savepoint and clear the undo stack
undo.stack().clear()
undo.stack().savepoint()
# Update content changed state
try:
post_command_event(self.main_window, self.ContentChangedMsg)
except TypeError:
# The main window does not exist any more
pass | File open event handler | Below is the instruction that describes the task:
### Input:
File open event handler
### Response:
def OnOpen(self, event):
"""File open event handler"""
# If changes have taken place, save the old grid
if undo.stack().haschanged():
save_choice = self.interfaces.get_save_request_from_user()
if save_choice is None:
# Cancelled close operation
return
elif save_choice:
# User wants to save content
post_command_event(self.main_window, self.main_window.SaveMsg)
# Get filepath from user
f2w = get_filetypes2wildcards(
["pys", "pysu", "xls", "xlsx", "ods", "all"])
filetypes = f2w.keys()
wildcards = f2w.values()
wildcard = "|".join(wildcards)
message = _("Choose file to open.")
style = wx.OPEN
default_filetype = config["default_open_filetype"]
try:
default_filterindex = filetypes.index(default_filetype)
except ValueError:
# Be graceful if the user has entered an unknown filetype
default_filterindex = 0
get_fp_fidx = self.interfaces.get_filepath_findex_from_user
filepath, filterindex = get_fp_fidx(wildcard, message, style,
filterindex=default_filterindex)
if filepath is None:
return
filetype = filetypes[filterindex]
# Change the main window filepath state
self.main_window.filepath = filepath
# Load file into grid
post_command_event(self.main_window,
self.main_window.GridActionOpenMsg,
attr={"filepath": filepath, "filetype": filetype})
# Set Window title to new filepath
title_text = filepath.split("/")[-1] + " - pyspread"
post_command_event(self.main_window,
self.main_window.TitleMsg, text=title_text)
self.main_window.grid.ForceRefresh()
if is_gtk():
try:
wx.Yield()
except:
pass
# Update savepoint and clear the undo stack
undo.stack().clear()
undo.stack().savepoint()
# Update content changed state
try:
post_command_event(self.main_window, self.ContentChangedMsg)
except TypeError:
# The main window does not exist any more
pass |
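A side note on the row above: `OnOpen` calls `.index()` and integer indexing on `f2w.keys()`, which only works where `dict.keys()` returns a list (Python 2 / wxPython Classic). The sketch below shows the same wildcard and filter-index bookkeeping in a form that also runs on Python 3; the `f2w` mapping is a made-up stand-in for whatever `get_filetypes2wildcards` returns, not the real pyspread helper.

```python
# Minimal sketch of the wildcard / filter-index bookkeeping used in OnOpen.
# The mapping below is a hypothetical stand-in for get_filetypes2wildcards().

def build_wildcard(f2w, default_filetype):
    # Materialise keys/values as lists so integer indexing also works on
    # Python 3, where dict.keys() returns a non-indexable view.
    filetypes = list(f2w.keys())
    wildcards = list(f2w.values())
    wildcard = "|".join(wildcards)
    try:
        default_filterindex = filetypes.index(default_filetype)
    except ValueError:
        # Fall back to the first entry for an unknown filetype
        default_filterindex = 0
    return filetypes, wildcard, default_filterindex


if __name__ == "__main__":
    f2w = {
        "pys": "Pyspread file (*.pys)|*.pys",
        "xls": "Excel file (*.xls)|*.xls",
        "all": "All files (*.*)|*.*",
    }
    filetypes, wildcard, idx = build_wildcard(f2w, "xls")
    print(wildcard)        # the three wildcard strings joined with "|"
    print(filetypes[idx])  # "xls"
```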
def preprocess_legislation(legislation_json):
'''
Preprocess the legislation parameters to add prices and amounts from national accounts
'''
import os
import pkg_resources
import pandas as pd
# Add fuel prices to the tree
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
prix_annuel_carburants = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'prix',
'prix_annuel_carburants.csv'
), sep =';'
)
prix_annuel_carburants['Date'] = prix_annuel_carburants['Date'].astype(int)
prix_annuel_carburants = prix_annuel_carburants.set_index('Date')
all_values = {}
prix_carburants = {
"@type": "Node",
"description": "prix des carburants en euros par hectolitre",
"children": {},
}
# For super_95_e10, we need to use the price of super_95 between 2009 and 2012 included,
# because we don't have the data. We use super_95 because it is very close and won't affect the results too much
prix_annuel = prix_annuel_carburants['super_95_e10_ttc']
all_values['super_95_e10_ttc'] = []
for year in range(1990, 2009):
values1 = dict()
values1['start'] = u'{}-01-01'.format(year)
values1['stop'] = u'{}-12-31'.format(year)
values1['value'] = prix_annuel.loc[year] * 100
all_values['super_95_e10_ttc'].append(values1)
prix_annuel = prix_annuel_carburants['super_95_ttc']
for year in range(2009, 2013):
values2 = dict()
values2['start'] = u'{}-01-01'.format(year)
values2['stop'] = u'{}-12-31'.format(year)
values2['value'] = prix_annuel.loc[year] * 100
all_values['super_95_e10_ttc'].append(values2)
prix_annuel = prix_annuel_carburants['super_95_e10_ttc']
for year in range(2013, 2015):
values3 = dict()
values3['start'] = u'{}-01-01'.format(year)
values3['stop'] = u'{}-12-31'.format(year)
values3['value'] = prix_annuel.loc[year] * 100
all_values['super_95_e10_ttc'].append(values3)
prix_carburants['children']['super_95_e10_ttc'] = {
"@type": "Parameter",
"description": 'super_95_e10_ttc'.replace('_', ' '),
"format": "float",
"values": all_values['super_95_e10_ttc']
}
for element in ['diesel_ht', 'diesel_ttc', 'super_95_ht', 'super_95_ttc', 'super_98_ht', 'super_98_ttc',
'super_95_e10_ht', 'gplc_ht', 'gplc_ttc', 'super_plombe_ht', 'super_plombe_ttc']:
assert element in prix_annuel_carburants.columns
prix_annuel = prix_annuel_carburants[element]
all_values[element] = []
for year in range(1990, 2015):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = prix_annuel.loc[year] * 100
all_values[element].append(values)
prix_carburants['children'][element] = {
"@type": "Parameter",
"description": element.replace('_', ' '),
"format": "float",
"values": all_values[element]
}
legislation_json['children']['imposition_indirecte']['children']['prix_carburants'] = prix_carburants
# Add the number of vehicles in circulation to the tree
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
parc_annuel_moyen_vp = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'quantites',
'parc_annuel_moyen_vp.csv'
), sep =';'
)
parc_annuel_moyen_vp = parc_annuel_moyen_vp.set_index('Unnamed: 0')
values_parc = {}
parc_vp = {
"@type": "Node",
"description": "taille moyenne du parc automobile en France métropolitaine en milliers de véhicules",
"children": {},
}
for element in ['diesel', 'essence']:
taille_parc = parc_annuel_moyen_vp[element]
values_parc[element] = []
for year in range(1990, 2014):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = taille_parc.loc[year]
values_parc[element].append(values)
parc_vp['children'][element] = {
"@type": "Parameter",
"description": "nombre de véhicules particuliers immatriculés en France à motorisation " + element,
"format": "float",
"values": values_parc[element]
}
legislation_json['children']['imposition_indirecte']['children']['parc_vp'] = parc_vp
# Add the total quantity of fuel consumed per year to the tree
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
quantite_carbu_vp_france = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'quantites',
'quantite_carbu_vp_france.csv'
), sep =';'
)
quantite_carbu_vp_france = quantite_carbu_vp_france.set_index('Unnamed: 0')
values_quantite = {}
quantite_carbu_vp = {
"@type": "Node",
"description": "quantite de carburants consommés en France métropolitaine",
"children": {},
}
for element in ['diesel', 'essence']:
quantite_carburants = quantite_carbu_vp_france[element]
values_quantite[element] = []
for year in range(1990, 2014):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = quantite_carburants.loc[year]
values_quantite[element].append(values)
quantite_carbu_vp['children'][element] = {
"@type": "Parameter",
"description": "consommation totale de " + element + " en France",
"format": "float",
"values": values_quantite[element]
}
legislation_json['children']['imposition_indirecte']['children']['quantite_carbu_vp'] = quantite_carbu_vp
# Add the shares of each type of supercarburant (SP95, SP98, E10, etc.) among supercarburants
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
part_des_types_de_supercarburants = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'part_des_types_de_supercarburants.csv'
), sep =';'
)
del part_des_types_de_supercarburants['Source']
part_des_types_de_supercarburants = \
part_des_types_de_supercarburants[part_des_types_de_supercarburants['annee'] > 0].copy()
part_des_types_de_supercarburants['annee'] = part_des_types_de_supercarburants['annee'].astype(int)
part_des_types_de_supercarburants = part_des_types_de_supercarburants.set_index('annee')
# delete share of e_85 because we have no data for its price
# When the sum of all shares is not one, need to multiply each share by the same coefficient
cols = part_des_types_de_supercarburants.columns
for element in cols:
part_des_types_de_supercarburants[element] = (
part_des_types_de_supercarburants[element] /
(part_des_types_de_supercarburants['somme'] - part_des_types_de_supercarburants['sp_e85'])
)
del part_des_types_de_supercarburants['sp_e85']
del part_des_types_de_supercarburants['somme']
cols = part_des_types_de_supercarburants.columns
part_des_types_de_supercarburants['somme'] = 0
for element in cols:
part_des_types_de_supercarburants['somme'] += part_des_types_de_supercarburants[element]
assert (part_des_types_de_supercarburants['somme'] == 1).any(), "The weighting of the shares did not work"
values_part_supercarburants = {}
part_type_supercaburant = {
"@type": "Node",
"description": "part de la consommation totale d'essence de chaque type supercarburant",
"children": {},
}
for element in ['super_plombe', 'sp_95', 'sp_98', 'sp_e10']:
part_par_carburant = part_des_types_de_supercarburants[element]
values_part_supercarburants[element] = []
for year in range(2000, 2015):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = part_par_carburant.loc[year]
values_part_supercarburants[element].append(values)
part_type_supercaburant['children'][element] = {
"@type": "Parameter",
"description": "part de " + element + " dans la consommation totale d'essences",
"format": "float",
"values": values_part_supercarburants[element]
}
legislation_json['children']['imposition_indirecte']['children']['part_type_supercarburants'] = \
part_type_supercaburant
# Add data from comptabilité nationale about alcohol
alcool_conso_et_vin = {
"@type": "Node",
"description": "alcools",
"children": {},
}
alcool_conso_et_vin['children']['vin'] = {
"@type": "Node",
"description": "Pour calculer le taux de taxation implicite sur le vin",
"children": {
"droit_cn_vin": {
"@type": "Parameter",
"description": u"Masse droit vin, vin mousseux, cidres et poirés selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 129},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 130},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 129},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 132},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 133},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 127},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 127},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 127},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 127},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 125},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 117},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 119},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 117},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 114},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 117},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 119},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 118},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 120},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 122},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
"masse_conso_cn_vin": {
"@type": "Parameter",
"description": u"Masse consommation vin, vin mousseux, cidres et poirés selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 7191},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 7419},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 7636},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 8025},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 8451},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 8854},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 9168},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 9476},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 9695},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 9985},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 9933},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 10002},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 10345},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 10461},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 10728},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 11002},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 11387},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 11407},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 11515},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
},
}
alcool_conso_et_vin['children']['biere'] = {
"@type": "Node",
"description": "Pour calculer le taux de taxation implicite sur la bière",
"children": {
"droit_cn_biere": {
"@type": "Parameter",
"description": "Masse droit biere selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 361},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 366},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 364},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 365},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 380},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 359},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 364},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 361},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 370},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 378},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 364},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 396},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 382},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 375}, {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 376},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 375},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 393},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 783},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 897},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
"masse_conso_cn_biere": {
"@type": "Parameter",
"description": u"Masse consommation biere selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 2111},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 2144},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 2186},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 2291},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 2334},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 2290},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 2327},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 2405},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 2554},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 2484},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 2466},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 2486},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 2458},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2287},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2375},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2461},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 2769},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2868},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 3321},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
},
}
alcool_conso_et_vin['children']['alcools_forts'] = {
"@type": "Node",
"description": "Pour calculer le taux de taxation implicite sur alcools forts",
"children": {
"droit_cn_alcools": {
"@type": "Parameter",
"description": "Masse droit alcool selon comptabilité nationale sans droits sur les produits intermediaires et cotisation spéciale alcool fort",
"format": "float",
"values": [
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 1872},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 1957},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 1932},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 1891},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 1908},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 1842},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 1954},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 1990},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2005},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2031},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2111},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 2150},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2225},
# TODO: Problème pour les alcools forts chiffres différents entre les deux bases excel !
],
},
"droit_cn_alcools_total": {
"@type": "Parameter",
"description": u"Masse droit alcool selon comptabilité nationale avec les differents droits",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 2337},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 2350},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 2366},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 2369},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 2385},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 2416}, {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 2514},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 2503},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 2453},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 2409},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 2352},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 2477},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 2516},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2528},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2629},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2734},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 3078},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2718},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 3022},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
"masse_conso_cn_alcools": {
"@type": "Parameter",
"description": u"Masse consommation alcool selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 4893},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 5075},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 5065},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 5123},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 5234},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 5558},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 5721},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 5932},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 5895},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 5967},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 5960},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 6106},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 6142},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 6147},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 6342},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 6618},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 6680},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 6996},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 7022},
],
},
},
}
legislation_json['children']['imposition_indirecte']['children']['alcool_conso_et_vin'] = alcool_conso_et_vin
# Make the change from francs to euros for excise taxes in ticpe
keys_ticpe = legislation_json['children']['imposition_indirecte']['children']['ticpe']['children'].keys()
for element in keys_ticpe:
get_values = \
legislation_json['children']['imposition_indirecte']['children']['ticpe']['children'][element]['values']
for each_value in get_values:
get_character = '{}'.format(each_value['start'])
year = int(get_character[:4])
if year < 2002:
each_value['value'] = each_value['value'] / 6.55957
else:
each_value['value'] = each_value['value']
return legislation_json | Preprocess the legislation parameters to add prices and amounts from national accounts | Below is the instruction that describes the task:
### Input:
Preprocess the legislation parameters to add prices and amounts from national accounts
### Response:
def preprocess_legislation(legislation_json):
'''
Preprocess the legislation parameters to add prices and amounts from national accounts
'''
import os
import pkg_resources
import pandas as pd
# Add fuel prices to the tree
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
prix_annuel_carburants = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'prix',
'prix_annuel_carburants.csv'
), sep =';'
)
prix_annuel_carburants['Date'] = prix_annuel_carburants['Date'].astype(int)
prix_annuel_carburants = prix_annuel_carburants.set_index('Date')
all_values = {}
prix_carburants = {
"@type": "Node",
"description": "prix des carburants en euros par hectolitre",
"children": {},
}
# For super_95_e10, we need to use the price of super_95 between 2009 and 2012 included,
# because we don't have the data. We use super_95 because it is very close and won't affect the results too much
prix_annuel = prix_annuel_carburants['super_95_e10_ttc']
all_values['super_95_e10_ttc'] = []
for year in range(1990, 2009):
values1 = dict()
values1['start'] = u'{}-01-01'.format(year)
values1['stop'] = u'{}-12-31'.format(year)
values1['value'] = prix_annuel.loc[year] * 100
all_values['super_95_e10_ttc'].append(values1)
prix_annuel = prix_annuel_carburants['super_95_ttc']
for year in range(2009, 2013):
values2 = dict()
values2['start'] = u'{}-01-01'.format(year)
values2['stop'] = u'{}-12-31'.format(year)
values2['value'] = prix_annuel.loc[year] * 100
all_values['super_95_e10_ttc'].append(values2)
prix_annuel = prix_annuel_carburants['super_95_e10_ttc']
for year in range(2013, 2015):
values3 = dict()
values3['start'] = u'{}-01-01'.format(year)
values3['stop'] = u'{}-12-31'.format(year)
values3['value'] = prix_annuel.loc[year] * 100
all_values['super_95_e10_ttc'].append(values3)
prix_carburants['children']['super_95_e10_ttc'] = {
"@type": "Parameter",
"description": 'super_95_e10_ttc'.replace('_', ' '),
"format": "float",
"values": all_values['super_95_e10_ttc']
}
for element in ['diesel_ht', 'diesel_ttc', 'super_95_ht', 'super_95_ttc', 'super_98_ht', 'super_98_ttc',
'super_95_e10_ht', 'gplc_ht', 'gplc_ttc', 'super_plombe_ht', 'super_plombe_ttc']:
assert element in prix_annuel_carburants.columns
prix_annuel = prix_annuel_carburants[element]
all_values[element] = []
for year in range(1990, 2015):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = prix_annuel.loc[year] * 100
all_values[element].append(values)
prix_carburants['children'][element] = {
"@type": "Parameter",
"description": element.replace('_', ' '),
"format": "float",
"values": all_values[element]
}
legislation_json['children']['imposition_indirecte']['children']['prix_carburants'] = prix_carburants
# Add the number of vehicles in circulation to the tree
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
parc_annuel_moyen_vp = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'quantites',
'parc_annuel_moyen_vp.csv'
), sep =';'
)
parc_annuel_moyen_vp = parc_annuel_moyen_vp.set_index('Unnamed: 0')
values_parc = {}
parc_vp = {
"@type": "Node",
"description": "taille moyenne du parc automobile en France métropolitaine en milliers de véhicules",
"children": {},
}
for element in ['diesel', 'essence']:
taille_parc = parc_annuel_moyen_vp[element]
values_parc[element] = []
for year in range(1990, 2014):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = taille_parc.loc[year]
values_parc[element].append(values)
parc_vp['children'][element] = {
"@type": "Parameter",
"description": "nombre de véhicules particuliers immatriculés en France à motorisation " + element,
"format": "float",
"values": values_parc[element]
}
legislation_json['children']['imposition_indirecte']['children']['parc_vp'] = parc_vp
# Add the total quantity of fuel consumed per year to the tree
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
quantite_carbu_vp_france = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'quantites',
'quantite_carbu_vp_france.csv'
), sep =';'
)
quantite_carbu_vp_france = quantite_carbu_vp_france.set_index('Unnamed: 0')
values_quantite = {}
quantite_carbu_vp = {
"@type": "Node",
"description": "quantite de carburants consommés en France métropolitaine",
"children": {},
}
for element in ['diesel', 'essence']:
quantite_carburants = quantite_carbu_vp_france[element]
values_quantite[element] = []
for year in range(1990, 2014):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = quantite_carburants.loc[year]
values_quantite[element].append(values)
quantite_carbu_vp['children'][element] = {
"@type": "Parameter",
"description": "consommation totale de " + element + " en France",
"format": "float",
"values": values_quantite[element]
}
legislation_json['children']['imposition_indirecte']['children']['quantite_carbu_vp'] = quantite_carbu_vp
# Add the shares of each type of supercarburant (SP95, SP98, E10, etc.) among supercarburants
default_config_files_directory = os.path.join(
pkg_resources.get_distribution('openfisca_france_indirect_taxation').location)
part_des_types_de_supercarburants = pd.read_csv(
os.path.join(
default_config_files_directory,
'openfisca_france_indirect_taxation',
'assets',
'part_des_types_de_supercarburants.csv'
), sep =';'
)
del part_des_types_de_supercarburants['Source']
part_des_types_de_supercarburants = \
part_des_types_de_supercarburants[part_des_types_de_supercarburants['annee'] > 0].copy()
part_des_types_de_supercarburants['annee'] = part_des_types_de_supercarburants['annee'].astype(int)
part_des_types_de_supercarburants = part_des_types_de_supercarburants.set_index('annee')
# delete share of e_85 because we have no data for its price
# When the sum of all shares is not one, need to multiply each share by the same coefficient
cols = part_des_types_de_supercarburants.columns
for element in cols:
part_des_types_de_supercarburants[element] = (
part_des_types_de_supercarburants[element] /
(part_des_types_de_supercarburants['somme'] - part_des_types_de_supercarburants['sp_e85'])
)
del part_des_types_de_supercarburants['sp_e85']
del part_des_types_de_supercarburants['somme']
cols = part_des_types_de_supercarburants.columns
part_des_types_de_supercarburants['somme'] = 0
for element in cols:
part_des_types_de_supercarburants['somme'] += part_des_types_de_supercarburants[element]
assert (part_des_types_de_supercarburants['somme'] == 1).any(), "The weighting of the shares did not work"
values_part_supercarburants = {}
part_type_supercaburant = {
"@type": "Node",
"description": "part de la consommation totale d'essence de chaque type supercarburant",
"children": {},
}
for element in ['super_plombe', 'sp_95', 'sp_98', 'sp_e10']:
part_par_carburant = part_des_types_de_supercarburants[element]
values_part_supercarburants[element] = []
for year in range(2000, 2015):
values = dict()
values['start'] = u'{}-01-01'.format(year)
values['stop'] = u'{}-12-31'.format(year)
values['value'] = part_par_carburant.loc[year]
values_part_supercarburants[element].append(values)
part_type_supercaburant['children'][element] = {
"@type": "Parameter",
"description": "part de " + element + " dans la consommation totale d'essences",
"format": "float",
"values": values_part_supercarburants[element]
}
legislation_json['children']['imposition_indirecte']['children']['part_type_supercarburants'] = \
part_type_supercaburant
# Add data from comptabilité nationale about alcohol
alcool_conso_et_vin = {
"@type": "Node",
"description": "alcools",
"children": {},
}
alcool_conso_et_vin['children']['vin'] = {
"@type": "Node",
"description": "Pour calculer le taux de taxation implicite sur le vin",
"children": {
"droit_cn_vin": {
"@type": "Parameter",
"description": u"Masse droit vin, vin mousseux, cidres et poirés selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 129},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 130},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 129},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 132},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 133},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 127},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 127},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 127},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 127},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 125},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 117},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 119},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 117},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 114},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 117},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 119},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 118},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 120},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 122},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
"masse_conso_cn_vin": {
"@type": "Parameter",
"description": u"Masse consommation vin, vin mousseux, cidres et poirés selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 7191},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 7419},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 7636},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 8025},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 8451},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 8854},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 9168},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 9476},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 9695},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 9985},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 9933},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 10002},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 10345},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 10461},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 10728},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 11002},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 11387},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 11407},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 11515},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
},
}
alcool_conso_et_vin['children']['biere'] = {
"@type": "Node",
"description": "Pour calculer le taux de taxation implicite sur la bière",
"children": {
"droit_cn_biere": {
"@type": "Parameter",
"description": "Masse droit biere selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 361},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 366},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 364},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 365},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 380},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 359},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 364},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 361},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 370},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 378},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 364},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 396},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 382},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 375}, {'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 376},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 375},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 393},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 783},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 897},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
"masse_conso_cn_biere": {
"@type": "Parameter",
"description": u"Masse consommation biere selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 2111},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 2144},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 2186},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 2291},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 2334},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 2290},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 2327},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 2405},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 2554},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 2484},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 2466},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 2486},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 2458},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2287},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2375},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2461},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 2769},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2868},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 3321},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
},
}
alcool_conso_et_vin['children']['alcools_forts'] = {
"@type": "Node",
"description": "Pour calculer le taux de taxation implicite sur alcools forts",
"children": {
"droit_cn_alcools": {
"@type": "Parameter",
"description": "Masse droit alcool selon comptabilité nationale sans droits sur les produits intermediaires et cotisation spéciale alcool fort",
"format": "float",
"values": [
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 1872},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 1957},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 1932},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 1891},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 1908},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 1842},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 1954},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 1990},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2005},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2031},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2111},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 2150},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2225},
# TODO: Problème pour les alcools forts chiffres différents entre les deux bases excel !
],
},
"droit_cn_alcools_total": {
"@type": "Parameter",
"description": u"Masse droit alcool selon comptabilité nationale avec les differents droits",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 2337},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 2350},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 2366},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 2369},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 2385},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 2416}, {'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 2514},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 2503},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 2453},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 2409},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 2352},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 2477},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 2516},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 2528},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 2629},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 2734},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 3078},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 2718},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 3022},
# {'start': u'2014-01-01', 'stop': u'2014-12-31', 'value': },
],
},
"masse_conso_cn_alcools": {
"@type": "Parameter",
"description": u"Masse consommation alcool selon comptabilité nationale",
"format": "float",
"values": [
{'start': u'1995-01-01', 'stop': u'1995-12-31', 'value': 4893},
{'start': u'1996-01-01', 'stop': u'1996-12-31', 'value': 5075},
{'start': u'1997-01-01', 'stop': u'1997-12-31', 'value': 5065},
{'start': u'1998-01-01', 'stop': u'1998-12-31', 'value': 5123},
{'start': u'1999-01-01', 'stop': u'1999-12-31', 'value': 5234},
{'start': u'2000-01-01', 'stop': u'2000-12-31', 'value': 5558},
{'start': u'2001-01-01', 'stop': u'2001-12-31', 'value': 5721},
{'start': u'2002-01-01', 'stop': u'2002-12-31', 'value': 5932},
{'start': u'2003-01-01', 'stop': u'2003-12-31', 'value': 5895},
{'start': u'2004-01-01', 'stop': u'2004-12-31', 'value': 5967},
{'start': u'2005-01-01', 'stop': u'2005-12-31', 'value': 5960},
{'start': u'2006-01-01', 'stop': u'2006-12-31', 'value': 6106},
{'start': u'2007-01-01', 'stop': u'2007-12-31', 'value': 6142},
{'start': u'2008-01-01', 'stop': u'2008-12-31', 'value': 6147},
{'start': u'2009-01-01', 'stop': u'2009-12-31', 'value': 6342},
{'start': u'2010-01-01', 'stop': u'2010-12-31', 'value': 6618},
{'start': u'2011-01-01', 'stop': u'2011-12-31', 'value': 6680},
{'start': u'2012-01-01', 'stop': u'2012-12-31', 'value': 6996},
{'start': u'2013-01-01', 'stop': u'2013-12-31', 'value': 7022},
],
},
},
}
legislation_json['children']['imposition_indirecte']['children']['alcool_conso_et_vin'] = alcool_conso_et_vin
# Make the change from francs to euros for excise taxes in ticpe
keys_ticpe = legislation_json['children']['imposition_indirecte']['children']['ticpe']['children'].keys()
for element in keys_ticpe:
get_values = \
legislation_json['children']['imposition_indirecte']['children']['ticpe']['children'][element]['values']
for each_value in get_values:
get_character = '{}'.format(each_value['start'])
year = int(get_character[:4])
if year < 2002:
each_value['value'] = each_value['value'] / 6.55957
else:
each_value['value'] = each_value['value']
return legislation_json |
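`preprocess_legislation` repeats one small pattern many times: turn an annual series into a "Parameter" node whose `values` list holds one `{'start', 'stop', 'value'}` entry per year. The sketch below isolates that inner pattern on a made-up series, independent of the OpenFisca legislation tree and the CSV assets; the column name and prices are illustrative only.

```python
import pandas as pd

# Made-up annual series standing in for one column of prix_annuel_carburants.csv.
prix_annuel = pd.Series({2012: 1.50, 2013: 1.25, 2014: 1.75}, name="diesel_ttc")

values = []
for year in range(2012, 2015):
    values.append({
        'start': u'{}-01-01'.format(year),
        'stop': u'{}-12-31'.format(year),
        'value': prix_annuel.loc[year] * 100,  # stored as euros per hectolitre
    })

parameter_node = {
    "@type": "Parameter",
    "description": "diesel ttc",
    "format": "float",
    "values": values,
}
print(parameter_node["values"][0])
# -> {'start': '2012-01-01', 'stop': '2012-12-31', 'value': 150.0}
```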
def contains_no_backer(self, addr):
"""
Tests if the address is contained in any page of paged memory, without considering memory backers.
:param int addr: The address to test.
:return: True if the address is included in one of the pages, False otherwise.
:rtype: bool
"""
for i, p in self._pages.items():
if i * self._page_size <= addr < (i + 1) * self._page_size:
return addr - (i * self._page_size) in p.keys()
return False | Tests if the address is contained in any page of paged memory, without considering memory backers.
:param int addr: The address to test.
:return: True if the address is included in one of the pages, False otherwise.
:rtype: bool | Below is the instruction that describes the task:
### Input:
Tests if the address is contained in any page of paged memory, without considering memory backers.
:param int addr: The address to test.
:return: True if the address is included in one of the pages, False otherwise.
:rtype: bool
### Response:
def contains_no_backer(self, addr):
"""
Tests if the address is contained in any page of paged memory, without considering memory backers.
:param int addr: The address to test.
:return: True if the address is included in one of the pages, False otherwise.
:rtype: bool
"""
for i, p in self._pages.items():
if i * self._page_size <= addr < (i + 1) * self._page_size:
return addr - (i * self._page_size) in p.keys()
return False |
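The containment test in `contains_no_backer` reduces to integer arithmetic on the page index: an address falls in page `addr // page_size`, and it only counts as contained if its in-page offset is actually stored in that page. A self-contained sketch with plain dicts standing in for angr's page objects (page size, indices and contents below are made up):

```python
# Stand-alone sketch of the page-containment check; plain dicts play the role
# of the page objects, and the stored offsets are invented for illustration.
page_size = 0x1000
pages = {
    2: {0x10: b"A", 0x11: b"B"},   # page covering 0x2000-0x2fff, two bytes stored
}

def contains_no_backer(addr):
    page_index, offset = divmod(addr, page_size)
    page = pages.get(page_index)
    return page is not None and offset in page

print(contains_no_backer(0x2010))  # True  - page exists and offset 0x10 is stored
print(contains_no_backer(0x2012))  # False - page exists but offset is not stored
print(contains_no_backer(0x5000))  # False - no page covers this address
```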
def lambda_return(reward, value, length, discount, lambda_):
"""TD-lambda returns."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
sequence = mask * reward + discount * value * (1 - lambda_)
discount = mask * discount * lambda_
sequence = tf.stack([sequence, discount], 2)
return_ = tf.reverse(tf.transpose(tf.scan(
lambda agg, cur: cur[0] + cur[1] * agg,
tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]),
tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1])
return tf.check_numerics(tf.stop_gradient(return_), 'return') | TD-lambda returns. | Below is the instruction that describes the task:
### Input:
TD-lambda returns.
### Response:
def lambda_return(reward, value, length, discount, lambda_):
"""TD-lambda returns."""
timestep = tf.range(reward.shape[1].value)
mask = tf.cast(timestep[None, :] < length[:, None], tf.float32)
sequence = mask * reward + discount * value * (1 - lambda_)
discount = mask * discount * lambda_
sequence = tf.stack([sequence, discount], 2)
return_ = tf.reverse(tf.transpose(tf.scan(
lambda agg, cur: cur[0] + cur[1] * agg,
tf.transpose(tf.reverse(sequence, [1]), [1, 2, 0]),
tf.zeros_like(value[:, -1]), 1, False), [1, 0]), [1])
return tf.check_numerics(tf.stop_gradient(return_), 'return') |
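`lambda_return` builds the TD(lambda) target with a reversed `tf.scan`: the recursion it evaluates is R_t = r_t + discount_t * ((1 - lambda) * v_t + lambda * R_{t+1}), where v_t is the bootstrap value the caller pairs with step t and R past the last step is zero. The NumPy sketch below is a reference implementation of that same backward recursion for a single full-length sequence (the masking of padded steps is omitted); it is an independent check, not the author's TensorFlow graph.

```python
import numpy as np

def lambda_return_np(reward, value, discount, lambda_):
    """Backward TD(lambda) recursion for one full-length sequence.

    R_t = reward_t + discount_t * ((1 - lambda_) * value_t + lambda_ * R_{t+1}),
    with R past the final step taken as zero, mirroring the tf.scan above.
    """
    returns = np.zeros_like(reward, dtype=float)
    agg = 0.0
    for t in reversed(range(len(reward))):
        agg = reward[t] + discount[t] * ((1 - lambda_) * value[t] + lambda_ * agg)
        returns[t] = agg
    return returns

# Toy rollout with made-up numbers.
reward = np.array([1.0, 1.0, 1.0])
value = np.array([0.5, 0.5, 0.5])
discount = np.full(3, 0.99)
print(lambda_return_np(reward, value, discount, lambda_=0.95))
```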
def virtual_machines_list_all(**kwargs):
'''
.. versionadded:: 2019.2.0
List all virtual machines within a subscription.
CLI Example:
.. code-block:: bash
salt-call azurearm_compute.virtual_machines_list_all
'''
result = {}
compconn = __utils__['azurearm.get_client']('compute', **kwargs)
try:
vms = __utils__['azurearm.paged_object_to_list'](
compconn.virtual_machines.list_all()
)
for vm in vms: # pylint: disable=invalid-name
result[vm['name']] = vm
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
result = {'error': str(exc)}
return result | .. versionadded:: 2019.2.0
List all virtual machines within a subscription.
CLI Example:
.. code-block:: bash
salt-call azurearm_compute.virtual_machines_list_all | Below is the instruction that describes the task:
### Input:
.. versionadded:: 2019.2.0
List all virtual machines within a subscription.
CLI Example:
.. code-block:: bash
salt-call azurearm_compute.virtual_machines_list_all
### Response:
def virtual_machines_list_all(**kwargs):
'''
.. versionadded:: 2019.2.0
List all virtual machines within a subscription.
CLI Example:
.. code-block:: bash
salt-call azurearm_compute.virtual_machines_list_all
'''
result = {}
compconn = __utils__['azurearm.get_client']('compute', **kwargs)
try:
vms = __utils__['azurearm.paged_object_to_list'](
compconn.virtual_machines.list_all()
)
for vm in vms: # pylint: disable=invalid-name
result[vm['name']] = vm
except CloudError as exc:
__utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs)
result = {'error': str(exc)}
return result |
def synthesize(vers, opts):
""" Synthesize a capability enabled version response
This is a very limited emulation for relatively recent feature sets
"""
parsed_version = parse_version(vers["version"])
vers["capabilities"] = {}
for name in opts["optional"]:
vers["capabilities"][name] = check(parsed_version, name)
failed = False # noqa: F841 T25377293 Grandfathered in
for name in opts["required"]:
have = check(parsed_version, name)
vers["capabilities"][name] = have
if not have:
vers["error"] = (
"client required capability `"
+ name
+ "` is not supported by this server"
)
return vers | Synthesize a capability enabled version response
This is a very limited emulation for relatively recent feature sets | Below is the instruction that describes the task:
### Input:
Synthesize a capability enabled version response
This is a very limited emulation for relatively recent feature sets
### Response:
def synthesize(vers, opts):
""" Synthesize a capability enabled version response
This is a very limited emulation for relatively recent feature sets
"""
parsed_version = parse_version(vers["version"])
vers["capabilities"] = {}
for name in opts["optional"]:
vers["capabilities"][name] = check(parsed_version, name)
failed = False # noqa: F841 T25377293 Grandfathered in
for name in opts["required"]:
have = check(parsed_version, name)
vers["capabilities"][name] = have
if not have:
vers["error"] = (
"client required capability `"
+ name
+ "` is not supported by this server"
)
return vers |
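For the `synthesize` row above, the only structure it relies on is `vers["version"]`, `opts["optional"]` and `opts["required"]`; it fills `vers["capabilities"]` and sets `vers["error"]` when a required capability is missing. Below is a usage sketch of that calling convention, assuming the `synthesize` definition above is in scope; `parse_version`/`check` are hypothetical stand-ins for the module's real helpers, and the capability names and version thresholds are illustrative only.

```python
# Hypothetical stand-ins for the module's parse_version() / check():
# here a capability is "supported" once the server version reaches a threshold.
_MINIMUM = {"relative_root": (3, 3, 0), "wildmatch": (3, 7, 0)}

def parse_version(vstr):
    return tuple(int(part) for part in vstr.split("."))

def check(parsed_version, name):
    return parsed_version >= _MINIMUM.get(name, (0, 0, 0))

vers = {"version": "3.5.0"}
opts = {"optional": ["relative_root"], "required": ["wildmatch"]}
print(synthesize(vers, opts))
# -> {'version': '3.5.0',
#     'capabilities': {'relative_root': True, 'wildmatch': False},
#     'error': 'client required capability `wildmatch` is not supported by this server'}
```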
def sliver_reader(filename_end_mask="*[0-9].mhd", sliver_reference_dir="~/data/medical/orig/sliver07/training/", read_orig=True, read_seg=False):
"""
Generator for reading sliver data from directory structure.
:param filename_end_mask: file selection can be controlled with this parameter
:param sliver_reference_dir: directory with sliver .mhd and .raw files
:param read_orig: read image data if is set True
:param read_seg: read segmentation data if is set True
:return: numeric_label, vs_mm, oname, orig_data, rname, ref_data
"""
sliver_reference_dir = op.expanduser(sliver_reference_dir)
orig_fnames = glob.glob(sliver_reference_dir + "*orig" + filename_end_mask)
ref_fnames = glob.glob(sliver_reference_dir + "*seg"+ filename_end_mask)
orig_fnames.sort()
ref_fnames.sort()
output = []
for i in range(0, len(orig_fnames)):
oname = orig_fnames[i]
rname = ref_fnames[i]
vs_mm = None
ref_data= None
orig_data = None
if read_orig:
orig_data, metadata = io3d.datareader.read(oname)
vs_mm = metadata['voxelsize_mm']
if read_seg:
ref_data, metadata = io3d.datareader.read(rname)
vs_mm = metadata['voxelsize_mm']
import re
numeric_label = re.search(r".*g(\d+)", oname).group(1)
out = (numeric_label, vs_mm, oname, orig_data, rname, ref_data)
yield out | Generator for reading sliver data from directory structure.
:param filename_end_mask: file selection can be controlled with this parameter
:param sliver_reference_dir: directory with sliver .mhd and .raw files
:param read_orig: read image data if is set True
:param read_seg: read segmentation data if is set True
:return: numeric_label, vs_mm, oname, orig_data, rname, ref_data | Below is the instruction that describes the task:
### Input:
Generator for reading sliver data from directory structure.
:param filename_end_mask: file selection can be controlled with this parameter
:param sliver_reference_dir: directory with sliver .mhd and .raw files
:param read_orig: read image data if is set True
:param read_seg: read segmentation data if is set True
:return: numeric_label, vs_mm, oname, orig_data, rname, ref_data
### Response:
def sliver_reader(filename_end_mask="*[0-9].mhd", sliver_reference_dir="~/data/medical/orig/sliver07/training/", read_orig=True, read_seg=False):
"""
Generator for reading sliver data from directory structure.
:param filename_end_mask: file selection can be controlled with this parameter
:param sliver_reference_dir: directory with sliver .mhd and .raw files
:param read_orig: read image data if is set True
:param read_seg: read segmentation data if is set True
:return: numeric_label, vs_mm, oname, orig_data, rname, ref_data
"""
sliver_reference_dir = op.expanduser(sliver_reference_dir)
orig_fnames = glob.glob(sliver_reference_dir + "*orig" + filename_end_mask)
ref_fnames = glob.glob(sliver_reference_dir + "*seg"+ filename_end_mask)
orig_fnames.sort()
ref_fnames.sort()
output = []
for i in range(0, len(orig_fnames)):
oname = orig_fnames[i]
rname = ref_fnames[i]
vs_mm = None
ref_data= None
orig_data = None
if read_orig:
orig_data, metadata = io3d.datareader.read(oname)
vs_mm = metadata['voxelsize_mm']
if read_seg:
ref_data, metadata = io3d.datareader.read(rname)
vs_mm = metadata['voxelsize_mm']
import re
numeric_label = re.search(r".*g(\d+)", oname).group(1)
out = (numeric_label, vs_mm, oname, orig_data, rname, ref_data)
yield out |
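`sliver_reader` is a generator, so no files are read until it is iterated. A typical consumption loop is sketched below; it assumes the function above (and its `io3d` dependency) is importable, that the SLIVER07 training volumes sit in the default directory, and it loads only the reference segmentations.

```python
# Usage sketch for the generator above; the directory layout and flags are
# examples, not the only valid configuration.
for numeric_label, vs_mm, oname, orig_data, rname, ref_data in sliver_reader(
        filename_end_mask="*[0-9].mhd",
        read_orig=False,
        read_seg=True):
    print("case", numeric_label, "voxel size [mm]:", vs_mm)
    print("  reference segmentation:", rname)
```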
def anyword_substring_search_inner(query_word, target_words):
""" return True if ANY target_word matches a query_word
"""
for target_word in target_words:
if(target_word.startswith(query_word)):
return query_word
return False | return True if ANY target_word matches a query_word | Below is the instruction that describes the task:
### Input:
return True if ANY target_word matches a query_word
### Response:
def anyword_substring_search_inner(query_word, target_words):
""" return True if ANY target_word matches a query_word
"""
for target_word in target_words:
if(target_word.startswith(query_word)):
return query_word
return False |
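Two things worth noting about `anyword_substring_search_inner` that its docstring glosses over: it returns the matching `query_word` itself (a truthy string) rather than the literal `True`, and the match is a prefix test via `startswith`, not a general substring search. A tiny usage check, assuming the function above is in scope:

```python
# Assumes anyword_substring_search_inner() from the row above is importable.
print(anyword_substring_search_inner("py", ["python", "java"]))    # 'py'   (truthy string, not True)
print(anyword_substring_search_inner("thon", ["python", "java"]))  # False  (prefix test, not substring)
```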
def find_tf_idf(file_names=['./../test/testdata'],prev_file_path=None, dump_path=None):
'''Function to create a TF-IDF list of dictionaries for a corpus of docs.
If you opt for dumping the data, you can provide a file_path with .tfidfpkl extension(standard made for better understanding)
and also re-generate a new tfidf list which overrides over an old one by mentioning its path.
@Args:
--
file_names : paths of files to be processed on, these files are created using twitter_streaming module.
prev_file_path : path of old .tfidfpkl file, if available. (default=None)
dump_path : directory-path where to dump generated lists.(default=None)
@returns:
--
df : a dict of unique words in corpus,with their document frequency as values.
tf_idf : the generated tf-idf list of dictionaries for mentioned docs.
'''
tf_idf = [] # will hold a dict of word_count for every doc(line in a doc in this case)
df = defaultdict(int)
# this statement is useful for altering existant tf-idf file and adding new docs in itself.(## memory is now the biggest issue)
if prev_file_path:
print(TAG,'modifying over exising file.. @',prev_file_path)
df,tf_idf = pickle.load(open(prev_file_path,'rb'))
prev_doc_count = len(df)
prev_corpus_length = len(tf_idf)
for f in file_names:
# never use 'rb' for textual data, it creates something like, {b'line-inside-the-doc'}
with open(f,'r') as file1:
#create word_count dict for all docs
for line in file1:
wdict = defaultdict(int)
#find the amount of doc a word is in
for word in set(line.split()):
df[word] +=1
#find the count of all words in every doc
for word in line.split():
wdict[word] += 1
tf_idf.append(wdict)
#calculating final TF-IDF values for all words in all docs(line is a doc in this case)
for doc in tf_idf:
for key in doc:
true_idf = math.log(len(tf_idf)/df[key])
true_tf = doc[key]/float(len(doc))
doc[key] = true_tf * true_idf
print(TAG,'Total number of unique words in corpus',len(df),'( '+paint('++'+str(len(df)-prev_doc_count),'g')+' )' if prev_file_path else '')
print(TAG,'Total number of docs in corpus:',len(tf_idf),'( '+paint('++'+str(len(tf_idf)-prev_corpus_length),'g')+' )' if prev_file_path else '')
# dump if a dir-path is given
if dump_path:
if dump_path[-8:] == 'tfidfpkl':
pickle.dump((df,tf_idf),open(dump_path,'wb'),protocol=pickle.HIGHEST_PROTOCOL)
print(TAG,'Dumping TF-IDF vars @',dump_path)
return df,tf_idf | Function to create a TF-IDF list of dictionaries for a corpus of docs.
If you opt for dumping the data, you can provide a file_path with .tfidfpkl extension(standard made for better understanding)
and also re-generate a new tfidf list which overrides over an old one by mentioning its path.
@Args:
--
file_names : paths of files to be processed on, these files are created using twitter_streaming module.
prev_file_path : path of old .tfidfpkl file, if available. (default=None)
dump_path : directory-path where to dump generated lists.(default=None)
@returns:
--
df : a dict of unique words in corpus,with their document frequency as values.
tf_idf : the generated tf-idf list of dictionaries for mentioned docs. | Below is the instruction that describes the task:
### Input:
Function to create a TF-IDF list of dictionaries for a corpus of docs.
If you opt for dumping the data, you can provide a file_path with .tfidfpkl extension(standard made for better understanding)
and also re-generate a new tfidf list which overrides over an old one by mentioning its path.
@Args:
--
file_names : paths of files to be processed on, these files are created using twitter_streaming module.
prev_file_path : path of old .tfidfpkl file, if available. (default=None)
dump_path : directory-path where to dump generated lists.(default=None)
@returns:
--
df : a dict of unique words in corpus,with their document frequency as values.
tf_idf : the generated tf-idf list of dictionaries for mentioned docs.
### Response:
def find_tf_idf(file_names=['./../test/testdata'],prev_file_path=None, dump_path=None):
'''Function to create a TF-IDF list of dictionaries for a corpus of docs.
If you opt for dumping the data, you can provide a file_path with .tfidfpkl extension(standard made for better understanding)
and also re-generate a new tfidf list which overrides over an old one by mentioning its path.
@Args:
--
file_names : paths of files to be processed on, these files are created using twitter_streaming module.
prev_file_path : path of old .tfidfpkl file, if available. (default=None)
dump_path : directory-path where to dump generated lists.(default=None)
@returns:
--
df : a dict of unique words in corpus,with their document frequency as values.
tf_idf : the generated tf-idf list of dictionaries for mentioned docs.
'''
tf_idf = [] # will hold a dict of word_count for every doc(line in a doc in this case)
df = defaultdict(int)
# this statement is useful for altering existant tf-idf file and adding new docs in itself.(## memory is now the biggest issue)
if prev_file_path:
print(TAG,'modifying over exising file.. @',prev_file_path)
df,tf_idf = pickle.load(open(prev_file_path,'rb'))
prev_doc_count = len(df)
prev_corpus_length = len(tf_idf)
for f in file_names:
# never use 'rb' for textual data, it creates something like, {b'line-inside-the-doc'}
with open(f,'r') as file1:
#create word_count dict for all docs
for line in file1:
wdict = defaultdict(int)
#find the amount of doc a word is in
for word in set(line.split()):
df[word] +=1
#find the count of all words in every doc
for word in line.split():
wdict[word] += 1
tf_idf.append(wdict)
#calculating final TF-IDF values for all words in all docs(line is a doc in this case)
for doc in tf_idf:
for key in doc:
true_idf = math.log(len(tf_idf)/df[key])
true_tf = doc[key]/float(len(doc))
doc[key] = true_tf * true_idf
print(TAG,'Total number of unique words in corpus',len(df),'( '+paint('++'+str(len(df)-prev_doc_count),'g')+' )' if prev_file_path else '')
print(TAG,'Total number of docs in corpus:',len(tf_idf),'( '+paint('++'+str(len(tf_idf)-prev_corpus_length),'g')+' )' if prev_file_path else '')
# dump if a dir-path is given
if dump_path:
if dump_path[-8:] == 'tfidfpkl':
pickle.dump((df,tf_idf),open(dump_path,'wb'),protocol=pickle.HIGHEST_PROTOCOL)
print(TAG,'Dumping TF-IDF vars @',dump_path)
return df,tf_idf |
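A worked sketch of the tf-idf arithmetic the entry above applies (tf = raw count / number of distinct words in the doc, idf = log(N / document frequency)); the two-line corpus below is made up purely to illustrate the computation.

import math
from collections import defaultdict

docs = ["the cat sat", "the dog sat down"]    # hypothetical corpus: each string is one doc
df = defaultdict(int)
tf_idf = []
for line in docs:
    for word in set(line.split()):            # document frequency counts a word once per doc
        df[word] += 1
    wdict = defaultdict(int)
    for word in line.split():
        wdict[word] += 1
    tf_idf.append(wdict)
for doc in tf_idf:
    for word in doc:
        doc[word] = (doc[word] / float(len(doc))) * math.log(len(tf_idf) / float(df[word]))
print(df['the'])                              # 2: 'the' occurs in both docs, so its idf (and tf-idf) is 0
print(round(tf_idf[0]['cat'], 3))             # 0.231: (1/3) * log(2/1)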
def cbpdnmd_ystep(k):
"""Do the Y step of the cbpdn stage. The only parameter is the slice
index `k` and there are no return values; all inputs and outputs are
from and to global variables.
"""
if mp_W.shape[0] > 1:
W = mp_W[k]
else:
W = mp_W
AXU0 = mp_DX[k] - mp_S[k] + mp_Z_U0[k]
AXU1 = mp_Z_X[k] + mp_Z_U1[k]
mp_Z_Y0[k] = mp_xrho*AXU0 / (W**2 + mp_xrho)
mp_Z_Y1[k] = sp.prox_l1(AXU1, (mp_lmbda/mp_xrho)) | Do the Y step of the cbpdn stage. The only parameter is the slice
index `k` and there are no return values; all inputs and outputs are
from and to global variables. | Below is the the instruction that describes the task:
### Input:
Do the Y step of the cbpdn stage. The only parameter is the slice
index `k` and there are no return values; all inputs and outputs are
from and to global variables.
### Response:
def cbpdnmd_ystep(k):
"""Do the Y step of the cbpdn stage. The only parameter is the slice
index `k` and there are no return values; all inputs and outputs are
from and to global variables.
"""
if mp_W.shape[0] > 1:
W = mp_W[k]
else:
W = mp_W
AXU0 = mp_DX[k] - mp_S[k] + mp_Z_U0[k]
AXU1 = mp_Z_X[k] + mp_Z_U1[k]
mp_Z_Y0[k] = mp_xrho*AXU0 / (W**2 + mp_xrho)
mp_Z_Y1[k] = sp.prox_l1(AXU1, (mp_lmbda/mp_xrho)) |
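The second update in the entry above is an elementwise l1 proximal operator (soft thresholding); a self-contained NumPy sketch of that operator, standing in for the sp.prox_l1 call used above, looks like this.

import numpy as np

def prox_l1(v, alpha):
    """Elementwise prox of alpha*||.||_1 at v: shrink each entry towards zero by alpha."""
    return np.sign(v) * np.maximum(np.abs(v) - alpha, 0.0)

v = np.array([-1.5, -0.2, 0.0, 0.4, 2.0])
print(prox_l1(v, 0.5))   # entries with |v| <= 0.5 are zeroed, the rest move 0.5 towards zero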
def get(self, sid):
"""
Constructs a SessionContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.proxy.v1.service.session.SessionContext
:rtype: twilio.rest.proxy.v1.service.session.SessionContext
"""
return SessionContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) | Constructs a SessionContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.proxy.v1.service.session.SessionContext
:rtype: twilio.rest.proxy.v1.service.session.SessionContext | Below is the the instruction that describes the task:
### Input:
Constructs a SessionContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.proxy.v1.service.session.SessionContext
:rtype: twilio.rest.proxy.v1.service.session.SessionContext
### Response:
def get(self, sid):
"""
Constructs a SessionContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.proxy.v1.service.session.SessionContext
:rtype: twilio.rest.proxy.v1.service.session.SessionContext
"""
return SessionContext(self._version, service_sid=self._solution['service_sid'], sid=sid, ) |
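A hedged usage sketch of the accessor above, assuming the standard twilio-python client layout; the account SID, auth token, and the service/session SIDs are placeholders, not real identifiers.

from twilio.rest import Client

client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'your_auth_token')        # placeholder credentials
service = client.proxy.services('KSXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')           # placeholder service SID
session_context = service.sessions.get('KCXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX')    # the method shown above
print(session_context.fetch().status)                                           # fetch() pulls the full Session resource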
def choose_colour(self, title="Select Colour", **kwargs):
"""
Show a Colour Chooser dialog
Usage: C{dialog.choose_colour(title="Select Colour")}
@param title: window title for the dialog
@return:
@rtype: C{DialogData(int, Optional[ColourData])}
"""
return_data = self._run_zenity(title, ["--color-selection"], kwargs)
if return_data.successful:
converted_colour = ColourData.from_zenity_tuple_str(return_data.data)
return DialogData(return_data.return_code, converted_colour)
else:
return DialogData(return_data.return_code, None) | Show a Colour Chooser dialog
Usage: C{dialog.choose_colour(title="Select Colour")}
@param title: window title for the dialog
@return:
@rtype: C{DialogData(int, Optional[ColourData])} | Below is the the instruction that describes the task:
### Input:
Show a Colour Chooser dialog
Usage: C{dialog.choose_colour(title="Select Colour")}
@param title: window title for the dialog
@return:
@rtype: C{DialogData(int, Optional[ColourData])}
### Response:
def choose_colour(self, title="Select Colour", **kwargs):
"""
Show a Colour Chooser dialog
Usage: C{dialog.choose_colour(title="Select Colour")}
@param title: window title for the dialog
@return:
@rtype: C{DialogData(int, Optional[ColourData])}
"""
return_data = self._run_zenity(title, ["--color-selection"], kwargs)
if return_data.successful:
converted_colour = ColourData.from_zenity_tuple_str(return_data.data)
return DialogData(return_data.return_code, converted_colour)
else:
return DialogData(return_data.return_code, None) |
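A short usage sketch for the dialog helper above; 'dialog' stands for the scripting object that exposes choose_colour(), and the success check assumes the usual zenity convention of a zero return code on OK.

return_code, colour_data = dialog.choose_colour(title="Pick a highlight colour")
if return_code == 0 and colour_data is not None:
    print("chosen colour:", colour_data)    # a ColourData value parsed from zenity's output
else:
    print("selection cancelled")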
def _validate_first_message(cls, msg):
"""Check the first message matches the expected handshake.
Note:
The handshake is provided as :py:attr:`RTM_HANDSHAKE`.
Arguments:
msg (:py:class:`aiohttp.Message`): The message to validate.
Raises:
:py:class:`SlackApiError`: If the data doesn't match the
expected handshake.
"""
data = cls._unpack_message(msg)
logger.debug(data)
if data != cls.RTM_HANDSHAKE:
raise SlackApiError('Unexpected response: {!r}'.format(data))
logger.info('Joined real-time messaging.') | Check the first message matches the expected handshake.
Note:
The handshake is provided as :py:attr:`RTM_HANDSHAKE`.
Arguments:
msg (:py:class:`aiohttp.Message`): The message to validate.
Raises:
:py:class:`SlackApiError`: If the data doesn't match the
expected handshake. | Below is the the instruction that describes the task:
### Input:
Check the first message matches the expected handshake.
Note:
The handshake is provided as :py:attr:`RTM_HANDSHAKE`.
Arguments:
msg (:py:class:`aiohttp.Message`): The message to validate.
Raises:
:py:class:`SlackApiError`: If the data doesn't match the
expected handshake.
### Response:
def _validate_first_message(cls, msg):
"""Check the first message matches the expected handshake.
Note:
The handshake is provided as :py:attr:`RTM_HANDSHAKE`.
Arguments:
msg (:py:class:`aiohttp.Message`): The message to validate.
Raises:
:py:class:`SlackApiError`: If the data doesn't match the
expected handshake.
"""
data = cls._unpack_message(msg)
logger.debug(data)
if data != cls.RTM_HANDSHAKE:
raise SlackApiError('Unexpected response: {!r}'.format(data))
logger.info('Joined real-time messaging.') |
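A minimal, framework-free sketch of the same handshake check; the RTM_HANDSHAKE constant and the raw JSON payload here are stand-ins for the class attribute and the aiohttp message used above.

import json

RTM_HANDSHAKE = {'type': 'hello'}            # hypothetical expected first payload (Slack's RTM 'hello' event)

class SlackApiError(Exception):
    pass

def validate_first_message(raw):
    data = json.loads(raw)                   # stands in for cls._unpack_message(msg)
    if data != RTM_HANDSHAKE:
        raise SlackApiError('Unexpected response: {!r}'.format(data))

validate_first_message('{"type": "hello"}')  # passes; any other payload raises SlackApiError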
def _validateListedSubdirsExist(self, component):
''' Return true if all the subdirectories which this component lists in
its module.json file exist (although their validity is otherwise
not checked).
If they don't, warning messages are printed.
'''
lib_subdirs = component.getLibs(explicit_only=True)
bin_subdirs = component.getBinaries()
ok = True
for d in lib_subdirs:
if not os.path.exists(os.path.join(component.path, d)):
logger.warning(
"lib directory \"%s\" doesn't exist but is listed in the module.json file of %s", d, component
)
ok = False
for d in bin_subdirs:
if not os.path.exists(os.path.join(component.path, d)):
logger.warning(
"bin directory \"%s\" doesn't exist but is listed in the module.json file of %s", d, component
)
ok = False
return ok | Return true if all the subdirectories which this component lists in
its module.json file exist (although their validity is otherwise
not checked).
If they don't, warning messages are printed. | Below is the the instruction that describes the task:
### Input:
Return true if all the subdirectories which this component lists in
its module.json file exist (although their validity is otherwise
not checked).
If they don't, warning messages are printed.
### Response:
def _validateListedSubdirsExist(self, component):
''' Return true if all the subdirectories which this component lists in
its module.json file exist (although their validity is otherwise
not checked).
If they don't, warning messages are printed.
'''
lib_subdirs = component.getLibs(explicit_only=True)
bin_subdirs = component.getBinaries()
ok = True
for d in lib_subdirs:
if not os.path.exists(os.path.join(component.path, d)):
logger.warning(
"lib directory \"%s\" doesn't exist but is listed in the module.json file of %s", d, component
)
ok = False
for d in bin_subdirs:
if not os.path.exists(os.path.join(component.path, d)):
logger.warning(
"bin directory \"%s\" doesn't exist but is listed in the module.json file of %s", d, component
)
ok = False
return ok |
def _increment_504_stat(self, request):
'''
Increments the 504 stat counters
@param request: The scrapy request in the spider
'''
for key in self.stats_dict:
if key == 'lifetime':
unique = request.url + str(time.time())
self.stats_dict[key].increment(unique)
else:
self.stats_dict[key].increment()
self.logger.debug("Incremented status_code '504' stats") | Increments the 504 stat counters
@param request: The scrapy request in the spider | Below is the the instruction that describes the task:
### Input:
Increments the 504 stat counters
@param request: The scrapy request in the spider
### Response:
def _increment_504_stat(self, request):
'''
Increments the 504 stat counters
@param request: The scrapy request in the spider
'''
for key in self.stats_dict:
if key == 'lifetime':
unique = request.url + str(time.time())
self.stats_dict[key].increment(unique)
else:
self.stats_dict[key].increment()
self.logger.debug("Incremented status_code '504' stats") |
def layer_uri(self, layer_name):
"""Get layer URI.
:param layer_name: The name of the layer to fetch.
:type layer_name: str
:return: The URI to the layer.
:rtype: str
.. versionadded:: 4.0
"""
layers = self.layers()
for layer, extension in product(layers, EXTENSIONS):
one_file = QFileInfo(
self.uri.filePath(layer + '.' + extension))
if one_file.exists():
if one_file.baseName() == layer_name:
return one_file.absoluteFilePath()
else:
return None | Get layer URI.
:param layer_name: The name of the layer to fetch.
:type layer_name: str
:return: The URI to the layer.
:rtype: str
.. versionadded:: 4.0 | Below is the the instruction that describes the task:
### Input:
Get layer URI.
:param layer_name: The name of the layer to fetch.
:type layer_name: str
:return: The URI to the layer.
:rtype: str
.. versionadded:: 4.0
### Response:
def layer_uri(self, layer_name):
"""Get layer URI.
:param layer_name: The name of the layer to fetch.
:type layer_name: str
:return: The URI to the layer.
:rtype: str
.. versionadded:: 4.0
"""
layers = self.layers()
for layer, extension in product(layers, EXTENSIONS):
one_file = QFileInfo(
self.uri.filePath(layer + '.' + extension))
if one_file.exists():
if one_file.baseName() == layer_name:
return one_file.absoluteFilePath()
else:
return None |
def variability_threshold(featuresdir,
outfile,
magbins=DEFAULT_MAGBINS,
maxobjects=None,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
min_lcmad_stdev=5.0,
min_stetj_stdev=2.0,
min_iqr_stdev=2.0,
min_inveta_stdev=2.0,
verbose=True):
'''This generates a list of objects with stetson J, IQR, and 1.0/eta
above some threshold value to select them as potential variable stars.
Use this to pare down the objects to review and put through
period-finding. This does the thresholding per magnitude bin; this should be
better than one single cut through the entire magnitude range. Set the
magnitude bins using the magbins kwarg.
FIXME: implement a voting classifier here. this will choose variables based
on the thresholds in IQR, stetson, and inveta based on weighting carried
over from the variability recovery sims.
Parameters
----------
featuresdir : str
This is the directory containing variability feature pickles created by
:py:func:`astrobase.lcproc.lcpfeatures.parallel_varfeatures` or similar.
outfile : str
This is the output pickle file that will contain all the threshold
information.
magbins : np.array of floats
This sets the magnitude bins to use for calculating thresholds.
maxobjects : int or None
This is the number of objects to process. If None, all objects with
feature pickles in `featuresdir` will be processed.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the thresholds.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the thresholds.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the thresholds.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
min_lcmad_stdev,min_stetj_stdev,min_iqr_stdev,min_inveta_stdev : float or np.array
        These are all the standard deviation multipliers for the distributions of
light curve standard deviation, Stetson J variability index, the light
curve interquartile range, and 1/eta variability index
respectively. These multipliers set the minimum values of these measures
to use for selecting variable stars. If provided as floats, the same
value will be used for all magbins. If provided as np.arrays of `size =
magbins.size - 1`, will be used to apply possibly different sigma cuts
for each magbin.
verbose : bool
If True, will report progress and warn about any problems.
Returns
-------
dict
Contains all of the variability threshold information along with indices
into the array of the object IDs chosen as variables.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
# list of input pickles generated by varfeatures functions above
pklist = glob.glob(os.path.join(featuresdir, 'varfeatures-*.pkl'))
if maxobjects:
pklist = pklist[:maxobjects]
allobjects = {}
for magcol in magcols:
# keep local copies of these so we can fix them independently in case of
# nans
if (isinstance(min_stetj_stdev, list) or
isinstance(min_stetj_stdev, np.ndarray)):
magcol_min_stetj_stdev = min_stetj_stdev[::]
else:
magcol_min_stetj_stdev = min_stetj_stdev
if (isinstance(min_iqr_stdev, list) or
isinstance(min_iqr_stdev, np.ndarray)):
magcol_min_iqr_stdev = min_iqr_stdev[::]
else:
magcol_min_iqr_stdev = min_iqr_stdev
if (isinstance(min_inveta_stdev, list) or
isinstance(min_inveta_stdev, np.ndarray)):
magcol_min_inveta_stdev = min_inveta_stdev[::]
else:
magcol_min_inveta_stdev = min_inveta_stdev
LOGINFO('getting all object sdssr, LC MAD, stet J, IQR, eta...')
# we'll calculate the sigma per magnitude bin, so get the mags as well
allobjects[magcol] = {
'objectid':[],
'sdssr':[],
'lcmad':[],
'stetsonj':[],
'iqr':[],
'eta':[]
}
# fancy progress bar with tqdm if present
if TQDM and verbose:
listiterator = tqdm(pklist)
else:
listiterator = pklist
for pkl in listiterator:
with open(pkl,'rb') as infd:
thisfeatures = pickle.load(infd)
objectid = thisfeatures['objectid']
# the object magnitude
if ('info' in thisfeatures and
thisfeatures['info'] and
'sdssr' in thisfeatures['info']):
if (thisfeatures['info']['sdssr'] and
thisfeatures['info']['sdssr'] > 3.0):
sdssr = thisfeatures['info']['sdssr']
elif (magcol in thisfeatures and
thisfeatures[magcol] and
'median' in thisfeatures[magcol] and
thisfeatures[magcol]['median'] > 3.0):
sdssr = thisfeatures[magcol]['median']
elif (thisfeatures['info']['jmag'] and
thisfeatures['info']['hmag'] and
thisfeatures['info']['kmag']):
sdssr = jhk_to_sdssr(thisfeatures['info']['jmag'],
thisfeatures['info']['hmag'],
thisfeatures['info']['kmag'])
else:
sdssr = np.nan
else:
sdssr = np.nan
# the MAD of the light curve
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['mad']):
lcmad = thisfeatures[magcol]['mad']
else:
lcmad = np.nan
# stetson index
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['stetsonj']):
stetsonj = thisfeatures[magcol]['stetsonj']
else:
stetsonj = np.nan
# IQR
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['mag_iqr']):
iqr = thisfeatures[magcol]['mag_iqr']
else:
iqr = np.nan
# eta
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['eta_normal']):
eta = thisfeatures[magcol]['eta_normal']
else:
eta = np.nan
allobjects[magcol]['objectid'].append(objectid)
allobjects[magcol]['sdssr'].append(sdssr)
allobjects[magcol]['lcmad'].append(lcmad)
allobjects[magcol]['stetsonj'].append(stetsonj)
allobjects[magcol]['iqr'].append(iqr)
allobjects[magcol]['eta'].append(eta)
#
# done with collection of info
#
LOGINFO('finding objects above thresholds per magbin...')
# turn the info into arrays
allobjects[magcol]['objectid'] = np.ravel(np.array(
allobjects[magcol]['objectid']
))
allobjects[magcol]['sdssr'] = np.ravel(np.array(
allobjects[magcol]['sdssr']
))
allobjects[magcol]['lcmad'] = np.ravel(np.array(
allobjects[magcol]['lcmad']
))
allobjects[magcol]['stetsonj'] = np.ravel(np.array(
allobjects[magcol]['stetsonj']
))
allobjects[magcol]['iqr'] = np.ravel(np.array(
allobjects[magcol]['iqr']
))
allobjects[magcol]['eta'] = np.ravel(np.array(
allobjects[magcol]['eta']
))
# only get finite elements everywhere
thisfinind = (
np.isfinite(allobjects[magcol]['sdssr']) &
np.isfinite(allobjects[magcol]['lcmad']) &
np.isfinite(allobjects[magcol]['stetsonj']) &
np.isfinite(allobjects[magcol]['iqr']) &
np.isfinite(allobjects[magcol]['eta'])
)
allobjects[magcol]['objectid'] = allobjects[magcol]['objectid'][
thisfinind
]
allobjects[magcol]['sdssr'] = allobjects[magcol]['sdssr'][thisfinind]
allobjects[magcol]['lcmad'] = allobjects[magcol]['lcmad'][thisfinind]
allobjects[magcol]['stetsonj'] = allobjects[magcol]['stetsonj'][
thisfinind
]
allobjects[magcol]['iqr'] = allobjects[magcol]['iqr'][thisfinind]
allobjects[magcol]['eta'] = allobjects[magcol]['eta'][thisfinind]
# invert eta so we can threshold the same way as the others
allobjects[magcol]['inveta'] = 1.0/allobjects[magcol]['eta']
# do the thresholding by magnitude bin
magbininds = np.digitize(allobjects[magcol]['sdssr'],
magbins)
binned_objectids = []
binned_sdssr = []
binned_sdssr_median = []
binned_lcmad = []
binned_stetsonj = []
binned_iqr = []
binned_inveta = []
binned_count = []
binned_objectids_thresh_stetsonj = []
binned_objectids_thresh_iqr = []
binned_objectids_thresh_inveta = []
binned_objectids_thresh_all = []
binned_lcmad_median = []
binned_lcmad_stdev = []
binned_stetsonj_median = []
binned_stetsonj_stdev = []
binned_inveta_median = []
binned_inveta_stdev = []
binned_iqr_median = []
binned_iqr_stdev = []
# go through all the mag bins and get the thresholds for J, inveta, IQR
for mbinind, magi in zip(np.unique(magbininds),
range(len(magbins)-1)):
thisbinind = np.where(magbininds == mbinind)
thisbin_sdssr_median = (magbins[magi] + magbins[magi+1])/2.0
binned_sdssr_median.append(thisbin_sdssr_median)
thisbin_objectids = allobjects[magcol]['objectid'][thisbinind]
thisbin_sdssr = allobjects[magcol]['sdssr'][thisbinind]
thisbin_lcmad = allobjects[magcol]['lcmad'][thisbinind]
thisbin_stetsonj = allobjects[magcol]['stetsonj'][thisbinind]
thisbin_iqr = allobjects[magcol]['iqr'][thisbinind]
thisbin_inveta = allobjects[magcol]['inveta'][thisbinind]
thisbin_count = thisbin_objectids.size
if thisbin_count > 4:
thisbin_lcmad_median = np.median(thisbin_lcmad)
thisbin_lcmad_stdev = np.median(
np.abs(thisbin_lcmad - thisbin_lcmad_median)
) * 1.483
binned_lcmad_median.append(thisbin_lcmad_median)
binned_lcmad_stdev.append(thisbin_lcmad_stdev)
thisbin_stetsonj_median = np.median(thisbin_stetsonj)
thisbin_stetsonj_stdev = np.median(
np.abs(thisbin_stetsonj - thisbin_stetsonj_median)
) * 1.483
binned_stetsonj_median.append(thisbin_stetsonj_median)
binned_stetsonj_stdev.append(thisbin_stetsonj_stdev)
# now get the objects above the required stdev threshold
if isinstance(magcol_min_stetj_stdev, float):
thisbin_objectids_thresh_stetsonj = thisbin_objectids[
thisbin_stetsonj > (
thisbin_stetsonj_median +
magcol_min_stetj_stdev*thisbin_stetsonj_stdev
)
]
elif (isinstance(magcol_min_stetj_stdev, np.ndarray) or
isinstance(magcol_min_stetj_stdev, list)):
thisbin_min_stetj_stdev = magcol_min_stetj_stdev[magi]
if not np.isfinite(thisbin_min_stetj_stdev):
LOGWARNING('provided threshold stetson J stdev '
'for magbin: %.3f is nan, using 2.0' %
thisbin_sdssr_median)
thisbin_min_stetj_stdev = 2.0
# update the input list/array as well, since we'll be
# saving it to the output dict and using it to plot the
# variability thresholds
magcol_min_stetj_stdev[magi] = 2.0
thisbin_objectids_thresh_stetsonj = thisbin_objectids[
thisbin_stetsonj > (
thisbin_stetsonj_median +
thisbin_min_stetj_stdev*thisbin_stetsonj_stdev
)
]
thisbin_iqr_median = np.median(thisbin_iqr)
thisbin_iqr_stdev = np.median(
np.abs(thisbin_iqr - thisbin_iqr_median)
) * 1.483
binned_iqr_median.append(thisbin_iqr_median)
binned_iqr_stdev.append(thisbin_iqr_stdev)
# get the objects above the required stdev threshold
if isinstance(magcol_min_iqr_stdev, float):
thisbin_objectids_thresh_iqr = thisbin_objectids[
thisbin_iqr > (thisbin_iqr_median +
magcol_min_iqr_stdev*thisbin_iqr_stdev)
]
elif (isinstance(magcol_min_iqr_stdev, np.ndarray) or
isinstance(magcol_min_iqr_stdev, list)):
thisbin_min_iqr_stdev = magcol_min_iqr_stdev[magi]
if not np.isfinite(thisbin_min_iqr_stdev):
LOGWARNING('provided threshold IQR stdev '
'for magbin: %.3f is nan, using 2.0' %
thisbin_sdssr_median)
thisbin_min_iqr_stdev = 2.0
# update the input list/array as well, since we'll be
# saving it to the output dict and using it to plot the
# variability thresholds
magcol_min_iqr_stdev[magi] = 2.0
thisbin_objectids_thresh_iqr = thisbin_objectids[
thisbin_iqr > (thisbin_iqr_median +
thisbin_min_iqr_stdev*thisbin_iqr_stdev)
]
thisbin_inveta_median = np.median(thisbin_inveta)
thisbin_inveta_stdev = np.median(
np.abs(thisbin_inveta - thisbin_inveta_median)
) * 1.483
binned_inveta_median.append(thisbin_inveta_median)
binned_inveta_stdev.append(thisbin_inveta_stdev)
if isinstance(magcol_min_inveta_stdev, float):
thisbin_objectids_thresh_inveta = thisbin_objectids[
thisbin_inveta > (
thisbin_inveta_median +
magcol_min_inveta_stdev*thisbin_inveta_stdev
)
]
elif (isinstance(magcol_min_inveta_stdev, np.ndarray) or
isinstance(magcol_min_inveta_stdev, list)):
thisbin_min_inveta_stdev = magcol_min_inveta_stdev[magi]
if not np.isfinite(thisbin_min_inveta_stdev):
LOGWARNING('provided threshold inveta stdev '
'for magbin: %.3f is nan, using 2.0' %
thisbin_sdssr_median)
thisbin_min_inveta_stdev = 2.0
# update the input list/array as well, since we'll be
# saving it to the output dict and using it to plot the
# variability thresholds
magcol_min_inveta_stdev[magi] = 2.0
thisbin_objectids_thresh_inveta = thisbin_objectids[
thisbin_inveta > (
thisbin_inveta_median +
thisbin_min_inveta_stdev*thisbin_inveta_stdev
)
]
else:
thisbin_objectids_thresh_stetsonj = (
np.array([],dtype=np.unicode_)
)
thisbin_objectids_thresh_iqr = (
np.array([],dtype=np.unicode_)
)
thisbin_objectids_thresh_inveta = (
np.array([],dtype=np.unicode_)
)
#
# done with check for enough objects in the bin
#
# get the intersection of all threshold objects to get objects that
# lie above the threshold for all variable indices
thisbin_objectids_thresh_all = reduce(
np.intersect1d,
(thisbin_objectids_thresh_stetsonj,
thisbin_objectids_thresh_iqr,
thisbin_objectids_thresh_inveta)
)
binned_objectids.append(thisbin_objectids)
binned_sdssr.append(thisbin_sdssr)
binned_lcmad.append(thisbin_lcmad)
binned_stetsonj.append(thisbin_stetsonj)
binned_iqr.append(thisbin_iqr)
binned_inveta.append(thisbin_inveta)
binned_count.append(thisbin_objectids.size)
binned_objectids_thresh_stetsonj.append(
thisbin_objectids_thresh_stetsonj
)
binned_objectids_thresh_iqr.append(
thisbin_objectids_thresh_iqr
)
binned_objectids_thresh_inveta.append(
thisbin_objectids_thresh_inveta
)
binned_objectids_thresh_all.append(
thisbin_objectids_thresh_all
)
#
# done with magbins
#
# update the output dict for this magcol
allobjects[magcol]['magbins'] = magbins
allobjects[magcol]['binned_objectids'] = binned_objectids
allobjects[magcol]['binned_sdssr_median'] = binned_sdssr_median
allobjects[magcol]['binned_sdssr'] = binned_sdssr
allobjects[magcol]['binned_count'] = binned_count
allobjects[magcol]['binned_lcmad'] = binned_lcmad
allobjects[magcol]['binned_lcmad_median'] = binned_lcmad_median
allobjects[magcol]['binned_lcmad_stdev'] = binned_lcmad_stdev
allobjects[magcol]['binned_stetsonj'] = binned_stetsonj
allobjects[magcol]['binned_stetsonj_median'] = binned_stetsonj_median
allobjects[magcol]['binned_stetsonj_stdev'] = binned_stetsonj_stdev
allobjects[magcol]['binned_iqr'] = binned_iqr
allobjects[magcol]['binned_iqr_median'] = binned_iqr_median
allobjects[magcol]['binned_iqr_stdev'] = binned_iqr_stdev
allobjects[magcol]['binned_inveta'] = binned_inveta
allobjects[magcol]['binned_inveta_median'] = binned_inveta_median
allobjects[magcol]['binned_inveta_stdev'] = binned_inveta_stdev
allobjects[magcol]['binned_objectids_thresh_stetsonj'] = (
binned_objectids_thresh_stetsonj
)
allobjects[magcol]['binned_objectids_thresh_iqr'] = (
binned_objectids_thresh_iqr
)
allobjects[magcol]['binned_objectids_thresh_inveta'] = (
binned_objectids_thresh_inveta
)
allobjects[magcol]['binned_objectids_thresh_all'] = (
binned_objectids_thresh_all
)
# get the common selected objects thru all measures
try:
allobjects[magcol]['objectids_all_thresh_all_magbins'] = np.unique(
np.concatenate(
allobjects[magcol]['binned_objectids_thresh_all']
)
)
except ValueError:
LOGWARNING('not enough variable objects matching all thresholds')
allobjects[magcol]['objectids_all_thresh_all_magbins'] = (
np.array([])
)
allobjects[magcol]['objectids_stetsonj_thresh_all_magbins'] = np.unique(
np.concatenate(
allobjects[magcol]['binned_objectids_thresh_stetsonj']
)
)
allobjects[magcol]['objectids_inveta_thresh_all_magbins'] = np.unique(
np.concatenate(allobjects[magcol]['binned_objectids_thresh_inveta'])
)
allobjects[magcol]['objectids_iqr_thresh_all_magbins'] = np.unique(
np.concatenate(allobjects[magcol]['binned_objectids_thresh_iqr'])
)
# turn these into np.arrays for easier plotting if they're lists
if isinstance(min_stetj_stdev, list):
allobjects[magcol]['min_stetj_stdev'] = np.array(
magcol_min_stetj_stdev
)
else:
allobjects[magcol]['min_stetj_stdev'] = magcol_min_stetj_stdev
if isinstance(min_iqr_stdev, list):
allobjects[magcol]['min_iqr_stdev'] = np.array(
magcol_min_iqr_stdev
)
else:
allobjects[magcol]['min_iqr_stdev'] = magcol_min_iqr_stdev
if isinstance(min_inveta_stdev, list):
allobjects[magcol]['min_inveta_stdev'] = np.array(
magcol_min_inveta_stdev
)
else:
allobjects[magcol]['min_inveta_stdev'] = magcol_min_inveta_stdev
# this one doesn't get touched (for now)
allobjects[magcol]['min_lcmad_stdev'] = min_lcmad_stdev
#
# done with all magcols
#
allobjects['magbins'] = magbins
with open(outfile,'wb') as outfd:
pickle.dump(allobjects, outfd, protocol=pickle.HIGHEST_PROTOCOL)
return allobjects | This generates a list of objects with stetson J, IQR, and 1.0/eta
above some threshold value to select them as potential variable stars.
Use this to pare down the objects to review and put through
period-finding. This does the thresholding per magnitude bin; this should be
better than one single cut through the entire magnitude range. Set the
magnitude bins using the magbins kwarg.
FIXME: implement a voting classifier here. this will choose variables based
on the thresholds in IQR, stetson, and inveta based on weighting carried
over from the variability recovery sims.
Parameters
----------
featuresdir : str
This is the directory containing variability feature pickles created by
:py:func:`astrobase.lcproc.lcpfeatures.parallel_varfeatures` or similar.
outfile : str
This is the output pickle file that will contain all the threshold
information.
magbins : np.array of floats
This sets the magnitude bins to use for calculating thresholds.
maxobjects : int or None
This is the number of objects to process. If None, all objects with
feature pickles in `featuresdir` will be processed.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the thresholds.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the thresholds.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the thresholds.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
min_lcmad_stdev,min_stetj_stdev,min_iqr_stdev,min_inveta_stdev : float or np.array
        These are all the standard deviation multipliers for the distributions of
light curve standard deviation, Stetson J variability index, the light
curve interquartile range, and 1/eta variability index
respectively. These multipliers set the minimum values of these measures
to use for selecting variable stars. If provided as floats, the same
value will be used for all magbins. If provided as np.arrays of `size =
magbins.size - 1`, will be used to apply possibly different sigma cuts
for each magbin.
verbose : bool
If True, will report progress and warn about any problems.
Returns
-------
dict
Contains all of the variability threshold information along with indices
into the array of the object IDs chosen as variables. | Below is the the instruction that describes the task:
### Input:
This generates a list of objects with stetson J, IQR, and 1.0/eta
above some threshold value to select them as potential variable stars.
Use this to pare down the objects to review and put through
period-finding. This does the thresholding per magnitude bin; this should be
better than one single cut through the entire magnitude range. Set the
magnitude bins using the magbins kwarg.
FIXME: implement a voting classifier here. this will choose variables based
on the thresholds in IQR, stetson, and inveta based on weighting carried
over from the variability recovery sims.
Parameters
----------
featuresdir : str
This is the directory containing variability feature pickles created by
:py:func:`astrobase.lcproc.lcpfeatures.parallel_varfeatures` or similar.
outfile : str
This is the output pickle file that will contain all the threshold
information.
magbins : np.array of floats
This sets the magnitude bins to use for calculating thresholds.
maxobjects : int or None
This is the number of objects to process. If None, all objects with
feature pickles in `featuresdir` will be processed.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the thresholds.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the thresholds.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the thresholds.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
min_lcmad_stdev,min_stetj_stdev,min_iqr_stdev,min_inveta_stdev : float or np.array
        These are all the standard deviation multipliers for the distributions of
light curve standard deviation, Stetson J variability index, the light
curve interquartile range, and 1/eta variability index
respectively. These multipliers set the minimum values of these measures
to use for selecting variable stars. If provided as floats, the same
value will be used for all magbins. If provided as np.arrays of `size =
magbins.size - 1`, will be used to apply possibly different sigma cuts
for each magbin.
verbose : bool
If True, will report progress and warn about any problems.
Returns
-------
dict
Contains all of the variability threshold information along with indices
into the array of the object IDs chosen as variables.
### Response:
def variability_threshold(featuresdir,
outfile,
magbins=DEFAULT_MAGBINS,
maxobjects=None,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
min_lcmad_stdev=5.0,
min_stetj_stdev=2.0,
min_iqr_stdev=2.0,
min_inveta_stdev=2.0,
verbose=True):
'''This generates a list of objects with stetson J, IQR, and 1.0/eta
above some threshold value to select them as potential variable stars.
Use this to pare down the objects to review and put through
period-finding. This does the thresholding per magnitude bin; this should be
better than one single cut through the entire magnitude range. Set the
magnitude bins using the magbins kwarg.
FIXME: implement a voting classifier here. this will choose variables based
on the thresholds in IQR, stetson, and inveta based on weighting carried
over from the variability recovery sims.
Parameters
----------
featuresdir : str
This is the directory containing variability feature pickles created by
:py:func:`astrobase.lcproc.lcpfeatures.parallel_varfeatures` or similar.
outfile : str
This is the output pickle file that will contain all the threshold
information.
magbins : np.array of floats
This sets the magnitude bins to use for calculating thresholds.
maxobjects : int or None
This is the number of objects to process. If None, all objects with
feature pickles in `featuresdir` will be processed.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the thresholds.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the thresholds.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the thresholds.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
min_lcmad_stdev,min_stetj_stdev,min_iqr_stdev,min_inveta_stdev : float or np.array
        These are all the standard deviation multipliers for the distributions of
light curve standard deviation, Stetson J variability index, the light
curve interquartile range, and 1/eta variability index
respectively. These multipliers set the minimum values of these measures
to use for selecting variable stars. If provided as floats, the same
value will be used for all magbins. If provided as np.arrays of `size =
magbins.size - 1`, will be used to apply possibly different sigma cuts
for each magbin.
verbose : bool
If True, will report progress and warn about any problems.
Returns
-------
dict
Contains all of the variability threshold information along with indices
into the array of the object IDs chosen as variables.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
# list of input pickles generated by varfeatures functions above
pklist = glob.glob(os.path.join(featuresdir, 'varfeatures-*.pkl'))
if maxobjects:
pklist = pklist[:maxobjects]
allobjects = {}
for magcol in magcols:
# keep local copies of these so we can fix them independently in case of
# nans
if (isinstance(min_stetj_stdev, list) or
isinstance(min_stetj_stdev, np.ndarray)):
magcol_min_stetj_stdev = min_stetj_stdev[::]
else:
magcol_min_stetj_stdev = min_stetj_stdev
if (isinstance(min_iqr_stdev, list) or
isinstance(min_iqr_stdev, np.ndarray)):
magcol_min_iqr_stdev = min_iqr_stdev[::]
else:
magcol_min_iqr_stdev = min_iqr_stdev
if (isinstance(min_inveta_stdev, list) or
isinstance(min_inveta_stdev, np.ndarray)):
magcol_min_inveta_stdev = min_inveta_stdev[::]
else:
magcol_min_inveta_stdev = min_inveta_stdev
LOGINFO('getting all object sdssr, LC MAD, stet J, IQR, eta...')
# we'll calculate the sigma per magnitude bin, so get the mags as well
allobjects[magcol] = {
'objectid':[],
'sdssr':[],
'lcmad':[],
'stetsonj':[],
'iqr':[],
'eta':[]
}
# fancy progress bar with tqdm if present
if TQDM and verbose:
listiterator = tqdm(pklist)
else:
listiterator = pklist
for pkl in listiterator:
with open(pkl,'rb') as infd:
thisfeatures = pickle.load(infd)
objectid = thisfeatures['objectid']
# the object magnitude
if ('info' in thisfeatures and
thisfeatures['info'] and
'sdssr' in thisfeatures['info']):
if (thisfeatures['info']['sdssr'] and
thisfeatures['info']['sdssr'] > 3.0):
sdssr = thisfeatures['info']['sdssr']
elif (magcol in thisfeatures and
thisfeatures[magcol] and
'median' in thisfeatures[magcol] and
thisfeatures[magcol]['median'] > 3.0):
sdssr = thisfeatures[magcol]['median']
elif (thisfeatures['info']['jmag'] and
thisfeatures['info']['hmag'] and
thisfeatures['info']['kmag']):
sdssr = jhk_to_sdssr(thisfeatures['info']['jmag'],
thisfeatures['info']['hmag'],
thisfeatures['info']['kmag'])
else:
sdssr = np.nan
else:
sdssr = np.nan
# the MAD of the light curve
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['mad']):
lcmad = thisfeatures[magcol]['mad']
else:
lcmad = np.nan
# stetson index
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['stetsonj']):
stetsonj = thisfeatures[magcol]['stetsonj']
else:
stetsonj = np.nan
# IQR
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['mag_iqr']):
iqr = thisfeatures[magcol]['mag_iqr']
else:
iqr = np.nan
# eta
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['eta_normal']):
eta = thisfeatures[magcol]['eta_normal']
else:
eta = np.nan
allobjects[magcol]['objectid'].append(objectid)
allobjects[magcol]['sdssr'].append(sdssr)
allobjects[magcol]['lcmad'].append(lcmad)
allobjects[magcol]['stetsonj'].append(stetsonj)
allobjects[magcol]['iqr'].append(iqr)
allobjects[magcol]['eta'].append(eta)
#
# done with collection of info
#
LOGINFO('finding objects above thresholds per magbin...')
# turn the info into arrays
allobjects[magcol]['objectid'] = np.ravel(np.array(
allobjects[magcol]['objectid']
))
allobjects[magcol]['sdssr'] = np.ravel(np.array(
allobjects[magcol]['sdssr']
))
allobjects[magcol]['lcmad'] = np.ravel(np.array(
allobjects[magcol]['lcmad']
))
allobjects[magcol]['stetsonj'] = np.ravel(np.array(
allobjects[magcol]['stetsonj']
))
allobjects[magcol]['iqr'] = np.ravel(np.array(
allobjects[magcol]['iqr']
))
allobjects[magcol]['eta'] = np.ravel(np.array(
allobjects[magcol]['eta']
))
# only get finite elements everywhere
thisfinind = (
np.isfinite(allobjects[magcol]['sdssr']) &
np.isfinite(allobjects[magcol]['lcmad']) &
np.isfinite(allobjects[magcol]['stetsonj']) &
np.isfinite(allobjects[magcol]['iqr']) &
np.isfinite(allobjects[magcol]['eta'])
)
allobjects[magcol]['objectid'] = allobjects[magcol]['objectid'][
thisfinind
]
allobjects[magcol]['sdssr'] = allobjects[magcol]['sdssr'][thisfinind]
allobjects[magcol]['lcmad'] = allobjects[magcol]['lcmad'][thisfinind]
allobjects[magcol]['stetsonj'] = allobjects[magcol]['stetsonj'][
thisfinind
]
allobjects[magcol]['iqr'] = allobjects[magcol]['iqr'][thisfinind]
allobjects[magcol]['eta'] = allobjects[magcol]['eta'][thisfinind]
# invert eta so we can threshold the same way as the others
allobjects[magcol]['inveta'] = 1.0/allobjects[magcol]['eta']
# do the thresholding by magnitude bin
magbininds = np.digitize(allobjects[magcol]['sdssr'],
magbins)
binned_objectids = []
binned_sdssr = []
binned_sdssr_median = []
binned_lcmad = []
binned_stetsonj = []
binned_iqr = []
binned_inveta = []
binned_count = []
binned_objectids_thresh_stetsonj = []
binned_objectids_thresh_iqr = []
binned_objectids_thresh_inveta = []
binned_objectids_thresh_all = []
binned_lcmad_median = []
binned_lcmad_stdev = []
binned_stetsonj_median = []
binned_stetsonj_stdev = []
binned_inveta_median = []
binned_inveta_stdev = []
binned_iqr_median = []
binned_iqr_stdev = []
# go through all the mag bins and get the thresholds for J, inveta, IQR
for mbinind, magi in zip(np.unique(magbininds),
range(len(magbins)-1)):
thisbinind = np.where(magbininds == mbinind)
thisbin_sdssr_median = (magbins[magi] + magbins[magi+1])/2.0
binned_sdssr_median.append(thisbin_sdssr_median)
thisbin_objectids = allobjects[magcol]['objectid'][thisbinind]
thisbin_sdssr = allobjects[magcol]['sdssr'][thisbinind]
thisbin_lcmad = allobjects[magcol]['lcmad'][thisbinind]
thisbin_stetsonj = allobjects[magcol]['stetsonj'][thisbinind]
thisbin_iqr = allobjects[magcol]['iqr'][thisbinind]
thisbin_inveta = allobjects[magcol]['inveta'][thisbinind]
thisbin_count = thisbin_objectids.size
if thisbin_count > 4:
thisbin_lcmad_median = np.median(thisbin_lcmad)
thisbin_lcmad_stdev = np.median(
np.abs(thisbin_lcmad - thisbin_lcmad_median)
) * 1.483
binned_lcmad_median.append(thisbin_lcmad_median)
binned_lcmad_stdev.append(thisbin_lcmad_stdev)
thisbin_stetsonj_median = np.median(thisbin_stetsonj)
thisbin_stetsonj_stdev = np.median(
np.abs(thisbin_stetsonj - thisbin_stetsonj_median)
) * 1.483
binned_stetsonj_median.append(thisbin_stetsonj_median)
binned_stetsonj_stdev.append(thisbin_stetsonj_stdev)
# now get the objects above the required stdev threshold
if isinstance(magcol_min_stetj_stdev, float):
thisbin_objectids_thresh_stetsonj = thisbin_objectids[
thisbin_stetsonj > (
thisbin_stetsonj_median +
magcol_min_stetj_stdev*thisbin_stetsonj_stdev
)
]
elif (isinstance(magcol_min_stetj_stdev, np.ndarray) or
isinstance(magcol_min_stetj_stdev, list)):
thisbin_min_stetj_stdev = magcol_min_stetj_stdev[magi]
if not np.isfinite(thisbin_min_stetj_stdev):
LOGWARNING('provided threshold stetson J stdev '
'for magbin: %.3f is nan, using 2.0' %
thisbin_sdssr_median)
thisbin_min_stetj_stdev = 2.0
# update the input list/array as well, since we'll be
# saving it to the output dict and using it to plot the
# variability thresholds
magcol_min_stetj_stdev[magi] = 2.0
thisbin_objectids_thresh_stetsonj = thisbin_objectids[
thisbin_stetsonj > (
thisbin_stetsonj_median +
thisbin_min_stetj_stdev*thisbin_stetsonj_stdev
)
]
thisbin_iqr_median = np.median(thisbin_iqr)
thisbin_iqr_stdev = np.median(
np.abs(thisbin_iqr - thisbin_iqr_median)
) * 1.483
binned_iqr_median.append(thisbin_iqr_median)
binned_iqr_stdev.append(thisbin_iqr_stdev)
# get the objects above the required stdev threshold
if isinstance(magcol_min_iqr_stdev, float):
thisbin_objectids_thresh_iqr = thisbin_objectids[
thisbin_iqr > (thisbin_iqr_median +
magcol_min_iqr_stdev*thisbin_iqr_stdev)
]
elif (isinstance(magcol_min_iqr_stdev, np.ndarray) or
isinstance(magcol_min_iqr_stdev, list)):
thisbin_min_iqr_stdev = magcol_min_iqr_stdev[magi]
if not np.isfinite(thisbin_min_iqr_stdev):
LOGWARNING('provided threshold IQR stdev '
'for magbin: %.3f is nan, using 2.0' %
thisbin_sdssr_median)
thisbin_min_iqr_stdev = 2.0
# update the input list/array as well, since we'll be
# saving it to the output dict and using it to plot the
# variability thresholds
magcol_min_iqr_stdev[magi] = 2.0
thisbin_objectids_thresh_iqr = thisbin_objectids[
thisbin_iqr > (thisbin_iqr_median +
thisbin_min_iqr_stdev*thisbin_iqr_stdev)
]
thisbin_inveta_median = np.median(thisbin_inveta)
thisbin_inveta_stdev = np.median(
np.abs(thisbin_inveta - thisbin_inveta_median)
) * 1.483
binned_inveta_median.append(thisbin_inveta_median)
binned_inveta_stdev.append(thisbin_inveta_stdev)
if isinstance(magcol_min_inveta_stdev, float):
thisbin_objectids_thresh_inveta = thisbin_objectids[
thisbin_inveta > (
thisbin_inveta_median +
magcol_min_inveta_stdev*thisbin_inveta_stdev
)
]
elif (isinstance(magcol_min_inveta_stdev, np.ndarray) or
isinstance(magcol_min_inveta_stdev, list)):
thisbin_min_inveta_stdev = magcol_min_inveta_stdev[magi]
if not np.isfinite(thisbin_min_inveta_stdev):
LOGWARNING('provided threshold inveta stdev '
'for magbin: %.3f is nan, using 2.0' %
thisbin_sdssr_median)
thisbin_min_inveta_stdev = 2.0
# update the input list/array as well, since we'll be
# saving it to the output dict and using it to plot the
# variability thresholds
magcol_min_inveta_stdev[magi] = 2.0
thisbin_objectids_thresh_inveta = thisbin_objectids[
thisbin_inveta > (
thisbin_inveta_median +
thisbin_min_inveta_stdev*thisbin_inveta_stdev
)
]
else:
thisbin_objectids_thresh_stetsonj = (
np.array([],dtype=np.unicode_)
)
thisbin_objectids_thresh_iqr = (
np.array([],dtype=np.unicode_)
)
thisbin_objectids_thresh_inveta = (
np.array([],dtype=np.unicode_)
)
#
# done with check for enough objects in the bin
#
# get the intersection of all threshold objects to get objects that
# lie above the threshold for all variable indices
thisbin_objectids_thresh_all = reduce(
np.intersect1d,
(thisbin_objectids_thresh_stetsonj,
thisbin_objectids_thresh_iqr,
thisbin_objectids_thresh_inveta)
)
binned_objectids.append(thisbin_objectids)
binned_sdssr.append(thisbin_sdssr)
binned_lcmad.append(thisbin_lcmad)
binned_stetsonj.append(thisbin_stetsonj)
binned_iqr.append(thisbin_iqr)
binned_inveta.append(thisbin_inveta)
binned_count.append(thisbin_objectids.size)
binned_objectids_thresh_stetsonj.append(
thisbin_objectids_thresh_stetsonj
)
binned_objectids_thresh_iqr.append(
thisbin_objectids_thresh_iqr
)
binned_objectids_thresh_inveta.append(
thisbin_objectids_thresh_inveta
)
binned_objectids_thresh_all.append(
thisbin_objectids_thresh_all
)
#
# done with magbins
#
# update the output dict for this magcol
allobjects[magcol]['magbins'] = magbins
allobjects[magcol]['binned_objectids'] = binned_objectids
allobjects[magcol]['binned_sdssr_median'] = binned_sdssr_median
allobjects[magcol]['binned_sdssr'] = binned_sdssr
allobjects[magcol]['binned_count'] = binned_count
allobjects[magcol]['binned_lcmad'] = binned_lcmad
allobjects[magcol]['binned_lcmad_median'] = binned_lcmad_median
allobjects[magcol]['binned_lcmad_stdev'] = binned_lcmad_stdev
allobjects[magcol]['binned_stetsonj'] = binned_stetsonj
allobjects[magcol]['binned_stetsonj_median'] = binned_stetsonj_median
allobjects[magcol]['binned_stetsonj_stdev'] = binned_stetsonj_stdev
allobjects[magcol]['binned_iqr'] = binned_iqr
allobjects[magcol]['binned_iqr_median'] = binned_iqr_median
allobjects[magcol]['binned_iqr_stdev'] = binned_iqr_stdev
allobjects[magcol]['binned_inveta'] = binned_inveta
allobjects[magcol]['binned_inveta_median'] = binned_inveta_median
allobjects[magcol]['binned_inveta_stdev'] = binned_inveta_stdev
allobjects[magcol]['binned_objectids_thresh_stetsonj'] = (
binned_objectids_thresh_stetsonj
)
allobjects[magcol]['binned_objectids_thresh_iqr'] = (
binned_objectids_thresh_iqr
)
allobjects[magcol]['binned_objectids_thresh_inveta'] = (
binned_objectids_thresh_inveta
)
allobjects[magcol]['binned_objectids_thresh_all'] = (
binned_objectids_thresh_all
)
# get the common selected objects thru all measures
try:
allobjects[magcol]['objectids_all_thresh_all_magbins'] = np.unique(
np.concatenate(
allobjects[magcol]['binned_objectids_thresh_all']
)
)
except ValueError:
LOGWARNING('not enough variable objects matching all thresholds')
allobjects[magcol]['objectids_all_thresh_all_magbins'] = (
np.array([])
)
allobjects[magcol]['objectids_stetsonj_thresh_all_magbins'] = np.unique(
np.concatenate(
allobjects[magcol]['binned_objectids_thresh_stetsonj']
)
)
allobjects[magcol]['objectids_inveta_thresh_all_magbins'] = np.unique(
np.concatenate(allobjects[magcol]['binned_objectids_thresh_inveta'])
)
allobjects[magcol]['objectids_iqr_thresh_all_magbins'] = np.unique(
np.concatenate(allobjects[magcol]['binned_objectids_thresh_iqr'])
)
# turn these into np.arrays for easier plotting if they're lists
if isinstance(min_stetj_stdev, list):
allobjects[magcol]['min_stetj_stdev'] = np.array(
magcol_min_stetj_stdev
)
else:
allobjects[magcol]['min_stetj_stdev'] = magcol_min_stetj_stdev
if isinstance(min_iqr_stdev, list):
allobjects[magcol]['min_iqr_stdev'] = np.array(
magcol_min_iqr_stdev
)
else:
allobjects[magcol]['min_iqr_stdev'] = magcol_min_iqr_stdev
if isinstance(min_inveta_stdev, list):
allobjects[magcol]['min_inveta_stdev'] = np.array(
magcol_min_inveta_stdev
)
else:
allobjects[magcol]['min_inveta_stdev'] = magcol_min_inveta_stdev
# this one doesn't get touched (for now)
allobjects[magcol]['min_lcmad_stdev'] = min_lcmad_stdev
#
# done with all magcols
#
allobjects['magbins'] = magbins
with open(outfile,'wb') as outfd:
pickle.dump(allobjects, outfd, protocol=pickle.HIGHEST_PROTOCOL)
return allobjects |
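The per-bin selection above estimates a robust sigma as 1.483 times the median absolute deviation and keeps objects lying more than the requested number of those sigmas above the bin median; a small NumPy sketch of just that step, with made-up Stetson J values for one magnitude bin, follows.

import numpy as np

stetsonj = np.array([0.8, 1.0, 1.1, 0.9, 1.0, 6.5])          # made-up index values for one mag bin
binmed = np.median(stetsonj)                                  # 1.0
robust_sigma = np.median(np.abs(stetsonj - binmed)) * 1.483   # MAD scaled to a Gaussian-equivalent stdev
threshold = binmed + 2.0 * robust_sigma                       # min_stetj_stdev = 2.0
print(stetsonj[stetsonj > threshold])                         # [ 6.5] -> the candidate variable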
def _get_names(self):
"""Get the list of first names.
:return: A list of first name entries.
"""
names = self._read_name_file('names.json')
names = self._compute_weights(names)
return names | Get the list of first names.
:return: A list of first name entries. | Below is the the instruction that describes the task:
### Input:
Get the list of first names.
:return: A list of first name entries.
### Response:
def _get_names(self):
"""Get the list of first names.
:return: A list of first name entries.
"""
names = self._read_name_file('names.json')
names = self._compute_weights(names)
return names |
def main():
""" Get arguments and call the execution function"""
if len(sys.argv) < 6:
print("Usage: %s server_url username password namespace' \
' classname" % sys.argv[0])
print('Using internal defaults')
server_url = SERVER_URL
namespace = TEST_NAMESPACE
username = USERNAME
password = PASSWORD
classname = TEST_CLASS
else:
print('Get from input')
server_url = sys.argv[1]
namespace = sys.argv[2]
username = sys.argv[3]
password = sys.argv[4]
classname = sys.argv[5]
# create the credentials tuple for WBEMConnection
creds = (username, password)
# call the method to execute the request and display results
execute_request(server_url, creds, namespace, classname)
return 0 | Get arguments and call the execution function | Below is the the instruction that describes the task:
### Input:
Get arguments and call the execution function
### Response:
def main():
""" Get arguments and call the execution function"""
if len(sys.argv) < 6:
print("Usage: %s server_url username password namespace' \
' classname" % sys.argv[0])
print('Using internal defaults')
server_url = SERVER_URL
namespace = TEST_NAMESPACE
username = USERNAME
password = PASSWORD
classname = TEST_CLASS
else:
print('Get from input')
server_url = sys.argv[1]
namespace = sys.argv[2]
username = sys.argv[3]
password = sys.argv[4]
classname = sys.argv[5]
# create the credentials tuple for WBEMConnection
creds = (username, password)
# call the method to execute the request and display results
execute_request(server_url, creds, namespace, classname)
return 0 |
def added(self):
'''
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _added(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_added(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['old'] == self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_added(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
return keys
return sorted(_added(self._diffs, prefix='')) | Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation | Below is the the instruction that describes the task:
### Input:
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
### Response:
def added(self):
'''
Returns all keys that have been added.
If the keys are in child dictionaries they will be represented with
. notation
'''
def _added(diffs, prefix):
keys = []
for key in diffs.keys():
if isinstance(diffs[key], dict) and 'old' not in diffs[key]:
keys.extend(_added(diffs[key],
prefix='{0}{1}.'.format(prefix, key)))
elif diffs[key]['old'] == self.NONE_VALUE:
if isinstance(diffs[key]['new'], dict):
keys.extend(
_added(diffs[key]['new'],
prefix='{0}{1}.'.format(prefix, key)))
else:
keys.append('{0}{1}'.format(prefix, key))
return keys
return sorted(_added(self._diffs, prefix='')) |
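A trimmed, standalone re-implementation of the traversal above, only to illustrate the dot-notation output; NONE_VALUE is a stand-in sentinel for the class attribute, and the diffs structure is invented for the example.

NONE_VALUE = object()

def added_keys(diffs, prefix=''):
    keys = []
    for key, val in diffs.items():
        if isinstance(val, dict) and 'old' not in val:
            keys.extend(added_keys(val, prefix='{0}{1}.'.format(prefix, key)))
        elif val['old'] is NONE_VALUE and not isinstance(val['new'], dict):
            keys.append('{0}{1}'.format(prefix, key))
    return keys

diffs = {'app': {'debug': {'old': NONE_VALUE, 'new': True}},   # 'app.debug' was added
         'b': {'old': NONE_VALUE, 'new': 2}}                   # 'b' was added at the top level
print(sorted(added_keys(diffs)))                               # ['app.debug', 'b']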
def replace(path, value, **kwargs):
"""
Replace an existing path. This works on any valid path if the path already
exists. Valid only in :cb_bmeth:`mutate_in`
:param path: The path to replace
:param value: The new value
"""
return _gen_4spec(LCB_SDCMD_REPLACE, path, value,
create_path=False, **kwargs) | Replace an existing path. This works on any valid path if the path already
exists. Valid only in :cb_bmeth:`mutate_in`
:param path: The path to replace
:param value: The new value | Below is the the instruction that describes the task:
### Input:
Replace an existing path. This works on any valid path if the path already
exists. Valid only in :cb_bmeth:`mutate_in`
:param path: The path to replace
:param value: The new value
### Response:
def replace(path, value, **kwargs):
"""
Replace an existing path. This works on any valid path if the path already
exists. Valid only in :cb_bmeth:`mutate_in`
:param path: The path to replace
:param value: The new value
"""
return _gen_4spec(LCB_SDCMD_REPLACE, path, value,
create_path=False, **kwargs) |
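A hedged usage sketch assuming the Couchbase 2.x Python SDK layout this helper comes from; the connection string, credentials, bucket name, and document id are placeholders.

from couchbase.cluster import Cluster, PasswordAuthenticator
import couchbase.subdocument as SD

cluster = Cluster('couchbase://localhost')                       # placeholder connection string
cluster.authenticate(PasswordAuthenticator('user', 'password'))  # placeholder credentials
bucket = cluster.open_bucket('default')

# Swap a single field inside the stored document without rewriting the whole document.
bucket.mutate_in('user::123', SD.replace('profile.email', 'new@example.com'))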
def pyside_load_ui(uifile, base_instance=None):
"""Provide PyQt4.uic.loadUi functionality to PySide
Args:
uifile (str): Absolute path to .ui file
base_instance (QWidget): The widget into which UI widgets are loaded
Note:
pysideuic is required for this to work with PySide.
This seems to work correctly in Maya as well as outside of it as
opposed to other implementations which involve overriding QUiLoader.
Returns:
QWidget: the base instance
"""
form_class, base_class = load_ui_type(uifile)
if not base_instance:
typeName = form_class.__name__
finalType = type(typeName,
(form_class, base_class),
{})
base_instance = finalType()
else:
if not isinstance(base_instance, base_class):
raise RuntimeError(
'The base_instance passed to loadUi does not inherit from'
' needed base type (%s)' % type(base_class))
typeName = type(base_instance).__name__
base_instance.__class__ = type(typeName,
(form_class, type(base_instance)),
{})
base_instance.setupUi(base_instance)
return base_instance | Provide PyQt4.uic.loadUi functionality to PySide
Args:
uifile (str): Absolute path to .ui file
base_instance (QWidget): The widget into which UI widgets are loaded
Note:
pysideuic is required for this to work with PySide.
This seems to work correctly in Maya as well as outside of it as
opposed to other implementations which involve overriding QUiLoader.
Returns:
QWidget: the base instance | Below is the the instruction that describes the task:
### Input:
Provide PyQt4.uic.loadUi functionality to PySide
Args:
uifile (str): Absolute path to .ui file
base_instance (QWidget): The widget into which UI widgets are loaded
Note:
pysideuic is required for this to work with PySide.
This seems to work correctly in Maya as well as outside of it as
opposed to other implementations which involve overriding QUiLoader.
Returns:
QWidget: the base instance
### Response:
def pyside_load_ui(uifile, base_instance=None):
"""Provide PyQt4.uic.loadUi functionality to PySide
Args:
uifile (str): Absolute path to .ui file
base_instance (QWidget): The widget into which UI widgets are loaded
Note:
pysideuic is required for this to work with PySide.
This seems to work correctly in Maya as well as outside of it as
opposed to other implementations which involve overriding QUiLoader.
Returns:
QWidget: the base instance
"""
form_class, base_class = load_ui_type(uifile)
if not base_instance:
typeName = form_class.__name__
finalType = type(typeName,
(form_class, base_class),
{})
base_instance = finalType()
else:
if not isinstance(base_instance, base_class):
raise RuntimeError(
'The base_instance passed to loadUi does not inherit from'
' needed base type (%s)' % type(base_class))
typeName = type(base_instance).__name__
base_instance.__class__ = type(typeName,
(form_class, type(base_instance)),
{})
base_instance.setupUi(base_instance)
return base_instance |
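A hypothetical call site for the helper above; the .ui path is a placeholder, and a QApplication is created only because this sketch assumes it runs outside Maya.
from PySide import QtGui
app = QtGui.QApplication([])
widget = pyside_load_ui('/path/to/main_window.ui')  # placeholder .ui file
widget.show()
app.exec_()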
def find_types(observatory, match=None, trend=None,
connection=None, **connection_kw):
"""Find the available data types for a given observatory.
See also
--------
gwdatafind.http.HTTPConnection.find_types
FflConnection.find_types
for details on the underlying method(s)
"""
return sorted(connection.find_types(observatory, match=match),
key=lambda x: _type_priority(observatory, x, trend=trend)) | Find the available data types for a given observatory.
See also
--------
gwdatafind.http.HTTPConnection.find_types
FflConnection.find_types
for details on the underlying method(s) | Below is the the instruction that describes the task:
### Input:
Find the available data types for a given observatory.
See also
--------
gwdatafind.http.HTTPConnection.find_types
FflConnection.find_types
for details on the underlying method(s)
### Response:
def find_types(observatory, match=None, trend=None,
connection=None, **connection_kw):
"""Find the available data types for a given observatory.
See also
--------
gwdatafind.http.HTTPConnection.find_types
FflConnection.find_types
for details on the underlying method(s)
"""
return sorted(connection.find_types(observatory, match=match),
key=lambda x: _type_priority(observatory, x, trend=trend)) |
def get_scopes_for(self, user_provided_scopes):
""" Returns a list of scopes needed for each of the
scope_helpers provided, by adding the prefix to them if required
:param user_provided_scopes: a list of scopes or scope helpers
:type user_provided_scopes: list or tuple or str
:return: scopes with url prefix added
:rtype: list
:raises ValueError: if unexpected datatype of scopes are passed
"""
if user_provided_scopes is None:
# return all available scopes
user_provided_scopes = [app_part for app_part in self._oauth_scopes]
elif isinstance(user_provided_scopes, str):
user_provided_scopes = [user_provided_scopes]
if not isinstance(user_provided_scopes, (list, tuple)):
raise ValueError(
"'user_provided_scopes' must be a list or a tuple of strings")
scopes = set()
for app_part in user_provided_scopes:
for scope in self._oauth_scopes.get(app_part, [(app_part,)]):
scopes.add(self._prefix_scope(scope))
return list(scopes) | Returns a list of scopes needed for each of the
scope_helpers provided, by adding the prefix to them if required
:param user_provided_scopes: a list of scopes or scope helpers
:type user_provided_scopes: list or tuple or str
:return: scopes with url prefix added
:rtype: list
:raises ValueError: if unexpected datatype of scopes are passed | Below is the the instruction that describes the task:
### Input:
Returns a list of scopes needed for each of the
scope_helpers provided, by adding the prefix to them if required
:param user_provided_scopes: a list of scopes or scope helpers
:type user_provided_scopes: list or tuple or str
:return: scopes with url prefix added
:rtype: list
:raises ValueError: if unexpected datatype of scopes are passed
### Response:
def get_scopes_for(self, user_provided_scopes):
""" Returns a list of scopes needed for each of the
scope_helpers provided, by adding the prefix to them if required
:param user_provided_scopes: a list of scopes or scope helpers
:type user_provided_scopes: list or tuple or str
:return: scopes with url prefix added
:rtype: list
:raises ValueError: if unexpected datatype of scopes are passed
"""
if user_provided_scopes is None:
# return all available scopes
user_provided_scopes = [app_part for app_part in self._oauth_scopes]
elif isinstance(user_provided_scopes, str):
user_provided_scopes = [user_provided_scopes]
if not isinstance(user_provided_scopes, (list, tuple)):
raise ValueError(
"'user_provided_scopes' must be a list or a tuple of strings")
scopes = set()
for app_part in user_provided_scopes:
for scope in self._oauth_scopes.get(app_part, [(app_part,)]):
scopes.add(self._prefix_scope(scope))
return list(scopes) |
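A self-contained sketch of the same scope-expansion idea; the helper table and URL prefix are invented for illustration and are not the library's real protected-scope map.
OAUTH_SCOPES = {'mailbox': [('Mail.Read',)], 'message_send': [('Mail.Send',)]}  # invented
PREFIX = 'https://graph.microsoft.com/'  # assumed prefix for this example only

def expand_scopes(requested):
    # Accept a single helper name or a list, mirroring the method above.
    if isinstance(requested, str):
        requested = [requested]
    scopes = set()
    for part in requested:
        for scope in OAUTH_SCOPES.get(part, [(part,)]):
            scopes.add(PREFIX + scope[0])
    return sorted(scopes)

print(expand_scopes(['mailbox', 'message_send']))
# -> ['https://graph.microsoft.com/Mail.Read', 'https://graph.microsoft.com/Mail.Send']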
def get_route_name(resource_uri):
""" Get route name from RAML resource URI.
:param resource_uri: String representing RAML resource URI.
:returns string: String with route name, which is :resource_uri:
stripped of non-word characters.
"""
resource_uri = resource_uri.strip('/')
resource_uri = re.sub(r'\W', '', resource_uri)
return resource_uri | Get route name from RAML resource URI.
:param resource_uri: String representing RAML resource URI.
:returns string: String with route name, which is :resource_uri:
stripped of non-word characters. | Below is the the instruction that describes the task:
### Input:
Get route name from RAML resource URI.
:param resource_uri: String representing RAML resource URI.
:returns string: String with route name, which is :resource_uri:
stripped of non-word characters.
### Response:
def get_route_name(resource_uri):
""" Get route name from RAML resource URI.
:param resource_uri: String representing RAML resource URI.
:returns string: String with route name, which is :resource_uri:
stripped of non-word characters.
"""
resource_uri = resource_uri.strip('/')
resource_uri = re.sub(r'\W', '', resource_uri)
return resource_uri |
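A quick standalone check of the behaviour described above; the example URI is made up.
import re

def route_name(resource_uri):
    # Same two steps as above: strip slashes, then drop non-word characters.
    resource_uri = resource_uri.strip('/')
    return re.sub(r'\W', '', resource_uri)

print(route_name('/users/{id}/stories'))  # -> 'usersidstories'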
def psd(tachogram_time, tachogram_data):
"""
-----
Brief
-----
Determination of the Power Spectral Density Function (Fourier Domain)
-----------
Description
-----------
The Power Spectral Density Function allows one to perceive the behavior of a given signal in terms of its frequency.
This procedure costs the time resolution of the signal but may be important to extract features in a different
domain apart from the time domain.
This function constructs the Power Spectral Density Function in the frequency domain.
----------
Parameters
----------
tachogram_time : list
X Axis of tachogram.
tachogram_data : list
Y Axis of tachogram.
Returns
-------
out : list, list
Frequency and power axis.
"""
init_time = tachogram_time[0]
fin_time = tachogram_time[-1]
tck = interpol.splrep(tachogram_time, tachogram_data)
interpolation_rate = 4
nn_time_even = numpy.linspace(init_time, fin_time, (fin_time - init_time) * interpolation_rate)
nn_tachogram_even = interpol.splev(nn_time_even, tck)
freq_axis, power_axis = scisignal.welch(nn_tachogram_even, interpolation_rate,
window=scisignal.get_window("hanning",
min(len(nn_tachogram_even),
1000)),
nperseg=min(len(nn_tachogram_even), 1000))
freqs = [round(val, 3) for val in freq_axis if val < 0.5]
power = [round(val, 4) for val, freq in zip(power_axis, freq_axis) if freq < 0.5]
return freqs, power | -----
Brief
-----
Determination of the Power Spectral Density Function (Fourier Domain)
-----------
Description
-----------
The Power Spectral Density Function allows one to perceive the behavior of a given signal in terms of its frequency.
This procedure costs the time resolution of the signal but may be important to extract features in a different
domain apart from the time domain.
This function constructs the Power Spectral Density Function in the frequency domain.
----------
Parameters
----------
tachogram_time : list
X Axis of tachogram.
tachogram_data : list
Y Axis of tachogram.
Returns
-------
out : list, list
Frequency and power axis. | Below is the the instruction that describes the task:
### Input:
-----
Brief
-----
Determination of the Power Spectral Density Function (Fourier Domain)
-----------
Description
-----------
The Power Spectral Density Function allows one to perceive the behavior of a given signal in terms of its frequency.
This procedure costs the time resolution of the signal but may be important to extract features in a different
domain apart from the time domain.
This function constructs the Power Spectral Density Function in the frequency domain.
----------
Parameters
----------
tachogram_time : list
X Axis of tachogram.
tachogram_data : list
Y Axis of tachogram.
Returns
-------
out : list, list
Frequency and power axis.
### Response:
def psd(tachogram_time, tachogram_data):
"""
-----
Brief
-----
Determination of the Power Spectral Density Function (Fourier Domain)
-----------
Description
-----------
The Power Spectral Density Function allows one to perceive the behavior of a given signal in terms of its frequency.
This procedure costs the time resolution of the signal but may be important to extract features in a different
domain apart from the time domain.
This function constructs the Power Spectral Density Function in the frequency domain.
----------
Parameters
----------
tachogram_time : list
X Axis of tachogram.
tachogram_data : list
Y Axis of tachogram.
Returns
-------
out : list, list
Frequency and power axis.
"""
init_time = tachogram_time[0]
fin_time = tachogram_time[-1]
tck = interpol.splrep(tachogram_time, tachogram_data)
interpolation_rate = 4
nn_time_even = numpy.linspace(init_time, fin_time, (fin_time - init_time) * interpolation_rate)
nn_tachogram_even = interpol.splev(nn_time_even, tck)
freq_axis, power_axis = scisignal.welch(nn_tachogram_even, interpolation_rate,
window=scisignal.get_window("hanning",
min(len(nn_tachogram_even),
1000)),
nperseg=min(len(nn_tachogram_even), 1000))
freqs = [round(val, 3) for val in freq_axis if val < 0.5]
power = [round(val, 4) for val, freq in zip(power_axis, freq_axis) if freq < 0.5]
return freqs, power |
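A minimal, self-contained sketch of the same Welch-based estimate on synthetic RR data; the signal values and window choices are invented for illustration.
import numpy
from scipy import interpolate as interpol
from scipy import signal as scisignal
# Synthetic tachogram: RR intervals (s) at cumulative beat times (invented data).
rr = 0.8 + 0.05 * numpy.sin(2 * numpy.pi * 0.1 * numpy.arange(300))
t = numpy.cumsum(rr)
# Resample evenly at 4 Hz, as in the function above, then estimate the PSD.
tck = interpol.splrep(t, rr)
t_even = numpy.linspace(t[0], t[-1], int((t[-1] - t[0]) * 4))
rr_even = interpol.splev(t_even, tck)
freqs, power = scisignal.welch(rr_even, 4, nperseg=min(len(rr_even), 1000))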
def parsedeglat (latstr):
"""Parse a latitude formatted as sexagesimal degrees into an angle.
This function converts a textual representation of a latitude, measured in
degrees, into a floating point value measured in radians. The format of
*latstr* is very limited: it may not have leading or trailing whitespace,
and the components of the sexagesimal representation must be separated by
colons. The input must therefore resemble something like
``"-00:12:34.5"``. A :exc:`ValueError` will be raised if the input does
not resemble this template. Latitudes greater than 90 or less than -90
degrees are not allowed.
"""
deg = _parsesexagesimal (latstr, 'latitude', True)
if abs (deg) > 90:
raise ValueError ('illegal latitude specification: ' + latstr)
return deg * D2R | Parse a latitude formatted as sexagesimal degrees into an angle.
This function converts a textual representation of a latitude, measured in
degrees, into a floating point value measured in radians. The format of
*latstr* is very limited: it may not have leading or trailing whitespace,
and the components of the sexagesimal representation must be separated by
colons. The input must therefore resemble something like
``"-00:12:34.5"``. A :exc:`ValueError` will be raised if the input does
not resemble this template. Latitudes greater than 90 or less than -90
degrees are not allowed. | Below is the the instruction that describes the task:
### Input:
Parse a latitude formatted as sexagesimal degrees into an angle.
This function converts a textual representation of a latitude, measured in
degrees, into a floating point value measured in radians. The format of
*latstr* is very limited: it may not have leading or trailing whitespace,
and the components of the sexagesimal representation must be separated by
colons. The input must therefore resemble something like
``"-00:12:34.5"``. A :exc:`ValueError` will be raised if the input does
not resemble this template. Latitudes greater than 90 or less than -90
degrees are not allowed.
### Response:
def parsedeglat (latstr):
"""Parse a latitude formatted as sexagesimal degrees into an angle.
This function converts a textual representation of a latitude, measured in
degrees, into a floating point value measured in radians. The format of
*latstr* is very limited: it may not have leading or trailing whitespace,
and the components of the sexagesimal representation must be separated by
colons. The input must therefore resemble something like
``"-00:12:34.5"``. A :exc:`ValueError` will be raised if the input does
not resemble this template. Latitudes greater than 90 or less than -90
degrees are not allowed.
"""
deg = _parsesexagesimal (latstr, 'latitude', True)
if abs (deg) > 90:
raise ValueError ('illegal latitude specification: ' + latstr)
return deg * D2R |
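Since _parsesexagesimal is not shown in this record, here is a rough stand-in sketch of the degrees:minutes:seconds conversion it performs, without the stricter validation of the real helper.
import math

def parse_sexagesimal_degrees(text):
    # Stand-in for _parsesexagesimal: '-00:12:34.5' -> decimal degrees.
    sign = -1.0 if text.startswith('-') else 1.0
    d, m, s = (float(part) for part in text.lstrip('+-').split(':'))
    return sign * (d + m / 60.0 + s / 3600.0)

lat_rad = parse_sexagesimal_degrees('-00:12:34.5') * math.pi / 180.0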
def container_rename(name, newname, remote_addr=None,
cert=None, key=None, verify_cert=True):
'''
Rename a container
name :
Name of the container to Rename
newname :
The new name of the container
remote_addr :
A URL to a remote Server, you also have to give cert and key if
you provide remote_addr and it's a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert; this is True by default,
but in most cases you want to turn it off, as LXD
normally uses self-signed certificates.
'''
container = container_get(
name, remote_addr, cert, key, verify_cert, _raw=True
)
if container.status_code == CONTAINER_STATUS_RUNNING:
raise SaltInvocationError(
"Can't rename the running container '{0}'.".format(name)
)
container.rename(newname, wait=True)
return _pylxd_model_to_dict(container) | Rename a container
name :
Name of the container to Rename
newname :
The new name of the container
remote_addr :
A URL to a remote Server, you also have to give cert and key if
you provide remote_addr and it's a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert; this is True by default,
but in most cases you want to turn it off, as LXD
normally uses self-signed certificates.
### Input:
Rename a container
name :
Name of the container to Rename
newname :
The new name of the container
remote_addr :
A URL to a remote Server, you also have to give cert and key if
you provide remote_addr and it's a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert; this is True by default,
but in most cases you want to turn it off, as LXD
normally uses self-signed certificates.
### Response:
def container_rename(name, newname, remote_addr=None,
cert=None, key=None, verify_cert=True):
'''
Rename a container
name :
Name of the container to Rename
newname :
The new name of the container
remote_addr :
A URL to a remote Server, you also have to give cert and key if
you provide remote_addr and it's a TCP Address!
Examples:
https://myserver.lan:8443
/var/lib/mysocket.sock
cert :
PEM Formatted SSL Certificate.
Examples:
~/.config/lxc/client.crt
key :
PEM Formatted SSL Key.
Examples:
~/.config/lxc/client.key
verify_cert : True
Whether to verify the cert; this is True by default,
but in most cases you want to turn it off, as LXD
normally uses self-signed certificates.
'''
container = container_get(
name, remote_addr, cert, key, verify_cert, _raw=True
)
if container.status_code == CONTAINER_STATUS_RUNNING:
raise SaltInvocationError(
"Can't rename the running container '{0}'.".format(name)
)
container.rename(newname, wait=True)
return _pylxd_model_to_dict(container) |
def next(self):
""" Next CapitainsCtsPassage (Interactive CapitainsCtsPassage)
"""
if self.nextId is not None:
return super(CapitainsCtsPassage, self).getTextualNode(subreference=self.nextId) | Next CapitainsCtsPassage (Interactive CapitainsCtsPassage) | Below is the the instruction that describes the task:
### Input:
Next CapitainsCtsPassage (Interactive CapitainsCtsPassage)
### Response:
def next(self):
""" Next CapitainsCtsPassage (Interactive CapitainsCtsPassage)
"""
if self.nextId is not None:
return super(CapitainsCtsPassage, self).getTextualNode(subreference=self.nextId) |
def load_pos_model(lang="en", version="2"):
"""Return a part of speech tagger parameters for `lang` and of version `version`
Args:
lang (string): language code.
version (string): version of the parameters to be used.
"""
src_dir = "pos{}".format(version)
p = locate_resource(src_dir, lang)
fh = _open(p)
return dict(np.load(fh)) | Return a part of speech tagger parameters for `lang` and of version `version`
Args:
lang (string): language code.
version (string): version of the parameters to be used. | Below is the the instruction that describes the task:
### Input:
Return a part of speech tagger parameters for `lang` and of version `version`
Args:
lang (string): language code.
version (string): version of the parameters to be used.
### Response:
def load_pos_model(lang="en", version="2"):
"""Return a part of speech tagger parameters for `lang` and of version `version`
Args:
lang (string): language code.
version (string): version of the parameters to be used.
"""
src_dir = "pos{}".format(version)
p = locate_resource(src_dir, lang)
fh = _open(p)
return dict(np.load(fh)) |
def set_visible_region(self, rectangles, count):
"""Suggests a new visible region to this frame buffer. This region
represents the area of the VM display which is a union of regions of
all top-level windows of the guest operating system running inside the
VM (if the Guest Additions for this system support this
functionality). This information may be used by the frontends to
implement the seamless desktop integration feature.
The address of the provided array must be in the process space of
this IFramebuffer object.
The IFramebuffer implementation must make a copy of the provided
array of rectangles.
Method not yet implemented.
in rectangles of type str
Pointer to the @c RTRECT array.
in count of type int
Number of @c RTRECT elements in the @a rectangles array.
"""
if not isinstance(rectangles, basestring):
raise TypeError("rectangles can only be an instance of type basestring")
if not isinstance(count, baseinteger):
raise TypeError("count can only be an instance of type baseinteger")
self._call("setVisibleRegion",
in_p=[rectangles, count]) | Suggests a new visible region to this frame buffer. This region
represents the area of the VM display which is a union of regions of
all top-level windows of the guest operating system running inside the
VM (if the Guest Additions for this system support this
functionality). This information may be used by the frontends to
implement the seamless desktop integration feature.
The address of the provided array must be in the process space of
this IFramebuffer object.
The IFramebuffer implementation must make a copy of the provided
array of rectangles.
Method not yet implemented.
in rectangles of type str
Pointer to the @c RTRECT array.
in count of type int
Number of @c RTRECT elements in the @a rectangles array. | Below is the the instruction that describes the task:
### Input:
Suggests a new visible region to this frame buffer. This region
represents the area of the VM display which is a union of regions of
all top-level windows of the guest operating system running inside the
VM (if the Guest Additions for this system support this
functionality). This information may be used by the frontends to
implement the seamless desktop integration feature.
The address of the provided array must be in the process space of
this IFramebuffer object.
The IFramebuffer implementation must make a copy of the provided
array of rectangles.
Method not yet implemented.
in rectangles of type str
Pointer to the @c RTRECT array.
in count of type int
Number of @c RTRECT elements in the @a rectangles array.
### Response:
def set_visible_region(self, rectangles, count):
"""Suggests a new visible region to this frame buffer. This region
represents the area of the VM display which is a union of regions of
all top-level windows of the guest operating system running inside the
VM (if the Guest Additions for this system support this
functionality). This information may be used by the frontends to
implement the seamless desktop integration feature.
The address of the provided array must be in the process space of
this IFramebuffer object.
The IFramebuffer implementation must make a copy of the provided
array of rectangles.
Method not yet implemented.
in rectangles of type str
Pointer to the @c RTRECT array.
in count of type int
Number of @c RTRECT elements in the @a rectangles array.
"""
if not isinstance(rectangles, basestring):
raise TypeError("rectangles can only be an instance of type basestring")
if not isinstance(count, baseinteger):
raise TypeError("count can only be an instance of type baseinteger")
self._call("setVisibleRegion",
in_p=[rectangles, count]) |
def save_tc_to_nii(strCsvCnfg, lgcTest=False, lstRat=None, lgcMdlRsp=False,
strPathHrf=None, lgcSaveRam=False):
"""
Save empirical and fitted time courses to nii file format.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file used for pRF fitting.
lgcTest : boolean
Whether this is a test (pytest). If yes, absolute path of pyprf library
will be prepended to config file paths.
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcMdlRsp : boolean
Should the aperture responses for the winner model also be saved?
strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters were used.
lgcSaveRam : boolean
Whether to also save a nii file that uses little RAM.
Notes
-----
This function does not return any arguments but, instead, saves nii files
to disk.
"""
# %% Load configuration settings that were used for fitting
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# if fitting was done with custom hrf, make sure to retrieve results with
# '_hrf' appendix
if strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf'
# If suppressive surround flag is on, make sure to retrieve results with
# '_supsur' appendix
if lstRat is not None:
cfg.strPathOut = cfg.strPathOut + '_supsur'
cfg.strPathMdl = cfg.strPathMdl + '_supsur'
# Append 1.0 as the first entry, which is the key for fitting without
# surround (only centre)
lstRat.insert(0, 1.0)
# %% Load previous pRF fitting results
# Derive paths to the x, y, sigma winner parameters from pyprf_feature
lstWnrPrm = [cfg.strPathOut + '_x_pos.nii.gz',
cfg.strPathOut + '_y_pos.nii.gz',
cfg.strPathOut + '_SD.nii.gz']
# Check if fitting has been performed, i.e. whether parameter files exist
# Throw error message if they do not exist.
errorMsg = 'Files that should have resulted from fitting do not exist. \
\nPlease perform pRF fitting first, calling e.g.: \
\npyprf_feature -config /path/to/my_config_file.csv'
assert os.path.isfile(lstWnrPrm[0]), errorMsg
assert os.path.isfile(lstWnrPrm[1]), errorMsg
assert os.path.isfile(lstWnrPrm[2]), errorMsg
# Load the x, y, sigma winner parameters from pyprf_feature
aryIntGssPrm = load_res_prm(lstWnrPrm,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Load beta parameters estimates, aka weights for time courses
lstPathBeta = [cfg.strPathOut + '_Betas.nii.gz']
aryBetas = load_res_prm(lstPathBeta, lstFlsMsk=[cfg.strPathNiiMask])[0][0]
assert os.path.isfile(lstPathBeta[0]), errorMsg
# Load ratio image, if fitting was obtained with suppressive surround
if lstRat is not None:
lstPathRatio = [cfg.strPathOut + '_Ratios.nii.gz']
aryRatio = load_res_prm(lstPathRatio,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
assert os.path.isfile(lstPathRatio[0]), errorMsg
# Some voxels were excluded because they did not have sufficient mean
# and/or variance - exclude their initial parameters, too
# Get inclusion mask and nii header
aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func(
cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100)
# Apply inclusion mask
aryIntGssPrm = aryIntGssPrm[aryLgcVar, :]
aryBetas = aryBetas[aryLgcVar, :]
if lstRat is not None:
aryRatio = aryRatio[aryLgcVar, :]
# Get array with model parameters that were fitted on a grid
# [x positions, y positions, sigmas]
aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)), cfg.varNum1,
cfg.varExtXmin, cfg.varExtXmax, cfg.varNum2,
cfg.varExtYmin, cfg.varExtYmax,
cfg.varNumPrfSizes, cfg.varPrfStdMin,
cfg.varPrfStdMax, kwUnt='deg',
kwCrd=cfg.strKwCrd)
# Load logical for parameter exclusion in unstimulated area
lgcMdlInc = np.load(cfg.strPathMdl + '_lgcMdlInc.npy')
# Apply logical
aryMdlParams = aryMdlParams[lgcMdlInc, :]
# Get corresponding pRF model time courses
aryPrfTc = np.load(cfg.strPathMdl + '.npy')
# The model time courses will be preprocessed such that they are smoothed
# (temporally) with same factor as the data and that they will be z-scored:
aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp)
if lgcMdlRsp:
aryMdlRsp = np.load(cfg.strPathMdl + '_mdlRsp.npy')
# %% Derive fitted time course models for all voxels
# Initialize array that will collect the fitted time courses
aryFitTc = np.zeros((aryFunc.shape), dtype=np.float32)
# If desired, initialize array that will collect model responses underlying
# the fitted time course
if lgcMdlRsp:
if lstRat is not None:
aryFitMdlRsp = np.zeros((aryIntGssPrm.shape[0], aryMdlRsp.shape[1],
aryMdlRsp.shape[3]),
dtype=np.float32)
else:
aryFitMdlRsp = np.zeros((aryIntGssPrm.shape[0],
aryMdlRsp.shape[1]), dtype=np.float32)
# create vector that allows to check whether every voxel is visited
# exactly once
vecVxlTst = np.zeros(aryIntGssPrm.shape[0])
# Find unique rows of fitted model parameters
aryUnqRows, aryUnqInd = fnd_unq_rws(aryIntGssPrm, return_index=False,
return_inverse=True)
# Loop over all best-fitting model parameter combinations found
print('---Assign models to voxels')
for indRow, vecPrm in enumerate(aryUnqRows):
# Get logical for voxels for which this prm combi was the best
lgcVxl = [aryUnqInd == indRow][0]
if np.all(np.invert(lgcVxl)):
print('---No voxel found')
# Mark those voxels that were visited
vecVxlTst[lgcVxl] += 1
# Get logical index for the model number
# This can only be 1 index, so we directly get 1st entry of array
lgcMdl = np.where(np.isclose(aryMdlParams, vecPrm,
atol=0.01).all(axis=1))[0][0]
# Tell user if no model was found
if lgcMdl is None:
print('---No model found')
# Get model time courses
aryMdlTc = aryPrfTc[lgcMdl, ...]
# Get beta parameter estimates
aryWeights = aryBetas[lgcVxl, :]
# If fitting was done with surround suppression, find ratios for voxels
# and the indices of these ratios in lstRat
if lstRat is not None:
aryVxlRatio = aryRatio[lgcVxl, :]
indRat = [ind for ind, rat1 in enumerate(lstRat) for rat2 in
aryVxlRatio[:, 0] if np.isclose(rat1, rat2)]
indVxl = range(len(indRat))
# Combine model time courses and weights to yield fitted time course
if lstRat is not None:
aryFitTcTmp = np.tensordot(aryWeights, aryMdlTc, axes=([1], [0]))
aryFitTc[lgcVxl, :] = aryFitTcTmp[indVxl, indRat, :]
else:
aryFitTc[lgcVxl, :] = np.dot(aryWeights, aryMdlTc)
# If desired by user, also save the model responses per voxels
if lgcMdlRsp:
# If desired also save the model responses that won
if lstRat is not None:
aryFitMdlRsp[lgcVxl, :] = aryMdlRsp[lgcMdl, :, indRat, :]
else:
aryFitMdlRsp[lgcVxl, :] = aryMdlRsp[lgcMdl, :]
# check that every voxel was visited exactly once
errMsg = 'At least one voxel visited more than once for tc recreation'
assert len(vecVxlTst) == np.sum(vecVxlTst), errMsg
# %% Export preprocessed voxel time courses as nii
# List with name suffices of output images:
lstNiiNames = ['_EmpTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export aryFunc as a single 4D nii file
print('---Save empirical time courses')
export_nii(aryFunc, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
# If desired by user, also save RAM-saving version of nii
if lgcSaveRam:
strPthRamOut = cfg.strPathOut + '_EmpTc_saveRAM' + '.nii.gz'
imgNii = nb.Nifti1Image(np.expand_dims(np.expand_dims(aryFunc, axis=1),
axis=1),
affine=np.eye(4))
nb.save(imgNii, strPthRamOut)
# %% Export fitted time courses and, if desired, model responses as nii
# List with name suffices of output images:
lstNiiNames = ['_FitTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export aryFitTc as a single 4D nii file
print('---Save fitted time courses')
export_nii(aryFitTc, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
if lgcMdlRsp:
# Create full path name
strNpyName = cfg.strPathOut + '_FitMdlRsp' + '.npy'
# Save aryFitMdlRsp as npy file
print('---Save fitted model responses')
np.save(strNpyName, aryFitMdlRsp)
print('------Done.')
# Save the mask so we know which voxels these parameters belonged to
strNpyMskName = cfg.strPathOut + '_FitMdlRsp_Mask' + '.npy'
aryLgcMsk[aryLgcMsk] = aryLgcVar
print('---Save mask for fitted model responses')
np.save(strNpyMskName, aryLgcMsk)
print('------Done.')
# If desired by user, also save RAM-saving version of nii
if lgcSaveRam:
strPthRamOut = cfg.strPathOut + '_FitTc_saveRAM' + '.nii.gz'
imgNii = nb.Nifti1Image(np.expand_dims(np.expand_dims(aryFitTc,
axis=1),
axis=1),
affine=np.eye(4))
nb.save(imgNii, strPthRamOut) | Save empirical and fitted time courses to nii file format.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file used for pRF fitting.
lgcTest : boolean
Whether this is a test (pytest). If yes, absolute path of pyprf library
will be prepended to config file paths.
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcMdlRsp : boolean
Should the aperture responses for the winner model also be saved?
strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters were used.
lgcSaveRam : boolean
Whether to also save a nii file that uses little RAM.
Notes
-----
This function does not return any arguments but, instead, saves nii files
to disk. | Below is the the instruction that describes the task:
### Input:
Save empirical and fitted time courses to nii file format.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file used for pRF fitting.
lgcTest : boolean
Whether this is a test (pytest). If yes, absolute path of pyprf library
will be prepended to config file paths.
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcMdlRsp : boolean
Should the aperture responses for the winner model also be saved?
strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters were used.
lgcSaveRam : boolean
Whether to also save a nii file that uses little RAM.
Notes
-----
This function does not return any arguments but, instead, saves nii files
to disk.
### Response:
def save_tc_to_nii(strCsvCnfg, lgcTest=False, lstRat=None, lgcMdlRsp=False,
strPathHrf=None, lgcSaveRam=False):
"""
Save empirical and fitted time courses to nii file format.
Parameters
----------
strCsvCnfg : str
Absolute file path of config file used for pRF fitting.
lgcTest : boolean
Whether this is a test (pytest). If yes, absolute path of pyprf library
will be prepended to config file paths.
lstRat : None or list
Ratio of size of center to size of suppressive surround.
lgcMdlRsp : boolean
Should the aperture responses for the winner model also be saved?
strPathHrf : str or None
Path to npy file with custom hrf parameters. If None, default
parameters were used.
lgcSaveRam : boolean
Whether to also save a nii file that uses little RAM.
Notes
-----
This function does not return any arguments but, instead, saves nii files
to disk.
"""
# %% Load configuration settings that were used for fitting
# Load config parameters from csv file into dictionary:
dicCnfg = load_config(strCsvCnfg, lgcTest=lgcTest)
# Load config parameters from dictionary into namespace:
cfg = cls_set_config(dicCnfg)
# if fitting was done with custom hrf, make sure to retrieve results with
# '_hrf' appendix
if strPathHrf is not None:
cfg.strPathOut = cfg.strPathOut + '_hrf'
# If suppressive surround flag is on, make sure to retrieve results with
# '_supsur' appendix
if lstRat is not None:
cfg.strPathOut = cfg.strPathOut + '_supsur'
cfg.strPathMdl = cfg.strPathMdl + '_supsur'
# Append 1.0 as the first entry, which is the key for fitting without
# surround (only centre)
lstRat.insert(0, 1.0)
# %% Load previous pRF fitting results
# Derive paths to the x, y, sigma winner parameters from pyprf_feature
lstWnrPrm = [cfg.strPathOut + '_x_pos.nii.gz',
cfg.strPathOut + '_y_pos.nii.gz',
cfg.strPathOut + '_SD.nii.gz']
# Check if fitting has been performed, i.e. whether parameter files exist
# Throw error message if they do not exist.
errorMsg = 'Files that should have resulted from fitting do not exist. \
\nPlease perform pRF fitting first, calling e.g.: \
\npyprf_feature -config /path/to/my_config_file.csv'
assert os.path.isfile(lstWnrPrm[0]), errorMsg
assert os.path.isfile(lstWnrPrm[1]), errorMsg
assert os.path.isfile(lstWnrPrm[2]), errorMsg
# Load the x, y, sigma winner parameters from pyprf_feature
aryIntGssPrm = load_res_prm(lstWnrPrm,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
# Load beta parameters estimates, aka weights for time courses
lstPathBeta = [cfg.strPathOut + '_Betas.nii.gz']
aryBetas = load_res_prm(lstPathBeta, lstFlsMsk=[cfg.strPathNiiMask])[0][0]
assert os.path.isfile(lstPathBeta[0]), errorMsg
# Load ratio image, if fitting was obtained with suppressive surround
if lstRat is not None:
lstPathRatio = [cfg.strPathOut + '_Ratios.nii.gz']
aryRatio = load_res_prm(lstPathRatio,
lstFlsMsk=[cfg.strPathNiiMask])[0][0]
assert os.path.isfile(lstPathRatio[0]), errorMsg
# Some voxels were excluded because they did not have sufficient mean
# and/or variance - exclude their initial parameters, too
# Get inclusion mask and nii header
aryLgcMsk, aryLgcVar, hdrMsk, aryAff, aryFunc, tplNiiShp = prep_func(
cfg.strPathNiiMask, cfg.lstPathNiiFunc, varAvgThr=-100)
# Apply inclusion mask
aryIntGssPrm = aryIntGssPrm[aryLgcVar, :]
aryBetas = aryBetas[aryLgcVar, :]
if lstRat is not None:
aryRatio = aryRatio[aryLgcVar, :]
# Get array with model parameters that were fitted on a grid
# [x positions, y positions, sigmas]
aryMdlParams = crt_mdl_prms((int(cfg.varVslSpcSzeX),
int(cfg.varVslSpcSzeY)), cfg.varNum1,
cfg.varExtXmin, cfg.varExtXmax, cfg.varNum2,
cfg.varExtYmin, cfg.varExtYmax,
cfg.varNumPrfSizes, cfg.varPrfStdMin,
cfg.varPrfStdMax, kwUnt='deg',
kwCrd=cfg.strKwCrd)
# Load logical for parameter exclusion in unstimulated area
lgcMdlInc = np.load(cfg.strPathMdl + '_lgcMdlInc.npy')
# Apply logical
aryMdlParams = aryMdlParams[lgcMdlInc, :]
# Get corresponding pRF model time courses
aryPrfTc = np.load(cfg.strPathMdl + '.npy')
# The model time courses will be preprocessed such that they are smoothed
# (temporally) with same factor as the data and that they will be z-scored:
aryPrfTc = prep_models(aryPrfTc, varSdSmthTmp=cfg.varSdSmthTmp)
if lgcMdlRsp:
aryMdlRsp = np.load(cfg.strPathMdl + '_mdlRsp.npy')
# %% Derive fitted time course models for all voxels
# Initialize array that will collect the fitted time courses
aryFitTc = np.zeros((aryFunc.shape), dtype=np.float32)
# If desired, initialize array that will collect model responses underlying
# the fitted time course
if lgcMdlRsp:
if lstRat is not None:
aryFitMdlRsp = np.zeros((aryIntGssPrm.shape[0], aryMdlRsp.shape[1],
aryMdlRsp.shape[3]),
dtype=np.float32)
else:
aryFitMdlRsp = np.zeros((aryIntGssPrm.shape[0],
aryMdlRsp.shape[1]), dtype=np.float32)
# create vector that allows to check whether every voxel is visited
# exactly once
vecVxlTst = np.zeros(aryIntGssPrm.shape[0])
# Find unique rows of fitted model parameters
aryUnqRows, aryUnqInd = fnd_unq_rws(aryIntGssPrm, return_index=False,
return_inverse=True)
# Loop over all best-fitting model parameter combinations found
print('---Assign models to voxels')
for indRow, vecPrm in enumerate(aryUnqRows):
# Get logical for voxels for which this prm combi was the best
lgcVxl = [aryUnqInd == indRow][0]
if np.all(np.invert(lgcVxl)):
print('---No voxel found')
# Mark those voxels that were visited
vecVxlTst[lgcVxl] += 1
# Get logical index for the model number
# This can only be 1 index, so we directly get 1st entry of array
lgcMdl = np.where(np.isclose(aryMdlParams, vecPrm,
atol=0.01).all(axis=1))[0][0]
# Tell user if no model was found
if lgcMdl is None:
print('---No model found')
# Get model time courses
aryMdlTc = aryPrfTc[lgcMdl, ...]
# Get beta parameter estimates
aryWeights = aryBetas[lgcVxl, :]
# If fitting was done with surround suppression, find ratios for voxels
# and the indices of these ratios in lstRat
if lstRat is not None:
aryVxlRatio = aryRatio[lgcVxl, :]
indRat = [ind for ind, rat1 in enumerate(lstRat) for rat2 in
aryVxlRatio[:, 0] if np.isclose(rat1, rat2)]
indVxl = range(len(indRat))
# Combine model time courses and weights to yield fitted time course
if lstRat is not None:
aryFitTcTmp = np.tensordot(aryWeights, aryMdlTc, axes=([1], [0]))
aryFitTc[lgcVxl, :] = aryFitTcTmp[indVxl, indRat, :]
else:
aryFitTc[lgcVxl, :] = np.dot(aryWeights, aryMdlTc)
# If desired by user, also save the model responses per voxels
if lgcMdlRsp:
# If desired also save the model responses that won
if lstRat is not None:
aryFitMdlRsp[lgcVxl, :] = aryMdlRsp[lgcMdl, :, indRat, :]
else:
aryFitMdlRsp[lgcVxl, :] = aryMdlRsp[lgcMdl, :]
# check that every voxel was visited exactly once
errMsg = 'At least one voxel visited more than once for tc recreation'
assert len(vecVxlTst) == np.sum(vecVxlTst), errMsg
# %% Export preprocessed voxel time courses as nii
# List with name suffices of output images:
lstNiiNames = ['_EmpTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export aryFunc as a single 4D nii file
print('---Save empirical time courses')
export_nii(aryFunc, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
# If desired by user, also save RAM-saving version of nii
if lgcSaveRam:
strPthRamOut = cfg.strPathOut + '_EmpTc_saveRAM' + '.nii.gz'
imgNii = nb.Nifti1Image(np.expand_dims(np.expand_dims(aryFunc, axis=1),
axis=1),
affine=np.eye(4))
nb.save(imgNii, strPthRamOut)
# %% Export fitted time courses and, if desired, model responses as nii
# List with name suffices of output images:
lstNiiNames = ['_FitTc']
# Create full path names from nii file names and output path
lstNiiNames = [cfg.strPathOut + strNii + '.nii.gz' for strNii in
lstNiiNames]
# export aryFitTc as a single 4D nii file
print('---Save fitted time courses')
export_nii(aryFitTc, lstNiiNames, aryLgcMsk, aryLgcVar, tplNiiShp,
aryAff, hdrMsk, outFormat='4D')
print('------Done.')
if lgcMdlRsp:
# Create full path name
strNpyName = cfg.strPathOut + '_FitMdlRsp' + '.npy'
# Save aryFitMdlRsp as npy file
print('---Save fitted model responses')
np.save(strNpyName, aryFitMdlRsp)
print('------Done.')
# Save the mask so we know which voxels these parameters belonged to
strNpyMskName = cfg.strPathOut + '_FitMdlRsp_Mask' + '.npy'
aryLgcMsk[aryLgcMsk] = aryLgcVar
print('---Save mask for fitted model responses')
np.save(strNpyMskName, aryLgcMsk)
print('------Done.')
# If desired by user, also save RAM-saving version of nii
if lgcSaveRam:
strPthRamOut = cfg.strPathOut + '_FitTc_saveRAM' + '.nii.gz'
imgNii = nb.Nifti1Image(np.expand_dims(np.expand_dims(aryFitTc,
axis=1),
axis=1),
affine=np.eye(4))
nb.save(imgNii, strPthRamOut) |
def delete_alias(self, index, name, params=None):
"""
Delete specific alias.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_
:arg index: A comma-separated list of index names (supports wildcards);
use `_all` for all indices
:arg name: A comma-separated list of aliases to delete (supports
wildcards); use `_all` to delete all aliases for the specified
indices.
:arg master_timeout: Specify timeout for connection to master
:arg request_timeout: Explicit timeout for the operation
"""
for param in (index, name):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"DELETE", _make_path(index, "_alias", name), params=params
) | Delete specific alias.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_
:arg index: A comma-separated list of index names (supports wildcards);
use `_all` for all indices
:arg name: A comma-separated list of aliases to delete (supports
wildcards); use `_all` to delete all aliases for the specified
indices.
:arg master_timeout: Specify timeout for connection to master
:arg request_timeout: Explicit timeout for the operation | Below is the the instruction that describes the task:
### Input:
Delete specific alias.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_
:arg index: A comma-separated list of index names (supports wildcards);
use `_all` for all indices
:arg name: A comma-separated list of aliases to delete (supports
wildcards); use `_all` to delete all aliases for the specified
indices.
:arg master_timeout: Specify timeout for connection to master
:arg request_timeout: Explicit timeout for the operation
### Response:
def delete_alias(self, index, name, params=None):
"""
Delete specific alias.
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_
:arg index: A comma-separated list of index names (supports wildcards);
use `_all` for all indices
:arg name: A comma-separated list of aliases to delete (supports
wildcards); use `_all` to delete all aliases for the specified
indices.
:arg master_timeout: Specify timeout for connection to master
:arg request_timeout: Explicit timeout for the operation
"""
for param in (index, name):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"DELETE", _make_path(index, "_alias", name), params=params
) |
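A hedged usage sketch with the elasticsearch-py client; the host, index pattern and alias name are placeholders.
from elasticsearch import Elasticsearch
es = Elasticsearch(['http://localhost:9200'])  # placeholder endpoint
es.indices.delete_alias(index='logs-2019-*', name='logs-current')  # placeholder names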
def send_response(self, body, set_content_type=True):
"""
Serialize and send ``body`` in the response.
:param dict body: the body to serialize
:param bool set_content_type: should the :http:header:`Content-Type`
header be set? Defaults to :data:`True`
"""
settings = get_settings(self.application, force_instance=True)
handler = settings[self.get_response_content_type()]
content_type, data_bytes = handler.to_bytes(body)
if set_content_type:
self.set_header('Content-Type', content_type)
self.add_header('Vary', 'Accept')
self.write(data_bytes) | Serialize and send ``body`` in the response.
:param dict body: the body to serialize
:param bool set_content_type: should the :http:header:`Content-Type`
header be set? Defaults to :data:`True` | Below is the the instruction that describes the task:
### Input:
Serialize and send ``body`` in the response.
:param dict body: the body to serialize
:param bool set_content_type: should the :http:header:`Content-Type`
header be set? Defaults to :data:`True`
### Response:
def send_response(self, body, set_content_type=True):
"""
Serialize and send ``body`` in the response.
:param dict body: the body to serialize
:param bool set_content_type: should the :http:header:`Content-Type`
header be set? Defaults to :data:`True`
"""
settings = get_settings(self.application, force_instance=True)
handler = settings[self.get_response_content_type()]
content_type, data_bytes = handler.to_bytes(body)
if set_content_type:
self.set_header('Content-Type', content_type)
self.add_header('Vary', 'Accept')
self.write(data_bytes) |
def _check_keys(dictionary):
"""
checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries
"""
for key in dictionary:
if isinstance(dictionary[key], matlab.mio5_params.mat_struct):
dictionary[key] = _todict(dictionary[key])
return dictionary | checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries | Below is the the instruction that describes the task:
### Input:
checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries
### Response:
def _check_keys(dictionary):
"""
checks if entries in dictionary are mat-objects. If yes
todict is called to change them to nested dictionaries
"""
for key in dictionary:
if isinstance(dictionary[key], matlab.mio5_params.mat_struct):
dictionary[key] = _todict(dictionary[key])
return dictionary |
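A hedged sketch of the companion _todict helper and a typical loadmat call that produces mat_struct objects; the .mat file name is a placeholder.
import scipy.io as sio
from scipy.io import matlab

def _todict(struct):
    # Recursively turn a mat_struct's fields into a plain dictionary.
    out = {}
    for name in struct._fieldnames:
        value = getattr(struct, name)
        if isinstance(value, matlab.mio5_params.mat_struct):
            value = _todict(value)
        out[name] = value
    return out

# 'experiment.mat' is a placeholder; these options yield mat_struct objects.
data = _check_keys(sio.loadmat('experiment.mat', struct_as_record=False, squeeze_me=True))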
def is_duplicate_of(self, other):
"""Check if spectrum is duplicate of another."""
if super(Spectrum, self).is_duplicate_of(other):
return True
row_matches = 0
for ri, row in enumerate(self.get(self._KEYS.DATA, [])):
lambda1, flux1 = tuple(row[0:2])
if (self._KEYS.DATA not in other or
ri > len(other[self._KEYS.DATA])):
break
lambda2, flux2 = tuple(other[self._KEYS.DATA][ri][0:2])
minlambdalen = min(len(lambda1), len(lambda2))
minfluxlen = min(len(flux1), len(flux2))
if (lambda1[:minlambdalen + 1] == lambda2[:minlambdalen + 1] and
flux1[:minfluxlen + 1] == flux2[:minfluxlen + 1] and
float(flux1[:minfluxlen + 1]) != 0.0):
row_matches += 1
# Five row matches should be enough to be sure spectrum is a dupe.
if row_matches >= 5:
return True
# Matches need to happen in the first 10 rows.
if ri >= 10:
break
return False | Check if spectrum is duplicate of another. | Below is the the instruction that describes the task:
### Input:
Check if spectrum is duplicate of another.
### Response:
def is_duplicate_of(self, other):
"""Check if spectrum is duplicate of another."""
if super(Spectrum, self).is_duplicate_of(other):
return True
row_matches = 0
for ri, row in enumerate(self.get(self._KEYS.DATA, [])):
lambda1, flux1 = tuple(row[0:2])
if (self._KEYS.DATA not in other or
ri > len(other[self._KEYS.DATA])):
break
lambda2, flux2 = tuple(other[self._KEYS.DATA][ri][0:2])
minlambdalen = min(len(lambda1), len(lambda2))
minfluxlen = min(len(flux1), len(flux2))
if (lambda1[:minlambdalen + 1] == lambda2[:minlambdalen + 1] and
flux1[:minfluxlen + 1] == flux2[:minfluxlen + 1] and
float(flux1[:minfluxlen + 1]) != 0.0):
row_matches += 1
# Five row matches should be enough to be sure spectrum is a dupe.
if row_matches >= 5:
return True
# Matches need to happen in the first 10 rows.
if ri >= 10:
break
return False |
def save(self, savefile):
"""Do the TTS API request and write result to file.
Args:
savefile (string): The path and file name to save the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request.
"""
with open(str(savefile), 'wb') as f:
self.write_to_fp(f)
log.debug("Saved to %s", savefile) | Do the TTS API request and write result to file.
Args:
savefile (string): The path and file name to save the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request. | Below is the the instruction that describes the task:
### Input:
Do the TTS API request and write result to file.
Args:
savefile (string): The path and file name to save the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request.
### Response:
def save(self, savefile):
"""Do the TTS API request and write result to file.
Args:
savefile (string): The path and file name to save the ``mp3`` to.
Raises:
:class:`gTTSError`: When there's an error with the API request.
"""
with open(str(savefile), 'wb') as f:
self.write_to_fp(f)
log.debug("Saved to %s", savefile) |
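Minimal usage sketch; the text, language and output file name are arbitrary.
from gtts import gTTS
tts = gTTS('hello world', lang='en')  # arbitrary text and language
tts.save('hello.mp3')                 # arbitrary output path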
def _pick_state_im_name(state_name, im_name, use_full_path=False):
"""
If state_name or im_name is None, picks them interactively through Tk,
and then sets with or without the full path.
Parameters
----------
state_name : {string, None}
The name of the state. If None, selected through Tk.
im_name : {string, None}
The name of the image. If None, selected through Tk.
use_full_path : Bool, optional
Set to True to return the names as full paths rather than
relative paths. Default is False (relative path).
"""
initial_dir = os.getcwd()
if (state_name is None) or (im_name is None):
wid = tk.Tk()
wid.withdraw()
if state_name is None:
state_name = tkfd.askopenfilename(
initialdir=initial_dir, title='Select pre-featured state')
os.chdir(os.path.dirname(state_name))
if im_name is None:
im_name = tkfd.askopenfilename(
initialdir=initial_dir, title='Select new image')
if (not use_full_path) and (os.path.dirname(im_name) != ''):
im_path = os.path.dirname(im_name)
os.chdir(im_path)
im_name = os.path.basename(im_name)
else:
os.chdir(initial_dir)
return state_name, im_name | If state_name or im_name is None, picks them interactively through Tk,
and then sets with or without the full path.
Parameters
----------
state_name : {string, None}
The name of the state. If None, selected through Tk.
im_name : {string, None}
The name of the image. If None, selected through Tk.
use_full_path : Bool, optional
Set to True to return the names as full paths rather than
relative paths. Default is False (relative path). | Below is the the instruction that describes the task:
### Input:
If state_name or im_name is None, picks them interactively through Tk,
and then sets with or without the full path.
Parameters
----------
state_name : {string, None}
The name of the state. If None, selected through Tk.
im_name : {string, None}
The name of the image. If None, selected through Tk.
use_full_path : Bool, optional
Set to True to return the names as full paths rather than
relative paths. Default is False (relative path).
### Response:
def _pick_state_im_name(state_name, im_name, use_full_path=False):
"""
If state_name or im_name is None, picks them interactively through Tk,
and then sets with or without the full path.
Parameters
----------
state_name : {string, None}
The name of the state. If None, selected through Tk.
im_name : {string, None}
The name of the image. If None, selected through Tk.
use_full_path : Bool, optional
Set to True to return the names as full paths rather than
relative paths. Default is False (relative path).
"""
initial_dir = os.getcwd()
if (state_name is None) or (im_name is None):
wid = tk.Tk()
wid.withdraw()
if state_name is None:
state_name = tkfd.askopenfilename(
initialdir=initial_dir, title='Select pre-featured state')
os.chdir(os.path.dirname(state_name))
if im_name is None:
im_name = tkfd.askopenfilename(
initialdir=initial_dir, title='Select new image')
if (not use_full_path) and (os.path.dirname(im_name) != ''):
im_path = os.path.dirname(im_name)
os.chdir(im_path)
im_name = os.path.basename(im_name)
else:
os.chdir(initial_dir)
return state_name, im_name |
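A stripped-down, standard-library-only sketch of the same Tk file-selection pattern; the fallback imports mirror the tk/tkfd aliases used above and the dialog title is reused from the docstring.
import os
try:
    import tkinter as tk
    import tkinter.filedialog as tkfd
except ImportError:  # Python 2 fallback, matching the tk/tkfd names above
    import Tkinter as tk
    import tkFileDialog as tkfd

root = tk.Tk()
root.withdraw()
im_name = tkfd.askopenfilename(initialdir=os.getcwd(), title='Select new image')
im_name = os.path.basename(im_name)  # keep only the relative name, as in the helper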
def verify_ticket_signature(self, data, sig):
"""Verify ticket signature. """
try:
signature = base64.b64decode(sig)
except TypeError as e:
if hasattr(self, "debug"):
print("Exception in function base64.b64decode. File %s" % (__file__))
print("%s" % e)
return False
if six.PY3:
# To avoid "TypeError: Unicode-objects must be encoded before hashing"
data = data.encode('utf-8')
digest = hashlib.sha1(data).digest()
if isinstance(self.pub_key, RSA.RSA_pub):
try:
self.pub_key.verify(digest, signature, 'sha1')
except RSA.RSAError:
return False
return True
if isinstance(self.pub_key, DSA.DSA_pub):
try:
return self.pub_key.verify_asn1(digest, signature)
except DSA.DSAError as e:
if hasattr(self, "debug"):
print("Exception in function self.pub_key.verify_asn1(digest, signature). File %s" % (__file__))
print("%s" % e)
return False
# Unknown key type
return False | Verify ticket signature. | Below is the the instruction that describes the task:
### Input:
Verify ticket signature.
### Response:
def verify_ticket_signature(self, data, sig):
"""Verify ticket signature. """
try:
signature = base64.b64decode(sig)
except TypeError as e:
if hasattr(self, "debug"):
print("Exception in function base64.b64decode. File %s" % (__file__))
print("%s" % e)
return False
if six.PY3:
# To avoid "TypeError: Unicode-objects must be encoded before hashing"
data = data.encode('utf-8')
digest = hashlib.sha1(data).digest()
if isinstance(self.pub_key, RSA.RSA_pub):
try:
self.pub_key.verify(digest, signature, 'sha1')
except RSA.RSAError:
return False
return True
if isinstance(self.pub_key, DSA.DSA_pub):
try:
return self.pub_key.verify_asn1(digest, signature)
except DSA.DSAError as e:
if hasattr(self, "debug"):
print("Exception in function self.pub_key.verify_asn1(digest, signature). File %s" % (__file__))
print("%s" % e)
return False
# Unknown key type
return False |
def new_connection(self):
"""Make a new connection."""
if not self.prepared: self.prepare()
con = sqlite3.connect(self.path, isolation_level=self.isolation)
con.row_factory = self.factory
if self.text_fact: con.text_factory = self.text_fact
return con | Make a new connection. | Below is the the instruction that describes the task:
### Input:
Make a new connection.
### Response:
def new_connection(self):
"""Make a new connection."""
if not self.prepared: self.prepare()
con = sqlite3.connect(self.path, isolation_level=self.isolation)
con.row_factory = self.factory
if self.text_fact: con.text_factory = self.text_fact
return con |
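A standalone sketch of the same connection setup using only the standard library; the database path and isolation level are placeholders.
import sqlite3
con = sqlite3.connect('/tmp/example.db', isolation_level=None)  # placeholder path, autocommit
con.row_factory = sqlite3.Row  # rows become addressable by column name
for row in con.execute('SELECT 1 AS answer'):
    print(row['answer'])
con.close()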
def _affine_mult(c:FlowField,m:AffineMatrix)->FlowField:
"Multiply `c` by `m` - can adjust for rectangular shaped `c`."
if m is None: return c
size = c.flow.size()
h,w = c.size
m[0,1] *= h/w
m[1,0] *= w/h
c.flow = c.flow.view(-1,2)
c.flow = torch.addmm(m[:2,2], c.flow, m[:2,:2].t()).view(size)
return c | Multiply `c` by `m` - can adjust for rectangular shaped `c`. | Below is the the instruction that describes the task:
### Input:
Multiply `c` by `m` - can adjust for rectangular shaped `c`.
### Response:
def _affine_mult(c:FlowField,m:AffineMatrix)->FlowField:
"Multiply `c` by `m` - can adjust for rectangular shaped `c`."
if m is None: return c
size = c.flow.size()
h,w = c.size
m[0,1] *= h/w
m[1,0] *= w/h
c.flow = c.flow.view(-1,2)
c.flow = torch.addmm(m[:2,2], c.flow, m[:2,:2].t()).view(size)
return c |
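A tiny numeric sketch of the addmm step in isolation, using a plain tensor instead of a FlowField; the rotation matrix is made up.
import torch
flow = torch.tensor([[1., 0.], [0., 1.]])  # two (x, y) coordinates
m = torch.tensor([[0., -1., 0.],
                  [1.,  0., 0.],
                  [0.,  0., 1.]])           # 90-degree rotation, no translation
out = torch.addmm(m[:2, 2], flow, m[:2, :2].t())
print(out)  # tensor([[ 0., 1.], [-1., 0.]])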
def _session():
'''
Return the boto3 session to use for the KMS client.
If aws_kms:profile_name is set in the salt configuration, use that profile.
Otherwise, fall back on the default aws profile.
We use the boto3 profile system to avoid having to duplicate
individual boto3 configuration settings in salt configuration.
'''
profile_name = _cfg('profile_name')
if profile_name:
log.info('Using the "%s" aws profile.', profile_name)
else:
log.info('aws_kms:profile_name is not set in salt. Falling back on default profile.')
try:
return boto3.Session(profile_name=profile_name)
except botocore.exceptions.ProfileNotFound as orig_exc:
err_msg = 'Boto3 could not find the "{}" profile configured in Salt.'.format(
profile_name or 'default')
config_error = salt.exceptions.SaltConfigurationError(err_msg)
six.raise_from(config_error, orig_exc)
except botocore.exceptions.NoRegionError as orig_exc:
err_msg = ('Boto3 was unable to determine the AWS '
'endpoint region using the {} profile.').format(profile_name or 'default')
config_error = salt.exceptions.SaltConfigurationError(err_msg)
six.raise_from(config_error, orig_exc) | Return the boto3 session to use for the KMS client.
If aws_kms:profile_name is set in the salt configuration, use that profile.
Otherwise, fall back on the default aws profile.
We use the boto3 profile system to avoid having to duplicate
individual boto3 configuration settings in salt configuration. | Below is the the instruction that describes the task:
### Input:
Return the boto3 session to use for the KMS client.
If aws_kms:profile_name is set in the salt configuration, use that profile.
Otherwise, fall back on the default aws profile.
We use the boto3 profile system to avoid having to duplicate
individual boto3 configuration settings in salt configuration.
### Response:
def _session():
'''
Return the boto3 session to use for the KMS client.
If aws_kms:profile_name is set in the salt configuration, use that profile.
Otherwise, fall back on the default aws profile.
We use the boto3 profile system to avoid having to duplicate
individual boto3 configuration settings in salt configuration.
'''
profile_name = _cfg('profile_name')
if profile_name:
log.info('Using the "%s" aws profile.', profile_name)
else:
log.info('aws_kms:profile_name is not set in salt. Falling back on default profile.')
try:
return boto3.Session(profile_name=profile_name)
except botocore.exceptions.ProfileNotFound as orig_exc:
err_msg = 'Boto3 could not find the "{}" profile configured in Salt.'.format(
profile_name or 'default')
config_error = salt.exceptions.SaltConfigurationError(err_msg)
six.raise_from(config_error, orig_exc)
except botocore.exceptions.NoRegionError as orig_exc:
err_msg = ('Boto3 was unable to determine the AWS '
'endpoint region using the {} profile.').format(profile_name or 'default')
config_error = salt.exceptions.SaltConfigurationError(err_msg)
six.raise_from(config_error, orig_exc) |
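A minimal usage sketch, not part of the original module: the session resolved above would typically be asked for a KMS client. boto3 must be installed and the chosen profile (or the default credential chain) must supply a region; the helper name below is illustrative.

import boto3

def _kms_client(profile_name=None):
    # Same resolution order as _session(): an explicit profile when given,
    # otherwise boto3's default credential chain.
    session = boto3.Session(profile_name=profile_name)
    return session.client("kms")

kms = _kms_client()   # with no argument, falls back to the default AWS profile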
def curl_couchdb(url, method='GET', base_url=BASE_URL, data=None):
'''
Launch a curl on CouchDB instance
'''
(username, password) = get_admin()
if username is None:
auth = None
else:
auth = (username, password)
if method == 'PUT':
req = requests.put('{}{}'.format(base_url, url), auth=auth, data=data)
elif method == 'DELETE':
req = requests.delete('{}{}'.format(base_url, url), auth=auth)
else:
req = requests.get('{}{}'.format(base_url, url), auth=auth)
if req.status_code not in [200, 201]:
raise HTTPError('{}: {}'.format(req.status_code, req.text))
return req | Launch a curl on CouchDB instance | Below is the the instruction that describes the task:
### Input:
Launch a curl on CouchDB instance
### Response:
def curl_couchdb(url, method='GET', base_url=BASE_URL, data=None):
'''
Launch a curl on CouchDB instance
'''
(username, password) = get_admin()
if username is None:
auth = None
else:
auth = (username, password)
if method == 'PUT':
req = requests.put('{}{}'.format(base_url, url), auth=auth, data=data)
elif method == 'DELETE':
req = requests.delete('{}{}'.format(base_url, url), auth=auth)
else:
req = requests.get('{}{}'.format(base_url, url), auth=auth)
if req.status_code not in [200, 201]:
raise HTTPError('{}: {}'.format(req.status_code, req.text))
return req |
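A hedged usage sketch, assuming the helper above is importable, a CouchDB server answers on BASE_URL, and get_admin() resolves valid credentials; the database name is illustrative only.

curl_couchdb('/cozy_test', method='PUT')        # create a database (201)
print(curl_couchdb('/_all_dbs').json())         # list databases (200)
curl_couchdb('/cozy_test', method='DELETE')     # drop it again (200)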
def _infinite_iterator(self):
"""this iterator wraps the "_basic_iterator" when the configuration
specifies that the "number_of_submissions" is set to "forever".
Whenever the "_basic_iterator" is exhausted, it is called again to
restart the iteration. It is up to the implementation of the innermost
iterator to define what starting over means. Some iterators may
repeat exactly what they did before, while others may iterate over
new values"""
while True:
for crash_id in self._basic_iterator():
if self._filter_disallowed_values(crash_id):
continue
yield crash_id | this iterator wraps the "_basic_iterator" when the configuration
specifies that the "number_of_submissions" is set to "forever".
Whenever the "_basic_iterator" is exhausted, it is called again to
restart the iteration. It is up to the implementation of the innermost
iterator to define what starting over means. Some iterators may
repeat exactly what they did before, while others may iterate over
new values | Below is the the instruction that describes the task:
### Input:
this iterator wraps the "_basic_iterator" when the configuration
specifies that the "number_of_submissions" is set to "forever".
Whenever the "_basic_iterator" is exhausted, it is called again to
restart the iteration. It is up to the implementation of the innermost
iterator to define what starting over means. Some iterators may
repeat exactly what they did before, while others may iterate over
new values
### Response:
def _infinite_iterator(self):
"""this iterator wraps the "_basic_iterator" when the configuration
specifies that the "number_of_submissions" is set to "forever".
Whenever the "_basic_iterator" is exhausted, it is called again to
restart the iteration. It is up to the implementation of the innermost
iterator to define what starting over means. Some iterators may
repeat exactly what they did before, while others may iterate over
new values"""
while True:
for crash_id in self._basic_iterator():
if self._filter_disallowed_values(crash_id):
continue
yield crash_id |
def binboolflip(item):
"""
Convert 0 or 1 to False or True (or vice versa).
The converter works as follows:
- 0 > False
- False > 0
- 1 > True
- True > 1
:type item: integer or boolean
:param item: The item to convert.
>>> binboolflip(0)
False
>>> binboolflip(False)
0
>>> binboolflip(1)
True
>>> binboolflip(True)
1
>>> binboolflip("foo")
Traceback (most recent call last):
...
ValueError: Invalid item specified.
"""
if item in [0, False, 1, True]:
return int(item) if isinstance(item, bool) else bool(item)
# Raise a warning
raise ValueError("Invalid item specified.") | Convert 0 or 1 to False or True (or vice versa).
The converter works as follows:
- 0 > False
- False > 0
- 1 > True
- True > 1
:type item: integer or boolean
:param item: The item to convert.
>>> binboolflip(0)
False
>>> binboolflip(False)
0
>>> binboolflip(1)
True
>>> binboolflip(True)
1
>>> binboolflip("foo")
Traceback (most recent call last):
...
ValueError: Invalid item specified. | Below is the the instruction that describes the task:
### Input:
Convert 0 or 1 to False or True (or vice versa).
The converter works as follows:
- 0 > False
- False > 0
- 1 > True
- True > 1
:type item: integer or boolean
:param item: The item to convert.
>>> binboolflip(0)
False
>>> binboolflip(False)
0
>>> binboolflip(1)
True
>>> binboolflip(True)
1
>>> binboolflip("foo")
Traceback (most recent call last):
...
ValueError: Invalid item specified.
### Response:
def binboolflip(item):
"""
Convert 0 or 1 to False or True (or vice versa).
The converter works as follows:
- 0 > False
- False > 0
- 1 > True
- True > 1
:type item: integer or boolean
:param item: The item to convert.
>>> binboolflip(0)
False
>>> binboolflip(False)
0
>>> binboolflip(1)
True
>>> binboolflip(True)
1
>>> binboolflip("foo")
Traceback (most recent call last):
...
ValueError: Invalid item specified.
"""
if item in [0, False, 1, True]:
return int(item) if isinstance(item, bool) else bool(item)
# Raise a warning
raise ValueError("Invalid item specified.") |
def start_volume(name, force=False):
'''
Start a gluster volume
name
Volume name
force
Force the volume start even if the volume is started
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' glusterfs.start mycluster
'''
cmd = 'volume start {0}'.format(name)
if force:
cmd = '{0} force'.format(cmd)
volinfo = info(name)
if name not in volinfo:
log.error("Cannot start non-existing volume %s", name)
return False
if not force and volinfo[name]['status'] == '1':
log.info("Volume %s already started", name)
return True
return _gluster(cmd) | Start a gluster volume
name
Volume name
force
Force the volume start even if the volume is started
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' glusterfs.start mycluster | Below is the the instruction that describes the task:
### Input:
Start a gluster volume
name
Volume name
force
Force the volume start even if the volume is started
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' glusterfs.start mycluster
### Response:
def start_volume(name, force=False):
'''
Start a gluster volume
name
Volume name
force
Force the volume start even if the volume is started
.. versionadded:: 2015.8.4
CLI Example:
.. code-block:: bash
salt '*' glusterfs.start mycluster
'''
cmd = 'volume start {0}'.format(name)
if force:
cmd = '{0} force'.format(cmd)
volinfo = info(name)
if name not in volinfo:
log.error("Cannot start non-existing volume %s", name)
return False
if not force and volinfo[name]['status'] == '1':
log.info("Volume %s already started", name)
return True
return _gluster(cmd) |
def _my_top_k(x, k):
"""GPU-compatible version of top-k that works for very small constant k.
Calls argmax repeatedly.
tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense,
seems not to be, so if we use tf.nn.top_k, then both the top_k and its
gradient go on cpu. Once this is not an issue, this function becomes
obsolete and should be replaced by tf.nn.top_k.
Args:
x: a 2d Tensor.
k: a small integer.
Returns:
values: a Tensor of shape [batch_size, k]
indices: a int32 Tensor of shape [batch_size, k]
"""
if k > 10:
return tf.nn.top_k(x, k)
values = []
indices = []
depth = tf.shape(x)[1]
for i in range(k):
values.append(tf.reduce_max(x, 1))
argmax = tf.argmax(x, 1)
indices.append(argmax)
if i + 1 < k:
x += tf.one_hot(argmax, depth, -1e9)
return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1)) | GPU-compatible version of top-k that works for very small constant k.
Calls argmax repeatedly.
tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense,
seems not to be, so if we use tf.nn.top_k, then both the top_k and its
gradient go on cpu. Once this is not an issue, this function becomes
obsolete and should be replaced by tf.nn.top_k.
Args:
x: a 2d Tensor.
k: a small integer.
Returns:
values: a Tensor of shape [batch_size, k]
indices: a int32 Tensor of shape [batch_size, k] | Below is the the instruction that describes the task:
### Input:
GPU-compatible version of top-k that works for very small constant k.
Calls argmax repeatedly.
tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense,
seems not to be, so if we use tf.nn.top_k, then both the top_k and its
gradient go on cpu. Once this is not an issue, this function becomes
obsolete and should be replaced by tf.nn.top_k.
Args:
x: a 2d Tensor.
k: a small integer.
Returns:
values: a Tensor of shape [batch_size, k]
indices: a int32 Tensor of shape [batch_size, k]
### Response:
def _my_top_k(x, k):
"""GPU-compatible version of top-k that works for very small constant k.
Calls argmax repeatedly.
tf.nn.top_k is implemented for GPU, but the gradient, sparse_to_dense,
seems not to be, so if we use tf.nn.top_k, then both the top_k and its
gradient go on cpu. Once this is not an issue, this function becomes
obsolete and should be replaced by tf.nn.top_k.
Args:
x: a 2d Tensor.
k: a small integer.
Returns:
values: a Tensor of shape [batch_size, k]
indices: a int32 Tensor of shape [batch_size, k]
"""
if k > 10:
return tf.nn.top_k(x, k)
values = []
indices = []
depth = tf.shape(x)[1]
for i in range(k):
values.append(tf.reduce_max(x, 1))
argmax = tf.argmax(x, 1)
indices.append(argmax)
if i + 1 < k:
x += tf.one_hot(argmax, depth, -1e9)
return tf.stack(values, axis=1), tf.to_int32(tf.stack(indices, axis=1)) |
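For reference, the same argmax-and-mask loop can be sketched in plain NumPy without a TensorFlow graph; the function name below is illustrative and not part of the original module.

import numpy as np

def small_top_k(x, k):
    # Repeatedly take the row-wise argmax, record it, then mask that entry with a
    # large negative value so the next pass finds the next-largest element.
    x = x.astype(np.float64).copy()
    rows = np.arange(x.shape[0])
    values, indices = [], []
    for _ in range(k):
        idx = x.argmax(axis=1)
        values.append(x[rows, idx])
        indices.append(idx)
        x[rows, idx] = -1e9
    return np.stack(values, axis=1), np.stack(indices, axis=1)

vals, idxs = small_top_k(np.array([[0.1, 0.9, 0.5]]), 2)   # vals -> [[0.9, 0.5]]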
def shell(args):
" A helper command to be used for shell integration "
print
print "# Makesite integration "
print "# ==================== "
print "export MAKESITE_HOME=%s" % args.path
print "source %s" % op.join(settings.BASEDIR, 'shell.sh')
print | A helper command to be used for shell integration | Below is the the instruction that describes the task:
### Input:
A helper command to be used for shell integration
### Response:
def shell(args):
" A helper command to be used for shell integration "
print
print "# Makesite integration "
print "# ==================== "
print "export MAKESITE_HOME=%s" % args.path
print "source %s" % op.join(settings.BASEDIR, 'shell.sh')
print |
def get_label(self, label_name):
"""Return the user's label that has a given name.
:param label_name: The name to search for.
:type label_name: str
:return: A label that has a matching name or ``None`` if not found.
:rtype: :class:`pytodoist.todoist.Label`
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> label = user.get_label('family')
"""
for label in self.get_labels():
if label.name == label_name:
return label | Return the user's label that has a given name.
:param label_name: The name to search for.
:type label_name: str
:return: A label that has a matching name or ``None`` if not found.
:rtype: :class:`pytodoist.todoist.Label`
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> label = user.get_label('family') | Below is the the instruction that describes the task:
### Input:
Return the user's label that has a given name.
:param label_name: The name to search for.
:type label_name: str
:return: A label that has a matching name or ``None`` if not found.
:rtype: :class:`pytodoist.todoist.Label`
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> label = user.get_label('family')
### Response:
def get_label(self, label_name):
"""Return the user's label that has a given name.
:param label_name: The name to search for.
:type label_name: str
:return: A label that has a matching name or ``None`` if not found.
:rtype: :class:`pytodoist.todoist.Label`
>>> from pytodoist import todoist
>>> user = todoist.login('[email protected]', 'password')
>>> label = user.get_label('family')
"""
for label in self.get_labels():
if label.name == label_name:
return label |
def download(self, storagemodel:object, modeldefinition = None):
""" load blob from storage into StorageBlobModelInstance """
if (storagemodel.name is None):
# No content to download
raise AzureStorageWrapException(storagemodel, "StorageBlobModel does not contain content nor content settings")
else:
container_name = modeldefinition['container']
blob_name = storagemodel.name
try:
if modeldefinition['blobservice'].exists(container_name, blob_name):
""" download blob """
blob = modeldefinition['blobservice'].get_blob_to_bytes(
container_name=modeldefinition['container'],
blob_name=storagemodel.name
)
storagemodel.__mergeblob__(blob)
except Exception as e:
msg = 'can not load blob from container {} because {!s}'.format(storagemodel._containername, e)
raise AzureStorageWrapException(storagemodel, msg=msg)
return storagemodel | load blob from storage into StorageBlobModelInstance | Below is the the instruction that describes the task:
### Input:
load blob from storage into StorageBlobModelInstance
### Response:
def download(self, storagemodel:object, modeldefinition = None):
""" load blob from storage into StorageBlobModelInstance """
if (storagemodel.name is None):
# No content to download
raise AzureStorageWrapException(storagemodel, "StorageBlobModel does not contain content nor content settings")
else:
container_name = modeldefinition['container']
blob_name = storagemodel.name
try:
if modeldefinition['blobservice'].exists(container_name, blob_name):
""" download blob """
blob = modeldefinition['blobservice'].get_blob_to_bytes(
container_name=modeldefinition['container'],
blob_name=storagemodel.name
)
storagemodel.__mergeblob__(blob)
except Exception as e:
msg = 'can not load blob from container {} because {!s}'.format(storagemodel._containername, e)
raise AzureStorageWrapException(storagemodel, msg=msg)
return storagemodel |
def files_info(self, *, id: str, **kwargs) -> SlackResponse:
"""Gets information about a team file.
Args:
id (str): The file id. e.g. 'F1234467890'
"""
kwargs.update({"id": id})
return self.api_call("files.info", http_verb="GET", params=kwargs) | Gets information about a team file.
Args:
id (str): The file id. e.g. 'F1234467890' | Below is the the instruction that describes the task:
### Input:
Gets information about a team file.
Args:
id (str): The file id. e.g. 'F1234467890'
### Response:
def files_info(self, *, id: str, **kwargs) -> SlackResponse:
"""Gets information about a team file.
Args:
id (str): The file id. e.g. 'F1234467890'
"""
kwargs.update({"id": id})
return self.api_call("files.info", http_verb="GET", params=kwargs) |
def transform_language_code(code):
"""
Transform ISO language code (e.g. en-us) to the language name expected by SAPSF.
"""
if code is None:
return 'English'
components = code.split('-', 2)
language_code = components[0]
try:
country_code = components[1]
except IndexError:
country_code = '_'
language_family = SUCCESSFACTORS_OCN_LANGUAGE_CODES.get(language_code)
if not language_family:
return 'English'
return language_family.get(country_code, language_family['_']) | Transform ISO language code (e.g. en-us) to the language name expected by SAPSF. | Below is the the instruction that describes the task:
### Input:
Transform ISO language code (e.g. en-us) to the language name expected by SAPSF.
### Response:
def transform_language_code(code):
"""
Transform ISO language code (e.g. en-us) to the language name expected by SAPSF.
"""
if code is None:
return 'English'
components = code.split('-', 2)
language_code = components[0]
try:
country_code = components[1]
except IndexError:
country_code = '_'
language_family = SUCCESSFACTORS_OCN_LANGUAGE_CODES.get(language_code)
if not language_family:
return 'English'
return language_family.get(country_code, language_family['_']) |
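Illustrative calls, assuming SUCCESSFACTORS_OCN_LANGUAGE_CODES maps a language code such as 'en' or 'pt' to a dict of country variants with a '_' fallback entry (the actual table is not shown in this record):

transform_language_code(None)       # no code at all -> 'English'
transform_language_code('en-us')    # 'en' family, then its 'us' variant if present
transform_language_code('pt')       # no country part -> the family's '_' default
transform_language_code('zz-yy')    # unknown language family -> 'English'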
def add_distinguished_name_list(list_name):
'''
Add a list of policy distinguished names.
list_name(str): The name of the specific policy distinguished name list to add.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_distinguished_name_list MyDistinguishedList
'''
payload = {"jsonrpc": "2.0",
"id": "ID0",
"method": "add_policy_distinguished_names_list",
"params": [{"list_name": list_name}]}
response = __proxy__['bluecoat_sslv.call'](payload, True)
return _validate_change_result(response) | Add a list of policy distinguished names.
list_name(str): The name of the specific policy distinguished name list to add.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_distinguished_name_list MyDistinguishedList | Below is the the instruction that describes the task:
### Input:
Add a list of policy distinguished names.
list_name(str): The name of the specific policy distinguished name list to add.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_distinguished_name_list MyDistinguishedList
### Response:
def add_distinguished_name_list(list_name):
'''
Add a list of policy distinguished names.
list_name(str): The name of the specific policy distinguished name list to add.
CLI Example:
.. code-block:: bash
salt '*' bluecoat_sslv.add_distinguished_name_list MyDistinguishedList
'''
payload = {"jsonrpc": "2.0",
"id": "ID0",
"method": "add_policy_distinguished_names_list",
"params": [{"list_name": list_name}]}
response = __proxy__['bluecoat_sslv.call'](payload, True)
return _validate_change_result(response) |
def insert(self, key, value):
'''Adds a new key-value pair. Returns any discarded values.'''
# Add to history and catch expectorate
if len(self.history) == self.maxsize:
expectorate = self.history[0]
else:
expectorate = None
self.history.append((key, value))
# Add to the appropriate list of values
if key in self:
super().__getitem__(key).append(value)
else:
super().__setitem__(key, [value])
# Clean up old values
if expectorate is not None:
old_key, old_value = expectorate
super().__getitem__(old_key).pop(0)
if len(super().__getitem__(old_key)) == 0:
super().__delitem__(old_key)
return (old_key, old_value) | Adds a new key-value pair. Returns any discarded values. | Below is the the instruction that describes the task:
### Input:
Adds a new key-value pair. Returns any discarded values.
### Response:
def insert(self, key, value):
'''Adds a new key-value pair. Returns any discarded values.'''
# Add to history and catch expectorate
if len(self.history) == self.maxsize:
expectorate = self.history[0]
else:
expectorate = None
self.history.append((key, value))
# Add to the appropriate list of values
if key in self:
super().__getitem__(key).append(value)
else:
super().__setitem__(key, [value])
# Clean up old values
if expectorate is not None:
old_key, old_value = expectorate
super().__getitem__(old_key).pop(0)
if len(super().__getitem__(old_key)) == 0:
super().__delitem__(old_key)
return (old_key, old_value) |
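The enclosing class is not shown in this record, so the sketch below rests on assumptions: a dict subclass whose __init__ sets maxsize and keeps history as a bounded deque. It only illustrates the eviction contract of insert().

from collections import deque

class BoundedMultiDict(dict):
    # Hypothetical container matching the insert() behaviour above: at most
    # `maxsize` (key, value) pairs are kept, and the oldest pair is returned
    # when it gets pushed out.
    def __init__(self, maxsize):
        super().__init__()
        self.maxsize = maxsize
        self.history = deque()

    def insert(self, key, value):
        evicted = None
        if len(self.history) == self.maxsize:
            old_key, old_value = self.history.popleft()
            self[old_key].pop(0)
            if not self[old_key]:
                del self[old_key]
            evicted = (old_key, old_value)
        self.history.append((key, value))
        self.setdefault(key, []).append(value)
        return evicted

d = BoundedMultiDict(maxsize=2)
d.insert('a', 1)     # -> None
d.insert('b', 2)     # -> None
d.insert('c', 3)     # -> ('a', 1): the oldest pair is evicted and key 'a' drops out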
def absolute(self):
"""Calculate the absolute value element-wise.
Returns:
absolute (Timeseries):
Absolute value. For complex input (a + b*j) gives sqrt(a**2 + b**2)
"""
return Timeseries(np.absolute(self), self.tspan, self.labels) | Calculate the absolute value element-wise.
Returns:
absolute (Timeseries):
Absolute value. For complex input (a + b*j) gives sqrt(a**2 + b**2) | Below is the the instruction that describes the task:
### Input:
Calculate the absolute value element-wise.
Returns:
absolute (Timeseries):
Absolute value. For complex input (a + b*j) gives sqrt(a**2 + b**2)
### Response:
def absolute(self):
"""Calculate the absolute value element-wise.
Returns:
absolute (Timeseries):
Absolute value. For complex input (a + b*j) gives sqrt(a**2 + b**2)
"""
return Timeseries(np.absolute(self), self.tspan, self.labels) |
def setup(self):
"""
instantiates all report formats that have been added to this
reporter, and calls their setup methods.
"""
if self._formats:
# setup has been run already.
return
basedir = self.basedir
options = self.options
crumbs = self.get_relative_breadcrumbs()
fmts = list()
for fmt_class in self.formats:
fmt = fmt_class(basedir, options, crumbs)
fmt.setup()
fmts.append(fmt)
self._formats = fmts | instantiates all report formats that have been added to this
reporter, and calls their setup methods. | Below is the the instruction that describes the task:
### Input:
instantiates all report formats that have been added to this
reporter, and calls their setup methods.
### Response:
def setup(self):
"""
instantiates all report formats that have been added to this
reporter, and calls their setup methods.
"""
if self._formats:
# setup has been run already.
return
basedir = self.basedir
options = self.options
crumbs = self.get_relative_breadcrumbs()
fmts = list()
for fmt_class in self.formats:
fmt = fmt_class(basedir, options, crumbs)
fmt.setup()
fmts.append(fmt)
self._formats = fmts |
def remove_extra_delims(expr, ldelim="(", rdelim=")"):
r"""
Remove unnecessary delimiters in mathematical expressions.
Delimiters (parenthesis, brackets, etc.) may be removed either because
there are multiple consecutive delimiters enclosing a single expressions or
because the delimiters are implied by operator precedence rules. Function
names must start with a letter and then can contain alphanumeric characters
and a maximum of one underscore
:param expr: Mathematical expression
:type expr: string
:param ldelim: Single character left delimiter
:type ldelim: string
:param rdelim: Single character right delimiter
:type rdelim: string
:rtype: string
:raises:
* RuntimeError (Argument \`expr\` is not valid)
* RuntimeError (Argument \`ldelim\` is not valid)
* RuntimeError (Argument \`rdelim\` is not valid)
* RuntimeError (Function name `*[function_name]*` is not valid)
* RuntimeError (Mismatched delimiters)
"""
op_group = ""
for item1 in _OP_PREC:
if isinstance(item1, list):
for item2 in item1:
op_group += item2
else:
op_group += item1
iobj = zip([expr, ldelim, rdelim], ["expr", "ldelim", "rdelim"])
for item, desc in iobj:
if not isinstance(item, str):
raise RuntimeError("Argument `{0}` is not valid".format(desc))
if (len(ldelim) != 1) or ((len(ldelim) == 1) and (ldelim in op_group)):
raise RuntimeError("Argument `ldelim` is not valid")
if (len(rdelim) != 1) or ((len(rdelim) == 1) and (rdelim in op_group)):
raise RuntimeError("Argument `rdelim` is not valid")
if expr.count(ldelim) != expr.count(rdelim):
raise RuntimeError("Mismatched delimiters")
if not expr:
return expr
vchars = (
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
".0123456789"
r"_()[]\{\}" + rdelim + ldelim + op_group
)
if any([item not in vchars for item in expr]) or ("__" in expr):
raise RuntimeError("Argument `expr` is not valid")
expr = _remove_consecutive_delims(expr, ldelim=ldelim, rdelim=rdelim)
expr = expr.replace(ldelim + rdelim, "")
return _remove_extra_delims(expr, ldelim=ldelim, rdelim=rdelim) | r"""
Remove unnecessary delimiters in mathematical expressions.
Delimiters (parenthesis, brackets, etc.) may be removed either because
there are multiple consecutive delimiters enclosing a single expression or
because the delimiters are implied by operator precedence rules. Function
names must start with a letter and then can contain alphanumeric characters
and a maximum of one underscore
:param expr: Mathematical expression
:type expr: string
:param ldelim: Single character left delimiter
:type ldelim: string
:param rdelim: Single character right delimiter
:type rdelim: string
:rtype: string
:raises:
* RuntimeError (Argument \`expr\` is not valid)
* RuntimeError (Argument \`ldelim\` is not valid)
* RuntimeError (Argument \`rdelim\` is not valid)
* RuntimeError (Function name `*[function_name]*` is not valid)
* RuntimeError (Mismatched delimiters) | Below is the the instruction that describes the task:
### Input:
r"""
Remove unnecessary delimiters in mathematical expressions.
Delimiters (parenthesis, brackets, etc.) may be removed either because
there are multiple consecutive delimiters enclosing a single expression or
because the delimiters are implied by operator precedence rules. Function
names must start with a letter and then can contain alphanumeric characters
and a maximum of one underscore
:param expr: Mathematical expression
:type expr: string
:param ldelim: Single character left delimiter
:type ldelim: string
:param rdelim: Single character right delimiter
:type rdelim: string
:rtype: string
:raises:
* RuntimeError (Argument \`expr\` is not valid)
* RuntimeError (Argument \`ldelim\` is not valid)
* RuntimeError (Argument \`rdelim\` is not valid)
* RuntimeError (Function name `*[function_name]*` is not valid)
* RuntimeError (Mismatched delimiters)
### Response:
def remove_extra_delims(expr, ldelim="(", rdelim=")"):
r"""
Remove unnecessary delimiters in mathematical expressions.
Delimiters (parenthesis, brackets, etc.) may be removed either because
there are multiple consecutive delimiters enclosing a single expression or
because the delimiters are implied by operator precedence rules. Function
names must start with a letter and then can contain alphanumeric characters
and a maximum of one underscore
:param expr: Mathematical expression
:type expr: string
:param ldelim: Single character left delimiter
:type ldelim: string
:param rdelim: Single character right delimiter
:type rdelim: string
:rtype: string
:raises:
* RuntimeError (Argument \`expr\` is not valid)
* RuntimeError (Argument \`ldelim\` is not valid)
* RuntimeError (Argument \`rdelim\` is not valid)
* RuntimeError (Function name `*[function_name]*` is not valid)
* RuntimeError (Mismatched delimiters)
"""
op_group = ""
for item1 in _OP_PREC:
if isinstance(item1, list):
for item2 in item1:
op_group += item2
else:
op_group += item1
iobj = zip([expr, ldelim, rdelim], ["expr", "ldelim", "rdelim"])
for item, desc in iobj:
if not isinstance(item, str):
raise RuntimeError("Argument `{0}` is not valid".format(desc))
if (len(ldelim) != 1) or ((len(ldelim) == 1) and (ldelim in op_group)):
raise RuntimeError("Argument `ldelim` is not valid")
if (len(rdelim) != 1) or ((len(rdelim) == 1) and (rdelim in op_group)):
raise RuntimeError("Argument `rdelim` is not valid")
if expr.count(ldelim) != expr.count(rdelim):
raise RuntimeError("Mismatched delimiters")
if not expr:
return expr
vchars = (
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
".0123456789"
r"_()[]\{\}" + rdelim + ldelim + op_group
)
if any([item not in vchars for item in expr]) or ("__" in expr):
raise RuntimeError("Argument `expr` is not valid")
expr = _remove_consecutive_delims(expr, ldelim=ldelim, rdelim=rdelim)
expr = expr.replace(ldelim + rdelim, "")
return _remove_extra_delims(expr, ldelim=ldelim, rdelim=rdelim) |
def setPalette(self, palette):
"""
Sets the palette for this node to the inputed palette. If None is
provided, then the scene's palette will be used for this node.
:param palette | <XNodePalette> || None
"""
self._palette = XNodePalette(palette) if palette is not None else None
self.setDirty() | Sets the palette for this node to the inputted palette. If None is
provided, then the scene's palette will be used for this node.
:param palette | <XNodePalette> || None | Below is the the instruction that describes the task:
### Input:
Sets the palette for this node to the inputted palette. If None is
provided, then the scene's palette will be used for this node.
:param palette | <XNodePalette> || None
### Response:
def setPalette(self, palette):
"""
Sets the palette for this node to the inputted palette. If None is
provided, then the scene's palette will be used for this node.
:param palette | <XNodePalette> || None
"""
self._palette = XNodePalette(palette) if palette is not None else None
self.setDirty() |
def _unpack_batch_response(response):
"""Convert requests.Response -> [(headers, payload)].
Creates a generator of tuples of emulating the responses to
:meth:`requests.Session.request`.
:type response: :class:`requests.Response`
:param response: HTTP response / headers from a request.
"""
parser = Parser()
message = _generate_faux_mime_message(parser, response)
if not isinstance(message._payload, list):
raise ValueError("Bad response: not multi-part")
for subrequest in message._payload:
status_line, rest = subrequest._payload.split("\n", 1)
_, status, _ = status_line.split(" ", 2)
sub_message = parser.parsestr(rest)
payload = sub_message._payload
msg_headers = dict(sub_message._headers)
content_id = msg_headers.get("Content-ID")
subresponse = requests.Response()
subresponse.request = requests.Request(
method="BATCH", url="contentid://{}".format(content_id)
).prepare()
subresponse.status_code = int(status)
subresponse.headers.update(msg_headers)
subresponse._content = payload.encode("utf-8")
yield subresponse | Convert requests.Response -> [(headers, payload)].
Creates a generator of tuples of emulating the responses to
:meth:`requests.Session.request`.
:type response: :class:`requests.Response`
:param response: HTTP response / headers from a request. | Below is the the instruction that describes the task:
### Input:
Convert requests.Response -> [(headers, payload)].
Creates a generator of tuples of emulating the responses to
:meth:`requests.Session.request`.
:type response: :class:`requests.Response`
:param response: HTTP response / headers from a request.
### Response:
def _unpack_batch_response(response):
"""Convert requests.Response -> [(headers, payload)].
Creates a generator of tuples of emulating the responses to
:meth:`requests.Session.request`.
:type response: :class:`requests.Response`
:param response: HTTP response / headers from a request.
"""
parser = Parser()
message = _generate_faux_mime_message(parser, response)
if not isinstance(message._payload, list):
raise ValueError("Bad response: not multi-part")
for subrequest in message._payload:
status_line, rest = subrequest._payload.split("\n", 1)
_, status, _ = status_line.split(" ", 2)
sub_message = parser.parsestr(rest)
payload = sub_message._payload
msg_headers = dict(sub_message._headers)
content_id = msg_headers.get("Content-ID")
subresponse = requests.Response()
subresponse.request = requests.Request(
method="BATCH", url="contentid://{}".format(content_id)
).prepare()
subresponse.status_code = int(status)
subresponse.headers.update(msg_headers)
subresponse._content = payload.encode("utf-8")
yield subresponse |
def gen_rupture_getters(dstore, slc=slice(None),
concurrent_tasks=1, hdf5cache=None):
"""
:yields: RuptureGetters
"""
if dstore.parent:
dstore = dstore.parent
csm_info = dstore['csm_info']
trt_by_grp = csm_info.grp_by("trt")
samples = csm_info.get_samples_by_grp()
rlzs_by_gsim = csm_info.get_rlzs_by_gsim_grp()
rup_array = dstore['ruptures'][slc]
maxweight = numpy.ceil(len(rup_array) / (concurrent_tasks or 1))
nr, ne = 0, 0
for grp_id, arr in general.group_array(rup_array, 'grp_id').items():
if not rlzs_by_gsim[grp_id]:
# this may happen if a source model has no sources, like
# in event_based_risk/case_3
continue
for block in general.block_splitter(arr, maxweight):
rgetter = RuptureGetter(
hdf5cache or dstore.filename, numpy.array(block), grp_id,
trt_by_grp[grp_id], samples[grp_id], rlzs_by_gsim[grp_id])
rgetter.weight = getattr(block, 'weight', len(block))
yield rgetter
nr += len(block)
ne += rgetter.num_events
logging.info('Read %d ruptures and %d events', nr, ne) | :yields: RuptureGetters | Below is the the instruction that describes the task:
### Input:
:yields: RuptureGetters
### Response:
def gen_rupture_getters(dstore, slc=slice(None),
concurrent_tasks=1, hdf5cache=None):
"""
:yields: RuptureGetters
"""
if dstore.parent:
dstore = dstore.parent
csm_info = dstore['csm_info']
trt_by_grp = csm_info.grp_by("trt")
samples = csm_info.get_samples_by_grp()
rlzs_by_gsim = csm_info.get_rlzs_by_gsim_grp()
rup_array = dstore['ruptures'][slc]
maxweight = numpy.ceil(len(rup_array) / (concurrent_tasks or 1))
nr, ne = 0, 0
for grp_id, arr in general.group_array(rup_array, 'grp_id').items():
if not rlzs_by_gsim[grp_id]:
# this may happen if a source model has no sources, like
# in event_based_risk/case_3
continue
for block in general.block_splitter(arr, maxweight):
rgetter = RuptureGetter(
hdf5cache or dstore.filename, numpy.array(block), grp_id,
trt_by_grp[grp_id], samples[grp_id], rlzs_by_gsim[grp_id])
rgetter.weight = getattr(block, 'weight', len(block))
yield rgetter
nr += len(block)
ne += rgetter.num_events
logging.info('Read %d ruptures and %d events', nr, ne) |
def __make_http_query(self, params, topkey=''):
"""
Function to convert params into url encoded query string
:param dict params: Json string sent by Authy.
:param string topkey: params key
:return string: url encoded Query.
"""
if len(params) == 0:
return ""
result = ""
# is a dictionary?
if type(params) is dict:
for key in params.keys():
newkey = quote(key)
if topkey != '':
newkey = topkey + quote('[' + key + ']')
if type(params[key]) is dict:
result += self.__make_http_query(params[key], newkey)
elif type(params[key]) is list:
i = 0
for val in params[key]:
if type(val) is dict:
result += self.__make_http_query(
val, newkey + quote('['+str(i)+']'))
else:
result += newkey + \
quote('['+str(i)+']') + "=" + \
quote(str(val)) + "&"
i = i + 1
# boolean should have special treatment as well
elif type(params[key]) is bool:
result += newkey + "=" + \
quote(str(params[key]).lower()) + "&"
# assume string (integers and floats work well)
else:
result += newkey + "=" + quote(str(params[key])) + "&"
# remove the last '&'
if (result) and (topkey == '') and (result[-1] == '&'):
result = result[:-1]
return result | Function to convert params into url encoded query string
:param dict params: Json string sent by Authy.
:param string topkey: params key
:return string: url encoded Query. | Below is the the instruction that describes the task:
### Input:
Function to convert params into url encoded query string
:param dict params: Json string sent by Authy.
:param string topkey: params key
:return string: url encoded Query.
### Response:
def __make_http_query(self, params, topkey=''):
"""
Function to convert params into url encoded query string
:param dict params: Json string sent by Authy.
:param string topkey: params key
:return string: url encoded Query.
"""
if len(params) == 0:
return ""
result = ""
# is a dictionary?
if type(params) is dict:
for key in params.keys():
newkey = quote(key)
if topkey != '':
newkey = topkey + quote('[' + key + ']')
if type(params[key]) is dict:
result += self.__make_http_query(params[key], newkey)
elif type(params[key]) is list:
i = 0
for val in params[key]:
if type(val) is dict:
result += self.__make_http_query(
val, newkey + quote('['+str(i)+']'))
else:
result += newkey + \
quote('['+str(i)+']') + "=" + \
quote(str(val)) + "&"
i = i + 1
# boolean should have special treatment as well
elif type(params[key]) is bool:
result += newkey + "=" + \
quote(str(params[key]).lower()) + "&"
# assume string (integers and floats work well)
else:
result += newkey + "=" + quote(str(params[key])) + "&"
# remove the last '&'
if (result) and (topkey == '') and (result[-1] == '&'):
result = result[:-1]
return result |
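For comparison, the standard library flattens only the non-nested case; the bracketed keys built above (user[email]=... style) follow the Rails-like convention that APIs such as Authy's typically expect, which is why the method rolls its own encoder. The keys below are illustrative.

from urllib.parse import urlencode

urlencode({'api_key': 'abc', 'locale': 'en'})
# -> 'api_key=abc&locale=en'
# Nested dicts and lists are what __make_http_query adds on top, emitting
# user[email]=...&user[cellphone]=... style pairs that urlencode alone does not.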
def zero_nan(self, *cols):
"""
Converts zero values to nan values in selected columns
:param \*cols: names of the columns
:type \*cols: str, at least one
:example: ``ds.zero_nan("mycol1", "mycol2")``
"""
if len(cols) == 0:
self.warning("Can not nan zero values if a column name "
"is not provided")
df = self._zero_nan(*cols)
if df is None:
self.err("Can not fill zero values with nan")
return
self.df = df | Converts zero values to nan values in selected columns
:param \*cols: names of the columns
:type \*cols: str, at least one
:example: ``ds.zero_nan("mycol1", "mycol2")`` | Below is the the instruction that describes the task:
### Input:
Converts zero values to nan values in selected columns
:param \*cols: names of the columns
:type \*cols: str, at least one
:example: ``ds.zero_nan("mycol1", "mycol2")``
### Response:
def zero_nan(self, *cols):
"""
Converts zero values to nan values in selected columns
:param \*cols: names of the columns
:type \*cols: str, at least one
:example: ``ds.zero_nan("mycol1", "mycol2")``
"""
if len(cols) == 0:
self.warning("Can not nan zero values if a column name "
"is not provided")
df = self._zero_nan(*cols)
if df is None:
self.err("Can not fill zero values with nan")
return
self.df = df |
def __calc_signed_volume(triangle):
""" Calculate signed volume of given triangle
:param list of list triangle:
:rtype float
"""
v321 = triangle[2][0] * triangle[1][1] * triangle[0][2]
v231 = triangle[1][0] * triangle[2][1] * triangle[0][2]
v312 = triangle[2][0] * triangle[0][1] * triangle[1][2]
v132 = triangle[0][0] * triangle[2][1] * triangle[1][2]
v213 = triangle[1][0] * triangle[0][1] * triangle[2][2]
v123 = triangle[0][0] * triangle[1][1] * triangle[2][2]
signed_volume = (-v321 + v231 + v312 - v132 - v213 + v123) / 6.0
return signed_volume | Calculate signed volume of given triangle
:param list of list triangle:
:rtype float | Below is the the instruction that describes the task:
### Input:
Calculate signed volume of given triangle
:param list of list triangle:
:rtype float
### Response:
def __calc_signed_volume(triangle):
""" Calculate signed volume of given triangle
:param list of list triangle:
:rtype float
"""
v321 = triangle[2][0] * triangle[1][1] * triangle[0][2]
v231 = triangle[1][0] * triangle[2][1] * triangle[0][2]
v312 = triangle[2][0] * triangle[0][1] * triangle[1][2]
v132 = triangle[0][0] * triangle[2][1] * triangle[1][2]
v213 = triangle[1][0] * triangle[0][1] * triangle[2][2]
v123 = triangle[0][0] * triangle[1][1] * triangle[2][2]
signed_volume = (-v321 + v231 + v312 - v132 - v213 + v123) / 6.0
return signed_volume |
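The quantity above equals det([p0, p1, p2]) / 6, the signed volume of the tetrahedron spanned by the triangle and the origin; summed over a closed, consistently oriented triangle mesh it yields the enclosed volume. A small NumPy check of that identity (standalone, not calling the private method):

import numpy as np

def signed_volume(triangle):
    # determinant of the 3x3 matrix whose rows are the vertices, divided by 6
    return float(np.linalg.det(np.asarray(triangle, dtype=float))) / 6.0

print(signed_volume([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))   # 1/6: unit tetrahedron face opposite the origin
print(signed_volume([[0, 0, 0], [1, 0, 0], [0, 1, 0]]))   # 0.0: a face touching the origin contributes nothing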
def getSwapStats(self, dev):
"""Returns I/O stats for swap partition.
@param dev: Device name for swap partition.
@return: Dict of stats.
"""
if self._swapList is None:
self._initSwapInfo()
if dev in self._swapList:
return self.getDevStats(dev)
else:
return None | Returns I/O stats for swap partition.
@param dev: Device name for swap partition.
@return: Dict of stats. | Below is the the instruction that describes the task:
### Input:
Returns I/O stats for swap partition.
@param dev: Device name for swap partition.
@return: Dict of stats.
### Response:
def getSwapStats(self, dev):
"""Returns I/O stats for swap partition.
@param dev: Device name for swap partition.
@return: Dict of stats.
"""
if self._swapList is None:
self._initSwapInfo()
if dev in self._swapList:
return self.getDevStats(dev)
else:
return None |
def search(cls, search_string, values_of='', group=whoosh.qparser.OrGroup, match_substrings=True, limit=None):
"""Searches the fields for given search_string.
Returns the found records if 'values_of' is left empty,
else the values of the given columns.
:param search_string: The string to search for.
:param values_of: If given, the method will not return the whole
records, but only values of given column.
Defaults to returning whole records.
:param group: The whoosh group to use for searching.
Defaults to :class:`whoosh.qparser.OrGroup` which
searches for all words in all columns.
:param match_substrings: ``True`` if you want to match substrings,
``False`` otherwise.
:param limit: The number of the top records to be returned.
Defaults to ``None`` and returns all records.
"""
index = Whooshee.get_or_create_index(_get_app(cls), cls)
prepped_string = cls.prep_search_string(search_string, match_substrings)
with index.searcher() as searcher:
parser = whoosh.qparser.MultifieldParser(cls.schema.names(), index.schema, group=group)
query = parser.parse(prepped_string)
results = searcher.search(query, limit=limit)
if values_of:
return [x[values_of] for x in results]
return results | Searches the fields for given search_string.
Returns the found records if 'values_of' is left empty,
else the values of the given columns.
:param search_string: The string to search for.
:param values_of: If given, the method will not return the whole
records, but only values of given column.
Defaults to returning whole records.
:param group: The whoosh group to use for searching.
Defaults to :class:`whoosh.qparser.OrGroup` which
searches for all words in all columns.
:param match_substrings: ``True`` if you want to match substrings,
``False`` otherwise.
:param limit: The number of the top records to be returned.
Defaults to ``None`` and returns all records. | Below is the the instruction that describes the task:
### Input:
Searches the fields for given search_string.
Returns the found records if 'values_of' is left empty,
else the values of the given columns.
:param search_string: The string to search for.
:param values_of: If given, the method will not return the whole
records, but only values of given column.
Defaults to returning whole records.
:param group: The whoosh group to use for searching.
Defaults to :class:`whoosh.qparser.OrGroup` which
searches for all words in all columns.
:param match_substrings: ``True`` if you want to match substrings,
``False`` otherwise.
:param limit: The number of the top records to be returned.
Defaults to ``None`` and returns all records.
### Response:
def search(cls, search_string, values_of='', group=whoosh.qparser.OrGroup, match_substrings=True, limit=None):
"""Searches the fields for given search_string.
Returns the found records if 'values_of' is left empty,
else the values of the given columns.
:param search_string: The string to search for.
:param values_of: If given, the method will not return the whole
records, but only values of given column.
Defaults to returning whole records.
:param group: The whoosh group to use for searching.
Defaults to :class:`whoosh.qparser.OrGroup` which
searches for all words in all columns.
:param match_substrings: ``True`` if you want to match substrings,
``False`` otherwise.
:param limit: The number of the top records to be returned.
Defaults to ``None`` and returns all records.
"""
index = Whooshee.get_or_create_index(_get_app(cls), cls)
prepped_string = cls.prep_search_string(search_string, match_substrings)
with index.searcher() as searcher:
parser = whoosh.qparser.MultifieldParser(cls.schema.names(), index.schema, group=group)
query = parser.parse(prepped_string)
results = searcher.search(query, limit=limit)
if values_of:
return [x[values_of] for x in results]
return results |
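A hedged usage sketch for this mixin method, assuming a flask-whooshee setup in which an Entry model has its searchable columns registered with Whooshee; the model name and field names are illustrative.

hits = Entry.search('full text query', limit=10)                                # ranked records
ids = Entry.search('full text query', values_of='id', match_substrings=False)   # just the ids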
def store_mapping(self, path):
"""Store the current Id mappings into a TSV file."""
with open(path, "w") as writer:
for key, value in self.mapping.iteritems():
writer.write("{}\t{}\n".format(key, value)) | Store the current Id mappings into a TSV file. | Below is the the instruction that describes the task:
### Input:
Store the current Id mappings into a TSV file.
### Response:
def store_mapping(self, path):
"""Store the current Id mappings into a TSV file."""
with open(path, "w") as writer:
for key, value in self.mapping.iteritems():
writer.write("{}\t{}\n".format(key, value)) |
def delete_files_or_folders(self, request, files_queryset, folders_queryset):
"""
Action which deletes the selected files and/or folders.
This action first displays a confirmation page which shows all the
deletable files and/or folders, or, if the user has no permission on
one of the related children (foreignkeys), a "permission denied" message.
Next, it deletes all selected files and/or folders and redirects back to
the folder.
"""
opts = self.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not self.has_delete_permission(request):
raise PermissionDenied
current_folder = self._get_current_action_folder(
request, files_queryset, folders_queryset)
all_protected = []
# Populate deletable_objects, a data structure of all related objects
# that will also be deleted. Hopefully this also checks for necessary
# permissions.
# TODO: Check if permissions are really verified
using = router.db_for_write(self.model)
deletable_files, model_count_files, perms_needed_files, protected_files = get_deleted_objects(files_queryset, files_queryset.model._meta, request.user, self.admin_site, using)
deletable_folders, model_count_folder, perms_needed_folders, protected_folders = get_deleted_objects(folders_queryset, folders_queryset.model._meta, request.user, self.admin_site, using)
all_protected.extend(protected_files)
all_protected.extend(protected_folders)
all_deletable_objects = [deletable_files, deletable_folders]
all_perms_needed = perms_needed_files.union(perms_needed_folders)
# The user has already confirmed the deletion. Do the deletion and
# return a None to display the change list view again.
if request.POST.get('post'):
if all_perms_needed:
raise PermissionDenied
n = files_queryset.count() + folders_queryset.count()
if n:
# delete all explicitly selected files
for f in files_queryset:
self.log_deletion(request, f, force_text(f))
f.delete()
# delete all files in all selected folders and their children
# This would happen automatically by ways of the delete
# cascade, but then the individual .delete() methods won't be
# called and the files won't be deleted from the filesystem.
folder_ids = set()
for folder in folders_queryset:
folder_ids.add(folder.id)
folder_ids.update(
folder.get_descendants().values_list('id', flat=True))
for f in File.objects.filter(folder__in=folder_ids):
self.log_deletion(request, f, force_text(f))
f.delete()
# delete all folders
for f in folders_queryset:
self.log_deletion(request, f, force_text(f))
f.delete()
self.message_user(request, _("Successfully deleted %(count)d files and/or folders.") % {"count": n, })
# Return None to display the change list page again.
return None
if all_perms_needed or all_protected:
title = _("Cannot delete files and/or folders")
else:
title = _("Are you sure?")
context = self.admin_site.each_context(request)
context.update({
"title": title,
"instance": current_folder,
"breadcrumbs_action": _("Delete files and/or folders"),
"deletable_objects": all_deletable_objects,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": all_perms_needed,
"protected": all_protected,
"opts": opts,
'is_popup': popup_status(request),
'filer_admin_context': AdminContext(request),
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
})
# Display the destination folder selection page
return render(
request,
"admin/filer/delete_selected_files_confirmation.html",
context
) | Action which deletes the selected files and/or folders.
This action first displays a confirmation page which shows all the
deletable files and/or folders, or, if the user has no permission on
one of the related children (foreignkeys), a "permission denied" message.
Next, it deletes all selected files and/or folders and redirects back to
the folder. | Below is the the instruction that describes the task:
### Input:
Action which deletes the selected files and/or folders.
This action first displays a confirmation page which shows all the
deletable files and/or folders, or, if the user has no permission on
one of the related children (foreignkeys), a "permission denied" message.
Next, it deletes all selected files and/or folders and redirects back to
the folder.
### Response:
def delete_files_or_folders(self, request, files_queryset, folders_queryset):
"""
Action which deletes the selected files and/or folders.
This action first displays a confirmation page which shows all the
deletable files and/or folders, or, if the user has no permission on
one of the related children (foreignkeys), a "permission denied" message.
Next, it deletes all selected files and/or folders and redirects back to
the folder.
"""
opts = self.model._meta
app_label = opts.app_label
# Check that the user has delete permission for the actual model
if not self.has_delete_permission(request):
raise PermissionDenied
current_folder = self._get_current_action_folder(
request, files_queryset, folders_queryset)
all_protected = []
# Populate deletable_objects, a data structure of all related objects
# that will also be deleted. Hopefully this also checks for necessary
# permissions.
# TODO: Check if permissions are really verified
using = router.db_for_write(self.model)
deletable_files, model_count_files, perms_needed_files, protected_files = get_deleted_objects(files_queryset, files_queryset.model._meta, request.user, self.admin_site, using)
deletable_folders, model_count_folder, perms_needed_folders, protected_folders = get_deleted_objects(folders_queryset, folders_queryset.model._meta, request.user, self.admin_site, using)
all_protected.extend(protected_files)
all_protected.extend(protected_folders)
all_deletable_objects = [deletable_files, deletable_folders]
all_perms_needed = perms_needed_files.union(perms_needed_folders)
# The user has already confirmed the deletion. Do the deletion and
# return a None to display the change list view again.
if request.POST.get('post'):
if all_perms_needed:
raise PermissionDenied
n = files_queryset.count() + folders_queryset.count()
if n:
# delete all explicitly selected files
for f in files_queryset:
self.log_deletion(request, f, force_text(f))
f.delete()
# delete all files in all selected folders and their children
# This would happen automatically by ways of the delete
# cascade, but then the individual .delete() methods won't be
# called and the files won't be deleted from the filesystem.
folder_ids = set()
for folder in folders_queryset:
folder_ids.add(folder.id)
folder_ids.update(
folder.get_descendants().values_list('id', flat=True))
for f in File.objects.filter(folder__in=folder_ids):
self.log_deletion(request, f, force_text(f))
f.delete()
# delete all folders
for f in folders_queryset:
self.log_deletion(request, f, force_text(f))
f.delete()
self.message_user(request, _("Successfully deleted %(count)d files and/or folders.") % {"count": n, })
# Return None to display the change list page again.
return None
if all_perms_needed or all_protected:
title = _("Cannot delete files and/or folders")
else:
title = _("Are you sure?")
context = self.admin_site.each_context(request)
context.update({
"title": title,
"instance": current_folder,
"breadcrumbs_action": _("Delete files and/or folders"),
"deletable_objects": all_deletable_objects,
"files_queryset": files_queryset,
"folders_queryset": folders_queryset,
"perms_lacking": all_perms_needed,
"protected": all_protected,
"opts": opts,
'is_popup': popup_status(request),
'filer_admin_context': AdminContext(request),
"root_path": reverse('admin:index'),
"app_label": app_label,
"action_checkbox_name": helpers.ACTION_CHECKBOX_NAME,
})
# Display the destination folder selection page
return render(
request,
"admin/filer/delete_selected_files_confirmation.html",
context
) |
def b64_encode(self):
"""
Generate a base64 encoded representation of this SPKI object.
:return: The base64 encoded string.
:rtype: :py:class:`bytes`
"""
encoded = _lib.NETSCAPE_SPKI_b64_encode(self._spki)
result = _ffi.string(encoded)
_lib.OPENSSL_free(encoded)
return result | Generate a base64 encoded representation of this SPKI object.
:return: The base64 encoded string.
:rtype: :py:class:`bytes` | Below is the the instruction that describes the task:
### Input:
Generate a base64 encoded representation of this SPKI object.
:return: The base64 encoded string.
:rtype: :py:class:`bytes`
### Response:
def b64_encode(self):
"""
Generate a base64 encoded representation of this SPKI object.
:return: The base64 encoded string.
:rtype: :py:class:`bytes`
"""
encoded = _lib.NETSCAPE_SPKI_b64_encode(self._spki)
result = _ffi.string(encoded)
_lib.OPENSSL_free(encoded)
return result |
def set_password(self, raw_password):
"""Calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` for the user.
If password is ``None``, calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_unusable_password`.
"""
if raw_password is None:
self.set_unusable_password()
else:
xmpp_backend.set_password(self.node, self.domain, raw_password) | Calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` for the user.
If password is ``None``, calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_unusable_password`. | Below is the the instruction that describes the task:
### Input:
Calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` for the user.
If password is ``None``, calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_unusable_password`.
### Response:
def set_password(self, raw_password):
"""Calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_password` for the user.
If password is ``None``, calls :py:func:`~xmpp_backends.base.XmppBackendBase.set_unusable_password`.
"""
if raw_password is None:
self.set_unusable_password()
else:
xmpp_backend.set_password(self.node, self.domain, raw_password) |
def enable_faulthandler(cls, signum=signal.SIGUSR1):
"""
Enable dumping thread stack traces when specified signals are received, similar to java's handling of SIGQUIT
Note: this must be called from the surviving process in case of daemonization.
Note that SIGQUIT does not work in all environments with a python process.
:param int|None signum: Signal number to register for full thread stack dump (use None to disable)
"""
with cls._lock:
if not signum:
cls._disable_faulthandler()
return
if not cls.file_handler or faulthandler is None:
return
cls.faulthandler_signum = signum
dump_file = cls.file_handler.stream
faulthandler.enable(file=dump_file, all_threads=True)
faulthandler.register(signum, file=dump_file, all_threads=True, chain=False) | Enable dumping thread stack traces when specified signals are received, similar to java's handling of SIGQUIT
Note: this must be called from the surviving process in case of daemonization.
Note that SIGQUIT does not work in all environments with a python process.
:param int|None signum: Signal number to register for full thread stack dump (use None to disable) | Below is the the instruction that describes the task:
### Input:
Enable dumping thread stack traces when specified signals are received, similar to java's handling of SIGQUIT
Note: this must be called from the surviving process in case of daemonization.
Note that SIGQUIT does not work in all environments with a python process.
:param int|None signum: Signal number to register for full thread stack dump (use None to disable)
### Response:
def enable_faulthandler(cls, signum=signal.SIGUSR1):
"""
Enable dumping thread stack traces when specified signals are received, similar to java's handling of SIGQUIT
Note: this must be called from the surviving process in case of daemonization.
Note that SIGQUIT does not work in all environments with a python process.
:param int|None signum: Signal number to register for full thread stack dump (use None to disable)
"""
with cls._lock:
if not signum:
cls._disable_faulthandler()
return
if not cls.file_handler or faulthandler is None:
return
cls.faulthandler_signum = signum
dump_file = cls.file_handler.stream
faulthandler.enable(file=dump_file, all_threads=True)
faulthandler.register(signum, file=dump_file, all_threads=True, chain=False) |
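The mechanism is the standard library's faulthandler module; a minimal standalone sketch of the same idea, after which `kill -USR1 <pid>` dumps every thread's stack without stopping the process (POSIX only).

import faulthandler
import signal
import sys

# Write tracebacks of all threads to stderr whenever SIGUSR1 arrives.
faulthandler.register(signal.SIGUSR1, file=sys.stderr, all_threads=True, chain=False)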
def def_alignment(self, year):
"""Returns the name of the defensive alignment the team ran in the
given year.
:year: Int representing the season year.
:returns: A string representing the defensive alignment.
"""
scheme_text = self._year_info_pq(year, 'Defensive Alignment').text()
m = re.search(r'Defensive Alignment[:\s]*(.+)\s*', scheme_text, re.I)
if m:
return m.group(1)
else:
return None | Returns the name of the defensive alignment the team ran in the
given year.
:year: Int representing the season year.
:returns: A string representing the defensive alignment. | Below is the the instruction that describes the task:
### Input:
Returns the name of the defensive alignment the team ran in the
given year.
:year: Int representing the season year.
:returns: A string representing the defensive alignment.
### Response:
def def_alignment(self, year):
"""Returns the name of the defensive alignment the team ran in the
given year.
:year: Int representing the season year.
:returns: A string representing the defensive alignment.
"""
scheme_text = self._year_info_pq(year, 'Defensive Alignment').text()
m = re.search(r'Defensive Alignment[:\s]*(.+)\s*', scheme_text, re.I)
if m:
return m.group(1)
else:
return None |
def _inverse_lower_triangular(M):
"""
Take inverse of lower triangular (e.g. Cholesky) matrix. This function
broadcasts over the first index.
:param M: Tensor with lower triangular structure of shape DxNxN
:return: The inverse of the Cholesky decomposition. Same shape as input.
"""
if M.get_shape().ndims != 3: # pragma: no cover
raise ValueError("Number of dimensions for input is required to be 3.")
D, N = tf.shape(M)[0], tf.shape(M)[1]
I_DNN = tf.eye(N, dtype=M.dtype)[None, :, :] * tf.ones((D, 1, 1), dtype=M.dtype)
return tf.matrix_triangular_solve(M, I_DNN) | Take inverse of lower triangular (e.g. Cholesky) matrix. This function
broadcasts over the first index.
:param M: Tensor with lower triangular structure of shape DxNxN
:return: The inverse of the Cholesky decomposition. Same shape as input. | Below is the the instruction that describes the task:
### Input:
Take inverse of lower triangular (e.g. Cholesky) matrix. This function
broadcasts over the first index.
:param M: Tensor with lower triangular structure of shape DxNxN
:return: The inverse of the Cholesky decomposition. Same shape as input.
### Response:
def _inverse_lower_triangular(M):
"""
Take inverse of lower triangular (e.g. Cholesky) matrix. This function
broadcasts over the first index.
:param M: Tensor with lower triangular structure of shape DxNxN
:return: The inverse of the Cholesky decomposition. Same shape as input.
"""
if M.get_shape().ndims != 3: # pragma: no cover
raise ValueError("Number of dimensions for input is required to be 3.")
D, N = tf.shape(M)[0], tf.shape(M)[1]
I_DNN = tf.eye(N, dtype=M.dtype)[None, :, :] * tf.ones((D, 1, 1), dtype=M.dtype)
return tf.matrix_triangular_solve(M, I_DNN) |
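A small NumPy sketch of the same identity-right-hand-side trick used above; since NumPy has no batched triangular solve, a general batched solve stands in for tf.matrix_triangular_solve:

import numpy as np

L = np.tril(np.random.rand(3, 3)) + np.eye(3)  # one lower-triangular matrix
M = np.stack([L, 2.0 * L])                     # batch of shape D x N x N
I = np.broadcast_to(np.eye(3), M.shape)        # identity broadcast over the batch
M_inv = np.linalg.solve(M, I)                  # solves M @ M_inv = I for each batch element
assert np.allclose(M_inv @ M, I)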
def extend(self, collection):
"""Merges collections. Ensure uniqueness of ids"""
l_ids = set([a.Id for a in self])
for acces in collection:
if not acces.Id in l_ids:
list.append(self,acces)
info = collection.get_info(Id=acces.Id)
if info:
self.infos[acces.Id] = info | Merges collections. Ensure uniqueness of ids | Below is the the instruction that describes the task:
### Input:
Merges collections. Ensure uniqueness of ids
### Response:
def extend(self, collection):
"""Merges collections. Ensure uniqueness of ids"""
l_ids = set([a.Id for a in self])
for acces in collection:
if not acces.Id in l_ids:
list.append(self,acces)
info = collection.get_info(Id=acces.Id)
if info:
self.infos[acces.Id] = info |
def draw_interface(objects, callback, callback_text):
"""
Draws an ncurses interface based on the given object list. Every object should have a "string" key, which is what's displayed on the screen; callback is called with the selected object.
Rest of the code is modified from:
https://stackoverflow.com/a/30834868
"""
screen = curses.initscr()
height, width = screen.getmaxyx()
curses.noecho()
curses.cbreak()
curses.start_color()
screen.keypad( 1 )
curses.init_pair(1,curses.COLOR_BLACK, curses.COLOR_CYAN)
highlightText = curses.color_pair( 1 )
normalText = curses.A_NORMAL
screen.border( 0 )
curses.curs_set( 0 )
max_row = height - 15 # max number of rows
box = curses.newwin( max_row + 2, int(width - 2), 1, 1 )
box.box()
fmt = PartialFormatter()
row_num = len( objects )
pages = int( ceil( row_num / max_row ) )
position = 1
page = 1
for i in range( 1, max_row + 1 ):
if row_num == 0:
box.addstr( 1, 1, "There aren't strings", highlightText )
else:
if (i == position):
box.addstr( i, 2, str( i ) + " - " + objects[ i - 1 ]['string'], highlightText )
else:
box.addstr( i, 2, str( i ) + " - " + objects[ i - 1 ]['string'], normalText )
if i == row_num:
break
screen.refresh()
box.refresh()
x = screen.getch()
while x != 27:
if x == curses.KEY_DOWN:
if page == 1:
if position < i:
position = position + 1
else:
if pages > 1:
page = page + 1
position = 1 + ( max_row * ( page - 1 ) )
elif page == pages:
if position < row_num:
position = position + 1
else:
if position < max_row + ( max_row * ( page - 1 ) ):
position = position + 1
else:
page = page + 1
position = 1 + ( max_row * ( page - 1 ) )
if x == curses.KEY_UP:
if page == 1:
if position > 1:
position = position - 1
else:
if position > ( 1 + ( max_row * ( page - 1 ) ) ):
position = position - 1
else:
page = page - 1
position = max_row + ( max_row * ( page - 1 ) )
screen.erase()
if x == ord( "\n" ) and row_num != 0:
screen.erase()
screen.border( 0 )
service = objects[position -1]
text = fmt.format(callback_text, **service)
screen.addstr( max_row + 4, 3, text)
text = callback(service)
count = 0
for line in text:
screen.addstr( max_row + 5 + count, 3, line)
count += 1
box.erase()
screen.border( 0 )
box.border( 0 )
for i in range( 1 + ( max_row * ( page - 1 ) ), max_row + 1 + ( max_row * ( page - 1 ) ) ):
if row_num == 0:
box.addstr( 1, 1, "There aren't strings", highlightText )
else:
if ( i + ( max_row * ( page - 1 ) ) == position + ( max_row * ( page - 1 ) ) ):
box.addstr( i - ( max_row * ( page - 1 ) ), 2, str( i ) + " - " + objects[ i - 1 ]['string'], highlightText )
else:
box.addstr( i - ( max_row * ( page - 1 ) ), 2, str( i ) + " - " + objects[ i - 1 ]['string'], normalText )
if i == row_num:
break
screen.refresh()
box.refresh()
x = screen.getch()
curses.endwin()
exit() | Draws an ncurses interface based on the given object list. Every object should have a "string" key, which is what's displayed on the screen; callback is called with the selected object.
Rest of the code is modified from:
https://stackoverflow.com/a/30834868 | Below is the the instruction that describes the task:
### Input:
Draws an ncurses interface based on the given object list. Every object should have a "string" key, which is what's displayed on the screen; callback is called with the selected object.
Rest of the code is modified from:
https://stackoverflow.com/a/30834868
### Response:
def draw_interface(objects, callback, callback_text):
"""
Draws an ncurses interface based on the given object list. Every object should have a "string" key, which is what's displayed on the screen; callback is called with the selected object.
Rest of the code is modified from:
https://stackoverflow.com/a/30834868
"""
screen = curses.initscr()
height, width = screen.getmaxyx()
curses.noecho()
curses.cbreak()
curses.start_color()
screen.keypad( 1 )
curses.init_pair(1,curses.COLOR_BLACK, curses.COLOR_CYAN)
highlightText = curses.color_pair( 1 )
normalText = curses.A_NORMAL
screen.border( 0 )
curses.curs_set( 0 )
max_row = height - 15 # max number of rows
box = curses.newwin( max_row + 2, int(width - 2), 1, 1 )
box.box()
fmt = PartialFormatter()
row_num = len( objects )
pages = int( ceil( row_num / max_row ) )
position = 1
page = 1
for i in range( 1, max_row + 1 ):
if row_num == 0:
box.addstr( 1, 1, "There aren't strings", highlightText )
else:
if (i == position):
box.addstr( i, 2, str( i ) + " - " + objects[ i - 1 ]['string'], highlightText )
else:
box.addstr( i, 2, str( i ) + " - " + objects[ i - 1 ]['string'], normalText )
if i == row_num:
break
screen.refresh()
box.refresh()
x = screen.getch()
while x != 27:
if x == curses.KEY_DOWN:
if page == 1:
if position < i:
position = position + 1
else:
if pages > 1:
page = page + 1
position = 1 + ( max_row * ( page - 1 ) )
elif page == pages:
if position < row_num:
position = position + 1
else:
if position < max_row + ( max_row * ( page - 1 ) ):
position = position + 1
else:
page = page + 1
position = 1 + ( max_row * ( page - 1 ) )
if x == curses.KEY_UP:
if page == 1:
if position > 1:
position = position - 1
else:
if position > ( 1 + ( max_row * ( page - 1 ) ) ):
position = position - 1
else:
page = page - 1
position = max_row + ( max_row * ( page - 1 ) )
screen.erase()
if x == ord( "\n" ) and row_num != 0:
screen.erase()
screen.border( 0 )
service = objects[position -1]
text = fmt.format(callback_text, **service)
screen.addstr( max_row + 4, 3, text)
text = callback(service)
count = 0
for line in text:
screen.addstr( max_row + 5 + count, 3, line)
count += 1
box.erase()
screen.border( 0 )
box.border( 0 )
for i in range( 1 + ( max_row * ( page - 1 ) ), max_row + 1 + ( max_row * ( page - 1 ) ) ):
if row_num == 0:
box.addstr( 1, 1, "There aren't strings", highlightText )
else:
if ( i + ( max_row * ( page - 1 ) ) == position + ( max_row * ( page - 1 ) ) ):
box.addstr( i - ( max_row * ( page - 1 ) ), 2, str( i ) + " - " + objects[ i - 1 ]['string'], highlightText )
else:
box.addstr( i - ( max_row * ( page - 1 ) ), 2, str( i ) + " - " + objects[ i - 1 ]['string'], normalText )
if i == row_num:
break
screen.refresh()
box.refresh()
x = screen.getch()
curses.endwin()
exit() |
def create_user(**data):
"""Creates user with encrypted password"""
if 'username' not in data or 'password' not in data:
raise ValueError('username and password are required.')
# Hash the user password
data['password'] = generate_password_hash(
data.pop('password'),
method='pbkdf2:sha256'
)
# Here you insert the `data` in your users database
# for this simple example we are recording in a json file
db_users = json.load(open('users.json'))
# add the new created user to json
db_users[data['username']] = data
# commit changes to database
json.dump(db_users, open('users.json', 'w'))
return data | Creates user with encrypted password | Below is the the instruction that describes the task:
### Input:
Creates user with encrypted password
### Response:
def create_user(**data):
"""Creates user with encrypted password"""
if 'username' not in data or 'password' not in data:
raise ValueError('username and password are required.')
# Hash the user password
data['password'] = generate_password_hash(
data.pop('password'),
method='pbkdf2:sha256'
)
# Here you insert the `data` in your users database
# for this simple example we are recording in a json file
db_users = json.load(open('users.json'))
# add the new created user to json
db_users[data['username']] = data
# commit changes to database
json.dump(db_users, open('users.json', 'w'))
return data |
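A hypothetical usage sketch for the record above; it assumes an empty users.json sits next to the script and uses werkzeug's check_password_hash to verify what was stored:

import json
from werkzeug.security import check_password_hash

json.dump({}, open('users.json', 'w'))                # start with an empty user store
user = create_user(username='alice', password='s3cret')
assert user['password'].startswith('pbkdf2:sha256')   # only the hash is persisted
assert check_password_hash(user['password'], 's3cret')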
def get_context(self, parent_context, data):
"""
Wrap the context data in a :class:`~django.template.Context` object.
:param parent_context: The context of the parent template.
:type parent_context: :class:`~django.template.Context`
:param data: The result from :func:`get_context_data`
:type data: dict
:return: Context data.
:rtype: :class:`~django.template.Context`
"""
if django.VERSION >= (1, 8):
new_context = parent_context.new(data)
else:
settings = {
'autoescape': parent_context.autoescape,
'current_app': parent_context.current_app,
'use_l10n': parent_context.use_l10n,
'use_tz': parent_context.use_tz,
}
new_context = Context(data, **settings)
# Pass CSRF token for same reasons as @register.inclusion_tag does.
csrf_token = parent_context.get('csrf_token', None)
if csrf_token is not None:
new_context['csrf_token'] = csrf_token
return new_context | Wrap the context data in a :class:`~django.template.Context` object.
:param parent_context: The context of the parent template.
:type parent_context: :class:`~django.template.Context`
:param data: The result from :func:`get_context_data`
:type data: dict
:return: Context data.
:rtype: :class:`~django.template.Context` | Below is the the instruction that describes the task:
### Input:
Wrap the context data in a :class:`~django.template.Context` object.
:param parent_context: The context of the parent template.
:type parent_context: :class:`~django.template.Context`
:param data: The result from :func:`get_context_data`
:type data: dict
:return: Context data.
:rtype: :class:`~django.template.Context`
### Response:
def get_context(self, parent_context, data):
"""
Wrap the context data in a :class:`~django.template.Context` object.
:param parent_context: The context of the parent template.
:type parent_context: :class:`~django.template.Context`
:param data: The result from :func:`get_context_data`
:type data: dict
:return: Context data.
:rtype: :class:`~django.template.Context`
"""
if django.VERSION >= (1, 8):
new_context = parent_context.new(data)
else:
settings = {
'autoescape': parent_context.autoescape,
'current_app': parent_context.current_app,
'use_l10n': parent_context.use_l10n,
'use_tz': parent_context.use_tz,
}
new_context = Context(data, **settings)
# Pass CSRF token for same reasons as @register.inclusion_tag does.
csrf_token = parent_context.get('csrf_token', None)
if csrf_token is not None:
new_context['csrf_token'] = csrf_token
return new_context |
def stop_func_accept_retry_state(stop_func):
"""Wrap "stop" function to accept "retry_state" parameter."""
if not six.callable(stop_func):
return stop_func
if func_takes_retry_state(stop_func):
return stop_func
@_utils.wraps(stop_func)
def wrapped_stop_func(retry_state):
warn_about_non_retry_state_deprecation(
'stop', stop_func, stacklevel=4)
return stop_func(
retry_state.attempt_number,
retry_state.seconds_since_start,
)
return wrapped_stop_func | Wrap "stop" function to accept "retry_state" parameter. | Below is the the instruction that describes the task:
### Input:
Wrap "stop" function to accept "retry_state" parameter.
### Response:
def stop_func_accept_retry_state(stop_func):
"""Wrap "stop" function to accept "retry_state" parameter."""
if not six.callable(stop_func):
return stop_func
if func_takes_retry_state(stop_func):
return stop_func
@_utils.wraps(stop_func)
def wrapped_stop_func(retry_state):
warn_about_non_retry_state_deprecation(
'stop', stop_func, stacklevel=4)
return stop_func(
retry_state.attempt_number,
retry_state.seconds_since_start,
)
return wrapped_stop_func |
def delete(self, url):
"""
To make a DELETE request to Falkonry API server
:param url: string
"""
response = requests.delete(
self.host + url,
headers={
'Authorization': 'Bearer ' + self.token,
'x-falkonry-source':self.sourceHeader
},
verify=False
)
if response.status_code == 204:
return None
elif response.status_code == 401:
raise Exception(json.dumps({'message':'Unauthorized Access'}))
else:
raise Exception(response.content) | To make a DELETE request to Falkonry API server
:param url: string | Below is the the instruction that describes the task:
### Input:
To make a DELETE request to Falkonry API server
:param url: string
### Response:
def delete(self, url):
"""
To make a DELETE request to Falkonry API server
:param url: string
"""
response = requests.delete(
self.host + url,
headers={
'Authorization': 'Bearer ' + self.token,
'x-falkonry-source':self.sourceHeader
},
verify=False
)
if response.status_code == 204:
return None
elif response.status_code == 401:
raise Exception(json.dumps({'message':'Unauthorized Access'}))
else:
raise Exception(response.content) |
def _nsplit(self):
"""
Split `self` at the north pole, which is the same as in signed arithmetic.
:return: A list of split StridedIntervals
"""
north_pole_left = self.max_int(self.bits - 1) # 01111...1
north_pole_right = 2 ** (self.bits - 1) # 1000...0
# Is `self` straddling the north pole?
straddling = False
if self.upper_bound >= north_pole_right:
if self.lower_bound > self.upper_bound:
# Yes it does!
straddling = True
elif self.lower_bound <= north_pole_left:
straddling = True
else:
if self.lower_bound > self.upper_bound and self.lower_bound <= north_pole_left:
straddling = True
if straddling:
a_upper_bound = north_pole_left - ((north_pole_left - self.lower_bound) % self.stride)
a = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=self.lower_bound,
upper_bound=a_upper_bound, uninitialized=self.uninitialized)
b_lower_bound = a_upper_bound + self.stride
b = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=b_lower_bound,
upper_bound=self.upper_bound, uninitialized=self.uninitialized)
return [ a, b ]
else:
return [ self.copy() ] | Split `self` at the north pole, which is the same as in signed arithmetic.
:return: A list of split StridedIntervals | Below is the the instruction that describes the task:
### Input:
Split `self` at the north pole, which is the same as in signed arithmetic.
:return: A list of split StridedIntervals
### Response:
def _nsplit(self):
"""
Split `self` at the north pole, which is the same as in signed arithmetic.
:return: A list of split StridedIntervals
"""
north_pole_left = self.max_int(self.bits - 1) # 01111...1
north_pole_right = 2 ** (self.bits - 1) # 1000...0
# Is `self` straddling the north pole?
straddling = False
if self.upper_bound >= north_pole_right:
if self.lower_bound > self.upper_bound:
# Yes it does!
straddling = True
elif self.lower_bound <= north_pole_left:
straddling = True
else:
if self.lower_bound > self.upper_bound and self.lower_bound <= north_pole_left:
straddling = True
if straddling:
a_upper_bound = north_pole_left - ((north_pole_left - self.lower_bound) % self.stride)
a = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=self.lower_bound,
upper_bound=a_upper_bound, uninitialized=self.uninitialized)
b_lower_bound = a_upper_bound + self.stride
b = StridedInterval(bits=self.bits, stride=self.stride, lower_bound=b_lower_bound,
upper_bound=self.upper_bound, uninitialized=self.uninitialized)
return [ a, b ]
else:
return [ self.copy() ] |
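A standalone sketch of the north-pole constants and the split the method above would produce for an 8-bit interval (plain integers only, not the StridedInterval class):

bits = 8
north_pole_left = (1 << (bits - 1)) - 1   # 0b01111111 == 127
north_pole_right = 1 << (bits - 1)        # 0b10000000 == 128

# The interval [120, 130] with stride 1 straddles the pole, so _nsplit
# would return the two halves [120, 127] and [128, 130].
lower, upper, stride = 120, 130, 1
a_upper = north_pole_left - ((north_pole_left - lower) % stride)
print((lower, a_upper), (a_upper + stride, upper))    # (120, 127) (128, 130)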
def show_progress(self, n, total_runs):
"""Displays a progressbar"""
if self.report_progress:
percentage, logger_name, log_level = self.report_progress
if logger_name == 'print':
logger = 'print'
else:
logger = logging.getLogger(logger_name)
if n == -1:
# Compute the number of digits and avoid log10(0)
digits = int(math.log10(total_runs + 0.1)) + 1
self._format_string = 'PROGRESS: Finished %' + '%d' % digits + 'd/%d runs '
fmt_string = self._format_string % (n + 1, total_runs) + '%s'
reprint = log_level == 0
progressbar(n, total_runs, percentage_step=percentage,
logger=logger, log_level=log_level,
fmt_string=fmt_string, reprint=reprint) | Displays a progressbar | Below is the the instruction that describes the task:
### Input:
Displays a progressbar
### Response:
def show_progress(self, n, total_runs):
"""Displays a progressbar"""
if self.report_progress:
percentage, logger_name, log_level = self.report_progress
if logger_name == 'print':
logger = 'print'
else:
logger = logging.getLogger(logger_name)
if n == -1:
# Compute the number of digits and avoid log10(0)
digits = int(math.log10(total_runs + 0.1)) + 1
self._format_string = 'PROGRESS: Finished %' + '%d' % digits + 'd/%d runs '
fmt_string = self._format_string % (n + 1, total_runs) + '%s'
reprint = log_level == 0
progressbar(n, total_runs, percentage_step=percentage,
logger=logger, log_level=log_level,
fmt_string=fmt_string, reprint=reprint) |
def getAnalysisRequestsBrains(self, **kwargs):
"""Return all the Analysis Requests brains linked to the Batch
kwargs are passed directly to the catalog.
"""
kwargs['getBatchUID'] = self.UID()
catalog = getToolByName(self, CATALOG_ANALYSIS_REQUEST_LISTING)
brains = catalog(kwargs)
return brains | Return all the Analysis Requests brains linked to the Batch
kwargs are passed directly to the catalog. | Below is the the instruction that describes the task:
### Input:
Return all the Analysis Requests brains linked to the Batch
kwargs are passed directly to the catalog.
### Response:
def getAnalysisRequestsBrains(self, **kwargs):
"""Return all the Analysis Requests brains linked to the Batch
kwargs are passed directly to the catalog.
"""
kwargs['getBatchUID'] = self.UID()
catalog = getToolByName(self, CATALOG_ANALYSIS_REQUEST_LISTING)
brains = catalog(kwargs)
return brains |
def format_numeric_result(analysis, result, decimalmark='.', sciformat=1):
"""
Returns the formatted number part of a results value. This is
responsible for deciding the precision, and notation of numeric
values in accordance to the uncertainty. If a non-numeric
result value is given, the value will be returned unchanged.
The following rules apply:
If the "Calculate precision from uncertainties" is enabled in
the Analysis service, and
a) If the non-decimal number of digits of the result is above
the service's ExponentialFormatPrecision, the result will
be formatted in scientific notation.
Example:
Given an Analysis with an uncertainty of 37 for a range of
results between 30000 and 40000, with an
ExponentialFormatPrecision equal to 4 and a result of 32092,
this method will return 3.2092E+04
b) If the number of digits of the integer part of the result is
below the ExponentialFormatPrecision, the result will be
formatted as decimal notation and the result will be rounded
in accordance to the precision (calculated from the uncertainty)
Example:
Given an Analysis with an uncertainty of 0.22 for a range of
results between 1 and 10 with an ExponentialFormatPrecision
equal to 4 and a result of 5.234, this method will return 5.2
If the "Calculate precision from Uncertainties" is disabled in the
analysis service, the same rules described above applies, but the
precision used for rounding the result is not calculated from
the uncertainty. The fixed length precision is used instead.
For further details, visit
https://jira.bikalabs.com/browse/LIMS-1334
The default decimal mark '.' will be replaced by the decimalmark
specified.
:param analysis: the analysis from which the uncertainty, precision
and other additional info have to be retrieved
:param result: result to be formatted.
:param decimalmark: decimal mark to use. By default '.'
:param sciformat: 1. The sci notation has to be formatted as aE^+b
2. The sci notation has to be formatted as ax10^b
3. As 2, but with super html entity for exp
4. The sci notation has to be formatted as a·10^b
5. As 4, but with super html entity for exp
By default 1
:result: should be a string to preserve the decimal precision.
:returns: the formatted result as string
"""
try:
result = float(result)
except ValueError:
return result
# continuing with 'nan' result will cause formatting to fail.
if math.isnan(result):
return result
# Scientific notation?
# Get the default precision for scientific notation
threshold = analysis.getExponentialFormatPrecision()
precision = analysis.getPrecision(result)
formatted = _format_decimal_or_sci(result, precision, threshold, sciformat)
return formatDecimalMark(formatted, decimalmark) | Returns the formatted number part of a results value. This is
responsible for deciding the precision, and notation of numeric
values in accordance to the uncertainty. If a non-numeric
result value is given, the value will be returned unchanged.
The following rules apply:
If the "Calculate precision from uncertainties" is enabled in
the Analysis service, and
a) If the non-decimal number of digits of the result is above
the service's ExponentialFormatPrecision, the result will
be formatted in scientific notation.
Example:
Given an Analysis with an uncertainty of 37 for a range of
results between 30000 and 40000, with an
ExponentialFormatPrecision equal to 4 and a result of 32092,
this method will return 3.2092E+04
b) If the number of digits of the integer part of the result is
below the ExponentialFormatPrecision, the result will be
formatted as decimal notation and the result will be rounded
in accordance to the precision (calculated from the uncertainty)
Example:
Given an Analysis with an uncertainty of 0.22 for a range of
results between 1 and 10 with an ExponentialFormatPrecision
equal to 4 and a result of 5.234, this method will return 5.2
If the "Calculate precision from Uncertainties" is disabled in the
analysis service, the same rules described above apply, but the
precision used for rounding the result is not calculated from
the uncertainty. The fixed length precision is used instead.
For further details, visit
https://jira.bikalabs.com/browse/LIMS-1334
The default decimal mark '.' will be replaced by the decimalmark
specified.
:param analysis: the analysis from which the uncertainty, precision
and other additional info have to be retrieved
:param result: result to be formatted.
:param decimalmark: decimal mark to use. By default '.'
:param sciformat: 1. The sci notation has to be formatted as aE^+b
2. The sci notation has to be formatted as ax10^b
3. As 2, but with super html entity for exp
4. The sci notation has to be formatted as a·10^b
5. As 4, but with super html entity for exp
By default 1
:result: should be a string to preserve the decimal precision.
:returns: the formatted result as string | Below is the the instruction that describes the task:
### Input:
Returns the formatted number part of a results value. This is
responsible for deciding the precision, and notation of numeric
values in accordance to the uncertainty. If a non-numeric
result value is given, the value will be returned unchanged.
The following rules apply:
If the "Calculate precision from uncertainties" is enabled in
the Analysis service, and
a) If the non-decimal number of digits of the result is above
the service's ExponentialFormatPrecision, the result will
be formatted in scientific notation.
Example:
Given an Analysis with an uncertainty of 37 for a range of
results between 30000 and 40000, with an
ExponentialFormatPrecision equal to 4 and a result of 32092,
this method will return 3.2092E+04
b) If the number of digits of the integer part of the result is
below the ExponentialFormatPrecision, the result will be
formatted as decimal notation and the result will be rounded
in accordance to the precision (calculated from the uncertainty)
Example:
Given an Analysis with an uncertainty of 0.22 for a range of
results between 1 and 10 with an ExponentialFormatPrecision
equal to 4 and a result of 5.234, this method will return 5.2
If the "Calculate precision from Uncertainties" is disabled in the
analysis service, the same rules described above apply, but the
precision used for rounding the result is not calculated from
the uncertainty. The fixed length precision is used instead.
For further details, visit
https://jira.bikalabs.com/browse/LIMS-1334
The default decimal mark '.' will be replaced by the decimalmark
specified.
:param analysis: the analysis from which the uncertainty, precision
and other additional info have to be retrieved
:param result: result to be formatted.
:param decimalmark: decimal mark to use. By default '.'
:param sciformat: 1. The sci notation has to be formatted as aE^+b
2. The sci notation has to be formatted as ax10^b
3. As 2, but with super html entity for exp
4. The sci notation has to be formatted as a·10^b
5. As 4, but with super html entity for exp
By default 1
:result: should be a string to preserve the decimal precision.
:returns: the formatted result as string
### Response:
def format_numeric_result(analysis, result, decimalmark='.', sciformat=1):
"""
Returns the formatted number part of a results value. This is
responsible for deciding the precision, and notation of numeric
values in accordance to the uncertainty. If a non-numeric
result value is given, the value will be returned unchanged.
The following rules apply:
If the "Calculate precision from uncertainties" is enabled in
the Analysis service, and
a) If the non-decimal number of digits of the result is above
the service's ExponentialFormatPrecision, the result will
be formatted in scientific notation.
Example:
Given an Analysis with an uncertainty of 37 for a range of
results between 30000 and 40000, with an
ExponentialFormatPrecision equal to 4 and a result of 32092,
this method will return 3.2092E+04
b) If the number of digits of the integer part of the result is
below the ExponentialFormatPrecision, the result will be
formatted as decimal notation and the result will be rounded
in accordance to the precision (calculated from the uncertainty)
Example:
Given an Analysis with an uncertainty of 0.22 for a range of
results between 1 and 10 with an ExponentialFormatPrecision
equal to 4 and a result of 5.234, this method will return 5.2
If the "Calculate precision from Uncertainties" is disabled in the
analysis service, the same rules described above apply, but the
precision used for rounding the result is not calculated from
the uncertainty. The fixed length precision is used instead.
For further details, visit
https://jira.bikalabs.com/browse/LIMS-1334
The default decimal mark '.' will be replaced by the decimalmark
specified.
:param analysis: the analysis from which the uncertainty, precision
and other additional info have to be retrieved
:param result: result to be formatted.
:param decimalmark: decimal mark to use. By default '.'
:param sciformat: 1. The sci notation has to be formatted as aE^+b
2. The sci notation has to be formatted as ax10^b
3. As 2, but with super html entity for exp
4. The sci notation has to be formatted as a·10^b
5. As 4, but with super html entity for exp
By default 1
:result: should be a string to preserve the decimal precision.
:returns: the formatted result as string
"""
try:
result = float(result)
except ValueError:
return result
# continuing with 'nan' result will cause formatting to fail.
if math.isnan(result):
return result
# Scientific notation?
# Get the default precision for scientific notation
threshold = analysis.getExponentialFormatPrecision()
precision = analysis.getPrecision(result)
formatted = _format_decimal_or_sci(result, precision, threshold, sciformat)
return formatDecimalMark(formatted, decimalmark) |
def sync(self, hooks=True, async_hooks=True):
"""Synchronize user repositories.
:param bool hooks: True for syncing hooks.
:param bool async_hooks: True for sending of an asynchronous task to
sync hooks.
.. note::
Syncing happens from GitHub's direction only. This means that we
consider the information on GitHub as valid, and we overwrite our
own state based on this information.
"""
active_repos = {}
github_repos = {repo.id: repo for repo in self.api.repositories()
if repo.permissions['admin']}
for gh_repo_id, gh_repo in github_repos.items():
active_repos[gh_repo_id] = {
'id': gh_repo_id,
'full_name': gh_repo.full_name,
'description': gh_repo.description,
}
if hooks:
self._sync_hooks(list(active_repos.keys()),
asynchronous=async_hooks)
# Update changed names for repositories stored in DB
db_repos = Repository.query.filter(
Repository.user_id == self.user_id,
Repository.github_id.in_(github_repos.keys())
)
for repo in db_repos:
gh_repo = github_repos.get(repo.github_id)
if gh_repo and repo.name != gh_repo.full_name:
repo.name = gh_repo.full_name
db.session.add(repo)
# Remove ownership from repositories for which the user no longer has
# 'admin' permissions, or which have been deleted.
Repository.query.filter(
Repository.user_id == self.user_id,
~Repository.github_id.in_(github_repos.keys())
).update(dict(user_id=None, hook=None), synchronize_session=False)
# Update repos and last sync
self.account.extra_data.update(dict(
repos=active_repos,
last_sync=iso_utcnow(),
))
self.account.extra_data.changed()
db.session.add(self.account) | Synchronize user repositories.
:param bool hooks: True for syncing hooks.
:param bool async_hooks: True for sending of an asynchronous task to
sync hooks.
.. note::
Syncing happens from GitHub's direction only. This means that we
consider the information on GitHub as valid, and we overwrite our
own state based on this information. | Below is the the instruction that describes the task:
### Input:
Synchronize user repositories.
:param bool hooks: True for syncing hooks.
:param bool async_hooks: True for sending of an asynchronous task to
sync hooks.
.. note::
Syncing happens from GitHub's direction only. This means that we
consider the information on GitHub as valid, and we overwrite our
own state based on this information.
### Response:
def sync(self, hooks=True, async_hooks=True):
"""Synchronize user repositories.
:param bool hooks: True for syncing hooks.
:param bool async_hooks: True for sending of an asynchronous task to
sync hooks.
.. note::
Syncing happens from GitHub's direction only. This means that we
consider the information on GitHub as valid, and we overwrite our
own state based on this information.
"""
active_repos = {}
github_repos = {repo.id: repo for repo in self.api.repositories()
if repo.permissions['admin']}
for gh_repo_id, gh_repo in github_repos.items():
active_repos[gh_repo_id] = {
'id': gh_repo_id,
'full_name': gh_repo.full_name,
'description': gh_repo.description,
}
if hooks:
self._sync_hooks(list(active_repos.keys()),
asynchronous=async_hooks)
# Update changed names for repositories stored in DB
db_repos = Repository.query.filter(
Repository.user_id == self.user_id,
Repository.github_id.in_(github_repos.keys())
)
for repo in db_repos:
gh_repo = github_repos.get(repo.github_id)
if gh_repo and repo.name != gh_repo.full_name:
repo.name = gh_repo.full_name
db.session.add(repo)
# Remove ownership from repositories for which the user no longer has
# 'admin' permissions, or which have been deleted.
Repository.query.filter(
Repository.user_id == self.user_id,
~Repository.github_id.in_(github_repos.keys())
).update(dict(user_id=None, hook=None), synchronize_session=False)
# Update repos and last sync
self.account.extra_data.update(dict(
repos=active_repos,
last_sync=iso_utcnow(),
))
self.account.extra_data.changed()
db.session.add(self.account) |
def transfer(ctx, _to='address', _value='uint256', returns=STATUS):
""" Standardized Contract API:
function transfer(address _to, uint256 _value) returns (bool _success)
"""
log.DEV('In Fungible.transfer')
if ctx.accounts[ctx.msg_sender] >= _value:
ctx.accounts[ctx.msg_sender] -= _value
ctx.accounts[_to] += _value
ctx.Transfer(ctx.msg_sender, _to, _value)
return OK
else:
return INSUFFICIENTFUNDS | Standardized Contract API:
function transfer(address _to, uint256 _value) returns (bool _success) | Below is the the instruction that describes the task:
### Input:
Standardized Contract API:
function transfer(address _to, uint256 _value) returns (bool _success)
### Response:
def transfer(ctx, _to='address', _value='uint256', returns=STATUS):
""" Standardized Contract API:
function transfer(address _to, uint256 _value) returns (bool _success)
"""
log.DEV('In Fungible.transfer')
if ctx.accounts[ctx.msg_sender] >= _value:
ctx.accounts[ctx.msg_sender] -= _value
ctx.accounts[_to] += _value
ctx.Transfer(ctx.msg_sender, _to, _value)
return OK
else:
return INSUFFICIENTFUNDS |
def has_unchecked_field(self, locator, **kwargs):
"""
Checks if the page or current node has a radio button or checkbox with the given label,
value, or id, that is currently unchecked.
Args:
locator (str): The label, name, or id of an unchecked field.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
bool: Whether it exists.
"""
kwargs["checked"] = False
return self.has_selector("field", locator, **kwargs) | Checks if the page or current node has a radio button or checkbox with the given label,
value, or id, that is currently unchecked.
Args:
locator (str): The label, name, or id of an unchecked field.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
bool: Whether it exists. | Below is the the instruction that describes the task:
### Input:
Checks if the page or current node has a radio button or checkbox with the given label,
value, or id, that is currently unchecked.
Args:
locator (str): The label, name, or id of an unchecked field.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
bool: Whether it exists.
### Response:
def has_unchecked_field(self, locator, **kwargs):
"""
Checks if the page or current node has a radio button or checkbox with the given label,
value, or id, that is currently unchecked.
Args:
locator (str): The label, name, or id of an unchecked field.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
bool: Whether it exists.
"""
kwargs["checked"] = False
return self.has_selector("field", locator, **kwargs) |
def variance_inflation_factors(df):
'''
Computes the variance inflation factor (VIF) for each column in the df.
Returns a pandas Series of VIFs
Args:
df: pandas DataFrame with columns to run diagnostics on
'''
corr = np.corrcoef(df, rowvar=0)
corr_inv = np.linalg.inv(corr)
vifs = np.diagonal(corr_inv)
return pd.Series(vifs, df.columns, name='VIF') | Computes the variance inflation factor (VIF) for each column in the df.
Returns a pandas Series of VIFs
Args:
df: pandas DataFrame with columns to run diagnostics on | Below is the the instruction that describes the task:
### Input:
Computes the variance inflation factor (VIF) for each column in the df.
Returns a pandas Series of VIFs
Args:
df: pandas DataFrame with columns to run diagnostics on
### Response:
def variance_inflation_factors(df):
'''
Computes the variance inflation factor (VIF) for each column in the df.
Returns a pandas Series of VIFs
Args:
df: pandas DataFrame with columns to run diagnostics on
'''
corr = np.corrcoef(df, rowvar=0)
corr_inv = np.linalg.inv(corr)
vifs = np.diagonal(corr_inv)
return pd.Series(vifs, df.columns, name='VIF') |
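A hypothetical usage sketch: two nearly collinear columns should get large VIFs, while an independent column stays close to 1:

import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
x = rng.normal(size=200)
df = pd.DataFrame({
    'x': x,
    'x_plus_noise': x + 0.1 * rng.normal(size=200),  # nearly collinear with 'x'
    'independent': rng.normal(size=200),
})
print(variance_inflation_factors(df))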
def _ParseEntryArrayObject(self, file_object, file_offset):
"""Parses an entry array object.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the entry array object relative to the start
of the file-like object.
Returns:
systemd_journal_entry_array_object: entry array object.
Raises:
ParseError: if the entry array object cannot be parsed.
"""
entry_array_object_map = self._GetDataTypeMap(
'systemd_journal_entry_array_object')
try:
entry_array_object, _ = self._ReadStructureFromFileObject(
file_object, file_offset, entry_array_object_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse entry array object at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
if entry_array_object.object_type != self._OBJECT_TYPE_ENTRY_ARRAY:
raise errors.ParseError('Unsupported object type: {0:d}.'.format(
entry_array_object.object_type))
if entry_array_object.object_flags != 0:
raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(
entry_array_object.object_flags))
return entry_array_object | Parses an entry array object.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the entry array object relative to the start
of the file-like object.
Returns:
systemd_journal_entry_array_object: entry array object.
Raises:
ParseError: if the entry array object cannot be parsed. | Below is the the instruction that describes the task:
### Input:
Parses an entry array object.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the entry array object relative to the start
of the file-like object.
Returns:
systemd_journal_entry_array_object: entry array object.
Raises:
ParseError: if the entry array object cannot be parsed.
### Response:
def _ParseEntryArrayObject(self, file_object, file_offset):
"""Parses an entry array object.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the entry array object relative to the start
of the file-like object.
Returns:
systemd_journal_entry_array_object: entry array object.
Raises:
ParseError: if the entry array object cannot be parsed.
"""
entry_array_object_map = self._GetDataTypeMap(
'systemd_journal_entry_array_object')
try:
entry_array_object, _ = self._ReadStructureFromFileObject(
file_object, file_offset, entry_array_object_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError((
'Unable to parse entry array object at offset: 0x{0:08x} with error: '
'{1!s}').format(file_offset, exception))
if entry_array_object.object_type != self._OBJECT_TYPE_ENTRY_ARRAY:
raise errors.ParseError('Unsupported object type: {0:d}.'.format(
entry_array_object.object_type))
if entry_array_object.object_flags != 0:
raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(
entry_array_object.object_flags))
return entry_array_object |
def weighted_n(self):
"""float count of returned rows adjusted for weighting."""
if not self.is_weighted:
return float(self.unweighted_n)
return float(sum(self._cube_dict["result"]["measures"]["count"]["data"])) | float count of returned rows adjusted for weighting. | Below is the the instruction that describes the task:
### Input:
float count of returned rows adjusted for weighting.
### Response:
def weighted_n(self):
"""float count of returned rows adjusted for weighting."""
if not self.is_weighted:
return float(self.unweighted_n)
return float(sum(self._cube_dict["result"]["measures"]["count"]["data"])) |
def get_execution_info(self, driver_id, function_descriptor):
"""Get the FunctionExecutionInfo of a remote function.
Args:
driver_id: ID of the driver that the function belongs to.
function_descriptor: The FunctionDescriptor of the function to get.
Returns:
A FunctionExecutionInfo object.
"""
if self._worker.load_code_from_local:
# Load function from local code.
# Currently, we don't support isolating code by drivers,
# thus always set driver ID to NIL here.
driver_id = ray.DriverID.nil()
if not function_descriptor.is_actor_method():
self._load_function_from_local(driver_id, function_descriptor)
else:
# Load function from GCS.
# Wait until the function to be executed has actually been
# registered on this worker. We will push warnings to the user if
# we spend too long in this loop.
# The driver function may not be found in sys.path. Try to load
# the function from GCS.
with profiling.profile("wait_for_function"):
self._wait_for_function(function_descriptor, driver_id)
try:
function_id = function_descriptor.function_id
info = self._function_execution_info[driver_id][function_id]
except KeyError as e:
message = ("Error occurs in get_execution_info: "
"driver_id: %s, function_descriptor: %s. Message: %s" %
(driver_id, function_descriptor, e))
raise KeyError(message)
return info | Get the FunctionExecutionInfo of a remote function.
Args:
driver_id: ID of the driver that the function belongs to.
function_descriptor: The FunctionDescriptor of the function to get.
Returns:
A FunctionExecutionInfo object. | Below is the the instruction that describes the task:
### Input:
Get the FunctionExecutionInfo of a remote function.
Args:
driver_id: ID of the driver that the function belongs to.
function_descriptor: The FunctionDescriptor of the function to get.
Returns:
A FunctionExecutionInfo object.
### Response:
def get_execution_info(self, driver_id, function_descriptor):
"""Get the FunctionExecutionInfo of a remote function.
Args:
driver_id: ID of the driver that the function belongs to.
function_descriptor: The FunctionDescriptor of the function to get.
Returns:
A FunctionExecutionInfo object.
"""
if self._worker.load_code_from_local:
# Load function from local code.
# Currently, we don't support isolating code by drivers,
# thus always set driver ID to NIL here.
driver_id = ray.DriverID.nil()
if not function_descriptor.is_actor_method():
self._load_function_from_local(driver_id, function_descriptor)
else:
# Load function from GCS.
# Wait until the function to be executed has actually been
# registered on this worker. We will push warnings to the user if
# we spend too long in this loop.
# The driver function may not be found in sys.path. Try to load
# the function from GCS.
with profiling.profile("wait_for_function"):
self._wait_for_function(function_descriptor, driver_id)
try:
function_id = function_descriptor.function_id
info = self._function_execution_info[driver_id][function_id]
except KeyError as e:
message = ("Error occurs in get_execution_info: "
"driver_id: %s, function_descriptor: %s. Message: %s" %
(driver_id, function_descriptor, e))
raise KeyError(message)
return info |
def start_replication(mysql_settings,
binlog_pos_memory=(None, 2),
**kwargs):
""" Start replication on server specified by *mysql_settings*
Args:
mysql_settings (dict): mysql settings that is used to connect to
mysql via pymysql
binlog_pos_memory (_bpm.BaseBinlogPosMemory):
Binlog Position Memory, it should be an instance of subclass of
:py:class:`_bpm.BaseBinlogPosMemory`.
If a tuple (str, float) is passed, it will be used to initialize parameters
for the default :py:class:`_bpm.FileBasedBinlogPosMemory`. If the
filename is None, it will be *`cwd`\mysqlbinlog2blinker.binlog.pos*
**kwargs: any arguments that are accepted by
:py:class:`pymysqlreplication.BinLogStreamReader`'s constructor
"""
if not isinstance(binlog_pos_memory, _bpm.BaseBinlogPosMemory):
if not isinstance(binlog_pos_memory, (tuple, list)):
raise ValueError('Invalid binlog position memory: %s'
% binlog_pos_memory)
binlog_pos_memory = _bpm.FileBasedBinlogPosMemory(*binlog_pos_memory)
mysql_settings.setdefault('connect_timeout', 5)
kwargs.setdefault('blocking', True)
kwargs.setdefault('resume_stream', True)
with binlog_pos_memory:
kwargs.setdefault('log_file', binlog_pos_memory.log_file)
kwargs.setdefault('log_pos', binlog_pos_memory.log_pos)
_logger.info('Start replication from %s with:\n%s'
% (mysql_settings, kwargs))
start_publishing(mysql_settings, **kwargs) | Start replication on server specified by *mysql_settings*
Args:
mysql_settings (dict): mysql settings that is used to connect to
mysql via pymysql
binlog_pos_memory (_bpm.BaseBinlogPosMemory):
Binlog Position Memory, it should be an instance of subclass of
:py:class:`_bpm.BaseBinlogPosMemory`.
If a tuple (str, float) is passed, it will be used to initialize parameters
for the default :py:class:`_bpm.FileBasedBinlogPosMemory`. If the
filename is None, it will be *`cwd`\mysqlbinlog2blinker.binlog.pos*
**kwargs: any arguments that are accepted by
:py:class:`pymysqlreplication.BinLogStreamReader`'s constructor | Below is the the instruction that describes the task:
### Input:
Start replication on server specified by *mysql_settings*
Args:
mysql_settings (dict): mysql settings that is used to connect to
mysql via pymysql
binlog_pos_memory (_bpm.BaseBinlogPosMemory):
Binlog Position Memory, it should be an instance of subclass of
:py:class:`_bpm.BaseBinlogPosMemory`.
If a tuple (str, float) is passed, it will be used to initialize parameters
for the default :py:class:`_bpm.FileBasedBinlogPosMemory`. If the
filename is None, it will be *`cwd`\mysqlbinlog2blinker.binlog.pos*
**kwargs: any arguments that are accepted by
:py:class:`pymysqlreplication.BinLogStreamReader`'s constructor
### Response:
def start_replication(mysql_settings,
binlog_pos_memory=(None, 2),
**kwargs):
""" Start replication on server specified by *mysql_settings*
Args:
mysql_settings (dict): mysql settings that is used to connect to
mysql via pymysql
binlog_pos_memory (_bpm.BaseBinlogPosMemory):
Binlog Position Memory, it should be an instance of subclass of
:py:class:`_bpm.BaseBinlogPosMemory`.
If a tuple (str, float) is passed, it will be used to initialize parameters
for the default :py:class:`_bpm.FileBasedBinlogPosMemory`. If the
filename is None, it will be *`cwd`\mysqlbinlog2blinker.binlog.pos*
**kwargs: any arguments that are accepted by
:py:class:`pymysqlreplication.BinLogStreamReader`'s constructor
"""
if not isinstance(binlog_pos_memory, _bpm.BaseBinlogPosMemory):
if not isinstance(binlog_pos_memory, (tuple, list)):
raise ValueError('Invalid binlog position memory: %s'
% binlog_pos_memory)
binlog_pos_memory = _bpm.FileBasedBinlogPosMemory(*binlog_pos_memory)
mysql_settings.setdefault('connect_timeout', 5)
kwargs.setdefault('blocking', True)
kwargs.setdefault('resume_stream', True)
with binlog_pos_memory:
kwargs.setdefault('log_file', binlog_pos_memory.log_file)
kwargs.setdefault('log_pos', binlog_pos_memory.log_pos)
_logger.info('Start replication from %s with:\n%s'
% (mysql_settings, kwargs))
start_publishing(mysql_settings, **kwargs) |
def find_file_in_zip(zip_file):
'''Returns the twb/tds file from a Tableau packaged file format. Packaged
files can contain cache entries which are also valid XML, so only look for
files with a .tds or .twb extension.
'''
candidate_files = filter(lambda x: x.split('.')[-1] in ('twb', 'tds'),
zip_file.namelist())
for filename in candidate_files:
with zip_file.open(filename) as xml_candidate:
try:
ET.parse(xml_candidate)
return filename
except ET.ParseError:
# That's not an XML file by gosh
pass | Returns the twb/tds file from a Tableau packaged file format. Packaged
files can contain cache entries which are also valid XML, so only look for
files with a .tds or .twb extension. | Below is the the instruction that describes the task:
### Input:
Returns the twb/tds file from a Tableau packaged file format. Packaged
files can contain cache entries which are also valid XML, so only look for
files with a .tds or .twb extension.
### Response:
def find_file_in_zip(zip_file):
'''Returns the twb/tds file from a Tableau packaged file format. Packaged
files can contain cache entries which are also valid XML, so only look for
files with a .tds or .twb extension.
'''
candidate_files = filter(lambda x: x.split('.')[-1] in ('twb', 'tds'),
zip_file.namelist())
for filename in candidate_files:
with zip_file.open(filename) as xml_candidate:
try:
ET.parse(xml_candidate)
return filename
except ET.ParseError:
# That's not an XML file by gosh
pass |
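A hypothetical usage sketch (the .twbx path is an assumption; the function returns None if no candidate parses as XML):

import zipfile

with zipfile.ZipFile('workbook.twbx') as packaged:
    inner = find_file_in_zip(packaged)  # e.g. 'workbook.twb'
    print(inner)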
def _configure_interrupt(self, function_name, timeout, container, is_debugging):
"""
When a Lambda function is executing, we setup certain interrupt handlers to stop the execution.
Usually, we setup a function timeout interrupt to kill the container after timeout expires. If debugging though,
we don't enforce a timeout. But we setup a SIGINT interrupt to catch Ctrl+C and terminate the container.
:param string function_name: Name of the function we are running
:param integer timeout: Timeout in seconds
:param samcli.local.docker.container.Container container: Instance of a container to terminate
:param bool is_debugging: Are we debugging?
:return threading.Timer: Timer object, if we setup a timer. None otherwise
"""
def timer_handler():
# NOTE: This handler runs in a separate thread. So don't try to mutate any non-thread-safe data structures
LOG.info("Function '%s' timed out after %d seconds", function_name, timeout)
self._container_manager.stop(container)
def signal_handler(sig, frame):
# NOTE: This handler runs in a separate thread. So don't try to mutate any non-thread-safe data structures
LOG.info("Execution of function %s was interrupted", function_name)
self._container_manager.stop(container)
if is_debugging:
LOG.debug("Setting up SIGTERM interrupt handler")
signal.signal(signal.SIGTERM, signal_handler)
else:
# Start a timer, we'll use this to abort the function if it runs beyond the specified timeout
LOG.debug("Starting a timer for %s seconds for function '%s'", timeout, function_name)
timer = threading.Timer(timeout, timer_handler, ())
timer.start()
return timer | When a Lambda function is executing, we setup certain interrupt handlers to stop the execution.
Usually, we setup a function timeout interrupt to kill the container after timeout expires. If debugging though,
we don't enforce a timeout. But we setup a SIGINT interrupt to catch Ctrl+C and terminate the container.
:param string function_name: Name of the function we are running
:param integer timeout: Timeout in seconds
:param samcli.local.docker.container.Container container: Instance of a container to terminate
:param bool is_debugging: Are we debugging?
:return threading.Timer: Timer object, if we setup a timer. None otherwise | Below is the the instruction that describes the task:
### Input:
When a Lambda function is executing, we setup certain interrupt handlers to stop the execution.
Usually, we setup a function timeout interrupt to kill the container after timeout expires. If debugging though,
we don't enforce a timeout. But we setup a SIGINT interrupt to catch Ctrl+C and terminate the container.
:param string function_name: Name of the function we are running
:param integer timeout: Timeout in seconds
:param samcli.local.docker.container.Container container: Instance of a container to terminate
:param bool is_debugging: Are we debugging?
:return threading.Timer: Timer object, if we setup a timer. None otherwise
### Response:
def _configure_interrupt(self, function_name, timeout, container, is_debugging):
"""
When a Lambda function is executing, we setup certain interrupt handlers to stop the execution.
Usually, we setup a function timeout interrupt to kill the container after timeout expires. If debugging though,
we don't enforce a timeout. But we setup a SIGINT interrupt to catch Ctrl+C and terminate the container.
:param string function_name: Name of the function we are running
:param integer timeout: Timeout in seconds
:param samcli.local.docker.container.Container container: Instance of a container to terminate
:param bool is_debugging: Are we debugging?
:return threading.Timer: Timer object, if we setup a timer. None otherwise
"""
def timer_handler():
# NOTE: This handler runs in a separate thread. So don't try to mutate any non-thread-safe data structures
LOG.info("Function '%s' timed out after %d seconds", function_name, timeout)
self._container_manager.stop(container)
def signal_handler(sig, frame):
# NOTE: This handler runs in a separate thread. So don't try to mutate any non-thread-safe data structures
LOG.info("Execution of function %s was interrupted", function_name)
self._container_manager.stop(container)
if is_debugging:
LOG.debug("Setting up SIGTERM interrupt handler")
signal.signal(signal.SIGTERM, signal_handler)
else:
# Start a timer, we'll use this to abort the function if it runs beyond the specified timeout
LOG.debug("Starting a timer for %s seconds for function '%s'", timeout, function_name)
timer = threading.Timer(timeout, timer_handler, ())
timer.start()
return timer |
def fCPHASEs(self):
"""
Get a dictionary of CPHASE fidelities (normalized to unity) from the specs,
keyed by targets (qubit-qubit pairs).
:return: A dictionary of CPHASE fidelities, normalized to unity.
:rtype: Dict[tuple(int, int), float]
"""
return {tuple(es.targets): es.fCPHASE for es in self.edges_specs} | Get a dictionary of CPHASE fidelities (normalized to unity) from the specs,
keyed by targets (qubit-qubit pairs).
:return: A dictionary of CPHASE fidelities, normalized to unity.
:rtype: Dict[tuple(int, int), float] | Below is the the instruction that describes the task:
### Input:
Get a dictionary of CPHASE fidelities (normalized to unity) from the specs,
keyed by targets (qubit-qubit pairs).
:return: A dictionary of CPHASE fidelities, normalized to unity.
:rtype: Dict[tuple(int, int), float]
### Response:
def fCPHASEs(self):
"""
Get a dictionary of CPHASE fidelities (normalized to unity) from the specs,
keyed by targets (qubit-qubit pairs).
:return: A dictionary of CPHASE fidelities, normalized to unity.
:rtype: Dict[tuple(int, int), float]
"""
return {tuple(es.targets): es.fCPHASE for es in self.edges_specs} |
def error_codes(self):
"""ThreatConnect error codes."""
if self._error_codes is None:
from .tcex_error_codes import TcExErrorCodes
self._error_codes = TcExErrorCodes()
return self._error_codes | ThreatConnect error codes. | Below is the the instruction that describes the task:
### Input:
ThreatConnect error codes.
### Response:
def error_codes(self):
"""ThreatConnect error codes."""
if self._error_codes is None:
from .tcex_error_codes import TcExErrorCodes
self._error_codes = TcExErrorCodes()
return self._error_codes |
def convert_tuple_type(cls, name, value):
"""
Converts a tuple to RECORD that contains n fields, each will be converted
to its corresponding data type in bq and will be named 'field_<index>', where
index is determined by the order of the tuple elements defined in cassandra.
"""
names = ['field_' + str(i) for i in range(len(value))]
values = [cls.convert_value(name, value) for name, value in zip(names, value)]
return cls.generate_data_dict(names, values) | Converts a tuple to RECORD that contains n fields, each will be converted
to its corresponding data type in bq and will be named 'field_<index>', where
index is determined by the order of the tuple elements defined in cassandra. | Below is the the instruction that describes the task:
### Input:
Converts a tuple to RECORD that contains n fields, each will be converted
to its corresponding data type in bq and will be named 'field_<index>', where
index is determined by the order of the tuple elements defined in cassandra.
### Response:
def convert_tuple_type(cls, name, value):
"""
Converts a tuple to RECORD that contains n fields, each will be converted
to its corresponding data type in bq and will be named 'field_<index>', where
index is determined by the order of the tuple elements defined in cassandra.
"""
names = ['field_' + str(i) for i in range(len(value))]
values = [cls.convert_value(name, value) for name, value in zip(names, value)]
return cls.generate_data_dict(names, values) |
def _check_vmware_player_requirements(self, player_version):
"""
Check minimum requirements to use VMware Player.
VIX 1.13 was the release for Player 6.
VIX 1.14 was the release for Player 7.
VIX 1.15 was the release for Workstation Player 12.
:param player_version: VMware Player major version.
"""
player_version = int(player_version)
if player_version < 6:
raise VMwareError("Using VMware Player requires version 6 or above")
elif player_version == 6:
yield from self.check_vmrun_version(minimum_required_version="1.13.0")
elif player_version == 7:
yield from self.check_vmrun_version(minimum_required_version="1.14.0")
elif player_version >= 12:
yield from self.check_vmrun_version(minimum_required_version="1.15.0")
self._host_type = "player" | Check minimum requirements to use VMware Player.
VIX 1.13 was the release for Player 6.
VIX 1.14 was the release for Player 7.
VIX 1.15 was the release for Workstation Player 12.
:param player_version: VMware Player major version. | Below is the the instruction that describes the task:
### Input:
Check minimum requirements to use VMware Player.
VIX 1.13 was the release for Player 6.
VIX 1.14 was the release for Player 7.
VIX 1.15 was the release for Workstation Player 12.
:param player_version: VMware Player major version.
### Response:
def _check_vmware_player_requirements(self, player_version):
"""
Check minimum requirements to use VMware Player.
VIX 1.13 was the release for Player 6.
VIX 1.14 was the release for Player 7.
VIX 1.15 was the release for Workstation Player 12.
:param player_version: VMware Player major version.
"""
player_version = int(player_version)
if player_version < 6:
raise VMwareError("Using VMware Player requires version 6 or above")
elif player_version == 6:
yield from self.check_vmrun_version(minimum_required_version="1.13.0")
elif player_version == 7:
yield from self.check_vmrun_version(minimum_required_version="1.14.0")
elif player_version >= 12:
yield from self.check_vmrun_version(minimum_required_version="1.15.0")
self._host_type = "player" |
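An illustrative, synchronous restatement of the version gating above, returning the minimum vmrun/VIX version that would be checked for a given Player major version. The function name and return convention are assumptions made for the sketch; the branch structure mirrors the coroutine shown.

def minimum_vix_for_player(player_version: int):
    if player_version < 6:
        raise ValueError("Using VMware Player requires version 6 or above")
    if player_version == 6:
        return "1.13.0"
    if player_version == 7:
        return "1.14.0"
    if player_version >= 12:
        return "1.15.0"
    return None  # versions 8-11 fall through the original checks with no explicit vmrun requirement

print(minimum_vix_for_player(7))   # 1.14.0
print(minimum_vix_for_player(14))  # 1.15.0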
def post(self, request, *args, **kwargs):
""" Handles POST requests. """
return self.disapprove(request, *args, **kwargs) | Handles POST requests. | Below is the instruction that describes the task:
### Input:
Handles POST requests.
### Response:
def post(self, request, *args, **kwargs):
""" Handles POST requests. """
return self.disapprove(request, *args, **kwargs) |
def rst(filename):
'''
Load rst file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
- travis ci build badge
'''
content = codecs.open(filename, encoding='utf-8').read()
for regex, replacement in PYPI_RST_FILTERS:
content = re.sub(regex, replacement, content)
return content | Load rst file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
- travis ci build badge | Below is the instruction that describes the task:
### Input:
Load rst file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
- travis ci build badge
### Response:
def rst(filename):
'''
Load rst file and sanitize it for PyPI.
Remove unsupported github tags:
- code-block directive
- travis ci build badge
'''
content = codecs.open(filename, encoding='utf-8').read()
for regex, replacement in PYPI_RST_FILTERS:
content = re.sub(regex, replacement, content)
return content |
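A minimal sketch of the filter table the function iterates over. The actual PYPI_RST_FILTERS patterns are not shown in the row, so the two (regex, replacement) pairs below are purely illustrative of the kind of cleanup described in the docstring.

import re

PYPI_RST_FILTERS = (
    # hypothetical examples only: drop a travis badge line, downgrade code-block to a literal block
    (r'\.\. image:: https://travis-ci\.org/\S+\n', ''),
    (r'\.\. code-block:: \w+', '::'),
)

def sanitize(content):
    for regex, replacement in PYPI_RST_FILTERS:
        content = re.sub(regex, replacement, content)
    return content

print(sanitize(".. code-block:: python\n\n    print('hi')\n"))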
async def _get_smallest_env(self):
"""Get address of the slave environment manager with the smallest
number of agents.
"""
async def slave_task(mgr_addr):
r_manager = await self.env.connect(mgr_addr, timeout=TIMEOUT)
ret = await r_manager.get_agents(addr=True)
return mgr_addr, len(ret)
sizes = await create_tasks(slave_task, self.addrs, flatten=False)
return sorted(sizes, key=lambda x: x[1])[0][0] | Get address of the slave environment manager with the smallest
number of agents. | Below is the instruction that describes the task:
### Input:
Get address of the slave environment manager with the smallest
number of agents.
### Response:
async def _get_smallest_env(self):
"""Get address of the slave environment manager with the smallest
number of agents.
"""
async def slave_task(mgr_addr):
r_manager = await self.env.connect(mgr_addr, timeout=TIMEOUT)
ret = await r_manager.get_agents(addr=True)
return mgr_addr, len(ret)
sizes = await create_tasks(slave_task, self.addrs, flatten=False)
return sorted(sizes, key=lambda x: x[1])[0][0] |
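A self-contained asyncio sketch of the selection logic above: query every manager address concurrently, collect (address, agent_count) pairs, and pick the address with the smallest count. The remote connection is faked with a lookup table; addresses and counts are illustrative.

import asyncio

FAKE_COUNTS = {"tcp://env-a": 3, "tcp://env-b": 1, "tcp://env-c": 2}  # stand-in for remote queries

async def agent_count(addr):
    return addr, FAKE_COUNTS[addr]

async def smallest_env(addrs):
    sizes = await asyncio.gather(*(agent_count(a) for a in addrs))
    return sorted(sizes, key=lambda x: x[1])[0][0]

print(asyncio.run(smallest_env(list(FAKE_COUNTS))))  # tcp://env-b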
def main():
"""Parser the command line and run the validator."""
parser = argparse.ArgumentParser(
description="[v" + __version__ + "] " + __doc__,
prog="w3c_validator",
)
parser.add_argument(
"--log",
default="INFO",
help=("log level: DEBUG, INFO or INFO "
"(default: INFO)"))
parser.add_argument(
"--version", action="version", version="%(prog)s " + __version__)
parser.add_argument(
"--verbose", help="increase output verbosity", action="store_true")
parser.add_argument(
"source", metavar="F", type=str, nargs="+", help="file or URL")
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log))
LOGGER.info("Files to validate: \n {0}".format("\n ".join(args.source)))
LOGGER.info("Number of files: {0}".format(len(args.source)))
errors = 0
warnings = 0
for f in args.source:
LOGGER.info("validating: %s ..." % f)
retrys = 0
while retrys < 2:
result = validate(f, verbose=args.verbose)
if result:
break
time.sleep(2)
retrys += 1
LOGGER.info("retrying: %s ..." % f)
else:
LOGGER.info("failed: %s" % f)
errors += 1
continue
# import pdb; pdb.set_trace()
if f.endswith(".css"):
errorcount = result["cssvalidation"]["result"]["errorcount"]
warningcount = result["cssvalidation"]["result"]["warningcount"]
errors += errorcount
warnings += warningcount
if errorcount > 0:
LOGGER.info("errors: %d" % errorcount)
if warningcount > 0:
LOGGER.info("warnings: %d" % warningcount)
else:
for msg in result["messages"]:
print_msg(msg)
if msg["type"] == "error":
errors += 1
else:
warnings += 1
sys.exit(min(errors, 255)) | Parse the command line and run the validator. | Below is the instruction that describes the task:
### Input:
Parse the command line and run the validator.
### Response:
def main():
"""Parser the command line and run the validator."""
parser = argparse.ArgumentParser(
description="[v" + __version__ + "] " + __doc__,
prog="w3c_validator",
)
parser.add_argument(
"--log",
default="INFO",
help=("log level: DEBUG, INFO or INFO "
"(default: INFO)"))
parser.add_argument(
"--version", action="version", version="%(prog)s " + __version__)
parser.add_argument(
"--verbose", help="increase output verbosity", action="store_true")
parser.add_argument(
"source", metavar="F", type=str, nargs="+", help="file or URL")
args = parser.parse_args()
logging.basicConfig(level=getattr(logging, args.log))
LOGGER.info("Files to validate: \n {0}".format("\n ".join(args.source)))
LOGGER.info("Number of files: {0}".format(len(args.source)))
errors = 0
warnings = 0
for f in args.source:
LOGGER.info("validating: %s ..." % f)
retrys = 0
while retrys < 2:
result = validate(f, verbose=args.verbose)
if result:
break
time.sleep(2)
retrys += 1
LOGGER.info("retrying: %s ..." % f)
else:
LOGGER.info("failed: %s" % f)
errors += 1
continue
# import pdb; pdb.set_trace()
if f.endswith(".css"):
errorcount = result["cssvalidation"]["result"]["errorcount"]
warningcount = result["cssvalidation"]["result"]["warningcount"]
errors += errorcount
warnings += warningcount
if errorcount > 0:
LOGGER.info("errors: %d" % errorcount)
if warningcount > 0:
LOGGER.info("warnings: %d" % warningcount)
else:
for msg in result["messages"]:
print_msg(msg)
if msg["type"] == "error":
errors += 1
else:
warnings += 1
sys.exit(min(errors, 255)) |
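A small sketch of two idioms the driver above relies on: the while/else retry loop, whose else branch runs only when the loop finishes without a break (i.e. every attempt failed), and capping the exit status at 255 because POSIX exit codes fit in a single byte. The validate call is a stand-in that always fails; the target URL is illustrative.

import sys
import time

def validate_once(target):
    return False  # stand-in: pretend the validator service never answered

errors = 0
attempts = 0
while attempts < 2:
    if validate_once("https://example.com"):
        break
    time.sleep(0.1)
    attempts += 1
else:
    print("failed: https://example.com")
    errors += 1

sys.exit(min(errors, 255))  # keeps very large error counts inside the valid 0-255 range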