content (string, lengths 35–762k) | sha1 (string, length 40) | id (int64, 0–3.66M)
---|---|---
from io import StringIO
from typing import TextIO
from typing import Set
def observe_birds(observations_file: TextIO) -> Set[str]:
"""Return a set of the bird species listed in observations_file, which has one bird species per line.
>>> file = StringIO("bird 1\\nbird 2\\nbird 1\\n")
>>> birds = observe_birds(file)
>>> 'bird 1' in birds
True
>>> 'bird 2' in birds
True
>>> len(birds) == 2
True
"""
    birds_observed = set()
for line in observations_file:
bird = line.strip()
birds_observed.add(bird)
return birds_observed
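# A minimal usage sketch (not part of the original record): the function accepts
# any line-iterable text stream, so StringIO can stand in for a real file.
if __name__ == "__main__":
    sample = StringIO("crow\nrobin\ncrow\n")
    print(observe_birds(sample))  # {'crow', 'robin'} (set order may vary)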
|
e3ea90e8da4488121ec1ae75c4aa116646db08f5
| 18,200 |
def convert_sheet(sheet, result_dict, is_enum_mode=False):
"""
转换单个sheet的数据
Args:
sheet: openpyxl.worksheet.worksheet.Worksheet
result_dict: [dict]结果都存在这里, key为data_name,value为sheet_result
is_enum_mode: [bool]是否为enum导表模式
Returns:
bool, 是否成功
"""
if is_enum_mode:
data_name = convert.excel_handler.get_enum_class_name(sheet)
else:
data_name = convert.excel_handler.get_data_name(sheet)
sheet_name = convert.excel_handler.get_sheet_name(sheet)
if not data_name:
        ec_converter.logger.info('data name of sheet \'%s\' is empty or violates the naming rules; sheet not exported', sheet_name)
return True
if data_name in result_dict:
        ec_converter.logger.error('duplicate data name \'%s\', sheet name = \'%s\'', data_name, sheet_name)
return False
name_schema_dict = {}
col_schema_dict = {}
if not _get_sheet_schema_meta_info(sheet, name_schema_dict, col_schema_dict):
        ec_converter.logger.error('failed to get schema info for sheet \'%s\'', sheet_name)
return False
sheet_result = result_dict.setdefault(data_name, convert.sheet_result.SheetResult(data_name))
sheet_result.name_schema_dict = name_schema_dict
sheet_result.col_schema_dict = col_schema_dict
for row_data in convert.excel_handler.get_row_generator(sheet, settings.ROW_OFFSET):
if not _convert_row(row_data, sheet_name, sheet_result):
return False
return True
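# Hypothetical driver, assuming the project-internal convert.*, ec_converter,
# and settings modules are importable; only openpyxl is external here.
def convert_workbook(path, is_enum_mode=False):
    import openpyxl
    wb = openpyxl.load_workbook(path, read_only=True)
    results = {}
    # all() short-circuits, mirroring the fail-fast bool contract of convert_sheet.
    ok = all(convert_sheet(ws, results, is_enum_mode) for ws in wb.worksheets)
    return ok, results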
|
284f470844b6722941d0e4725e4c23b1473b08df
| 18,201 |
import os
def _nsimage_from_file(filename, dimensions=None, template=None):
"""Take a path to an image file and return an NSImage object."""
try:
_log('attempting to open image at {0}'.format(filename))
with open(filename):
pass
    except IOError:  # literal file path didn't work -- try to locate image based on main script path
        try:
            from __main__ import __file__ as main_script_path
            main_script_path = os.path.dirname(main_script_path)
            filename = os.path.join(main_script_path, filename)
        except ImportError:  # __main__ has no __file__ (e.g. interactive session)
            pass
        _log('attempting (again) to open image at {0}'.format(filename))
        with open(filename):  # raises IOError if the file still doesn't exist;
            pass              # NSImage would otherwise fail silently, which isn't helpful for debugging
image = NSImage.alloc().initByReferencingFile_(filename)
image.setScalesWhenResized_(True)
image.setSize_((20, 20) if dimensions is None else dimensions)
    if template is not None:
image.setTemplate_(template)
return image
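# Hedged harness sketch: NSImage comes from PyObjC's AppKit bindings and _log is
# a module-level helper assumed by this snippet (both undefined here).
# from AppKit import NSImage
# _log = print
# icon = _nsimage_from_file('icon.png', dimensions=(18, 18), template=True)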
|
079556d6959c6344feb8fa25db3467d4468124d0
| 18,202 |
def bytes_to_int(b: bytes, order: str = 'big') -> int:
"""Convert bytes 'b' to an int."""
return int.from_bytes(b, order)
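# Quick sanity check: the big-endian bytes 0x01 0x00 decode to 256, while
# little-endian order reads the same pair as 1.
assert bytes_to_int(b'\x01\x00') == 256
assert bytes_to_int(b'\x01\x00', 'little') == 1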
|
c959683787e03cc956b5abffc814f98cf4722397
| 18,203 |
import numpy as np
from scipy.interpolate import interp1d
from scipy.integrate import simps
# The line/template helpers used below (gaussian, lorentzian, simple_power_law,
# VC04_feii_template, K10_feii_template, get_fwhm_res, convolve_gauss_hermite)
# and an nnls wrapper returning only the weight vector are assumed to be defined
# elsewhere in this module.
def fit_model(params,param_names,lam_gal,galaxy,noise,gal_temp,
              feii_tab,feii_options,
              temp_list,temp_fft,npad,line_profile,fwhm_gal,velscale,npix,vsyst,run_dir,
              fit_type,output_model):
"""
Constructs galaxy model by convolving templates with a LOSVD given by
a specified set of velocity parameters.
Parameters:
pars: parameters of Markov-chain
lam_gal: wavelength vector used for continuum model
temp_fft: the Fourier-transformed templates
npad:
velscale: the velocity scale in km/s/pixel
npix: number of output pixels; must be same as galaxy
vsyst: dv; the systematic velocity fr
"""
# Construct dictionary of parameter names and their respective parameter values
# param_names = [param_dict[key]['name'] for key in param_dict ]
# params = [param_dict[key]['init'] for key in param_dict ]
keys = param_names
values = params
p = dict(zip(keys, values))
c = 299792.458 # speed of light
host_model = np.copy(galaxy)
comp_dict = {}
    # Perform linear interpolation on the fwhm_gal array as a function of wavelength.
    # We will use this to determine the FWHM resolution as a function of wavelength for each
    # emission line so we can correct for the resolution at every iteration.
fwhm_gal_ftn = interp1d(lam_gal,fwhm_gal,kind='linear',bounds_error=False,fill_value=(0,0))
# Re-directed line_profile function
    def line_model(line_profile,*args):
        """
        This function maps the user-chosen line profile
        to the correct line model.
        """
        if (line_profile=='Gaussian'):
            return gaussian(*args)
        elif (line_profile=='Lorentzian'):
            return lorentzian(*args)
        raise ValueError('unsupported line_profile: %r' % (line_profile,))
############################# Power-law Component ######################################################
# if all(comp in param_names for comp in ['power_amp','power_slope','power_break'])==True:
if all(comp in param_names for comp in ['power_amp','power_slope'])==True:
# Create a template model for the power-law continuum
# power = simple_power_law(lam_gal,p['power_amp'],p['power_slope'],p['power_break']) #
power = simple_power_law(lam_gal,p['power_amp'],p['power_slope']) #
host_model = (host_model) - (power) # Subtract off continuum from galaxy, since we only want template weights to be fit
comp_dict['power'] = {'comp':power,'pcolor':'xkcd:orange red','linewidth':1.0}
########################################################################################################
############################# Fe II Component ##########################################################
if (feii_tab is not None):
if (feii_options['template']['type']=='VC04'):
# Unpack feii_tab
na_feii_tab = (feii_tab[0],feii_tab[1])
br_feii_tab = (feii_tab[2],feii_tab[3])
# Parse FeII options
if (feii_options['amp_const']['bool']==False): # if amp not constant
na_feii_amp = p['na_feii_amp']
br_feii_amp = p['br_feii_amp']
elif (feii_options['amp_const']['bool']==True): # if amp constant
na_feii_amp = feii_options['amp_const']['na_feii_val']
br_feii_amp = feii_options['amp_const']['br_feii_val']
            if (feii_options['fwhm_const']['bool']==False): # if fwhm not constant
                na_feii_fwhm = p['na_feii_fwhm']
                br_feii_fwhm = p['br_feii_fwhm']
            elif (feii_options['fwhm_const']['bool']==True): # if fwhm constant
                na_feii_fwhm = feii_options['fwhm_const']['na_feii_val']
                br_feii_fwhm = feii_options['fwhm_const']['br_feii_val']
            if (feii_options['voff_const']['bool']==False): # if voff not constant
                na_feii_voff = p['na_feii_voff']
                br_feii_voff = p['br_feii_voff']
            elif (feii_options['voff_const']['bool']==True): # if voff constant
                na_feii_voff = feii_options['voff_const']['na_feii_val']
                br_feii_voff = feii_options['voff_const']['br_feii_val']
na_feii_template = VC04_feii_template(lam_gal,fwhm_gal,na_feii_tab,na_feii_amp,na_feii_fwhm,na_feii_voff,velscale,run_dir)
br_feii_template = VC04_feii_template(lam_gal,fwhm_gal,br_feii_tab,br_feii_amp,br_feii_fwhm,br_feii_voff,velscale,run_dir)
host_model = (host_model) - (na_feii_template) - (br_feii_template)
comp_dict['na_feii_template'] = {'comp':na_feii_template,'pcolor':'xkcd:yellow','linewidth':1.0}
comp_dict['br_feii_template'] = {'comp':br_feii_template,'pcolor':'xkcd:orange','linewidth':1.0}
elif (feii_options['template']['type']=='K10'):
# Unpack tables for each template
f_trans_tab = (feii_tab[0],feii_tab[1],feii_tab[2])
s_trans_tab = (feii_tab[3],feii_tab[4],feii_tab[5])
g_trans_tab = (feii_tab[6],feii_tab[7],feii_tab[8])
z_trans_tab = (feii_tab[9],feii_tab[10])
# Parse FeII options
if (feii_options['amp_const']['bool']==False): # if amp not constant
f_feii_amp = p['feii_f_amp']
s_feii_amp = p['feii_s_amp']
g_feii_amp = p['feii_g_amp']
z_feii_amp = p['feii_z_amp']
elif (feii_options['amp_const']['bool']==True): # if amp constant
f_feii_amp = feii_options['amp_const']['f_feii_val']
s_feii_amp = feii_options['amp_const']['s_feii_val']
g_feii_amp = feii_options['amp_const']['g_feii_val']
z_feii_amp = feii_options['amp_const']['z_feii_val']
#
if (feii_options['fwhm_const']['bool']==False): # if fwhm not constant
feii_fwhm = p['feii_fwhm']
elif (feii_options['fwhm_const']['bool']==True): # if fwhm constant
feii_fwhm = feii_options['fwhm_const']['val']
#
if (feii_options['voff_const']['bool']==False): # if voff not constant
feii_voff = p['feii_voff']
elif (feii_options['voff_const']['bool']==True): # if voff constant
feii_voff = feii_options['voff_const']['val']
#
if (feii_options['temp_const']['bool']==False): # if temp not constant
feii_temp = p['feii_temp']
elif (feii_options['temp_const']['bool']==True): # if temp constant
feii_temp = feii_options['temp_const']['val']
f_trans_feii_template = K10_feii_template(lam_gal,'F',fwhm_gal,f_trans_tab,f_feii_amp,feii_temp,feii_fwhm,feii_voff,velscale,run_dir)
s_trans_feii_template = K10_feii_template(lam_gal,'S',fwhm_gal,s_trans_tab,s_feii_amp,feii_temp,feii_fwhm,feii_voff,velscale,run_dir)
g_trans_feii_template = K10_feii_template(lam_gal,'G',fwhm_gal,g_trans_tab,g_feii_amp,feii_temp,feii_fwhm,feii_voff,velscale,run_dir)
z_trans_feii_template = K10_feii_template(lam_gal,'IZw1',fwhm_gal,z_trans_tab,z_feii_amp,feii_temp,feii_fwhm,feii_voff,velscale,run_dir)
host_model = (host_model) - (f_trans_feii_template) - (s_trans_feii_template) - (g_trans_feii_template) - (z_trans_feii_template)
comp_dict['F_feii_template'] = {'comp':f_trans_feii_template,'pcolor':'xkcd:rust orange','linewidth':1.0}
comp_dict['S_feii_template'] = {'comp':s_trans_feii_template,'pcolor':'xkcd:rust orange','linewidth':1.0}
comp_dict['G_feii_template'] = {'comp':g_trans_feii_template,'pcolor':'xkcd:rust orange','linewidth':1.0}
comp_dict['Z_feii_template'] = {'comp':z_trans_feii_template,'pcolor':'xkcd:rust orange','linewidth':1.0}
########################################################################################################
############################# Emission Line Components #################################################
# Narrow lines
#### [OII]3727,3729 #################################################################################
if all(comp in param_names for comp in ['na_oii3727_core_amp','na_oii3727_core_fwhm','na_oii3727_core_voff','na_oii3729_core_amp'])==True:
# Narrow [OII]3727
na_oii3727_core_center = 3727.092 # Angstroms
na_oii3727_core_amp = p['na_oii3727_core_amp'] # flux units
na_oii3727_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3727_core_center,p['na_oii3727_core_voff'])
na_oii3727_core_fwhm = np.sqrt(p['na_oii3727_core_fwhm']**2+(na_oii3727_core_fwhm_res)**2) # km/s
na_oii3727_core_voff = p['na_oii3727_core_voff'] # km/s
na_oii3727_core = gaussian(lam_gal,na_oii3727_core_center,na_oii3727_core_amp,na_oii3727_core_fwhm,na_oii3727_core_voff,velscale)
host_model = host_model - na_oii3727_core
comp_dict['na_oii3727_core'] = {'comp':na_oii3727_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [OII]3729
na_oii3729_core_center = 3729.875 # Angstroms
na_oii3729_core_amp = p['na_oii3729_core_amp'] # flux units
na_oii3729_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3729_core_center,na_oii3727_core_voff)
        na_oii3729_core_fwhm = np.sqrt(p['na_oii3727_core_fwhm']**2+(na_oii3729_core_fwhm_res)**2) # km/s
na_oii3729_core_voff = na_oii3727_core_voff # km/s
na_oii3729_core = gaussian(lam_gal,na_oii3729_core_center,na_oii3729_core_amp,na_oii3729_core_fwhm,na_oii3729_core_voff,velscale)
host_model = host_model - na_oii3729_core
comp_dict['na_oii3729_core'] = {'comp':na_oii3729_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, and includes [OIII]5007
elif (all(comp in param_names for comp in ['na_oii3727_core_amp','na_oii3727_core_voff','na_oii3729_core_amp','na_oiii5007_core_fwhm'])==True) & \
(all(comp not in param_names for comp in ['na_neiii_core_fwhm','na_Hg_fwhm','oiii4363_core_fwhm'])==True):
# Narrow [OII]3727
na_oii3727_core_center = 3727.092 # Angstroms
na_oii3727_core_amp = p['na_oii3727_core_amp'] # flux units
na_oii3727_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3727_core_center,p['na_oii3727_core_voff'])
na_oii3727_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oii3727_core_fwhm_res)**2) # km/s
na_oii3727_core_voff = p['na_oii3727_core_voff'] # km/s
na_oii3727_core = gaussian(lam_gal,na_oii3727_core_center,na_oii3727_core_amp,na_oii3727_core_fwhm,na_oii3727_core_voff,velscale)
host_model = host_model - na_oii3727_core
comp_dict['na_oii3727_core'] = {'comp':na_oii3727_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [OII]3729
na_oii3729_core_center = 3729.875 # Angstroms
na_oii3729_core_amp = p['na_oii3729_core_amp'] # flux units
na_oii3729_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3729_core_center,na_oii3727_core_voff)
na_oii3729_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oii3729_core_fwhm_res)**2) # km/s
na_oii3729_core_voff = na_oii3727_core_voff # km/s
na_oii3729_core = gaussian(lam_gal,na_oii3729_core_center,na_oii3729_core_amp,na_oii3729_core_fwhm,na_oii3729_core_voff,velscale)
host_model = host_model - na_oii3729_core
comp_dict['na_oii3729_core'] = {'comp':na_oii3729_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, but doesn't include [OIII]5007
elif (all(comp in param_names for comp in ['na_oii3727_core_amp','na_oii3727_core_voff','na_oii3729_core_amp','na_Hg_fwhm'])==True) & \
(all(comp not in param_names for comp in ['na_neiii_core_fwhm','oiii4363_core_fwhm','na_oiii5007_core_fwhm'])==True):
# Narrow [OII]3727
na_oii3727_core_center = 3727.092 # Angstroms
na_oii3727_core_amp = p['na_oii3727_core_amp'] # flux units
na_oii3727_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3727_core_center,p['na_oii3727_core_voff'])
na_oii3727_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_oii3727_core_fwhm_res)**2) # km/s
na_oii3727_core_voff = p['na_oii3727_core_voff'] # km/s
na_oii3727_core = gaussian(lam_gal,na_oii3727_core_center,na_oii3727_core_amp,na_oii3727_core_fwhm,na_oii3727_core_voff,velscale)
host_model = host_model - na_oii3727_core
comp_dict['na_oii3727_core'] = {'comp':na_oii3727_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [OII]3729
na_oii3729_core_center = 3729.875 # Angstroms
na_oii3729_core_amp = p['na_oii3729_core_amp'] # flux units
na_oii3729_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oii3729_core_center,na_oii3727_core_voff)
na_oii3729_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_oii3729_core_fwhm_res)**2) # km/s
na_oii3729_core_voff = na_oii3727_core_voff # km/s
na_oii3729_core = gaussian(lam_gal,na_oii3729_core_center,na_oii3729_core_amp,na_oii3729_core_fwhm,na_oii3729_core_voff,velscale)
host_model = host_model - na_oii3729_core
comp_dict['na_oii3729_core'] = {'comp':na_oii3729_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
#### [NeIII]3870 #################################################################################
if all(comp in param_names for comp in ['na_neiii_core_amp','na_neiii_core_fwhm','na_neiii_core_voff'])==True:
        # Narrow [NeIII]3870
na_neiii_core_center = 3869.810 # Angstroms
na_neiii_core_amp = p['na_neiii_core_amp'] # flux units
na_neiii_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_neiii_core_center,p['na_neiii_core_voff'])
na_neiii_core_fwhm = np.sqrt(p['na_neiii_core_fwhm']**2+(na_neiii_core_fwhm_res)**2) # km/s
na_neiii_core_voff = p['na_neiii_core_voff'] # km/s
na_neiii_core = gaussian(lam_gal,na_neiii_core_center,na_neiii_core_amp,na_neiii_core_fwhm,na_neiii_core_voff,velscale)
host_model = host_model - na_neiii_core
comp_dict['na_neiii_core'] = {'comp':na_neiii_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, and includes [OIII]5007
elif (all(comp in param_names for comp in ['na_neiii_core_amp','na_neiii_core_voff','na_oiii5007_core_fwhm'])==True) & \
(all(comp not in param_names for comp in ['na_neiii_core_fwhm','na_Hg_fwhm','oiii4363_core_fwhm'])==True):
        # Narrow [NeIII]3870
na_neiii_core_center = 3869.810 # Angstroms
na_neiii_core_amp = p['na_neiii_core_amp'] # flux units
na_neiii_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_neiii_core_center,p['na_neiii_core_voff'])
na_neiii_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_neiii_core_fwhm_res)**2) # km/s
na_neiii_core_voff = p['na_neiii_core_voff'] # km/s
na_neiii_core = gaussian(lam_gal,na_neiii_core_center,na_neiii_core_amp,na_neiii_core_fwhm,na_neiii_core_voff,velscale)
host_model = host_model - na_neiii_core
comp_dict['na_neiii_core'] = {'comp':na_neiii_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, but doesn't include [OIII]5007
elif (all(comp in param_names for comp in ['na_neiii_core_amp','na_neiii_core_voff','na_Hg_fwhm'])==True) & \
(all(comp not in param_names for comp in ['na_neiii_core_fwhm','oiii4363_core_fwhm','na_oiii5007_core_fwhm'])==True):
        # Narrow [NeIII]3870
na_neiii_core_center = 3869.810 # Angstroms
na_neiii_core_amp = p['na_neiii_core_amp'] # flux units
na_neiii_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_neiii_core_center,p['na_neiii_core_voff'])
na_neiii_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_neiii_core_fwhm_res)**2) # km/s
na_neiii_core_voff = p['na_neiii_core_voff'] # km/s
na_neiii_core = gaussian(lam_gal,na_neiii_core_center,na_neiii_core_amp,na_neiii_core_fwhm,na_neiii_core_voff,velscale)
host_model = host_model - na_neiii_core
comp_dict['na_neiii_core'] = {'comp':na_neiii_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
#### H-delta #####################################################################################
if all(comp in param_names for comp in ['na_Hd_amp','na_Hd_fwhm','na_Hd_voff'])==True:
        # Narrow H-delta
na_hd_core_center = 4102.890 # Angstroms
na_hd_core_amp = p['na_Hd_amp'] # flux units
na_hd_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hd_core_center,p['na_Hd_voff'])
na_hd_core_fwhm = np.sqrt(p['na_Hd_fwhm']**2+(na_hd_core_fwhm_res)**2) # km/s
na_hd_core_voff = p['na_Hd_voff'] # km/s
na_Hd_core = gaussian(lam_gal,na_hd_core_center,na_hd_core_amp,na_hd_core_fwhm,na_hd_core_voff,velscale)
host_model = host_model - na_Hd_core
comp_dict['na_Hd_core'] = {'comp':na_Hd_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, and includes [OIII]5007
elif (all(comp in param_names for comp in ['na_Hd_amp','na_Hd_voff','na_oiii5007_core_fwhm'])==True) & \
(all(comp not in param_names for comp in ['na_Hd_fwhm','na_Hg_fwhm','oiii4363_core_fwhm'])==True):
        # Narrow H-delta
na_hd_core_center = 4102.890 # Angstroms
na_hd_core_amp = p['na_Hd_amp'] # flux units
na_hd_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hd_core_center,p['na_Hd_voff'])
na_hd_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_hd_core_fwhm_res)**2) # km/s
na_hd_core_voff = p['na_Hd_voff'] # km/s
na_Hd_core = gaussian(lam_gal,na_hd_core_center,na_hd_core_amp,na_hd_core_fwhm,na_hd_core_voff,velscale)
host_model = host_model - na_Hd_core
comp_dict['na_Hd_core'] = {'comp':na_Hd_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, but doesn't include [OIII]5007
elif (all(comp in param_names for comp in ['na_Hd_amp','na_Hd_voff','na_Hg_fwhm'])==True) & \
        (all(comp not in param_names for comp in ['na_Hd_fwhm','oiii4363_core_fwhm','na_oiii5007_core_fwhm'])==True):
        # Narrow H-delta
na_hd_core_center = 4102.890 # Angstroms
na_hd_core_amp = p['na_Hd_amp'] # flux units
na_hd_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hd_core_center,p['na_Hd_voff'])
na_hd_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_hd_core_fwhm_res)**2) # km/s
na_hd_core_voff = p['na_Hd_voff'] # km/s
na_Hd_core = gaussian(lam_gal,na_hd_core_center,na_hd_core_amp,na_hd_core_fwhm,na_hd_core_voff,velscale)
host_model = host_model - na_Hd_core
comp_dict['na_Hd_core'] = {'comp':na_Hd_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
#### H-gamma/[OIII]4363 ##########################################################################
if all(comp in param_names for comp in ['na_Hg_amp','na_Hg_fwhm','na_Hg_voff','na_oiii4363_core_amp','na_oiii4363_core_fwhm','na_oiii4363_core_voff'])==True:
# Narrow H-gamma
na_hg_core_center = 4341.680 # Angstroms
na_hg_core_amp = p['na_Hg_amp'] # flux units
na_hg_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hg_core_center,p['na_Hg_voff'])
na_hg_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_hg_core_fwhm_res)**2) # km/s
na_hg_core_voff = p['na_Hg_voff'] # km/s
na_Hg_core = gaussian(lam_gal,na_hg_core_center,na_hg_core_amp,na_hg_core_fwhm,na_hg_core_voff,velscale)
host_model = host_model - na_Hg_core
comp_dict['na_Hg_core'] = {'comp':na_Hg_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [OIII]4363 core
na_oiii4363_core_center = 4364.436 # Angstroms
na_oiii4363_core_amp = p['na_oiii4363_core_amp'] # flux units
na_oiii4363_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4363_core_center,p['na_oiii4363_core_voff'])
na_oiii4363_core_fwhm = np.sqrt(p['na_oiii4363_core_fwhm']**2+(na_oiii4363_core_fwhm_res)**2) # km/s
na_oiii4363_core_voff = p['na_oiii4363_core_voff'] # km/s
na_oiii4363_core = gaussian(lam_gal,na_oiii4363_core_center,na_oiii4363_core_amp,na_oiii4363_core_fwhm,na_oiii4363_core_voff,velscale)
host_model = host_model - na_oiii4363_core
comp_dict['na_oiii4363_core'] = {'comp':na_oiii4363_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, and includes [OIII]5007
elif (all(comp in param_names for comp in ['na_Hg_amp','na_Hg_voff','na_oiii4363_core_amp','na_oiii4363_core_voff','na_oiii5007_core_fwhm'])==True) & \
(all(comp not in param_names for comp in ['na_Hg_fwhm','oiii4363_core_fwhm'])==True):
# Narrow H-gamma
na_hg_core_center = 4341.680 # Angstroms
na_hg_core_amp = p['na_Hg_amp'] # flux units
na_hg_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hg_core_center,p['na_Hg_voff'])
na_hg_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_hg_core_fwhm_res)**2) # km/s
na_hg_core_voff = p['na_Hg_voff'] # km/s
na_Hg_core = gaussian(lam_gal,na_hg_core_center,na_hg_core_amp,na_hg_core_fwhm,na_hg_core_voff,velscale)
host_model = host_model - na_Hg_core
comp_dict['na_Hg_core'] = {'comp':na_Hg_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [OIII]4363 core
na_oiii4363_core_center = 4364.436 # Angstroms
na_oiii4363_core_amp = p['na_oiii4363_core_amp'] # flux units
na_oiii4363_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4363_core_center,p['na_oiii4363_core_voff'])
na_oiii4363_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oiii4363_core_fwhm_res)**2) # km/s
na_oiii4363_core_voff = p['na_oiii4363_core_voff'] # km/s
na_oiii4363_core = gaussian(lam_gal,na_oiii4363_core_center,na_oiii4363_core_amp,na_oiii4363_core_fwhm,na_oiii4363_core_voff,velscale)
host_model = host_model - na_oiii4363_core
comp_dict['na_oiii4363_core'] = {'comp':na_oiii4363_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# If tie_narrow=True, but doesn't include [OIII]5007
elif (all(comp in param_names for comp in ['na_Hg_amp','na_Hg_fwhm','na_Hg_voff','na_oiii4363_core_amp','na_oiii4363_core_voff'])==True) & \
(all(comp not in param_names for comp in ['oiii4363_core_fwhm','na_oiii5007_core_fwhm'])==True):
# Narrow H-gamma
na_hg_core_center = 4341.680 # Angstroms
na_hg_core_amp = p['na_Hg_amp'] # flux units
na_hg_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hg_core_center,p['na_Hg_voff'])
na_hg_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_hg_core_fwhm_res)**2) # km/s
na_hg_core_voff = p['na_Hg_voff'] # km/s
na_Hg_core = gaussian(lam_gal,na_hg_core_center,na_hg_core_amp,na_hg_core_fwhm,na_hg_core_voff,velscale)
host_model = host_model - na_Hg_core
comp_dict['na_Hg_core'] = {'comp':na_Hg_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [OIII]4363 core
na_oiii4363_core_center = 4364.436 # Angstroms
na_oiii4363_core_amp = p['na_oiii4363_core_amp'] # flux units
na_oiii4363_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4363_core_center,p['na_oiii4363_core_voff'])
na_oiii4363_core_fwhm = np.sqrt(p['na_Hg_fwhm']**2+(na_oiii4363_core_fwhm_res)**2) # km/s
na_oiii4363_core_voff = p['na_oiii4363_core_voff'] # km/s
na_oiii4363_core = gaussian(lam_gal,na_oiii4363_core_center,na_oiii4363_core_amp,na_oiii4363_core_fwhm,na_oiii4363_core_voff,velscale)
host_model = host_model - na_oiii4363_core
comp_dict['na_oiii4363_core'] = {'comp':na_oiii4363_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
#### H-beta/[OIII] #########################################################################################
if all(comp in param_names for comp in ['na_oiii5007_core_amp','na_oiii5007_core_fwhm','na_oiii5007_core_voff'])==True:
# Narrow [OIII]5007 Core
na_oiii5007_core_center = 5008.240 # Angstroms
na_oiii5007_core_amp = p['na_oiii5007_core_amp'] # flux units
na_oiii5007_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii5007_core_center,p['na_oiii5007_core_voff'])
na_oiii5007_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oiii5007_core_fwhm_res)**2) # km/s
na_oiii5007_core_voff = p['na_oiii5007_core_voff'] # km/s
na_oiii5007_core = gaussian(lam_gal,na_oiii5007_core_center,na_oiii5007_core_amp,na_oiii5007_core_fwhm,na_oiii5007_core_voff,velscale)
# na_oiii5007_core = line_model(line_profile,lam_gal,na_oiii5007_core_center,na_oiii5007_core_amp,na_oiii5007_core_fwhm,na_oiii5007_core_voff,velscale)
host_model = host_model - na_oiii5007_core
comp_dict['na_oiii5007_core'] = {'comp':na_oiii5007_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [OIII]4959 Core
na_oiii4959_core_center = 4960.295 # Angstroms
na_oiii4959_core_amp = (1.0/3.0)*na_oiii5007_core_amp # flux units
na_oiii4959_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4959_core_center,na_oiii5007_core_voff)
na_oiii4959_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_oiii4959_fwhm_res)**2) # km/s
na_oiii4959_core_voff = na_oiii5007_core_voff # km/s
na_oiii4959_core = gaussian(lam_gal,na_oiii4959_core_center,na_oiii4959_core_amp,na_oiii4959_core_fwhm,na_oiii4959_core_voff,velscale)
# na_oiii4959_core = line_model(line_profile,lam_gal,na_oiii4959_core_center,na_oiii4959_core_amp,na_oiii4959_core_fwhm,na_oiii4959_core_voff,velscale)
host_model = host_model - na_oiii4959_core
comp_dict['na_oiii4959_core'] = {'comp':na_oiii4959_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
if all(comp in param_names for comp in ['na_Hb_core_amp','na_Hb_core_voff'])==True:
# Narrow H-beta
na_hb_core_center = 4862.680 # Angstroms
na_hb_core_amp = p['na_Hb_core_amp'] # flux units
na_hb_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hb_core_center,p['na_Hb_core_voff'])
na_hb_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_hb_core_fwhm_res)**2) # km/s
na_hb_core_voff = p['na_Hb_core_voff'] # km/s
na_Hb_core = gaussian(lam_gal,na_hb_core_center,na_hb_core_amp,na_hb_core_fwhm,na_hb_core_voff,velscale)
# na_Hb_core = line_model(line_profile,lam_gal,na_hb_core_center,na_hb_core_amp,na_hb_core_fwhm,na_hb_core_voff,velscale)
host_model = host_model - na_Hb_core
comp_dict['na_Hb_core'] = {'comp':na_Hb_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
#### H-alpha/[NII]/[SII] ####################################################################################
if all(comp in param_names for comp in ['na_Ha_core_amp','na_Ha_core_fwhm','na_Ha_core_voff',
'na_nii6585_core_amp',
'na_sii6732_core_amp','na_sii6718_core_amp'])==True:
# Narrow H-alpha
na_ha_core_center = 6564.610 # Angstroms
na_ha_core_amp = p['na_Ha_core_amp'] # flux units
na_ha_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_ha_core_center,p['na_Ha_core_voff'])
na_ha_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_ha_core_fwhm_res)**2) # km/s
na_ha_core_voff = p['na_Ha_core_voff'] # km/s
na_Ha_core = gaussian(lam_gal,na_ha_core_center,na_ha_core_amp,na_ha_core_fwhm,na_ha_core_voff,velscale)
host_model = host_model - na_Ha_core
comp_dict['na_Ha_core'] = {'comp':na_Ha_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [NII]6585 Core
na_nii6585_core_center = 6585.270 # Angstroms
na_nii6585_core_amp = p['na_nii6585_core_amp'] # flux units
na_nii6585_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6585_core_center,na_ha_core_voff)
na_nii6585_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_nii6585_core_fwhm_res)**2) # km/s
na_nii6585_core_voff = na_ha_core_voff
na_nii6585_core = gaussian(lam_gal,na_nii6585_core_center,na_nii6585_core_amp,na_nii6585_core_fwhm,na_nii6585_core_voff,velscale)
host_model = host_model - na_nii6585_core
comp_dict['na_nii6585_core'] = {'comp':na_nii6585_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [NII]6549 Core
na_nii6549_core_center = 6549.860 # Angstroms
na_nii6549_core_amp = (1.0/2.93)*na_nii6585_core_amp # flux units
na_nii6549_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6549_core_center,na_ha_core_voff)
        na_nii6549_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_nii6549_core_fwhm_res)**2) # km/s
na_nii6549_core_voff = na_ha_core_voff
na_nii6549_core = gaussian(lam_gal,na_nii6549_core_center,na_nii6549_core_amp,na_nii6549_core_fwhm,na_nii6549_core_voff,velscale)
host_model = host_model - na_nii6549_core
comp_dict['na_nii6549_core'] = {'comp':na_nii6549_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [SII]6718
na_sii6718_core_center = 6718.290 # Angstroms
na_sii6718_core_amp = p['na_sii6718_core_amp'] # flux units
na_sii6718_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6718_core_center,na_ha_core_voff)
        na_sii6718_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_sii6718_core_fwhm_res)**2) # km/s
na_sii6718_core_voff = na_ha_core_voff
na_sii6718_core = gaussian(lam_gal,na_sii6718_core_center,na_sii6718_core_amp,na_sii6718_core_fwhm,na_sii6718_core_voff,velscale)
host_model = host_model - na_sii6718_core
comp_dict['na_sii6718_core'] = {'comp':na_sii6718_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [SII]6732
na_sii6732_core_center = 6732.670 # Angstroms
na_sii6732_core_amp = p['na_sii6732_core_amp'] # flux units
na_sii6732_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6732_core_center,na_ha_core_voff)
na_sii6732_core_fwhm = np.sqrt(p['na_Ha_core_fwhm']**2+(na_sii6732_core_fwhm_res)**2) # km/s
na_sii6732_core_voff = na_ha_core_voff
na_sii6732_core = gaussian(lam_gal,na_sii6732_core_center,na_sii6732_core_amp,na_sii6732_core_fwhm,na_sii6732_core_voff,velscale)
host_model = host_model - na_sii6732_core
comp_dict['na_sii6732_core'] = {'comp':na_sii6732_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
elif (all(comp in param_names for comp in ['na_Ha_core_amp','na_Ha_core_voff',
'na_nii6585_core_amp',
'na_sii6732_core_amp','na_sii6718_core_amp',
'na_oiii5007_core_fwhm'])==True) & ('na_Ha_core_fwhm' not in param_names):
# If all narrow line widths are tied to [OIII]5007 FWHM...
# Narrow H-alpha
na_ha_core_center = 6564.610 # Angstroms
na_ha_core_amp = p['na_Ha_core_amp'] # flux units
na_ha_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_ha_core_center,p['na_Ha_core_voff'])
na_ha_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_ha_core_fwhm_res)**2) # km/s
na_ha_core_voff = p['na_Ha_core_voff'] # km/s
na_Ha_core = gaussian(lam_gal,na_ha_core_center,na_ha_core_amp,na_ha_core_fwhm,na_ha_core_voff,velscale)
host_model = host_model - na_Ha_core
comp_dict['na_Ha_core'] = {'comp':na_Ha_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [NII]6585 Core
na_nii6585_core_center = 6585.270 # Angstroms
na_nii6585_core_amp = p['na_nii6585_core_amp'] # flux units
na_nii6585_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6585_core_center,na_ha_core_voff)
na_nii6585_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_nii6585_core_fwhm_res)**2) # km/s
na_nii6585_core_voff = na_ha_core_voff
na_nii6585_core = gaussian(lam_gal,na_nii6585_core_center,na_nii6585_core_amp,na_nii6585_core_fwhm,na_nii6585_core_voff,velscale)
host_model = host_model - na_nii6585_core
comp_dict['na_nii6585_core'] = {'comp':na_nii6585_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [NII]6549 Core
na_nii6549_core_center = 6549.860 # Angstroms
na_nii6549_core_amp = (1.0/2.93)*na_nii6585_core_amp # flux units
na_nii6549_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6549_core_center,na_ha_core_voff)
na_nii6549_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_nii6549_core_fwhm_res)**2) # km/s
na_nii6549_core_voff = na_ha_core_voff
na_nii6549_core = gaussian(lam_gal,na_nii6549_core_center,na_nii6549_core_amp,na_nii6549_core_fwhm,na_nii6549_core_voff,velscale)
host_model = host_model - na_nii6549_core
comp_dict['na_nii6549_core'] = {'comp':na_nii6549_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [SII]6732
na_sii6732_core_center = 6732.670 # Angstroms
na_sii6732_core_amp = p['na_sii6732_core_amp'] # flux units
na_sii6732_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6732_core_center,na_ha_core_voff)
na_sii6732_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_sii6732_core_fwhm_res)**2) # km/s
na_sii6732_core_voff = na_ha_core_voff
na_sii6732_core = gaussian(lam_gal,na_sii6732_core_center,na_sii6732_core_amp,na_sii6732_core_fwhm,na_sii6732_core_voff,velscale)
host_model = host_model - na_sii6732_core
comp_dict['na_sii6732_core'] = {'comp':na_sii6732_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
# Narrow [SII]6718
na_sii6718_core_center = 6718.290 # Angstroms
na_sii6718_core_amp = p['na_sii6718_core_amp'] # flux units
na_sii6718_core_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6718_core_center,na_ha_core_voff)
na_sii6718_core_fwhm = np.sqrt(p['na_oiii5007_core_fwhm']**2+(na_sii6718_core_fwhm_res)**2) # km/s
na_sii6718_core_voff = na_ha_core_voff
na_sii6718_core = gaussian(lam_gal,na_sii6718_core_center,na_sii6718_core_amp,na_sii6718_core_fwhm,na_sii6718_core_voff,velscale)
host_model = host_model - na_sii6718_core
comp_dict['na_sii6718_core'] = {'comp':na_sii6718_core,'pcolor':'xkcd:dodger blue','linewidth':1.0}
########################################################################################################
# Outflow Components
#### Hb/[OIII] outflows ################################################################################
if (all(comp in param_names for comp in ['na_oiii5007_outflow_amp','na_oiii5007_outflow_fwhm','na_oiii5007_outflow_voff'])==True):
# Broad [OIII]5007 Outflow;
na_oiii5007_outflow_center = 5008.240 # Angstroms
na_oiii5007_outflow_amp = p['na_oiii5007_outflow_amp'] # flux units
na_oiii5007_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii5007_outflow_center,p['na_oiii5007_outflow_voff'])
na_oiii5007_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_oiii5007_outflow_fwhm_res)**2) # km/s
na_oiii5007_outflow_voff = p['na_oiii5007_outflow_voff'] # km/s
na_oiii5007_outflow = gaussian(lam_gal,na_oiii5007_outflow_center,na_oiii5007_outflow_amp,na_oiii5007_outflow_fwhm,na_oiii5007_outflow_voff,velscale)
host_model = host_model - na_oiii5007_outflow
comp_dict['na_oiii5007_outflow'] = {'comp':na_oiii5007_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# Broad [OIII]4959 Outflow;
na_oiii4959_outflow_center = 4960.295 # Angstroms
na_oiii4959_outflow_amp = na_oiii4959_core_amp*na_oiii5007_outflow_amp/na_oiii5007_core_amp # flux units
na_oiii4959_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_oiii4959_outflow_center,na_oiii5007_outflow_voff)
na_oiii4959_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_oiii4959_outflow_fwhm_res)**2) # km/s
na_oiii4959_outflow_voff = na_oiii5007_outflow_voff # km/s
        if (na_oiii4959_outflow_amp!=na_oiii4959_outflow_amp/1.0) or (na_oiii4959_outflow_amp==np.inf): na_oiii4959_outflow_amp=0.0 # x != x/1.0 is a NaN test; zero out non-finite amplitudes (same guard used below)
na_oiii4959_outflow = gaussian(lam_gal,na_oiii4959_outflow_center,na_oiii4959_outflow_amp,na_oiii4959_outflow_fwhm,na_oiii4959_outflow_voff,velscale)
host_model = host_model - na_oiii4959_outflow
comp_dict['na_oiii4959_outflow'] = {'comp':na_oiii4959_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
if (all(comp in param_names for comp in ['na_oiii5007_outflow_amp','na_oiii5007_outflow_fwhm','na_oiii5007_outflow_voff','na_Hb_core_amp','na_Hb_core_voff'])==True):
# Broad H-beta Outflow; only a model, no free parameters, tied to [OIII]5007
na_hb_core_center = 4862.680 # Angstroms
na_hb_outflow_amp = na_hb_core_amp*na_oiii5007_outflow_amp/na_oiii5007_core_amp
na_hb_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_hb_core_center,na_hb_core_voff+na_oiii5007_outflow_voff)
na_hb_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_hb_outflow_fwhm_res)**2) # km/s
na_hb_outflow_voff = na_hb_core_voff+na_oiii5007_outflow_voff # km/s
if (na_hb_outflow_amp!=na_hb_outflow_amp/1.0) or (na_hb_outflow_amp==np.inf): na_hb_outflow_amp=0.0
na_Hb_outflow = gaussian(lam_gal,na_hb_core_center,na_hb_outflow_amp,na_hb_outflow_fwhm,na_hb_outflow_voff,velscale)
host_model = host_model - na_Hb_outflow
comp_dict['na_Hb_outflow'] = {'comp':na_Hb_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
#### Ha/[NII]/[SII] outflows ###########################################################################
    # Outflows in H-alpha/[NII] are poorly constrained due to the presence of a broad line and/or blending of narrow lines.
    # First, we check whether the fit includes Hb/[OIII] outflows; if it does, we use the outflow in [OIII] to constrain the outflows
    # in the Ha/[NII]/[SII] region. If the fit does NOT include Hb/[OIII] outflows (*not recommended*), we then allow the outflows
    # in the Ha/[NII]/[SII] region to be fit as free parameters.
if (all(comp in param_names for comp in ['na_Ha_core_amp','na_Ha_core_voff','na_nii6585_core_amp','na_sii6732_core_amp','na_sii6718_core_amp',
'na_oiii5007_outflow_amp','na_oiii5007_outflow_fwhm','na_oiii5007_outflow_voff'])==True) and \
(all(comp not in param_names for comp in ['na_Ha_outflow_amp','na_Ha_outflow_fwhm','na_Ha_outflow_voff'])==True):
# H-alpha Outflow;
na_ha_outflow_center = 6564.610 # Angstroms
na_ha_outflow_amp = p['na_Ha_core_amp']*p['na_oiii5007_outflow_amp']/p['na_oiii5007_core_amp'] # flux units
na_ha_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_ha_outflow_center,p['na_oiii5007_outflow_voff'])
na_ha_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_ha_outflow_fwhm_res)**2) # km/s
        na_ha_outflow_voff = p['na_oiii5007_outflow_voff'] # km/s
if (na_ha_outflow_amp!=na_ha_outflow_amp/1.0) or (na_ha_outflow_amp==np.inf): na_ha_outflow_amp=0.0
na_Ha_outflow = gaussian(lam_gal,na_ha_outflow_center,na_ha_outflow_amp,na_ha_outflow_fwhm,na_ha_outflow_voff,velscale)
host_model = host_model - na_Ha_outflow
comp_dict['na_Ha_outflow'] = {'comp':na_Ha_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# [NII]6585 Outflow;
na_nii6585_outflow_center = 6585.270 # Angstroms
na_nii6585_outflow_amp = na_nii6585_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_nii6585_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6585_outflow_center,na_ha_outflow_voff)
na_nii6585_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_nii6585_outflow_fwhm_res)**2)
na_nii6585_outflow_voff = na_ha_outflow_voff
if (na_nii6585_outflow_amp!=na_nii6585_outflow_amp/1.0) or (na_nii6585_outflow_amp==np.inf): na_nii6585_outflow_amp=0.0
na_nii6585_outflow = gaussian(lam_gal,na_nii6585_outflow_center,na_nii6585_outflow_amp,na_nii6585_outflow_fwhm,na_nii6585_outflow_voff,velscale)
host_model = host_model - na_nii6585_outflow
comp_dict['na_nii6585_outflow'] = {'comp':na_nii6585_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# [NII]6549 Outflow;
na_nii6549_outflow_center = 6549.860 # Angstroms
na_nii6549_outflow_amp = na_nii6549_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_nii6549_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6549_outflow_center,na_ha_outflow_voff)
na_nii6549_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_nii6549_outflow_fwhm_res)**2) # km/s
na_nii6549_outflow_voff = na_ha_outflow_voff # km/s
if (na_nii6549_outflow_amp!=na_nii6549_outflow_amp/1.0) or (na_nii6549_outflow_amp==np.inf): na_nii6549_outflow_amp=0.0
na_nii6549_outflow = gaussian(lam_gal,na_nii6549_outflow_center,na_nii6549_outflow_amp,na_nii6549_outflow_fwhm,na_nii6549_outflow_voff,velscale)
host_model = host_model - na_nii6549_outflow
comp_dict['na_nii6549_outflow'] = {'comp':na_nii6549_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# Broad [SII]6718 Outflow;
na_sii6718_outflow_center = 6718.290 # Angstroms
na_sii6718_outflow_amp = na_sii6718_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_sii6718_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6718_outflow_center,na_ha_outflow_voff)
na_sii6718_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_sii6718_outflow_fwhm_res)**2) # km/s
na_sii6718_outflow_voff = na_ha_outflow_voff # km/s
if (na_sii6718_outflow_amp!=na_sii6718_outflow_amp/1.0) or (na_sii6718_outflow_amp==np.inf): na_sii6718_outflow_amp=0.0
na_sii6718_outflow = gaussian(lam_gal,na_sii6718_outflow_center,na_sii6718_outflow_amp,na_sii6718_outflow_fwhm,na_sii6718_outflow_voff,velscale)
host_model = host_model - na_sii6718_outflow
comp_dict['na_sii6718_outflow'] = {'comp':na_sii6718_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# [SII]6732 Outflow;
na_sii6732_outflow_center = 6732.670 # Angstroms
na_sii6732_outflow_amp = na_sii6732_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_sii6732_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6732_outflow_center,na_ha_outflow_voff)
na_sii6732_outflow_fwhm = np.sqrt(p['na_oiii5007_outflow_fwhm']**2+(na_sii6732_outflow_fwhm_res)**2) # km/s
na_sii6732_outflow_voff = na_ha_outflow_voff # km/s
if (na_sii6732_outflow_amp!=na_sii6732_outflow_amp/1.0) or (na_sii6732_outflow_amp==np.inf): na_sii6732_outflow_amp=0.0
na_sii6732_outflow = gaussian(lam_gal,na_sii6732_outflow_center,na_sii6732_outflow_amp,na_sii6732_outflow_fwhm,na_sii6732_outflow_voff,velscale)
host_model = host_model - na_sii6732_outflow
comp_dict['na_sii6732_outflow'] = {'comp':na_sii6732_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
elif (all(comp in param_names for comp in ['na_Ha_outflow_amp','na_Ha_outflow_fwhm','na_Ha_outflow_voff'])==True) and \
(all(comp not in param_names for comp in ['na_oiii5007_outflow_amp','na_oiii5007_outflow_fwhm','na_oiii5007_outflow_voff'])==True):
# H-alpha Outflow;
na_ha_outflow_center = 6564.610 # Angstroms
na_ha_outflow_amp = p['na_Ha_outflow_amp'] # flux units
na_ha_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_ha_outflow_center,p['na_Ha_outflow_voff'])
na_ha_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_ha_outflow_fwhm_res)**2) # km/s
        na_ha_outflow_voff = p['na_Ha_outflow_voff'] # km/s
if (na_ha_outflow_amp!=na_ha_outflow_amp/1.0) or (na_ha_outflow_amp==np.inf): na_ha_outflow_amp=0.0
na_Ha_outflow = gaussian(lam_gal,na_ha_outflow_center,na_ha_outflow_amp,na_ha_outflow_fwhm,na_ha_outflow_voff,velscale)
host_model = host_model - na_Ha_outflow
comp_dict['na_Ha_outflow'] = {'comp':na_Ha_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# [NII]6585 Outflow;
na_nii6585_outflow_center = 6585.270 # Angstroms
na_nii6585_outflow_amp = na_nii6585_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_nii6585_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6585_outflow_center,na_ha_outflow_voff)
na_nii6585_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_nii6585_outflow_fwhm_res)**2)
na_nii6585_outflow_voff = na_ha_outflow_voff
if (na_nii6585_outflow_amp!=na_nii6585_outflow_amp/1.0) or (na_nii6585_outflow_amp==np.inf): na_nii6585_outflow_amp=0.0
na_nii6585_outflow = gaussian(lam_gal,na_nii6585_outflow_center,na_nii6585_outflow_amp,na_nii6585_outflow_fwhm,na_nii6585_outflow_voff,velscale)
host_model = host_model - na_nii6585_outflow
comp_dict['na_nii6585_outflow'] = {'comp':na_nii6585_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# [NII]6549 Outflow;
na_nii6549_outflow_center = 6549.860 # Angstroms
na_nii6549_outflow_amp = na_nii6549_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_nii6549_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_nii6549_outflow_center,na_ha_outflow_voff)
na_nii6549_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_nii6549_outflow_fwhm_res)**2) # km/s
na_nii6549_outflow_voff = na_ha_outflow_voff # km/s
if (na_nii6549_outflow_amp!=na_nii6549_outflow_amp/1.0) or (na_nii6549_outflow_amp==np.inf): na_nii6549_outflow_amp=0.0
na_nii6549_outflow = gaussian(lam_gal,na_nii6549_outflow_center,na_nii6549_outflow_amp,na_nii6549_outflow_fwhm,na_nii6549_outflow_voff,velscale)
host_model = host_model - na_nii6549_outflow
comp_dict['na_nii6549_outflow'] = {'comp':na_nii6549_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# Broad [SII]6718 Outflow;
na_sii6718_outflow_center = 6718.290 # Angstroms
na_sii6718_outflow_amp = na_sii6718_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_sii6718_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6718_outflow_center,na_ha_outflow_voff)
na_sii6718_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_sii6718_outflow_fwhm_res)**2) # km/s
na_sii6718_outflow_voff = na_ha_outflow_voff # km/s
if (na_sii6718_outflow_amp!=na_sii6718_outflow_amp/1.0) or (na_sii6718_outflow_amp==np.inf): na_sii6718_outflow_amp=0.0
na_sii6718_outflow = gaussian(lam_gal,na_sii6718_outflow_center,na_sii6718_outflow_amp,na_sii6718_outflow_fwhm,na_sii6718_outflow_voff,velscale)
host_model = host_model - na_sii6718_outflow
comp_dict['na_sii6718_outflow'] = {'comp':na_sii6718_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
# [SII]6732 Outflow;
na_sii6732_outflow_center = 6732.670 # Angstroms
na_sii6732_outflow_amp = na_sii6732_core_amp*na_ha_outflow_amp/na_ha_core_amp # flux units
na_sii6732_outflow_fwhm_res = get_fwhm_res(fwhm_gal_ftn,na_sii6732_outflow_center,na_ha_outflow_voff)
na_sii6732_outflow_fwhm = np.sqrt(p['na_Ha_outflow_fwhm']**2+(na_sii6732_outflow_fwhm_res)**2) # km/s
na_sii6732_outflow_voff = na_ha_outflow_voff # km/s
if (na_sii6732_outflow_amp!=na_sii6732_outflow_amp/1.0) or (na_sii6732_outflow_amp==np.inf): na_sii6732_outflow_amp=0.0
na_sii6732_outflow = gaussian(lam_gal,na_sii6732_outflow_center,na_sii6732_outflow_amp,na_sii6732_outflow_fwhm,na_sii6732_outflow_voff,velscale)
host_model = host_model - na_sii6732_outflow
comp_dict['na_sii6732_outflow'] = {'comp':na_sii6732_outflow,'pcolor':'xkcd:magenta','linewidth':1.0}
########################################################################################################
# Broad Lines
#### Br. H-gamma #######################################################################################
if all(comp in param_names for comp in ['br_Hg_amp','br_Hg_fwhm','br_Hg_voff'])==True:
br_hg_center = 4341.680 # Angstroms
br_hg_amp = p['br_Hg_amp'] # flux units
br_hg_fwhm_res = get_fwhm_res(fwhm_gal_ftn,br_hg_center,p['br_Hg_voff'])
br_hg_fwhm = np.sqrt(p['br_Hg_fwhm']**2+(br_hg_fwhm_res)**2) # km/s
br_hg_voff = p['br_Hg_voff'] # km/s
# br_Hg = gaussian(lam_gal,br_hg_center,br_hg_amp,br_hg_fwhm,br_hg_voff,velscale)
br_Hg = line_model(line_profile,lam_gal,br_hg_center,br_hg_amp,br_hg_fwhm,br_hg_voff,velscale)
host_model = host_model - br_Hg
comp_dict['br_Hg'] = {'comp':br_Hg,'pcolor':'xkcd:turquoise','linewidth':1.0}
#### Br. H-beta ########################################################################################
if all(comp in param_names for comp in ['br_Hb_amp','br_Hb_fwhm','br_Hb_voff'])==True:
br_hb_center = 4862.68 # Angstroms
br_hb_amp = p['br_Hb_amp'] # flux units
br_hb_fwhm_res = get_fwhm_res(fwhm_gal_ftn,br_hb_center,p['br_Hb_voff'])
br_hb_fwhm = np.sqrt(p['br_Hb_fwhm']**2+(br_hb_fwhm_res)**2) # km/s
br_hb_voff = p['br_Hb_voff'] # km/s
# br_Hb = gaussian(lam_gal,br_hb_center,br_hb_amp,br_hb_fwhm,br_hb_voff,velscale)
br_Hb = line_model(line_profile,lam_gal,br_hb_center,br_hb_amp,br_hb_fwhm,br_hb_voff,velscale)
host_model = host_model - br_Hb
comp_dict['br_Hb'] = {'comp':br_Hb,'pcolor':'xkcd:turquoise','linewidth':1.0}
#### Br. H-alpha #######################################################################################
if all(comp in param_names for comp in ['br_Ha_amp','br_Ha_fwhm','br_Ha_voff'])==True:
br_ha_center = 6564.610 # Angstroms
br_ha_amp = p['br_Ha_amp'] # flux units
br_ha_fwhm_res = get_fwhm_res(fwhm_gal_ftn,br_ha_center,p['br_Ha_voff'])
br_ha_fwhm = np.sqrt(p['br_Ha_fwhm']**2+(br_ha_fwhm_res)**2) # km/s
br_ha_voff = p['br_Ha_voff'] # km/s
# br_Ha = gaussian(lam_gal,br_ha_center,br_ha_amp,br_ha_fwhm,br_ha_voff,velscale)
br_Ha = line_model(line_profile,lam_gal,br_ha_center,br_ha_amp,br_ha_fwhm,br_ha_voff,velscale)
host_model = host_model - br_Ha
comp_dict['br_Ha'] = {'comp':br_Ha,'pcolor':'xkcd:turquoise','linewidth':1.0}
########################################################################################################
########################################################################################################
############################# Host-galaxy Component ######################################################
if all(comp in param_names for comp in ['gal_temp_amp'])==True:
gal_temp = p['gal_temp_amp']*(gal_temp)
host_model = (host_model) - (gal_temp) # Subtract off continuum from galaxy, since we only want template weights to be fit
comp_dict['host_galaxy'] = {'comp':gal_temp,'pcolor':'xkcd:lime green','linewidth':1.0}
########################################################################################################
############################# LOSVD Component ####################################################
if all(comp in param_names for comp in ['stel_vel','stel_disp'])==True:
# Convolve the templates with a LOSVD
losvd_params = [p['stel_vel'],p['stel_disp']] # ind 0 = velocity*, ind 1 = sigma*
conv_temp = convolve_gauss_hermite(temp_fft,npad,float(velscale),\
losvd_params,npix,velscale_ratio=1,sigma_diff=0,vsyst=vsyst)
        # Fitted weights of all templates using Non-negative Least Squares (NNLS)
        host_model[host_model/host_model!=1] = 0 # x/x != 1 only for NaN/inf: zero out non-finite pixels before the fit
        weights = nnls(conv_temp,host_model) # NNLS wrapper (assumed to return only the weight vector, unlike scipy.optimize.nnls, which also returns the residual)
        host_galaxy = (np.sum(weights*conv_temp,axis=1))
comp_dict['host_galaxy'] = {'comp':host_galaxy,'pcolor':'xkcd:lime green','linewidth':1.0}
########################################################################################################
# The final model
    gmodel = np.sum([d['comp'] for d in comp_dict.values() if d],axis=0) # list, not a generator: np.sum needs a real sequence when axis is given
    ########################## Measure Emission Line Fluxes #################################################
    # Fluxes of components are stored in a dictionary and returned to emcee as a metadata blob.
    # This is a vast improvement over the previous method, which wrote fluxes to an
    # output file at each iteration; opening, writing to, and closing a file
    # nwalkers x niter times is computationally expensive.
if (fit_type=='final') and (output_model==False):
fluxes = {}
for key in comp_dict:
# compute the integrated flux
flux = simps(comp_dict[key]['comp'],lam_gal)
# add key/value pair to dictionary
fluxes[key+'_flux'] = flux
##################################################################################
# Add last components to comp_dict for plotting purposes
# Add galaxy, sigma, model, and residuals to comp_dict
comp_dict['data'] = {'comp':galaxy ,'pcolor':'xkcd:white', 'linewidth':0.5}
comp_dict['wave'] = {'comp':lam_gal ,'pcolor':'xkcd:black', 'linewidth':0.5}
comp_dict['noise'] = {'comp':noise ,'pcolor':'xkcd:cyan' , 'linewidth':0.5}
comp_dict['model'] = {'comp':gmodel ,'pcolor':'xkcd:red' , 'linewidth':1.0}
comp_dict['resid'] = {'comp':galaxy-gmodel ,'pcolor':'xkcd:white', 'linewidth':0.5}
##################################################################################
##################################################################################
if (fit_type=='init') and (output_model==False): # For max. likelihood fitting
return gmodel
if (fit_type=='init') and (output_model==True): # For max. likelihood fitting
return comp_dict
elif (fit_type=='outflow_test'):
return comp_dict
elif (fit_type=='final') and (output_model==False): # For emcee
return gmodel, fluxes
elif (fit_type=='final') and (output_model==True): # output all models for best-fit model
return comp_dict
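# Aside on the repeated amplitude guard above: (amp != amp/1.0) or (amp == np.inf)
# is an inline non-finite test, since only NaN compares unequal to itself. A
# hypothetical helper (not part of the original module) that makes the intent
# explicit, and additionally catches -inf, could replace each occurrence:
def _finite_or_zero(amp):
    """Return amp, or 0.0 if amp is NaN or +/-inf."""
    return amp if np.isfinite(amp) else 0.0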
|
44cd0bc61a4472c6a5c3c7b190ee5be96f4bdb1a
| 18,204 |
import random
def generate_numbers():
"""
Function to generate 3 random digits to be guessed.
Generate 3 random in a list in order to be compare to the user's digits.
Return:
str_digits (Array): List with 3 random digits converted to String
"""
# List comprehension to generate numbers from 0 to 9 and cast it as String
str_digits = [str(num) for num in range(10)]
# Shuffle randomly the list
random.shuffle(str_digits)
return str_digits[:3]
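# Equivalent one-liner for reference: random.sample draws 3 distinct items,
# which is exactly what the shuffle-and-slice above achieves.
# random.sample([str(num) for num in range(10)], 3)
print(generate_numbers())  # e.g. ['7', '0', '4']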
|
8efd0f579a3a0b3dc5021cd762f9ad2f5774f6be
| 18,205 |
from flask import jsonify, request
# Upload, APIError, error_on_unauthorized, and upload_to_dict are assumed to be
# defined elsewhere in this application.
def get_media():
"""Retrieves metadata for all of this server's uploaded media. Can use
the following query parameters:
* max: The maximum number of records to return
* page: The page of records
"""
error_on_unauthorized()
media = Upload.query.order_by(Upload.id)
total_num = media.count()
if total_num == 0:
return jsonify(total=0, uploads=[])
try:
count = int(request.args.get('max', total_num))
page = int(request.args.get('page', 1))
if count <= 0 or page <= 0:
raise APIError(422, "Query parameters out of range")
begin = (page - 1) * count
end = min(begin + count, total_num)
return jsonify(total=total_num, uploads=[upload_to_dict(u) for u in media.all()[begin:end]]), 200
except ValueError:
raise APIError(422, "Invalid query parameter")
|
754417b47f5b9c28427b04ace88bf9ca5c9a5a47
| 18,206 |
def summate2(phasevec):
"""Calculate values b'(j^vec) for combining 2 phase vectors.
    Parameters:
        phasevec: tuple of two phase vectors, each a pair (values, length)
Example:
On input (([b_1(0),b_1(1),...,b_1(L-1)], L), ([b_2(0),b_2(1),...,b_2(L'-1)], L'))
give output [b_1(0)+b_2(0), b_1(0)+b_2(1),..., b_1(1)+b_2(0),...,b_1(L-1)+b_2(L'-1)]
"""
b = [] # array for values of summed phasevector
for i in range(phasevec[0][1]):
for j in range(phasevec[1][1]):
b.append(phasevec[0][0][i] + phasevec[1][0][j])
return b
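# Concrete check of the ordering: the second vector's index varies fastest.
assert summate2((([0, 1], 2), ([0, 10, 20], 3))) == [0, 10, 20, 1, 11, 21]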
|
5150c2ee29a31438bf16104eaadeb85a01f54502
| 18,207 |
def get_children():
    """ Return IDs of LIST, which is currently a zero-based index.
    Modelled after treeview for future alphabetic IDs.
    TODO: Should probably return list of all DICT?
    """
    iid_list = []
    for i in range(len(LIST)):
        iid = ndx_to_iid(i)
        iid_list.append(iid)  # treeview uses strings for indices
    return iid_list
|
14e855df5c2b218c68e900709fd548d3431c4a8b
| 18,208 |
def makeTracker(path, args=(), kwargs=None):
    """Retrieve an instantiated tracker and its associated code.
    Returns a tuple (code, tracker).
    """
    if kwargs is None:  # avoid a shared mutable default argument
        kwargs = {}
    obj, module, pathname, cls = makeObject(path, args, kwargs)
    code = getCode(cls, pathname)
    return code, obj
|
bc23e21bb53357bcf74e6194656cfbea4b24c218
| 18,209 |
from typing import Tuple
from torchvision.models.detection.anchor_utils import AnchorGenerator
def get_anchor_generator(anchor_size: Tuple[tuple, ...] = None, aspect_ratios: Tuple[tuple, ...] = None):
"""Returns the anchor generator."""
if anchor_size is None:
anchor_size = ((16,), (32,), (64,), (128,))
if aspect_ratios is None:
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_size)
anchor_generator = AnchorGenerator(sizes=anchor_size,
aspect_ratios=aspect_ratios)
return anchor_generator
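# Hedged usage sketch: torchvision detection models accept a custom anchor
# generator via the rpn_anchor_generator keyword; backbone is assumed to be a
# feature extractor with out_channels set, one feature map per anchor size.
# from torchvision.models.detection import FasterRCNN
# anchor_gen = get_anchor_generator()
# model = FasterRCNN(backbone, num_classes=2, rpn_anchor_generator=anchor_gen)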
|
e9eef959c009062d5866558d00674c1afa033260
| 18,210 |
import numpy as np
import torch
def tensor_to_longs(tensor: torch.Tensor) -> np.ndarray:
    """Converts a tensor of longs to a detached NumPy array on the CPU."""
    assert tensor.dtype == torch.long
    return tensor.detach().cpu().numpy()
|
ba1788be8e353936cfc3d604d940b78a96990fd4
| 18,211 |
import numpy as np
# create_fake_data and fidelity_est are assumed to be defined elsewhere in this module.
def test_fixed(SNRs):
"""
Fixed (infinite T1) qubit.
"""
fidelities = []
numShots = 10000
dt = 1e-3
for SNR in SNRs:
fakeData = create_fake_data(SNR, dt, 1, numShots, T1=1e9)
signal = dt*np.sum(fakeData, axis=1)
fidelities.append(fidelity_est(signal))
return fidelities
|
70ca68f475beed73a47722c719811544ae1bfccb
| 18,212 |
def setup(app):
"""
Add the ``fica`` directive to the Sphinx app.
"""
app.add_directive("fica", FicaDirective)
return {
"version": __version__,
"parallel_read_safe": True,
"parallel_write_safe": True,
}
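# Sketch of the consuming side, assuming this module is the extension entry
# point: naming it in conf.py makes Sphinx call setup(app) at build time.
# In conf.py (module path hypothetical):
# extensions = ["fica.sphinx"]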
|
996e568ab58634e64a845b34bf38082658b58889
| 18,213 |
from typing import Tuple
import torch
from torch import Tensor
def get_binary_statistics(
    outputs: Tensor, targets: Tensor, label: int = 1,
) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:
    """
    Computes the number of true negatives, false positives,
    false negatives, true positives and support
    for a binary classification problem for a given label.
Args:
outputs: estimated targets as predicted by a model
with shape [bs; ..., 1]
targets: ground truth (correct) target values
with shape [bs; ..., 1]
label: integer, that specifies label of interest for statistics compute
Returns:
Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: stats
Example:
>>> y_pred = torch.tensor([[0, 0, 1, 1, 0, 1, 0, 1]])
>>> y_true = torch.tensor([[0, 1, 0, 1, 0, 0, 1, 1]])
        >>> tn, fp, fn, tp, support = get_binary_statistics(y_pred, y_true)
        >>> print(tn, fp, fn, tp, support)
        tensor(2) tensor(2) tensor(2) tensor(2) tensor(4)
"""
tn = ((outputs != label) * (targets != label)).to(torch.long).sum()
fp = ((outputs == label) * (targets != label)).to(torch.long).sum()
fn = ((outputs != label) * (targets == label)).to(torch.long).sum()
tp = ((outputs == label) * (targets == label)).to(torch.long).sum()
support = (targets == label).to(torch.long).sum()
return tn, fp, fn, tp, support
|
e0c81b404f6da77f40c1e4f3810d699fdef1e6a4
| 18,214 |
import numpy as np
def threshold_and_mask(min_normed_weight, W, Mc, coords):
"""Normalize the weights W, threshold to min_normed_weight and remove diagonal,
reduce DX and DY to the columns and rows still containing weights.
Returns
-------
coords : array_like
the indices of these columns in terms of original image indices
W_n_m : array_like
the thresholded weights
D_X_m : array_like
The reduced DX
D_Y_m : array_like
The reduced DY
row_mask : array_like
The indices of these columns in terms of calculated arrays.
"""
#coords = np.arange(Wc.shape[0])*stride + start
wcdiag = np.atleast_2d(np.diag(W))
W_n = W / np.sqrt(wcdiag.T*wcdiag)
mask = W_n - np.diag(np.diag(W_n)) > min_normed_weight
row_mask = np.any(mask, axis=0)
W_n = np.where(mask, W_n, 0)
DX, DY = Mc[0], Mc[1]
W_n_m = W_n[:, row_mask][row_mask, :]
coords = coords[row_mask]
#mask_red = mask[row_mask, :][:, row_mask]
DX_m, DY_m = DX[row_mask, :][:, row_mask], DY[row_mask, :][:, row_mask]
return coords, W_n_m, DX_m, DY_m, row_mask
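# A minimal usage sketch: three points where only points 0 and 1 are strongly
# connected (the DX/DY stacks are random stand-ins). Point 2 falls below the
# threshold, so row_mask comes out [True, True, False] and coords reduces to [0, 1].
W_demo = np.array([[4.0, 3.0, 0.1],
                   [3.0, 4.0, 0.1],
                   [0.1, 0.1, 4.0]])
Mc_demo = (np.random.rand(3, 3), np.random.rand(3, 3))
coords_m, W_n_m_demo, DX_m_demo, DY_m_demo, row_mask_demo = threshold_and_mask(
    0.5, W_demo, Mc_demo, np.arange(3))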
|
78d361cf2125cd0d3ac1a3985933e39b09538b18
| 18,215 |
import csv
def readCGcsv(filename, levels):
""" Read a .csv file of a callgraph into a dictionary keyed by callgraph level. """
cgdict = {}
with open(filename, "r") as cgcsv:
cgreader = csv.DictReader(cgcsv)
for row in cgreader:
lvl = int(row['Level'])
if (lvl < levels) or (levels <= 0):
cost = row[r'Samp%']
fname = row[r'Calltree/PE=HIDE']
node = CGNode(fname, cost)
if lvl not in cgdict.keys():
cgdict[lvl] = []
cgdict[lvl].append(node)
if lvl > 0:
cgdict[lvl - 1][-1].addCallee(node)
return cgdict
|
ec5dbc3d064a0cf784bfd764b996eb36677642a9
| 18,216 |
import matplotlib.pyplot as plt
from cycler import cycler
def use_colors(tones, i=None):
    """
    Use specific color tones for plotting. If i is specified, this function returns a specific color from the corresponding color cycle
    For custom color palettes generation check: http://colorbrewer2.org/#type=sequential&scheme=YlGnBu&n=8
    Args:
        tones : 'hot' or 'cold' for hot and cold colors
    Returns:
        color i of the color cycle
    """
    hot = ['#fed976', '#feb24c', '#fd8d3c', '#fc4e2a', '#e31a1c', '#b10026']
    cold = ['#a6bddb', '#67a9cf', '#3690c0', '#02818a', '#016c59', '#014636']
    # cold = ['#44AE7E', '#388A8D', '#397187', '#3E568E', '#463883', '#461167']
    # use equality (not identity) when comparing strings
    if tones == 'hot':
        colors = hot
    elif tones == 'cold':
        colors = cold
    else:
        colors = tones
    if i is None:
        plt.rc('axes', prop_cycle=(cycler('color', colors)))
        return colors
    return colors[i % len(colors)]
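# A minimal usage sketch: set the axes color cycle to the 'cold' palette,
# then fetch a single palette color by index for manual styling.
use_colors('cold')                    # subsequent lines cycle through cold tones
third_cold = use_colors('cold', i=2)  # -> '#3690c0'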
|
e36cce208c89178af8199662edb336c2455bdc37
| 18,217 |
def fill_form(forms, form):
"""Fills a given form given a set or known forms.
:param forms: A set of known forms.
:param form: The form to fill.
:return: A mapping from form element IDs to suggested values for the form.
"""
forms = list(forms)
new_form = {}
def rec_fill_form(form, labels):
if not labels:
return new_form
unfilled_labels = []
neighbor = get_neighbor(forms, labels)
if not neighbor:
            LOGGER.info('No neighbors found: %s', labels)
for label in labels:
new_form[form['form'][label]['id']] = None
return new_form
        LOGGER.info('Neighbor: %s', neighbor)
for label in labels:
if label in neighbor['form']:
new_form[form['form'][label]['id']] = neighbor['form'][label]['value']
else:
unfilled_labels.append(label)
# LOGGER.info('unfilled', unfilled_labels)
if len(labels) == len(unfilled_labels):
for label in unfilled_labels:
new_form[form['form'][label]['id']] = None
return new_form
return rec_fill_form(form, unfilled_labels)
return rec_fill_form(form, list(form['features']))
|
3e6c1f623facb67602fa5e057080a08d0de9926d
| 18,218 |
def read_sky_model_from_csv(path: str) -> SkyModel:
"""
Read a CSV file in to create a SkyModel.
The CSV should have the following columns
- right ascension (deg)
- declination (deg)
- stokes I Flux (Jy)
- stokes Q Flux (Jy): if no information available, set to 0
- stokes U Flux (Jy): if no information available, set to 0
- stokes V Flux (Jy): if no information available, set to 0
- reference_frequency (Hz): if no information available, set to 0
- spectral index (N/A): if no information available, set to 0
- rotation measure (rad / m^2): if no information available, set to 0
- major axis FWHM (arcsec): if no information available, set to 0
- minor axis FWHM (arcsec): if no information available, set to 0
- position angle (deg): if no information available, set to 0
- source id (object): if no information available, set to None
:param path: file to read in
:return: SkyModel
"""
# TODO: add validation of csv
dataframe = pd.read_csv(path)
sources = dataframe.to_numpy()
sky = SkyModel(sources)
return sky
|
b649bd1cfd218a924c573bf90b26fa18f62c3cb4
| 18,219 |
def integer_to_vector(x, options_per_element, n_elements, index_to_element):
"""Return a vector representing an action/state from a given integer.
Args:
x (int): the integer to convert.
        options_per_element (int): number of options for each element in the vector.
n_elements (int): the number of elements in the vector to return.
index_to_element(int=>any): function which converts an integer represents a single option in one of the
vector elements and return anything that vector contains. For example, a function which returns 'UP' for 0,
1 for 'RIGHT',etc. Or a function which returns (2,2) given 10 for a 4x4 grid ((2,2) is the 10-th cell of that grid).
"""
return integer_to_vector_multiple_numbers(x, options_per_element, n_elements, index_to_element)
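# A minimal sketch of the base-n decoding this wrapper presumably delegates to
# (`integer_to_vector_multiple_numbers` itself is not shown in this snippet):
def _integer_to_vector_sketch(x, n_options, n_elements, index_to_element):
    out = []
    for _ in range(n_elements):
        x, idx = divmod(x, n_options)
        out.append(index_to_element(idx))
    return tuple(out)

# e.g. cell 10 of a 4x4 grid decodes to (2, 2), as in the docstring example:
assert _integer_to_vector_sketch(10, 4, 2, lambda i: i) == (2, 2)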
|
2649359d6a62b047f70bfe72f8403e8343a231ab
| 18,220 |
import numpy as np
def samples_for_each_class(dataset_labels, task):
    """
    Number of samples for each class in the task
    Args:
        dataset_labels   Labels to count samples from
        task             Labels within a task
    Returns:
        num_samples      Array with the number of samples per class
"""
num_samples = np.zeros([len(task)], dtype=np.float32)
i = 0
for label in task:
global_class_indices = np.column_stack(np.nonzero(dataset_labels))
class_indices = np.squeeze(global_class_indices[global_class_indices[:,1] == label][:,np.array([True, False])])
class_indices = np.sort(class_indices, axis=None)
num_samples[i] = len(class_indices)
i += 1
return num_samples
|
96bc2c794fd955110864f59ddb96c5df1c33b8ed
| 18,221 |
def requiredOneInGroup(col_name, group, dm, df, *args):
"""
If col_name is present in df, the group validation is satisfied.
If not, it still may be satisfied, but not by THIS col_name.
If col_name is missing, return col_name, else return None.
Later, we will validate to see if there is at least one None (non-missing)
value for this group.
"""
if col_name in df.columns:
# if the column name is present, return nothing
return None
else:
# if the column name is missing, return column name
return col_name
|
de46a4ef2f3e45381644db41d617d8c4c0845877
| 18,222 |
def persist(session, obj, return_id=True):
"""
Use the session to store obj in database, then remove obj from session,
so that on a subsequent load from the database we get a clean instance.
"""
session.add(obj)
session.flush()
obj_id = obj.id if return_id else None # save this before obj is expunged
session.expunge(obj)
return obj_id
|
a308931f418616417d10d3115b0f370352778533
| 18,223 |
from unittest.mock import patch
def test_bittrex_query_asset_movement_int_transaction_id(bittrex):
"""Test that if an integer is returned for bittrex transaction id we handle it properly
Bittrex deposit withdrawals SHOULD NOT return an integer for transaction id
according to their docs https://bittrex.github.io/api/v3#definition-Order
    but as we saw in practice they sometimes can.
Regression test for https://github.com/rotki/rotki/issues/2175
"""
problematic_deposit = """
[
{
"id": 1,
"status": "COMPLETED",
"quantity": 2.12345678,
"currencySymbol": "RISE",
"confirmations": 2,
"completedAt": "2014-02-13T07:38:53.883Z",
"txId": 9875231951530679373,
"cryptoAddress": "15VyEAT4uf7ycrNWZVb1eGMzrs21BH95Va",
"source": "foo"
}
]
"""
def mock_get_deposit_withdrawal(
url,
method,
json,
**kwargs,
): # pylint: disable=unused-argument
if 'deposit' in url:
response_str = problematic_deposit
else:
response_str = '[]'
return MockResponse(200, response_str)
with patch.object(bittrex.session, 'request', side_effect=mock_get_deposit_withdrawal):
movements = bittrex.query_deposits_withdrawals(
start_ts=0,
end_ts=TEST_END_TS,
only_cache=False,
)
errors = bittrex.msg_aggregator.consume_errors()
warnings = bittrex.msg_aggregator.consume_warnings()
assert len(errors) == 0
assert len(warnings) == 0
assert len(movements) == 1
assert movements[0].location == Location.BITTREX
assert movements[0].category == AssetMovementCategory.DEPOSIT
assert movements[0].timestamp == 1392277134
assert isinstance(movements[0].asset, Asset)
assert movements[0].asset == Asset('RISE')
assert movements[0].amount == FVal('2.12345678')
assert movements[0].fee == ZERO
assert movements[0].transaction_id == '9875231951530679373'
# also make sure they are written in the db
db_movements = bittrex.db.get_asset_movements(
filter_query=AssetMovementsFilterQuery.make(),
has_premium=True,
)
assert len(db_movements) == 1
assert db_movements[0] == movements[0]
|
83e3ce3d8f82b159191c6b9068b54321d06bfa9a
| 18,224 |
from typing import Sequence
import logging
def _fixed_point(
searcher: 'AbstractSearcher',
parsed: parsed_file.ParsedFile,
initial_substitutions: Sequence[substitution.Substitution],
start: int,
end: int,
max_iterations: int,
):
"""Repeatedly apply searcher until there are no more changes."""
if max_iterations <= 1:
return initial_substitutions
# TODO(b/116068515): sort the substitutions here and below.
new_substitutions = [
s.relative_to_span(start, end) for s in initial_substitutions
]
if None in new_substitutions:
logging.error('Out of bounds substitution after filtering: %s',
initial_substitutions[new_substitutions.index(None)])
return initial_substitutions # give up
text = parsed.text[start:end]
logging.debug(
'Applying _fixed_point with initial subs=%r on on parsed.text[%d:%d]: %r',
new_substitutions, start, end, text)
all_substitutions = []
# max_iterations + 1 to get the extra iteration before the break,
# and then - 1 to account for the fact that we already did an iteration.
for i in range(max_iterations):
rewritten = formatting.apply_substitutions(text, new_substitutions)
try:
parsed = _matcher.parse_ast(rewritten, parsed.path)
except _matcher.ParseError as e:
logging.error(
'Could not parse rewritten substitution in %s: %s\n'
'Tried to rewrite text[%s:%s] == %r\n'
'Rewrite was: %r\n'
'Substitutions: %r', parsed.path, e, start, end, text, rewritten,
new_substitutions)
break
# These substitutions parsed and were valid, add them to the list:
all_substitutions.extend(new_substitutions)
# Set up the variables for the next rewrite attempt
logging.debug('_fixed_point Iteration %d: rewrote %r -> %r', i, text,
rewritten)
text = rewritten
    if i == max_iterations - 1:
# no point bothering to get the next substitution
break
new_substitutions = list(searcher.find_iter_parsed(parsed))
if not new_substitutions:
break
if not all_substitutions:
# even the first rewrite failed to parse
return []
elif len(all_substitutions) == len(initial_substitutions):
# We didn't discover any new substitutions.
return initial_substitutions
else:
return [_compile_substitutions(all_substitutions, start, end, text)]
|
676102d43f965750d497dbbbbc3e87264de7a6d2
| 18,225 |
from re import sub
def masker(mask, val):
"""Enforce the defined bits in the <mask> on <val>."""
ones = sub(r"[^1]", "0", mask)
val |= int(ones,2)
zeros = sub(r"[^0]", "1", mask)
val &= int(zeros,2)
return val
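# e.g. force bit 3 to 1 and bit 0 to 0 while leaving the 'X' bits untouched:
assert masker("1XX0", 0b0101) == 0b1100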
|
68b3edd542b295ca7aade0eb9829e310e4c0ed2d
| 18,226 |
def ct_lt_u32(val_a, val_b):
"""
Returns 1 if val_a < val_b, 0 otherwise. Constant time.
:type val_a: int
:type val_b: int
:param val_a: an unsigned integer representable as a 32 bit value
:param val_b: an unsigned integer representable as a 32 bit value
:rtype: int
"""
val_a &= 0xffffffff
val_b &= 0xffffffff
return (val_a^((val_a^val_b)|(((val_a-val_b)&0xffffffff)^val_b)))>>31
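# e.g. branchless comparisons:
assert ct_lt_u32(3, 7) == 1
assert ct_lt_u32(7, 3) == 0
assert ct_lt_u32(5, 5) == 0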
|
6816fd1e9633c0c3035d68ac657f3cb917f24527
| 18,227 |
import typing
async def is_banned(ctx: Context, user: typing.Union[discord.Member, discord.User]) -> bool:
"""Returns true if user is in guild's ban list."""
bans = await ctx.guild.bans()
for entry in bans:
if entry.user.id == user.id:
return True
return False
|
2807e2d9a296afb360efe9abf9618e0ebe19e796
| 18,228 |
from typing import List
import numpy as np
def _create_transformation_vectors_for_pixel_offsets(
detector_group: h5py.Group, wrapper: nx.NexusWrapper
) -> List[QVector3D]:
"""
Construct a transformation (as a QVector3D) for each pixel offset
"""
x_offsets = wrapper.get_field_value(detector_group, "x_pixel_offset")
y_offsets = wrapper.get_field_value(detector_group, "y_pixel_offset")
z_offsets = wrapper.get_field_value(detector_group, "z_pixel_offset")
if x_offsets is None or y_offsets is None:
raise Exception(
"In pixel_shape_component expected to find x_pixel_offset and y_pixel_offset datasets"
)
if z_offsets is None:
z_offsets = np.zeros_like(x_offsets)
# offsets datasets can be 2D to match dimensionality of detector, so flatten to 1D
return [
QVector3D(x, y, z)
for x, y, z in zip(
x_offsets.flatten(), y_offsets.flatten(), z_offsets.flatten()
)
]
|
1504193d1a7731740a607f77c94a810561142c57
| 18,229 |
import random
def buildIterator(spec_name, param_spec, global_state, random_selection=False):
"""
:param param_spec: argument specification
:param random_selection: produce a continuous stream of random selections
:return: a iterator function to construct an iterator over possible values
"""
if param_spec['type'] == 'list':
if not random_selection:
return ListPermuteGroupElement(spec_name, param_spec['values'])
else:
return PermuteGroupElement(spec_name,randomGeneratorFactory(lambda: random.choice(param_spec['values'])))
elif 'int' in param_spec['type'] :
v = param_spec['type']
vals = [int(x) for x in v[v.rfind('[') + 1:-1].split(':')]
beg = vals[0] if len (vals) > 0 else 0
end = vals[1] if len(vals) > 1 else beg+1
if not random_selection:
increment = 1
if len(vals) > 2:
increment = vals[2]
return IteratorPermuteGroupElement(spec_name,lambda : xrange(beg, end+1,increment).__iter__())
else:
return PermuteGroupElement(spec_name,randomGeneratorFactory(lambda: random.randint(beg, end)))
elif 'float' in param_spec['type'] :
v = param_spec['type']
vals = [float(x) for x in v[v.rfind('[') + 1:-1].split(':')]
beg = vals[0] if len(vals) > 0 else 0
end = vals[1] if len(vals) > 1 else beg+1.0
if not random_selection:
increment = 1
if len(vals) > 2:
increment = vals[2]
return IteratorPermuteGroupElement(spec_name,lambda: np.arange(beg, end,increment).__iter__())
else:
return PermuteGroupElement(spec_name,randomGeneratorFactory(lambda: beg+ random.random()* (end-beg)))
elif param_spec['type'] == 'yesno':
if not random_selection:
return ListPermuteGroupElement(spec_name,['yes','no'])
else:
return PermuteGroupElement(spec_name,randomGeneratorFactory(lambda: random.choice(['yes', 'no'])))
elif param_spec['type'].startswith('donor'):
mydata = local()
local_state = mydata.current_local_state
choices = [node for node in local_state.getGraph().nodes() \
if len(local_state.getGraph().predecessors(node)) == 0]
if not random_selection:
# do not think we can save this state since it is tied to the local project
return PermuteGroupElement(spec_name,choices.__iter__)
else:
return PermuteGroupElement(spec_name, randomGeneratorFactory(lambda: random.choice(choices)))
return PermuteGroupElement(spec_name,randomGeneratorFactory(lambda: None))
|
d86d2af9499117614a11796c17eeccba16149092
| 18,230 |
import logging
def logged(obj):
"""Add a logger member to a decorated class or function.
:arg obj:
the class or function object being decorated, or an optional
:class:`logging.Logger` object to be used as the parent logger
(instead of the default module-named logger)
:return:
*obj* if *obj* is a class or function; otherwise, if *obj* is a
logger, return a lambda decorator that will in turn set the
logger attribute and return *obj*
If *obj* is a :obj:`class`, then ``obj.__log`` will have the logger
name "<module-name>.<class-name>":
>>> import sys
>>> logging.basicConfig(
... level=logging.DEBUG, stream=sys.stdout,
... format="%(levelname)s:%(name)s:%(funcName)s:%(message)s")
>>> @logged
... class Sample:
...
... def test(self):
... self.__log.debug("This is a test.")
...
>>> Sample().test()
DEBUG:autologging.Sample:test:This is a test.
.. note::
Autologging will prefer to use the class's ``__qualname__`` when
it is available (Python 3.3+). Otherwise, the class's
``__name__`` is used. For example::
class Outer:
@logged
class Nested: pass
Under Python 3.3+, ``Nested.__log`` will have the name
"autologging.Outer.Nested", while under Python 2.7 or 3.2, the
logger name will be "autologging.Nested".
.. versionchanged:: 0.4.0
Functions decorated with ``@logged`` use a *single* underscore
in the logger variable name (e.g. ``my_function._log``) rather
than a double underscore.
If *obj* is a function, then ``obj._log`` will have the logger name
"<module-name>":
>>> import sys
>>> logging.basicConfig(
... level=logging.DEBUG, stream=sys.stdout,
... format="%(levelname)s:%(name)s:%(funcName)s:%(message)s")
>>> @logged
... def test():
... test._log.debug("This is a test.")
...
>>> test()
DEBUG:autologging:test:This is a test.
.. note::
Within a logged function, the ``_log`` attribute must be
qualified by the function name.
If *obj* is a :class:`logging.Logger` object, then that logger is
used as the parent logger (instead of the default module-named
logger):
>>> import sys
>>> logging.basicConfig(
... level=logging.DEBUG, stream=sys.stdout,
... format="%(levelname)s:%(name)s:%(funcName)s:%(message)s")
>>> @logged(logging.getLogger("test.parent"))
... class Sample:
... def test(self):
... self.__log.debug("This is a test.")
...
>>> Sample().test()
DEBUG:test.parent.Sample:test:This is a test.
Again, functions are similar:
>>> import sys
>>> logging.basicConfig(
... level=logging.DEBUG, stream=sys.stdout,
... format="%(levelname)s:%(name)s:%(funcName)s:%(message)s")
>>> @logged(logging.getLogger("test.parent"))
... def test():
... test._log.debug("This is a test.")
...
>>> test()
DEBUG:test.parent:test:This is a test.
.. note::
For classes, the logger member is made "private" (i.e. ``__log``
with double underscore) to ensure that log messages that include
the *%(name)s* format placeholder are written with the correct
name.
Consider a subclass of a ``@logged``-decorated parent class. If
the subclass were **not** decorated with ``@logged`` and could
access the parent's logger member directly to make logging
calls, those log messages would display the name of the
parent class, not the subclass.
Therefore, subclasses of a ``@logged``-decorated parent class
that wish to use a provided ``self.__log`` object must themselves
be decorated with ``@logged``.
.. warning::
Although the ``@logged`` and ``@traced`` decorators will "do the
right thing" regardless of the order in which they are applied to
the same function, it is recommended that ``@logged`` always be
used as the innermost decorator::
@traced
@logged
def my_function():
my_function._log.info("message")
This is because ``@logged`` simply sets the ``_log`` attribute
and then returns the original function, making it "safe" to use
in combination with any other decorator.
.. note::
Both `Jython <http://www.jython.org/>`_ and
`IronPython <http://ironpython.net/>`_ report an "internal" class
name using its mangled form, which will be reflected in the
default logger name.
For example, in the sample code below, both Jython and IronPython
will use the default logger name "autologging._Outer__Nested"
(whereas CPython/PyPy/Stackless would use "autologging.__Nested"
under Python 2 or "autologging.Outer.__Nested" under Python 3.3+)
::
class Outer:
@logged
class __Nested:
pass
.. warning::
`IronPython <http://ironpython.net/>`_ does not fully support
frames (even with the -X:FullFrames option), so you are likely to
see things like misreported line numbers and "<unknown file>" in
log records emitted when running under IronPython.
"""
if isinstance(obj, logging.Logger): # `@logged(logger)'
return lambda class_or_fn: _add_logger_to(
class_or_fn,
logger_name=_generate_logger_name(
class_or_fn, parent_name=obj.name))
else: # `@logged'
return _add_logger_to(obj)
|
5a7d53257ed7d68c53daf90dcf2d48943a430a4e
| 18,231 |
def outlier_removal_mean(dataframe, colname, low_cut, high_cut):
"""Replace outliers with the mean on dataframe[colname]"""
col = dataframe[colname]
col_numerics = col.loc[
col.apply(
lambda x: isinstance(x, (int, float))
and (x >= low_cut and x <= high_cut)
)
]
dataframe.loc[
col.apply(
lambda x: isinstance(x, (int, float))
and (x < low_cut or x > high_cut)
),
colname,
] = col_numerics.mean()
return dataframe
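import pandas as pd
# A minimal usage sketch: 100.0 lies outside [0, 10], so it is replaced by
# the mean of the in-range values, (1 + 2 + 3) / 3 = 2.0.
df_demo = pd.DataFrame({"v": [1.0, 2.0, 3.0, 100.0]})
outlier_removal_mean(df_demo, "v", low_cut=0, high_cut=10)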
|
03d40bb8098d4313e468d5b4a929756354a7732c
| 18,232 |
def non_repeating(value, counts, q):
"""Finds the first non-repeating string in a stream.
Args:
value (str): Latest string received in the string
counts (dict): Dictionary of strings containing the counts to determine if string is repeated
q (Queue): Container for all strings in stream that have yet determined as being repeated
Return:
str: First non-repeating string. None if all strings are repeated.
"""
q.put(value)
if value in counts:
counts[value] += 1
else:
counts[value] = 1
while not q.empty():
if counts[q.queue[0]] > 1:
q.get()
else:
return q.queue[0]
    return None
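from queue import Queue
# A minimal usage sketch, feeding a stream one token at a time:
counts_demo, q_demo = {}, Queue()
for token in ["a", "b", "a"]:
    first = non_repeating(token, counts_demo, q_demo)
assert first == "b"  # "a" repeats, so "b" is the first non-repeating token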
|
fc5ec025cffa0d7230d814d3677ae640cd652349
| 18,233 |
def auth_optional(request):
"""
view method for path '/sso/auth_optional'
Return
        200 response: authenticated and authorized
        204 response: not authenticated
        403 response: authenticated, but not authorized
"""
res = _auth(request)
if res:
#authenticated, but can be authorized or not authorized
return res
else:
#not authenticated
return AUTH_NOT_REQUIRED_RESPONSE
|
06416fdce6a652ca0cdc169c48219e685c13cdad
| 18,234 |
def is_pip_main_available():
"""Return if the main pip function is available. Call get_pip_main before calling this function."""
return PIP_MAIN_FUNC is not None
|
3d4243bb4336fbc9eb9e93b2a1cf9ec4cc129c03
| 18,235 |
import torch
def energy_target(flattened_bbox_targets, pos_bbox_targets,
pos_indices, r, max_energy):
"""Calculate energy targets based on deep watershed paper.
Args:
flattened_bbox_targets (torch.Tensor): The flattened bbox targets.
pos_bbox_targets (torch.Tensor): Bounding box lrtb values only for
positions within the bounding box. We use this as an argument
to prevent recalculating it since it is used for other things as
well.
pos_indices (torch.Tensor): The indices of values in
flattened_bbox_targets which are within a bounding box
        r (int): Radius hyperparameter that the energy decay is scaled by (see Notes).
        max_energy (int): Max energy level possible.
Notes:
The energy targets are calculated as:
E_max \cdot argmax_{c \in C}[1 - \sqrt{((l-r)/2)^2 + ((t-b) / 2)^2}
/ r]
- r is a hyperparameter we would like to minimize.
- (l-r)/2 is the horizontal distance to the center and will be
assigned the variable name "horizontal"
- (t-b)/2 is the vertical distance to the center and will be
assigned the variable name "vertical"
- E_max is self.max_energy
- We don't need the argmax in this code implementation since we
already select the bounding boxes and their respective pixels in
a previous step.
Returns:
tuple: A 2 tuple with values ("pos_energies_targets",
"energies_targets"). Both are flattened but pos_energies_targets
only contains values within bounding boxes.
"""
horizontal = pos_bbox_targets[:, 0] - pos_bbox_targets[:, 2]
vertical = pos_bbox_targets[:, 1] - pos_bbox_targets[:, 3]
# print("Horizontals: {}".format(horizontal))
# print("Verticals: {}".format(vertical))
horizontal = torch.div(horizontal, 2)
vertical = torch.div(vertical, 2)
c2 = (horizontal * horizontal) + (vertical * vertical)
# print("c2: \n{}".format(c2))
# We use x * x instead of x.pow(2) since it's faster by about 30%
square_root = torch.sqrt(c2)
# print("Sqrt: \n{}".format(square_root))
type_dict = {'dtype': square_root.dtype,
'device': square_root.device}
pos_energies = (torch.tensor([1], **type_dict)
- torch.div(square_root, r))
pos_energies *= max_energy
pos_energies = torch.max(pos_energies,
torch.tensor([0], **type_dict))
pos_energies = pos_energies.floor()
energies_targets = torch.zeros(flattened_bbox_targets.shape[0],
**type_dict)
energies_targets[pos_indices] = pos_energies
# torch.set_printoptions(profile='full')
# print("Energy targets: \n {}".format(pos_energies))
# torch.set_printoptions(profile='default')
# input()
return pos_energies, energies_targets
|
84bed4cc1a8bf11be778b7e79524707a49482b39
| 18,236 |
def dashtable(df):
"""
Convert df to appropriate format for dash datatable
PARAMETERS
----------
df: pd.DataFrame,
OUTPUT
----------
    dash_cols: list containing columns for dashtable
    df: dataframe for dashtable
    drop_dict: dict containing dropdown list for dashtable
"""
dash_cols = [] # create dashtable column names
for x in df.columns :
temp_dict = {'name':x,'id':x}
if x in dropdown_cols:
temp_dict.update({'presentation': 'dropdown'})
# append to list
dash_cols.append(temp_dict)
# get dropdown contents for each column
drop_dict = {}
for i in range(len(dropdown_cols)): # loop through dropdown columns
drop_list = []
for x in drop_options[i]: # loop through column elements
drop_list.append({'label': x, 'value': x})
drop_dict.update({dropdown_cols[i]:{'options': drop_list, 'clearable':False}}) # append to dict
return dash_cols, df, drop_dict
|
39897244f81a5c6ac0595aac7cb219f59d6c5739
| 18,237 |
def other_identifiers_to_metax(identifiers_list):
"""Convert other identifiers to comply with Metax schema.
Arguments:
identifiers_list (list): List of other identifiers from frontend.
Returns:
list: List of other identifiers that comply to Metax schema.
"""
other_identifiers = []
for identifier in identifiers_list:
id_dict = {}
id_dict["notation"] = identifier
other_identifiers.append(id_dict)
return other_identifiers
|
986c98d5a557fb4fb75ed940d3f39a9a0ec93527
| 18,238 |
def enforce_excel_cell_string_limit(long_string, limit):
"""
Trims a long string. This function aims to address a limitation of CSV
files, where very long strings which exceed the char cell limit of Excel
cause weird artifacts to happen when saving to CSV.
"""
trimmed_string = ''
if limit <= 3:
limit = 4
if len(long_string) > limit:
trimmed_string = (long_string[:(limit-3)] + '...')
return trimmed_string
else:
return long_string
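# e.g. trimming an 8-character string to a 6-character cell limit:
assert enforce_excel_cell_string_limit("abcdefgh", 6) == "abc..."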
|
9b8bcf4590dc73425c304c8d778ae51d3e3f0bf3
| 18,239 |
import cv2
import numpy as np
def gaussian_blur(image: np.ndarray, sigma_min: float, sigma_max: float) -> np.ndarray:
"""
Blurs an image using a Gaussian filter.
Args:
image: Input image array.
sigma_min: Lower bound of Gaussian kernel standard deviation range.
sigma_max: Upper bound of Gaussian kernel standard deviation range.
Returns:
Blurred image array.
"""
sigma_value = np.random.uniform(sigma_min, sigma_max)
return cv2.GaussianBlur(image, (0, 0), sigma_value)
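# A minimal usage sketch: blur a random 64x64 grayscale image with a sigma
# drawn uniformly from [0.5, 1.5] (ksize (0, 0) lets OpenCV derive the kernel).
img_demo = (np.random.rand(64, 64) * 255).astype(np.uint8)
blurred = gaussian_blur(img_demo, sigma_min=0.5, sigma_max=1.5)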
|
2fd31d016e4961c6980770e8dd113ae7ad45a6ed
| 18,240 |
def get_number_of_pcs_in_pool(pool):
"""
    Return number of pcs in a pool
"""
pc_count = Computer.objects.filter(pool=pool).count()
return pc_count
|
812de24ad2cbc738a10258f8252ca531ef72e904
| 18,241 |
from tqdm import tqdm
import math
def save_images(scene_list, video_manager, num_images=3, frame_margin=1,
image_extension='jpg', encoder_param=95,
image_name_template='$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER',
output_dir=None, downscale_factor=1, show_progress=False,
scale=None, height=None, width=None):
# type: (List[Tuple[FrameTimecode, FrameTimecode]], VideoManager,
# Optional[int], Optional[int], Optional[str], Optional[int],
# Optional[str], Optional[str], Optional[int], Optional[bool],
# Optional[float], Optional[int], Optional[int])
# -> Dict[List[str]]
""" Saves a set number of images from each scene, given a list of scenes
and the associated video/frame source.
Arguments:
scene_list: A list of scenes (pairs of FrameTimecode objects) returned
from calling a SceneManager's detect_scenes() method.
video_manager: A VideoManager object corresponding to the scene list.
Note that the video will be closed/re-opened and seeked through.
num_images: Number of images to generate for each scene. Minimum is 1.
frame_margin: Number of frames to pad each scene around the beginning
and end (e.g. moves the first/last image into the scene by N frames).
Can set to 0, but will result in some video files failing to extract
the very last frame.
image_extension: Type of image to save (must be one of 'jpg', 'png', or 'webp').
encoder_param: Quality/compression efficiency, based on type of image:
'jpg' / 'webp': Quality 0-100, higher is better quality. 100 is lossless for webp.
'png': Compression from 1-9, where 9 achieves best filesize but is slower to encode.
image_name_template: Template to use when creating the images on disk. Can
use the macros $VIDEO_NAME, $SCENE_NUMBER, and $IMAGE_NUMBER. The image
extension is applied automatically as per the argument image_extension.
output_dir: Directory to output the images into. If not set, the output
is created in the working directory.
downscale_factor: Integer factor to downscale images by. No filtering
is currently done, only downsampling (thus requiring an integer).
show_progress: If True, shows a progress bar if tqdm is installed.
scale: Optional factor by which to rescale saved images.A scaling factor of 1 would
not result in rescaling. A value <1 results in a smaller saved image, while a
value >1 results in an image larger than the original. This value is ignored if
either the height or width values are specified.
height: Optional value for the height of the saved images. Specifying both the height
and width will resize images to an exact size, regardless of aspect ratio.
Specifying only height will rescale the image to that number of pixels in height
while preserving the aspect ratio.
width: Optional value for the width of the saved images. Specifying both the width
and height will resize images to an exact size, regardless of aspect ratio.
Specifying only width will rescale the image to that number of pixels wide
while preserving the aspect ratio.
Returns:
Dict[List[str]]: Dictionary of the format { scene_num : [image_paths] },
where scene_num is the number of the scene in scene_list (starting from 1),
and image_paths is a list of the paths to the newly saved/created images.
Raises:
ValueError: Raised if any arguments are invalid or out of range (e.g.
if num_images is negative).
"""
if not scene_list:
return {}
if num_images <= 0 or frame_margin < 0:
raise ValueError()
# TODO: Validate that encoder_param is within the proper range.
# Should be between 0 and 100 (inclusive) for jpg/webp, and 1-9 for png.
imwrite_param = [get_cv2_imwrite_params()[image_extension],
encoder_param] if encoder_param is not None else []
video_name = video_manager.get_video_name()
# Reset video manager and downscale factor.
video_manager.release()
video_manager.reset()
video_manager.set_downscale_factor(downscale_factor)
video_manager.start()
# Setup flags and init progress bar if available.
completed = True
logger.info('Generating output images (%d per scene)...', num_images)
progress_bar = None
if show_progress and tqdm:
progress_bar = tqdm(
total=len(scene_list) * num_images,
unit='images',
dynamic_ncols=True)
filename_template = Template(image_name_template)
scene_num_format = '%0'
scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd'
image_num_format = '%0'
image_num_format += str(math.floor(math.log(num_images, 10)) + 2) + 'd'
framerate = scene_list[0][0].framerate
timecode_list = [
[
FrameTimecode(int(f), fps=framerate) for f in [
# middle frames
a[len(a)//2] if (0 < j < num_images-1) or num_images == 1
# first frame
else min(a[0] + frame_margin, a[-1]) if j == 0
# last frame
else max(a[-1] - frame_margin, a[0])
# for each evenly-split array of frames in the scene list
for j, a in enumerate(np.array_split(r, num_images))
]
]
for i, r in enumerate([
# pad ranges to number of images
r
if 1+r[-1]-r[0] >= num_images
else list(r) + [r[-1]] * (num_images - len(r))
# create range of frames in scene
for r in (
range(start.get_frames(), end.get_frames())
# for each scene in scene list
for start, end in scene_list
)
])
]
image_filenames = {i: [] for i in range(len(timecode_list))}
aspect_ratio = get_aspect_ratio(video_manager)
if abs(aspect_ratio - 1.0) < 0.01:
aspect_ratio = None
for i, scene_timecodes in enumerate(timecode_list):
for j, image_timecode in enumerate(scene_timecodes):
video_manager.seek(image_timecode)
ret_val, frame_im = video_manager.read()
if ret_val:
file_path = '%s.%s' % (
filename_template.safe_substitute(
VIDEO_NAME=video_name,
SCENE_NUMBER=scene_num_format % (i + 1),
IMAGE_NUMBER=image_num_format % (j + 1),
FRAME_NUMBER=image_timecode.get_frames()),
image_extension)
image_filenames[i].append(file_path)
if aspect_ratio is not None:
frame_im = cv2.resize(
frame_im, (0, 0), fx=aspect_ratio, fy=1.0,
interpolation=cv2.INTER_CUBIC)
# Get frame dimensions prior to resizing or scaling
frame_height = frame_im.shape[0]
frame_width = frame_im.shape[1]
# Figure out what kind of resizing needs to be done
                if height and width:
                    frame_im = cv2.resize(
                        frame_im, (width, height), interpolation=cv2.INTER_CUBIC)
                elif height and not width:
                    # use a local name so `width` is not clobbered for later frames
                    factor = height / float(frame_height)
                    scaled_width = int(factor * frame_width)
                    frame_im = cv2.resize(
                        frame_im, (scaled_width, height), interpolation=cv2.INTER_CUBIC)
                elif width and not height:
                    # use a local name so `height` is not clobbered for later frames
                    factor = width / float(frame_width)
                    scaled_height = int(factor * frame_height)
                    frame_im = cv2.resize(
                        frame_im, (width, scaled_height), interpolation=cv2.INTER_CUBIC)
elif scale:
frame_im = cv2.resize(
frame_im, (0, 0), fx=scale, fy=scale,
interpolation=cv2.INTER_CUBIC)
cv2.imwrite(
get_and_create_path(file_path, output_dir),
frame_im, imwrite_param)
else:
completed = False
break
if progress_bar:
progress_bar.update(1)
if not completed:
logger.error('Could not generate all output images.')
return image_filenames
|
37088294395539acd3b88543d1bdd4d05ef82ce5
| 18,242 |
from typing import List
def get_used_http_ports() -> List[int]:
"""Returns list of ports, used by http servers in existing configs."""
return [rc.http_port for rc in get_run_configs().values()]
|
12982ff4d5b2327c06fef1cf874b871e2eee08c1
| 18,243 |
import io
import cv2
import numpy as np
def get_img_from_fig(fig, dpi=180, color_cvt_flag=cv2.COLOR_BGR2RGB) -> np.ndarray:
"""Make numpy array from mpl fig
Parameters
----------
fig : plt.Figure
Matplotlib figure, usually the result of plt.imshow()
dpi : int, optional
Dots per inches of the image to save. Note, that default matplotlib
figsize is given in inches. Example: px, py = w * dpi, h * dpi pixels
6.4 inches * 100 dpi = 640 pixels, by default 180
color_cvt_flag : int, optional
OpenCV cvtColor flag. to get grayscale image,
use `cv2.COLOR_BGR2GRAY`, by default `cv2.COLOR_BGR2RGB`.
Returns
-------
np.ndarray[np.uint8]
Image array
"""
with io.BytesIO() as buffer:
fig.savefig(buffer, format="png", dpi=dpi)
buffer.seek(0)
img_arr = np.frombuffer(buffer.getvalue(), dtype=np.uint8)
return cv2.cvtColor(cv2.imdecode(img_arr, 1), color_cvt_flag)
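import matplotlib.pyplot as plt
# A minimal usage sketch: rasterize a simple figure into an HxWx3 array.
fig_demo, ax_demo = plt.subplots()
ax_demo.plot([0, 1], [0, 1])
img_demo = get_img_from_fig(fig_demo, dpi=100)  # img_demo.shape -> (H, W, 3)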
|
dde9f35b78df436b30d4f9452b9964c93f924252
| 18,244 |
import numpy as np
def split_data_by_target(data, target, num_data_per_target):
"""
Args:
data: np.array [num_data, *data_dims]
target: np.array [num_data, num_targets]
target[i] is a one hot
num_data_per_target: int
Returns:
result_data: np.array [num_data_per_target * num_targets, *data_dims]
result_target: np.array
[num_data_per_target * num_targets, num_targets]
"""
num_unique_targets = len(np.unique(target, axis=0))
target_numeric = np.dot(target, np.arange(num_unique_targets))
result_data = []
result_target = []
for target_id in range(num_unique_targets):
result_data.append(data[target_numeric == target_id][:num_data_per_target])
result_target.append(target[target_numeric == target_id][:num_data_per_target])
return np.concatenate(result_data), np.concatenate(result_target)
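# A minimal usage sketch: 4 samples, 2 one-hot classes, keep 1 sample per class.
data_demo = np.arange(8).reshape(4, 2)
target_demo = np.array([[1, 0], [0, 1], [1, 0], [0, 1]])
sub_data, sub_target = split_data_by_target(data_demo, target_demo, 1)
# sub_data contains rows 0 and 1 (the first sample of each class)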
|
d4425753b4d9892d2c593ec8e58e75bae0005c3d
| 18,245 |
def top_mutations(mutated_scores, initial_score, top_results=10):
"""Generate list of n mutations that improve localization probability
Takes in the pd.DataFrame of predictions for mutated sequences and the
probability of the initial sequence. After substracting the initial value
from the values of the mutations, it generates a list of the mutations
that increase the probability that the protein is localized at the target
localization. The number of mutations returned is determined with the
top_results variable, which defaults to 10. Note that if there are not so
many beneficial mutations as indicated in top_results, the returned list is
shorter to avoid returning mutations that would decrease the probability of
being localized at the target localization. This means that if all
mutations are detrimental, the function returns an empty pd.DataFrame.
The returned mutations are sorted from larger increase to smaller increase
and include information about the amino acid position, the original
residue at that position, the mutation, the improvement with respect to
initial_score and the final probability of the sequence with that mutation.
Args:
mutated_scores: a pd.DataFrame with the probability predicted by the
model for each mutation (rows) at each position (columns).
initial_score: a float representing the probability predicted by the
model for the initial sequence.
top_results: an integer indicating the number of mutations to return.
Returns:
top_res: a pd.DataFrame with the mutations that improve the
probability that a protein is localized at the target localization,
showing position, mutation and improvement with respect to the
original score.
"""
    # check if top_results is an integer
    if not isinstance(top_results, int):
        raise TypeError("top_results should be an integer")
# get the increase or decrease in probability of mutations compared to the
# initial_score of the original sequence
prob_change = mutated_scores - initial_score
# prepare data frame for results
top_res = pd.DataFrame(
columns=["Position", "Mutation", "Prob_increase", "Target_probability"]
)
i = 0
# initialize at max value so that it enters the loop
pred_increase = 1
# get best mutations until reaching top_results or mutations that do
# not improve the probability
while i < top_results and pred_increase > 0:
# get column with maximum value
position_mut = prob_change.max().idxmax()
# get row with maximum value
mutation = prob_change.idxmax()[position_mut]
# get increase and localization probability of the sequence with the
# mutation of interest
pred_increase = prob_change.loc[mutation, position_mut]
prob_value = mutated_scores.loc[mutation, position_mut]
# change it for nan so that we can look for next worse mutation at the
# next iteration
prob_change.loc[mutation, position_mut] = np.nan
# append to results
mut_series = pd.Series(
{
"Position": position_mut,
"Mutation": mutation,
"Prob_increase": pred_increase,
"Target_probability": prob_value,
}
)
top_res = top_res.append(mut_series, ignore_index=True)
i += 1
return top_res
|
f574bf7f7569e3024a42866873c5bb589ff02095
| 18,246 |
def npmat4_to_pdmat4(npmat4):
"""
# updated from cvtMat4
convert numpy.2darray to LMatrix4 defined in Panda3d
:param npmat3: a 3x3 numpy ndarray
:param npvec3: a 1x3 numpy ndarray
:return: a LMatrix3f object, see panda3d
author: weiwei
date: 20170322
"""
return Mat4(npmat4[0, 0], npmat4[1, 0], npmat4[2, 0], 0, \
npmat4[0, 1], npmat4[1, 1], npmat4[2, 1], 0, \
npmat4[0, 2], npmat4[1, 2], npmat4[2, 2], 0, \
npmat4[0, 3], npmat4[1, 3], npmat4[2, 3], 1)
|
7b58014d5d354aefac84786212b6ca190a983e48
| 18,247 |
import requests
def is_at_NWRC(url):
"""
    Checks that we're on the NWRC network
"""
try:
r = requests.get(url)
code = r.status_code
except Exception as e:
code = 404
return code==200
|
b909a9087940eb70b569ea6c686ff394e84a6ed9
| 18,248 |
import torch
def lmo(x,radius):
"""Returns v with norm(v, self.p) <= r minimizing v*x"""
shape = x.shape
if len(shape) == 4:
v = torch.zeros_like(x)
for first_dim in range(shape[0]):
for second_dim in range(shape[1]):
inner_x = x[first_dim][second_dim]
rows, cols = x[first_dim][second_dim].shape
v[first_dim][second_dim] = torch.zeros_like(inner_x)
maxIdx = torch.argmax(torch.abs(inner_x),0)
for col in range(cols):
v[first_dim][second_dim][maxIdx[col],col] = -radius*torch.sign(inner_x[maxIdx[col],col])
elif len(shape) == 3:
v = torch.zeros_like(x)
for first_dim in range(shape[0]):
inner_x = x[first_dim]
rows, cols = x[first_dim].shape
v[first_dim] = torch.zeros_like(inner_x)
maxIdx = torch.argmax(torch.abs(inner_x),0)
for col in range(cols):
v[first_dim][maxIdx[col],col] = -radius*torch.sign(inner_x[maxIdx[col],col])
elif len(shape)==2:
rows, cols = x.shape
v = torch.zeros_like(x)
maxIdx = torch.argmax(torch.abs(x),0)
for col in range(cols):
v[maxIdx[col],col] = -radius*torch.sign(x[maxIdx[col],col])
else :
v = torch.zeros_like(x)
maxIdx = torch.argmax(torch.abs(x))
v.view(-1)[maxIdx] = -radius * torch.sign(x.view(-1)[maxIdx])
return v
|
24bda333cdd64df9a0b4fa603211036bbdad7200
| 18,249 |
def _transform_index(index, func):
"""
Apply function to all values found in index.
This includes transforming multiindex entries separately.
"""
if isinstance(index, MultiIndex):
items = [tuple(func(y) for y in x) for x in index]
return MultiIndex.from_tuples(items, names=index.names)
else:
items = [func(x) for x in index]
return Index(items, name=index.name)
|
c642dd9330032ed784224b7ede6ee299b6d3ed67
| 18,250 |
def extractQualiTeaTranslations(item):
"""
# 'QualiTeaTranslations'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol or frag) or 'preview' in item['title'].lower():
return None
if 'Harry Potter and the Rise of the Ordinary Person' in item['tags']:
return None
if 'Romance of Dragons and Snakes' in item['tags']:
return buildReleaseMessageWithType(item, 'Romance of Dragons and Snakes', vol, chp, frag=frag, postfix=postfix)
return False
|
446b7f7598e118222c033bbfce074fa02340fd8e
| 18,251 |
def feature_norm_ldc(df):
"""
Process the features to obtain the standard metrics in LDC mode.
"""
df['HNAP'] = df['HNAC']/df['ICC_abs']*100
df['TCC'] = (df['ICC_abs']+df['DCC_abs'])/df['VOL']
df['ICC'] = df['ICC_abs']/df['VOL']
df['DCC'] = df['DCC_abs']/df['VOL']
return df
|
60e3ef31c0be07179854de3191c2c75f4ec2cb4d
| 18,252 |
import numpy as np
def dice_jaccard(y_true, y_pred, y_scores, shape, smooth=1, thr=None):
"""
Computes Dice and Jaccard coefficients.
Args:
y_true (ndarray): (N,4)-shaped array of groundtruth bounding boxes coordinates in xyxy format
y_pred (ndarray): (N,4)-shaped array of predicted bounding boxes coordinates in xyxy format
y_scores (ndarray): (N,)-shaped array of prediction scores
shape (tuple): shape of the map, i.e. (h, w)
smooth (int, optional): Smoothing factor to avoid ZeroDivisionError. Defaults to 1.
thr (float, optional): Threshold to binarize predictions; if None, the soft version of
the coefficients are computed. Defaults to None.
Returns:
tuple: The Dice and Jaccard coefficients.
"""
m_true = np.zeros(shape, dtype=np.float32)
for x0, y0, x1, y1 in y_true.astype(int):
m_true[y0:y1 + 1, x0: x1 + 1] = 1.
if thr is not None:
keep = y_scores >= thr
y_pred = y_pred[keep]
y_scores = y_scores[keep]
m_pred = np.zeros_like(m_true)
for (x0, y0, x1, y1), score in zip(y_pred.astype(int), y_scores):
m_pred[y0:y1 + 1, x0: x1 + 1] = np.maximum(m_pred[y0:y1 + 1, x0: x1 + 1], score)
intersection = np.sum(m_true * m_pred)
sum_ = np.sum(m_true) + np.sum(m_pred)
union = sum_ - intersection
jaccard = (intersection + smooth) / (union + smooth)
dice = 2. * (intersection + smooth) / (sum_ + smooth)
return dice.mean(), jaccard.mean()
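# A minimal usage sketch on a 10x10 map, with one ground-truth box and one
# slightly shifted predicted box at full confidence:
y_true_demo = np.array([[0, 0, 4, 4]])
y_pred_demo = np.array([[1, 1, 4, 4]])
dice_demo, jacc_demo = dice_jaccard(y_true_demo, y_pred_demo, np.array([1.0]),
                                    shape=(10, 10))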
|
ed3a043b53d843e05ff3e32954eb9dbc2939b6ca
| 18,253 |
def forward_pass(output_node, sorted_nodes):
"""
Performs a forward pass through a list of sorted nodes.
Arguments:
`output_node`: A node in the graph, should be the output node (have no outgoing edges).
`sorted_nodes`: A topologically sorted list of nodes.
Returns the output Node's value
"""
for n in sorted_nodes:
n.forward()
return output_node.value
|
a91c5b7ebef98815a47b26d58a680b36098969d5
| 18,254 |
import numpy as np
def qr_decomposition(q, r, iter, n):
"""
Return Q and R matrices for iter number of iterations.
"""
v = column_convertor(r[iter:, iter])
Hbar = hh_reflection(v)
H = np.identity(n)
H[iter:, iter:] = Hbar
r = np.matmul(H, r)
q = np.matmul(q, H)
return q, r
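# Hypothetical sketches of the helpers this snippet assumes (not part of the
# original code): a column reshaper and a Householder reflector.
def column_convertor(x):
    # reshape a 1D slice into a column vector
    return x.reshape(-1, 1)

def hh_reflection(v):
    # Householder reflector H = I - 2*v*v^T / (v^T*v); v is shifted so the
    # input column maps onto the first basis vector (assumes v[0, 0] != 0)
    v = v + np.sign(v[0, 0]) * np.linalg.norm(v) * np.eye(v.shape[0], 1)
    return np.identity(v.shape[0]) - 2 * (v @ v.T) / (v.T @ v)

# Usage sketch: Q, R = np.identity(n), A.copy(); then
# for k in range(n - 1): Q, R = qr_decomposition(Q, R, k, n)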
|
94aa433e31e93dc36f67f579cb03f67930cfabc4
| 18,255 |
def main(args=None):
"""Main entry point for `donatello`'s command-line interface.
Args:
args (List[str]): Custom arguments if you wish to override sys.argv.
Returns:
int: The exit code of the program.
"""
try:
init_colorama()
opts = get_parsed_args(args)
if opts.bad_chars is not None:
bad_chars = _parse_bytes(opts.bad_chars, check_dups=True)
else:
bad_chars = b''
bad_chars_as_ints = tuple(int(bc) for bc in bad_chars)
if opts.max_factors is not None:
max_factors = _parse_max_factors(opts.max_factors)
else:
max_factors = 2
if opts.ops is not None:
ops = _parse_ops(opts.ops)
else:
ops = IMPLEMENTED_OPS
if opts.command not in ('factor', 'encode',):
raise DonatelloConfigurationError(
'must specify either `factor` or `encode`; `' + opts.command +
'` is invalid')
if opts.target == '-':
# TODO: https://docs.python.org/3/library/fileinput.html
pass
else:
target = opts.target
if opts.command == 'factor':
value = _parse_target_hex(target)
print_i('Attempting to factor target value ', format_dword(value))
for num_factors in range(2, max_factors+1):
factors = factor_by_byte(
value, bad_chars_as_ints, usable_ops=ops,
num_factors=num_factors)
if factors is not None:
print_i('Found factorization!')
res = [' 0x00000000']
for f in factors:
res.append('{0: <3}'.format(f.operator) + ' ' +
format_dword(f.operand))
print('\n'.join(res))
break
else:
print_e('Unable to find any factors')
elif opts.command == 'encode':
payload = _parse_bytes(target)
print_i('Attempting to encode payload...')
asm = encode_x86_32(payload, bad_chars, max_factors=max_factors)
print_i('Successfully encoded payload!')
print(asm)
return 0
except (DonatelloCannotEncodeError, DonatelloNoPossibleNopsError) as e:
print_e('Failed to factor/encode the specified target: ', e)
return 1
except DonatelloConfigurationError as e:
print_e('Configuration error: ', e)
return 1
except DonatelloNoPresentBadCharactersError:
print_e('No bad characters present in the specified payload; ',
'use the -f/--force flag to bypass this check')
return 1
except DonatelloError as e:
print_e('This should not be reached! See below for error.')
print_e(e)
return 1
except Exception as e:
print_e('Received unexpected exception; re-raising it.')
raise e
|
1977a2bf8e537664a4eab2fb05d6300998a59977
| 18,256 |
import logging
import torch
import operator
def build_detection_train_loader(cfg, mapper=None):
"""
A data loader is created by the following steps:
1. Use the dataset names in config to query :class:`DatasetCatalog`, and obtain a list of dicts.
2. Coordinate a random shuffle order shared among all processes (all GPUs)
3. Each process spawn another few workers to process the dicts. Each worker will:
* Map each metadata dict into another format to be consumed by the model.
* Batch them by simply putting dicts into a list.
The batched ``list[mapped_dict]`` is what this dataloader will yield.
Args:
cfg (CfgNode): the config
mapper (callable): a callable which takes a sample (dict) from dataset and
returns the format to be consumed by the model.
By default it will be `DatasetMapper(cfg, True)`.
Returns:
an infinite iterator of training data
"""
num_workers = get_world_size()
images_per_batch = cfg.SOLVER.IMS_PER_BATCH
assert (
images_per_batch % num_workers == 0
), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
images_per_batch, num_workers
)
assert (
images_per_batch >= num_workers
), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
images_per_batch, num_workers
)
images_per_worker = images_per_batch // num_workers
dataset_dicts = get_detection_dataset_dicts(
cfg.DATASETS.TRAIN,
filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
if cfg.MODEL.KEYPOINT_ON
else 0,
proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
)
dataset = DatasetFromList(dataset_dicts, copy=False)
if mapper is None:
mapper = PlusDatasetMapper(cfg, dataset, True)
dataset = MapDataset(dataset, mapper)
sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
logger = logging.getLogger(__name__)
logger.info("Using training sampler {}".format(sampler_name))
if sampler_name == "TrainingSampler":
sampler = samplers.TrainingSampler(len(dataset))
elif sampler_name == "RepeatFactorTrainingSampler":
sampler = samplers.RepeatFactorTrainingSampler(
dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
)
else:
raise ValueError("Unknown training sampler: {}".format(sampler_name))
if cfg.DATALOADER.ASPECT_RATIO_GROUPING:
data_loader = torch.utils.data.DataLoader(
dataset,
sampler=sampler,
num_workers=cfg.DATALOADER.NUM_WORKERS,
batch_sampler=None,
collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements
worker_init_fn=worker_init_reset_seed,
) # yield individual mapped dict
data_loader = AspectRatioGroupedDataset(data_loader, images_per_worker)
else:
batch_sampler = torch.utils.data.sampler.BatchSampler(
sampler, images_per_worker, drop_last=True
)
# drop_last so the batch always have the same size
data_loader = torch.utils.data.DataLoader(
dataset,
num_workers=cfg.DATALOADER.NUM_WORKERS,
batch_sampler=batch_sampler,
collate_fn=trivial_batch_collator,
worker_init_fn=worker_init_reset_seed,
)
return data_loader
|
007b09ce00814264b3264798d4a0afd05c23d6eb
| 18,257 |
import gdspy
def discRect(radius,w,l,pos,gap,layerRect,layerCircle,layer):
"""
This function creates a disc that is recessed inside of a rectangle. The
amount that the disc is recessed is determined by a gap that surrounds the
perimeter of the disc. This much hangs out past the rectangle to couple to
a bus waveguide.Calls subCircle(...) in order to accomplish the subtraction
This function returns the disc and the surrounding rectangle
radius: radius of circle
w: width of rectangle (vertical)
l: length of rectangle (horizontal)
pos: tuple giving a relative offset of the circle. The offset is determined
by the gap specified, but it can also be added to this other offset. The
default is no additional recession into the rectangle and just a shift
along the length of the rectangle.
gap: the gap surrounding the disc
layerRect: the layer on which the rectangle is written
layerCircle: the layer on which the disc subtracted from the rectangle is
written. This layer is temporarily used for the boolean operation since
ultimately the disc is returned on the same layer on which the rectangle is
drawn.
"""
newRad=radius+gap
# the circle is offset by the gap width away from the rect
posx,posy=pos
pos=(posx,w/2-radius+posy+gap)
print('pos: '+str(pos))
sub=subCircle(newRad,w,l,pos,layerRect,layerCircle,layer)
# add the disc
disc=gdspy.Round(pos,radius,number_of_points=199,**layerRect)
return sub,disc
|
1cb5f505fb868f31771fe6e48faa6399d8b051ad
| 18,258 |
def sub_factory():
"""Subscript text: <pre>H[sub]2[/sub]O</pre><br />
Example:<br />
H[sub]2[/sub]O
"""
return make_simple_formatter("sub", "<sub>%(value)s</sub>"), {}
|
4f721d0713c1a2be496a45c1bf7abe8766572135
| 18,259 |
from typing import Tuple
from math import floor
def train_test_split(
structures: list, targets: list, train_frac: float = 0.8
) -> Tuple[Tuple[list, list], Tuple[list, list]]:
"""Split structures and targets into training and testing subsets."""
num_train = floor(len(structures) * train_frac)
return (
(structures[:num_train], targets[:num_train]),
(structures[num_train:], targets[num_train:]),
)
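# e.g. an 80/20 split of ten structures and their targets:
(train_s, train_t), (test_s, test_t) = train_test_split(
    list(range(10)), list(range(10)))
assert len(train_s) == 8 and len(test_s) == 2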
|
279fbe353bf07aa9b9654f4be4c21cf248f2c8bb
| 18,260 |
def reset_password(token):
"""
Handles the reset password process.
"""
if not current_user.is_anonymous():
return redirect(url_for("forum.index"))
form = ResetPasswordForm()
if form.validate_on_submit():
        user = User.query.filter_by(email=form.email.data).first()
        if user is None:
            flash(("Your password token is invalid."), "danger")
            return redirect(url_for("auth.forgot_password"))
        expired, invalid, data = user.verify_reset_token(form.token.data)
if invalid:
flash(("Your password token is invalid."), "danger")
return redirect(url_for("auth.forgot_password"))
if expired:
flash(("Your password is expired."), "danger")
return redirect(url_for("auth.forgot_password"))
if user and data:
user.password = form.password.data
user.save()
flash(("Your password has been updated."), "success")
return redirect(url_for("auth.login"))
form.token.data = token
return render_template("auth/reset_password.html", form=form)
|
c34d090b09a236eecfe101d66ec0daaf3c08eb87
| 18,261 |
def delete(vol_path):
"""
Delete a kv store object for this volume identified by vol_path.
Return true if successful, false otherwise
"""
return kvESX.delete(vol_path)
|
5d120b6a509119587df5f2dc9f1436115b01a257
| 18,262 |
import uuid
def get_tablespace_data(tablespace_path, db_owner):
"""This function returns the tablespace data"""
data = {
"name": "test_%s" % str(uuid.uuid4())[1:8],
"seclabels": [],
"spcacl": [
{
"grantee": db_owner,
"grantor": db_owner,
"privileges": [
{
"privilege_type": "C",
"privilege": True,
"with_grant": False
}
]
}
],
"spclocation": tablespace_path,
"spcoptions": [],
"spcuser": db_owner
}
return data
|
3272e9b941d6bfb426ed754eed7f956c4c0933f4
| 18,263 |
def join_chunks(chunks):
"""empty all chunks out of their sub-lists to be split apart again by split_chunks(). this is because chunks now
looks like this [[t,t,t],[t,t],[f,f,f,][t]]"""
return [item for sublist in chunks for item in sublist]
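# e.g. flattening grouped chunks back into a single flat list:
assert join_chunks([[True, True], [False], [True]]) == [True, True, False, True]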
|
a5daf41ba3fa6e7dafc4f05b29cc5aeaa397d5a5
| 18,264 |
def urls_equal(url1, url2):
"""
Compare two URLObjects, without regard to the order of their query strings.
"""
return (
url1.without_query() == url2.without_query()
and url1.query_dict == url2.query_dict
)
|
f2cbcf111cd5d02fa053fbd373d24b2dab047dfc
| 18,265 |
def bytes_to_ints(bs):
"""
Convert a list of bytes to a list of integers.
>>> bytes_to_ints([1, 0, 2, 1])
[256, 513]
>>> bytes_to_ints([1, 0, 1])
Traceback (most recent call last):
...
ValueError: Odd number of bytes.
>>> bytes_to_ints([])
[]
"""
if len(bs) % 2 != 0:
raise ValueError("Odd number of bytes.")
pairs = zip(bs[::2], bs[1::2])
return [(a << 8) + b for a, b in pairs]
|
e8ac9ec973ff58973703e3e109da5b45d3f9d802
| 18,266 |
import logging
import click
import yaml
def create_default_yaml(config_file):
"""This function creates and saves the default configuration file."""
config_file_path = config_file
imgdb_config_dir = Config.IMGDB_CONFIG_HOME
if not imgdb_config_dir.is_dir():
try:
imgdb_config_dir.mkdir(parents=True, exist_ok=True)
except Exception as e:
logging.critical(
"Something went wrong while trying to create"
" the configuration directory!"
)
logging.debug("Error: %s" % e)
click.echo(
Tcolors.FAIL + "Something went wrong while trying to create"
" the configuration directory!" + Tcolors.ENDC
)
return 0
try:
with open(config_file_path, "w", encoding="utf-8") as config:
yaml.safe_dump(
Config.DEFAULT_CONFIG,
config,
encoding="utf-8",
allow_unicode=True,
default_flow_style=False,
)
click.echo(
Tcolors.OK_GREEN
+ "➜ The configuration file: %s \nhas been created successfully!"
% config_file_path
+ Tcolors.ENDC
)
except Exception as e:
logging.critical(
"Something went wrong while trying to save"
" the program's configuration file!"
)
logging.debug("Error: %s" % e)
click.echo(
Tcolors.FAIL + "Something went wrong while trying to save"
" the program's configuration file!" + Tcolors.ENDC
)
return 0
return parse_config_yaml(Config.DEFAULT_CONFIG, first_run=True)
|
786b3488b4400a66f44900811171df396b3ab3a9
| 18,267 |
import site
def canRun(page):
""" Returns True if the given check page is still set to "Run";
otherwise, returns false. Accepts one required argument, "page."
"""
print("Checking checkpage.")
page = site.Pages[page]
text = page.text()
if text == "Run":
print("We're good!")
return True
return False
|
3cb1276d82ffeadb1a730bb2eb1c1f3427905e94
| 18,268 |
import os
def parse_configs_for_multis(conf_list):
"""
parse list of condor config files searching for multi line configurations
Args:
conf_list: string, output of condor_config_val -config
Returns:
multi: dictionary. keys are first line of multi line config
values are the rest of the multi line config
keeping original formatting
example: this paragraph in a condor_configuration :
JOB_ROUTER_CREATE_IDTOKEN_atlas @=end
sub = "Atlasfetime = 900"
lifetime = 900
scope = "ADVERTISE_STARTD, ADVERTISE_MASTER, READ"
dir = "$(LOCAL_DIR)/jrtokens"
filename = "ce_atlas.idtoken"
owner = "atlas"
@end
would generate a multi entry like this:
multi["JOB_ROUTER_CREATE_IDTOKEN_atlas"] =
'@=end\n sub = "Atlas"\n lifetime = 900\n ..... @end\n'
these entries will be rendered into the frontend.condor_config with proper spacing and line returns
unlike how they would be rendered by condor_config_val --dump
KNOWN PROBLEM: if condor config has two multi-line configs with same name and different
lines generated config file may be incorrect. The condor config is probably incorrect
as well :)
"""
multi = {}
for conf in conf_list:
conf = conf.strip()
if os.path.exists(conf):
with open(conf) as fd:
text = fd.readlines()
pdict = find_multilines(text)
multi.update(pdict)
return multi
|
faf9ea4a5ce40c31797a4d570f79826902bc05da
| 18,269 |
def _bgp_predict_wrapper(model, *args, **kwargs):
"""
Just to ensure that the outgoing shapes are right (i.e. 2D).
"""
mean, cov = model.predict_y(*args, **kwargs)
if len(mean.shape) == 1:
mean = mean[:, None]
if len(cov.shape) == 1:
cov = cov[:, None]
return mean, cov
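# Hedged usage sketch with a stand-in model (not from the source): any object
# exposing predict_y(...) whose outputs come back 1D gets them lifted to 2D.
import numpy as np

class _FakeModel:
    def predict_y(self, X):
        return np.zeros(len(X)), np.ones(len(X))  # deliberately 1D

mean, cov = _bgp_predict_wrapper(_FakeModel(), np.arange(5))
assert mean.shape == (5, 1) and cov.shape == (5, 1)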
|
23bb62927e767057df94ef8b95b57874fc078d7f
| 18,270 |
import copy
import json
def create_waninterface(config_waninterface, waninterfaces_n2id, site_id):
"""
Create a WAN Interface
:param config_waninterface: WAN Interface config dict
:param waninterfaces_n2id: WAN Interface Name to ID dict
:param site_id: Site ID to use
:return: New WAN Interface ID
"""
# make a copy of waninterface to modify
waninterface_template = copy.deepcopy(config_waninterface)
# perform name -> ID lookups
name_lookup_in_template(waninterface_template, 'network_id', wannetworks_n2id)
name_lookup_in_template(waninterface_template, 'label_id', waninterfacelabels_n2id)
local_debug("WANINTERFACE TEMPLATE: " + str(json.dumps(waninterface_template, indent=4)))
# create waninterface
waninterface_resp = sdk.post.waninterfaces(site_id, waninterface_template)
if not waninterface_resp.cgx_status:
throw_error("Waninterface creation failed: ", waninterface_resp)
waninterface_name = waninterface_resp.cgx_content.get('name')
waninterface_id = waninterface_resp.cgx_content.get('id')
if not waninterface_name or not waninterface_id:
throw_error("Unable to determine waninterface attributes (Name: {0}, ID {1})..".format(waninterface_name,
waninterface_id))
output_message(" Created waninterface {0}.".format(waninterface_name))
# update caches
waninterfaces_n2id[waninterface_name] = waninterface_id
return waninterface_id
|
f4de347a1e8120e1da8c38d16ed0054e13f13ae5
| 18,271 |
import logging
import numpy
import theano
from theano import sparse, tensor

logger = logging.getLogger(__name__)

# `convolution_indices` is assumed to be provided by the surrounding module
# (it builds the sparse index structures used below).
def max_pool(images, imgshp, maxpoolshp):
"""
Implements a max pooling layer
Takes as input a 2D tensor of shape batch_size x img_size and performs max pooling.
Max pooling downsamples by taking the max value in a given area, here defined by
maxpoolshp. Outputs a 2D tensor of shape batch_size x output_size.
Parameters
----------
images : 2D tensor
        Tensor containing images on which to apply convolution. Assumed to be \
of shape `batch_size x img_size`
imgshp : tuple
Tuple containing image dimensions
maxpoolshp : tuple
Tuple containing shape of area to max pool over
Returns
-------
    out1 : 2D tensor
        Symbolic result: the max-pooled output, flattened to
        `batch_size x output_size`
    out2 : tuple
        Logical (spatial) shape of the output
"""
N = numpy
poolsize = N.int64(N.prod(maxpoolshp))
# imgshp contains either 2 entries (height,width) or 3 (nfeatures,h,w)
# in the first case, default nfeatures to 1
    if N.size(imgshp) == 2:
imgshp = (1,)+imgshp
# construct indices and index pointers for sparse matrix, which, when multiplied
# with input images will generate a stack of image patches
indices, indptr, spmat_shape, sptype, outshp = \
convolution_indices.conv_eval(imgshp, maxpoolshp, maxpoolshp, mode='valid')
logger.info('XXXXXXXXXXXXXXXX MAX POOLING LAYER XXXXXXXXXXXXXXXXXXXX')
logger.info('imgshp = {0}'.format(imgshp))
logger.info('maxpoolshp = {0}'.format(maxpoolshp))
logger.info('outshp = {0}'.format(outshp))
# build sparse matrix, then generate stack of image patches
csc = theano.sparse.CSM(sptype)(N.ones(indices.size), indices, indptr, spmat_shape)
patches = sparse.structured_dot(csc, images.T).T
pshape = tensor.stack(images.shape[0]*\
tensor.as_tensor(N.prod(outshp)),
tensor.as_tensor(imgshp[0]),
tensor.as_tensor(poolsize))
    patch_stack = tensor.reshape(patches, pshape, ndim=3)
out1 = tensor.max(patch_stack, axis=2)
pshape = tensor.stack(images.shape[0],
tensor.as_tensor(N.prod(outshp)),
tensor.as_tensor(imgshp[0]))
    out2 = tensor.reshape(out1, pshape, ndim=3)
out3 = tensor.DimShuffle((False,)*3, (0,2,1))(out2)
return tensor.flatten(out3,2), outshp
|
acbbfb686f77dc6e05f385b2addc8f49e7f344d3
| 18,272 |
import numpy as np

def rmean(A):
""" Removes time-mean of llc_4320 3d fields; axis=2 is time"""
ix,jx,kx = A.shape
Am = np.repeat(A.mean(axis=2),kx)
Am = Am.reshape(ix,jx,kx)
return A-Am
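# Hedged sketch: removing the time mean (axis=2) leaves zero-mean series.
A = np.random.rand(4, 3, 10)   # (i, j, time)
anom = rmean(A)
assert np.allclose(anom.mean(axis=2), 0.0)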
|
39edcdca0cc4d411c579991086bf555d65686020
| 18,273 |
def default_pruning_settings():
"""
:return: the default pruning settings for optimizing a model
"""
mask_type = "unstructured" # TODO: update based on quantization
sparsity = 0.85 # TODO: dynamically choose sparsity level
balance_perf_loss = 1.0
filter_min_sparsity = 0.4
filter_min_perf_gain = 0.75
filter_min_recovery = -1.0
return PruningSettings(
mask_type,
sparsity,
balance_perf_loss,
filter_min_sparsity,
filter_min_perf_gain,
filter_min_recovery,
)
|
a81f153872a20eaaa5e654957ddf7b4a79ff42a9
| 18,274 |
def build_request_url(base_url, sub_url, query_type, api_key, value):
"""
Function that creates the url and parameters
:param base_url: The base URL from the app.config
:param sub_url: The sub URL from the app.config file. If not defined it will be: "v1/pay-as-you-go/"
:param query_type: The query type of the request
    :param api_key: The API key from the app.config
:param value: The artifact value
:return: Tuple. A string of the URL and a dict of the params
:rtype: tuple
"""
# Setup the mapping dict for APIVoid API call types and the url and params for the requests call.
url_map = {
"IP Reputation": {
"url": "iprep",
"params": {
"ip": value
}
},
"Domain Reputation": {
"url": "domainbl",
"params": {
"host": value
}
},
"DNS Lookup": {
"url": "dnslookup",
"params": {
"action": "dns-a",
"host": value
}
},
"Email Verify": {
"url": "emailverify",
"params": {
"email": value
}
},
"ThreatLog": {
"url": "threatlog",
"params": {
"host": value
}
},
"SSL Info": {
"url": "sslinfo",
"params": {
"host": value
}
},
"URL Reputation": {
"url": "urlrep",
"params": {
"url": url_encode(value.encode('utf8')) if isinstance(value, str) else value
}
},
"selftest": {
"url": "sitetrust",
"params": {
"stats": value
}
},
}
    request_type = url_map.get(query_type)
    if request_type is None:
        raise ValueError("{0} is an invalid APIVoid request type or it's not supported".format(query_type))
    request_url = request_type.get("url")
    request_params = request_type.get("params")
# Join the base url, the request type and the sub url
the_url = "/".join((base_url, request_url, sub_url))
# Append the api key
the_url = u"{0}?key={1}".format(the_url, api_key)
# Append the params
for (k, v) in request_params.items():
the_url = u"{0}&{1}={2}".format(the_url, k, v)
LOG.info("Using URL: %s", the_url)
return the_url
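# Hedged usage sketch (illustrative values only; assumes a module-level LOG
# logger is configured, as in the original integration):
#   build_request_url("https://endpoint.apivoid.com", "v1/pay-as-you-go/",
#                     "IP Reputation", "my-api-key", "8.8.8.8")
#   # -> "https://endpoint.apivoid.com/iprep/v1/pay-as-you-go/?key=my-api-key&ip=8.8.8.8"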
|
ecf3ef0a3d7d5591b1f6aa9787f4f2984688f9f2
| 18,275 |
import re
def snake_to_camel(action_str):
"""
for all actions and all objects unsnake case and camel case.
re-add numbers
"""
if action_str == "toggle object on":
return "ToggleObjectOn"
elif action_str == "toggle object off":
return "ToggleObjectOff"
def camel(match):
return match.group(1)[0].upper() + match.group(1)[1:] + match.group(2).upper()
action_str = re.sub(r'(.*?) ([a-zA-Z])', camel, action_str)
if action_str.startswith("Look"): # LookDown_15, LookUp_15
action_str += "_15"
if action_str.startswith("Rotate"): # RotateRight_90, RotateLeft_90
action_str += "_90"
if action_str.startswith("Move"): # MoveAhead_25
action_str += "_25"
return action_str[0].upper() + action_str[1:]
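# Hedged sketch: a few sample conversions (inputs are illustrative).
assert snake_to_camel("toggle object on") == "ToggleObjectOn"
assert snake_to_camel("move ahead") == "MoveAhead_25"
assert snake_to_camel("rotate left") == "RotateLeft_90"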
|
c71745c02fc712e2b463e7bcb022bfca41c2efd4
| 18,276 |
import datetime
def todayDate() -> datetime.date:
"""
:return: ex: datetime.date(2020, 6, 28)
"""
return datetime.date.today()
|
dc9dae8bbeaabf5c8d7d9e3509d1e331e2c609ff
| 18,277 |
def lookup_facade(name, version):
"""
Given a facade name and version, attempt to pull that facade out
of the correct client<version>.py file.
"""
for _version in range(int(version), 0, -1):
try:
facade = getattr(CLIENTS[str(_version)], name)
return facade
except (KeyError, AttributeError):
continue
else:
raise ImportError("No supported version for facade: "
"{}".format(name))
|
eb76df1f7f3a9991c3e283643a52784c9d65f4f1
| 18,278 |
import time
def create_service(netUrl, gwUrl, attributes, token):
"""
Create NFN Service in MOP Environment.
:param netUrl: REST Url endpoint for network
:param gwUrl: REST Url endpoint for gateway
    :param attributes: service parameters, e.g. service type or name, etc
    :param token: session token for NF Console
:return serviceId, serviceUrl: created service details
"""
url = netUrl+'/services'
gwId = gwUrl.split('/')[8]
if attributes['type'] == 'host':
# checking if service name is provided
if attributes['name']:
serviceName = attributes['name']
else:
serviceName = attributes['gateway']+'--'+str(attributes['ip'])+'--'+str(attributes['port'])
data = {
"serviceClass": "CS",
"name": serviceName,
"serviceInterceptType": "IP",
"serviceType": "ALL",
"endpointId": gwId,
"pbrType": "WAN",
"dataInterleaving": "NO",
"transparency": "NO",
"networkIp": attributes['ip'],
"networkFirstPort": attributes['port'],
"networkLastPort": attributes['port'],
"interceptIp": attributes['ip'],
"interceptFirstPort": attributes['port'],
"interceptLastPort": attributes['port']
}
if attributes['type'] == 'network':
# checking if service name is provided
if attributes['name']:
serviceName = attributes['name']
else:
serviceName = attributes['gateway']+'--'+str(attributes['netIp'])+'--'+str(attributes['netCidr'])
data = {
"serviceClass": "GW",
"name": serviceName,
"serviceInterceptType": "IP",
"serviceType": "ALL",
"endpointId": gwId,
"lowLatency": "NO",
"dataInterleaving": "NO",
"transparency": "NO",
"multicast": "OFF",
"dnsOptions": "NONE",
"icmpTunnel": "YES",
"cryptoLevel": "STRONG",
"permanentConnection": "YES",
"collectionLocation": "BOTH",
"pbrType": "WAN",
"rateSmoothing": "NO",
"gatewayClusterId": None,
"interceptIp": attributes['netIp'],
"gatewayIp": attributes['netIp'],
"gatewayCidrBlock": attributes['netCidr'],
"localNetworkGateway": "YES"
}
returnData = nfreq((url, data), "post", token)
serviceUrl = returnData['_links']['self']['href']
time.sleep(1)
return serviceUrl, serviceName
|
848f8375273ec4583a6c5d361c8a319ff43ba2a8
| 18,279 |
def _drawBlandAltman(mean, diff, md, sd, percentage, limitOfAgreement, confidenceIntervals, detrend, title, ax, figureSize, dpi, savePath, figureFormat, meanColour, loaColour, pointColour):
"""
Sub function to draw the plot.
"""
if ax is None:
fig, ax = plt.subplots(1,1, figsize=figureSize, dpi=dpi)
plt.rcParams.update({'font.size': 15,'xtick.labelsize':15,
'ytick.labelsize':15})
ax.tick_params(axis='x', labelsize=15)
ax.tick_params(axis='y', labelsize=15)
# ax.rcParams.update({'font.size': 15})
# ax=ax[0,0]
draw = True
else:
draw = False
##
# Plot CIs if calculated
##
if 'mean' in confidenceIntervals.keys():
ax.axhspan(confidenceIntervals['mean'][0],
confidenceIntervals['mean'][1],
facecolor='lightblue', alpha=0.2)
if 'upperLoA' in confidenceIntervals.keys():
ax.axhspan(confidenceIntervals['upperLoA'][0],
confidenceIntervals['upperLoA'][1],
facecolor='wheat', alpha=0.2)
if 'lowerLoA' in confidenceIntervals.keys():
ax.axhspan(confidenceIntervals['lowerLoA'][0],
confidenceIntervals['lowerLoA'][1],
facecolor='wheat', alpha=0.2)
##
# Plot the mean diff and LoA
##
ax.axhline(md, color=meanColour, linestyle='--')
ax.axhline(md + limitOfAgreement*sd, color=loaColour, linestyle='--')
ax.axhline(md - limitOfAgreement*sd, color=loaColour, linestyle='--')
##
# Plot the data points
##
# ax.scatter(mean[0:22], diff[0:22], alpha=0.8, c='orange', marker='.', s=100, label='India Male')
# ax.scatter(mean[22:44], diff[22:44], alpha=0.8, c='blue', marker='.', s=100, label='India Female')
# ax.scatter(mean[44:66], diff[44:66], alpha=0.8, c='red', marker='.', s=100, label='Sierra Leone Male')
# ax.scatter(mean[66:88], diff[66:88], alpha=0.8, c='purple', marker='.', s=100, label='Sierra Leone Female')
ax.scatter(mean[0:20], diff[0:20], alpha=0.8, c='orange', marker='.', s=100, label='India Male')
ax.scatter(mean[20:39], diff[20:39], alpha=0.8, c='blue', marker='.', s=100, label='India Female')
ax.scatter(mean[39:59], diff[39:59], alpha=0.8, c='red', marker='.', s=100, label='Sierra Leone Male')
ax.scatter(mean[59:77], diff[59:77], alpha=0.8, c='purple', marker='.', s=100, label='Sierra Leone Female')
ax.set_ylim(-50, 70)
ax.legend(loc='upper right', fontsize=12)
trans = transforms.blended_transform_factory(
ax.transAxes, ax.transData)
limitOfAgreementRange = (md + (limitOfAgreement * sd)) - (md - limitOfAgreement*sd)
offset = (limitOfAgreementRange / 100.0) * 1.5
ax.text(0.98, md + offset, 'Mean', ha="right", va="bottom", transform=trans)
ax.text(0.98, md - offset, f'{md:.2f}', ha="right", va="top", transform=trans)
ax.text(0.98, md + (limitOfAgreement * sd) + offset, f'+{limitOfAgreement:.2f} SD', ha="right", va="bottom", transform=trans)
ax.text(0.98, md + (limitOfAgreement * sd) - offset, f'{md + limitOfAgreement*sd:.2f}', ha="right", va="top", transform=trans)
ax.text(0.98, md - (limitOfAgreement * sd) - offset, f'-{limitOfAgreement:.2f} SD', ha="right", va="top", transform=trans)
ax.text(0.98, md - (limitOfAgreement * sd) + offset, f'{md - limitOfAgreement*sd:.2f}', ha="right", va="bottom", transform=trans)
# Only draw spine between extent of the data
# ax.spines['left'].set_bounds(min(diff), max(diff))
# ax.spines['bottom'].set_bounds(min(mean), max(mean))
# Hide the right and top spines
# ax.spines['right'].set_visible(False)
# ax.spines['top'].set_visible(False)
if percentage:
ax.set_ylabel('Percentage difference between methods', fontsize=20)
else:
ax.set_ylabel('Difference between methods', fontsize=20)
ax.set_xlabel('Mean of methods', fontsize=20)
# tickLocs = ax.xaxis.get_ticklocs()
# cadenceX = tickLocs[2] - tickLocs[1]
# tickLocs = rangeFrameLocator(tickLocs, (min(mean), max(mean)))
# ax.xaxis.set_major_locator(ticker.FixedLocator(tickLocs))
# tickLocs = ax.yaxis.get_ticklocs()
# cadenceY = tickLocs[2] - tickLocs[1]
# tickLocs = rangeFrameLocator(tickLocs, (min(diff), max(diff)))
# ax.yaxis.set_major_locator(ticker.FixedLocator(tickLocs))
# plt.draw() # Force drawing to populate tick labels
# labels = rangeFrameLabler(ax.xaxis.get_ticklocs(), [item.get_text() for item in ax.get_xticklabels()], cadenceX)
# ax.set_xticklabels(labels)
# labels = rangeFrameLabler(ax.yaxis.get_ticklocs(), [item.get_text() for item in ax.get_yticklabels()], cadenceY)
# ax.set_yticklabels(labels)
# ax.patch.set_alpha(0)
if detrend[0] is None:
pass
else:
plt.text(1, -0.1, f'{detrend[0]} slope correction factor: {detrend[1]:.2f} ± {detrend[2]:.2f}', ha='right', transform=ax.transAxes)
if title:
ax.set_title(title)
##
# Save or draw
##
plt.tight_layout()
if (savePath is not None) & draw:
fig.savefig(savePath, format=figureFormat, dpi=dpi)
plt.close()
elif draw:
plt.show()
else:
return ax
|
43bf53cd4594c1ed58860a6127f40f6345bea6ba
| 18,280 |
def rename_columns(df):
"""This function renames certain columns of the DataFrame
:param df: DataFrame
:type df: pandas DataFrame
:return: DataFrame
:rtype: pandas DataFrame
"""
renamed_cols = {"Man1": "Manufacturer (PE)",
"Pro1": "Model (PE)",
"Man2": "Manufacturer (BAT)",
"Pro2": "Model (BAT)",
"Top": "Type [-coupled]",
'P_PV2AC_in': 'P_PV2AC_in [W]',
'P_PV2AC_out': 'P_PV2AC_out [W]',
'U_PV_min': 'U_PV_min [V]',
'U_PV_nom': 'U_PV_nom [V]',
'U_PV_max': 'U_PV_max [V]',
'U_MPP_min': 'U_MPP_min [V]',
'U_MPP_max': 'U_MPP_max [V]',
'P_AC2BAT_in': 'P_AC2BAT_in [W]',
'P_BAT2AC_out': 'P_BAT2AC_out [W]',
'P_PV2BAT_in': 'P_PV2BAT_in [W]',
'P_BAT2PV_out': 'P_BAT2PV_out [W]',
'P_PV2BAT_out': 'P_PV2BAT_out [W]',
'P_BAT2AC_in': 'P_BAT2AC_in [W]',
'U_BAT_min': 'U_BAT_min [V]',
'U_BAT_nom': 'U_BAT_nom [V]',
'U_BAT_max': 'U_BAT_max [V]',
'E_BAT_100': 'E_BAT_100 [kWh]',
'E_BAT_50': 'E_BAT_50 [kWh]',
'E_BAT_25': 'E_BAT_25 [kWh]',
'E_BAT_usable': 'E_BAT_usable [kWh]',
'eta_BAT_100': 'eta_BAT_100',
'eta_BAT_50': 'eta_BAT_50',
'eta_BAT_25': 'eta_BAT_25',
'eta_BAT': 'eta_BAT',
'P_SYS_SOC1_AC': 'P_SYS_SOC1_AC [W]',
'P_SYS_SOC1_DC': 'P_SYS_SOC1_DC [W]',
'P_SYS_SOC0_AC': 'P_SYS_SOC0_AC [W]',
'P_SYS_SOC0_DC': 'P_SYS_SOC0_DC [W]',
'P_PVINV_AC': 'P_PVINV_AC [W]',
'P_PERI_AC': 'P_PERI_AC [W]',
'P_PV2BAT_DEV_IMPORT': 'P_PV2BAT_DEV_IMPORT [W]',
'P_PV2BAT_DEV_EXPORT': 'P_PV2BAT_DEV_EXPORT [W]',
'P_BAT2AC_DEV_IMPORT': 'P_BAT2AC_DEV_IMPORT [W]',
'P_BAT2AC_DEV_EXPORT': 'P_BAT2AC_DEV_EXPORT [W]',
't_DEAD': 't_DEAD [s]',
't_SETTLING': 't_SETTLING [s]'
}
return df.rename(columns=renamed_cols)
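# Hedged sketch with a toy frame: known columns are renamed, others untouched.
import pandas as pd
toy = pd.DataFrame(columns=["Man1", "Pro1", "E_BAT_100", "extra"])
assert list(rename_columns(toy).columns) == [
    "Manufacturer (PE)", "Model (PE)", "E_BAT_100 [kWh]", "extra"
]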
|
9c22747d7c6da20cab1593388db5575a38aa313f
| 18,281 |
import requests
import json
def get_github_emoji(): # pragma: no cover
"""Get Github's usable emoji."""
try:
resp = requests.get(
'https://api.github.com/emojis',
timeout=30
)
except Exception:
return None
return json.loads(resp.text)
|
533a56e2e59b039cbc45ab5acb7ab4e8487e4ad9
| 18,282 |
import numpy as np

def transport_stable(p, q, C, lambda1, lambda2, epsilon, scaling_iter, g):
"""
Compute the optimal transport with stabilized numerics.
Args:
p: uniform distribution on input cells
q: uniform distribution on output cells
C: cost matrix to transport cell i to cell j
lambda1: regularization parameter for marginal constraint for p.
lambda2: regularization parameter for marginal constraint for q.
epsilon: entropy parameter
scaling_iter: number of scaling iterations
g: growth value for input cells
"""
u = np.zeros(len(p))
v = np.zeros(len(q))
b = np.ones(len(q))
p = p * g
q = q * np.average(g)
K0 = np.exp(-C / epsilon)
K = np.copy(K0)
alpha1 = lambda1 / (lambda1 + epsilon)
alpha2 = lambda2 / (lambda2 + epsilon)
for i in range(scaling_iter):
# scaling iteration
a = (p / (K.dot(b))) ** alpha1 * np.exp(-u / (lambda1 + epsilon))
b = (q / (K.T.dot(a))) ** alpha2 * np.exp(-v / (lambda2 + epsilon))
# stabilization
if (max(max(abs(a)), max(abs(b))) > 1e100):
u = u + epsilon * np.log(a)
v = v + epsilon * np.log(b) # absorb
K = (K0.T * np.exp(u / epsilon)).T * np.exp(v / epsilon)
a = np.ones(len(p))
b = np.ones(len(q))
return (K.T * a).T * b
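# Hedged sketch: a tiny problem with uniform growth; only the shape of the
# returned coupling is asserted here.
p = np.ones(3) / 3
q = np.ones(4) / 4
C = np.random.rand(3, 4)
tmap = transport_stable(p, q, C, lambda1=1.0, lambda2=1.0, epsilon=0.05,
                        scaling_iter=500, g=np.ones(3))
assert tmap.shape == (3, 4)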
|
584607e57b4d216633ef0a03c2cb06726b0f423f
| 18,283 |
from typing import Tuple

# `Coord` is assumed to be a 2D point alias; defined here so the snippet is
# self-contained.
Coord = Tuple[float, float]

def add(A: Coord, B: Coord, s: float = 1.0, t: float = 1.0) -> Coord:
"""Return the point sA + tB."""
return (s * A[0] + t * B[0], s * A[1] + t * B[1])
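# Hedged sketch: default weights give plain vector addition.
assert add((1.0, 2.0), (3.0, 4.0)) == (4.0, 6.0)
assert add((1.0, 2.0), (3.0, 4.0), s=2.0, t=0.5) == (3.5, 6.0)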
|
53c2f750199d785140154881fdc0ace31b9e2472
| 18,284 |
def from_binary(bin_data: str, delimiter: str = " ") -> bytes:
"""Converts binary string into bytes object"""
if delimiter == "":
data = [bin_data[i:i+8] for i in range(0, len(bin_data), 8)]
else:
data = bin_data.split(delimiter)
data = [int(byte, 2) for byte in data]
return bytes(data)
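# Hedged sketch: space-delimited and undelimited inputs decode identically.
assert from_binary("01000001 01000010") == b"AB"
assert from_binary("0100000101000010", delimiter="") == b"AB"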
|
f16706da2d5b9ae5984a35a13ebd02ae94581153
| 18,285 |
def send_raw(task, raw_bytes):
"""Send raw bytes to the BMC. Bytes should be a string of bytes.
:param task: a TaskManager instance.
:param raw_bytes: a string of raw bytes to send, e.g. '0x00 0x01'
:returns: a tuple with stdout and stderr.
:raises: IPMIFailure on an error from ipmitool.
:raises: MissingParameterValue if a required parameter is missing.
:raises: InvalidParameterValue when an invalid value is specified.
"""
node_uuid = task.node.uuid
LOG.debug('Sending node %(node)s raw bytes %(bytes)s',
{'bytes': raw_bytes, 'node': node_uuid})
driver_info = _parse_driver_info(task.node)
cmd = 'raw %s' % raw_bytes
try:
out, err = _exec_ipmitool(driver_info, cmd)
LOG.debug('send raw bytes returned stdout: %(stdout)s, stderr:'
' %(stderr)s', {'stdout': out, 'stderr': err})
except (exception.PasswordFileFailedToCreate,
processutils.ProcessExecutionError) as e:
LOG.exception('IPMI "raw bytes" failed for node %(node_id)s '
'with error: %(error)s.',
{'node_id': node_uuid, 'error': e})
raise exception.IPMIFailure(cmd=cmd)
return out, err
|
1f903f1942c5d1b673c9019f9023b1ddf7d2c07a
| 18,286 |
def one_on_f_weight(f, normalize=True):
""" Literally 1/f weight. Useful for fitting linspace data in logspace.
Parameters
----------
f: array
Frequency
normalize: boolean, optional
        Normalize the weight to [0, 1].
Defaults to True.
Returns
-------
weight: array
The 1/f weight.
"""
weight = 1/f
if normalize:
weight /= max(weight)
    return weight
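# Hedged sketch: weights fall off as 1/f and peak at the lowest frequency.
import numpy as np
w = one_on_f_weight(np.array([1.0, 2.0, 4.0]))
assert np.allclose(w, [1.0, 0.5, 0.25])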
|
54301aa7480e6f3520cbfcccfa463a2a02d34b9c
| 18,287 |
from numpy import sqrt, append

# `ZYZRot` (a ZYZ Euler rotation-matrix builder) is assumed to be provided
# by the surrounding module.
def PCSPRE1M2SOC(p0, meas_pcs, meas_pre, x_pcs, y_pcs, z_pcs,
                 x_pre, y_pre, z_pre, wt_pcs=1.0, wt_pre=1.0,
                 tol_pcs=None, tol_pre=None):
"""
Optimize two X-tensors and two PRE centres to two common sites
@param p0: List containing initial guesses for (17 unknowns):
        metal site 1 <x,y,z>, Xaxial and Xrhombic at site 1, Euler angles
        <A,B,G> at site 1 AND metal site 2 <x,y,z>, Xaxial and Xrhombic at
        site 2, Euler angles <A,B,G> at site 2, and the PRE constant c
    @param meas_pcs: The numpy array of measured PCS
@param meas_pre: The numpy array of measured PRE
    @param x_pcs, x_pre: The numpy arrays of x coordinates of associated exp vals
    @param y_pcs, y_pre: The numpy arrays of y coordinates of associated exp vals
    @param z_pcs, z_pre: The numpy arrays of z coordinates of associated exp vals
@param wt_pcs: [OPTIONAL] The weight of the PCS terms in optimization
@param wt_pre: [OPTIONAL] The weight of the PRE terms in optimization
"""
xm1,ym1,zm1, ax1,rh1, a1,b1,g1,xm2,ym2,zm2, ax2,rh2, a2,b2,g2, c = p0
r_v1 = sqrt((x_pre-xm1)**2 +(y_pre-ym1)**2 + (z_pre-zm1)**2)
r_v2 = sqrt((x_pre-xm2)**2 +(y_pre-ym2)**2 + (z_pre-zm2)**2)
zyz_rot1 = ZYZRot(a1, b1, g1)
zyz_rot2 = ZYZRot(a2, b2, g2)
X1, X2 = (x_pcs - xm1), (x_pcs - xm2)
Y1, Y2 = (y_pcs - ym1), (y_pcs - ym2)
Z1, Z2 = (z_pcs - zm1), (z_pcs - zm2)
x_t1 = zyz_rot1[0][0]*X1 + zyz_rot1[0][1]*Y1 + zyz_rot1[0][2]*Z1
y_t1 = zyz_rot1[1][0]*X1 + zyz_rot1[1][1]*Y1 + zyz_rot1[1][2]*Z1
z_t1 = zyz_rot1[2][0]*X1 + zyz_rot1[2][1]*Y1 + zyz_rot1[2][2]*Z1
x_t2 = zyz_rot2[0][0]*X2 + zyz_rot2[0][1]*Y2 + zyz_rot2[0][2]*Z2
y_t2 = zyz_rot2[1][0]*X2 + zyz_rot2[1][1]*Y2 + zyz_rot2[1][2]*Z2
z_t2 = zyz_rot2[2][0]*X2 + zyz_rot2[2][1]*Y2 + zyz_rot2[2][2]*Z2
r2_1 = (x_t1*x_t1)+(y_t1*y_t1)+(z_t1*z_t1)
r2_2 = (x_t2*x_t2)+(y_t2*y_t2)+(z_t2*z_t2)
r5_1 = (r2_1*r2_1) * sqrt(r2_1)
r5_2 = (r2_2*r2_2) * sqrt(r2_2)
tmp_1 = 1.0/r5_1
tmp_2 = 1.0/r5_2
PCS_1 = (tmp_1*(ax1*(3.0*z_t1*z_t1-r2_1)+rh1*1.5*(x_t1*x_t1-y_t1*y_t1)))
PCS_2 = (tmp_2*(ax2*(3.0*z_t2*z_t2-r2_2)+rh2*1.5*(x_t2*x_t2-y_t2*y_t2)))
err_pcs = meas_pcs - (PCS_1 + PCS_2)
err_pre = meas_pre - (c/r_v1**6 + c/r_v2**6)
    if tol_pcs is not None and tol_pre is not None:
for pcs_item in range(0, len(err_pcs)):
if abs(err_pcs[pcs_item]) - tol_pcs[pcs_item] <= 0.0:
err_pcs[pcs_item] = 0.0
for pre_item in range(0, len(err_pre)):
if abs(err_pre[pre_item]) - tol_pre[pre_item] <= 0.0:
err_pre[pre_item] = 0.0
#TODO: Check if this should be squared (below)
err_pcs = err_pcs*wt_pcs
err_pre = err_pre*wt_pre
err = append(err_pcs, err_pre)
return err
|
ec0f266ed1a8b1a45504c13057486bd26e3cc4a5
| 18,288 |
import sys
import random
import pickle as pkl
import numpy as np
import scipy.sparse as sp
import networkx as nx

# `parse_index_file` and `sample_mask` are assumed to be helpers defined in
# the surrounding module (as in the standard GCN data loaders).
def load_randomdata(dataset_str, iter):
"""Load data."""
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
NL = 2312
NC = 120
elif dataset_str == 'cora':
NL = 1708
NC = 140
else:
NL = 18717
NC = 60
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
#fixed 500 for validation read from file, choose random 140 from the others for train
'''
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y)+500)
'''
idx_val=[int(item) for item in open("source/"+dataset_str+"/val_idx"+str(iter)+".txt").readlines()]
idx_test = test_idx_range.tolist()
idx_train = random.sample(list(set(range(0,NL))-set(idx_val)),NC);
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask
|
476a54078680bb711a77fc9e3900192a1ef3b811
| 18,289 |
def plot(figsize=None, formats=None, limit=100, titlelen=10, **kwargs):
"""Display an image [in a Jupyter Notebook] from a Quilt fragment path.
Intended for use with `%matplotlib inline`.
    Convenience method that loops over subplots that call
`plt.imshow(image.imread(FRAG_PATH))`.
Keyword arguments
* figsize=None # None means auto, else provide (HEIGHT_INCHES, WIDTH_INCHES)
* formats=None # List of extensions as strings ['jpg', 'png', ...]
* limit=100 # maximum number of images to display
* titlelen=10 # max number of characters in subplot title
* **kwargs - all remaining kwargs are passed to plt.subplots;
see https://matplotlib.org/api/_as_gen/matplotlib.pyplot.subplots.html
"""
# pylint: disable=protected-access
def _plot(node, paths):
lower_formats = set((x.lower() for x in formats)) if formats is not None else None
def node_filter(frag, meta):
filepath = meta.get('_system', {}).get('filepath', None)
# don't try to read DataFrames as images
if isinstance(frag, string_types) and filepath:
_, ext = splitext_no_dot(filepath)
if lower_formats is None or ext.lower() in lower_formats:
return True
return False
# assume DataNode has one path; doesn't work with multi-fragment images
display = [('', paths[0], node._meta)]
# for GroupNodes, display all DataNode children
if isinstance(node, GroupNode):
datanodes = [(x, y) for (x, y) in node._items() if isinstance(y, DataNode)]
display = [(x, y._data(), y._meta) for (x, y) in datanodes]
# sort by name so iteration is reproducible (and unit tests pass)
display = sorted(display, key=lambda rec: rec[0])
display = [x for x in display if node_filter(x[1], x[2])]
if len(display) > limit:
print('Displaying {} of {} images{}'.format(limit, len(display), ELLIPSIS))
display = display[:limit]
# display can be empty e.g. if no DataNode children
if not display:
print('No images to display.')
return
# cast to int to avoid downstream complaints of
# 'float' object cannot be interpreted as an index
floatlen = float(len(display)) # prevent integer division in 2.7
cols = min(int(floor(sqrt(floatlen))), 8)
rows = int(ceil(floatlen/cols))
plt.tight_layout()
plt.subplots(
rows,
cols,
figsize=(cols*2, rows*2) if not figsize else figsize,
**kwargs)
for i in range(rows*cols):
axes = plt.subplot(rows, cols, i + 1) # subplots start at 1, not 0
axes.axis('off')
if i < len(display):
(name, frag, meta) = display[i]
plt.title(name[:titlelen] + ELLIPSIS if len(name) > titlelen else name)
filepath = meta.get('_system', {}).get('filepath', None)
_, ext = splitext_no_dot(filepath)
try:
bits = mpimg.imread(frag, format=ext)
plt.imshow(bits)
# Mac throws OSError, Linux IOError if file not recognizable
except (IOError, OSError) as err:
print('{}: {}'.format(name, str(err)))
continue
return _plot
|
f1b72c952d1c517ba4f09e03af8463a73d2c8759
| 18,290 |
def tresize(tombfile, keyfile, passphrase, newsize):
"""
Resize a tomb.
Keyfile, passphrase and new size are needed.
"""
cmd = ['tomb',
'resize',
tombfile,
'-k',
keyfile,
'--unsafe',
'--tomb-pwd',
sanitize_passphrase(passphrase),
'-s',
str(newsize),
'--no-color']
return execute(cmd)
|
334a722b79aec80bc4a95c67a0b155653e29eb10
| 18,291 |
import numpy as np
import matplotlib.pyplot as plt

def auto_z_levels(fid, x, y, variable, t_idx, n_cont, n_dec):
    """
    z_levs = auto_z_levels(fid, x, y, variable, t_idx, n_cont, n_dec)

    Draw throwaway contour plots of `variable` for each time index in
    `t_idx`, collect the automatically chosen contour levels, round them
    to `n_dec` decimals, and return the sorted unique levels as an array.
    """
fig, ax = plt.subplots()
z_levs = np.ndarray(0)
for i in t_idx:
data = fid.variables[variable][i]
cp = ax.contour(x, y, data, n_cont)
z_levs = np.concatenate((z_levs, cp.levels), axis=0)
z_levs = np.sort(np.unique(np.around(z_levs, n_dec)))
plt.close(fig)
return z_levs
|
f80020c01a661412fb79d23f6081bdb94a471102
| 18,292 |
def canonicalize(curie: str):
"""Return the best CURIE."""
# TODO maybe normalize the curie first?
norm_prefix, norm_identifier = normalize_curie(curie)
if norm_prefix is None or norm_identifier is None:
return jsonify(
query=curie,
normalizable=False,
)
norm_curie = f'{norm_prefix}:{norm_identifier}'
rv = dict(query=curie)
if norm_curie != curie:
rv['norm_curie'] = norm_curie
if norm_curie not in canonicalizer.graph:
rv['found'] = False
else:
result_curie = canonicalizer.canonicalize(norm_curie)
rv.update(
found=True,
result=result_curie,
mappings=url_for(
f'.{all_mappings.__name__}',
source_curie=norm_curie,
target_curie=result_curie,
),
)
return jsonify(rv)
|
510b2d10170c674cc24090bf2bcd900912678acf
| 18,293 |
def _DefaultValueConstructorForField(field):
"""Returns a function which returns a default value for a field.
Args:
field: FieldDescriptor object for this field.
The returned function has one argument:
message: Message instance containing this field, or a weakref proxy
of same.
That function in turn returns a default value for this field. The default
value may refer back to |message| via a weak reference.
"""
if _IsMapField(field):
return _GetInitializeDefaultForMap(field)
if field.label == _FieldDescriptor.LABEL_REPEATED:
if field.has_default_value and field.default_value != []:
raise ValueError('Repeated field default value not empty list: %s' % (
field.default_value))
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
message_type = field.message_type
def MakeRepeatedMessageDefault(message):
return containers.RepeatedCompositeFieldContainer(
message._listener_for_children, field.message_type)
return MakeRepeatedMessageDefault
else:
type_checker = type_checkers.GetTypeChecker(field)
def MakeRepeatedScalarDefault(message):
return containers.RepeatedScalarFieldContainer(
message._listener_for_children, type_checker)
return MakeRepeatedScalarDefault
if field.cpp_type == _FieldDescriptor.CPPTYPE_MESSAGE:
message_type = field.message_type
def MakeSubMessageDefault(message):
assert getattr(message_type, '_concrete_class', None), (
'Uninitialized concrete class found for field %r (message type %r)'
% (field.full_name, message_type.full_name))
result = message_type._concrete_class()
result._SetListener(
_OneofListener(message, field)
if field.containing_oneof is not None
else message._listener_for_children)
return result
return MakeSubMessageDefault
def MakeScalarDefault(message):
return field.default_value
return MakeScalarDefault
|
3a468e2850aaf9707ee1229eeb009ef5c013f1b6
| 18,294 |
from nltk.tokenize import RegexpTokenizer

def clean_text(dirty_text):
"""
Given a string, this function tokenizes the words of that string.
:param dirty_text: string
:return: list
input = "American artist accomplishments american"
    output (order not guaranteed) = ['accomplishments', 'american', 'artist']
"""
lower_dirty_text = dirty_text.lower()
regex_pattern = r"[\w']+"
tokenizer = RegexpTokenizer(regex_pattern)
tokens = tokenizer.tokenize(lower_dirty_text)
unique_tokens = list(set(tokens))
return unique_tokens
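# Hedged sketch: output order is arbitrary because of the set() pass, so
# compare as sorted lists.
tokens = clean_text("American artist accomplishments american")
assert sorted(tokens) == ["accomplishments", "american", "artist"]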
|
1df63ea0c9be5a518d2fd1f931772080962f878f
| 18,295 |
def GetCurrentUserController(AuthJSONController):
""" Return the CurrentUserController in the proper scope """
class CurrentUserController(AuthJSONController):
""" Controller to return the currently signed in user """
def __init__(self, toJson):
""" Initialize with the Json Converter """
self.toJson = toJson
AuthJSONController.__init__(self)
def performWithJSON(self, json=None, user=None):
""" Convert the existing Word Lists to JSON """
if user:
return {'user':self.toJson(user)}
return Errors.NO_USER.toJSON()
return CurrentUserController
|
ee710cd4d65982cf01d17fba130b7bb83dffd617
| 18,296 |
import numpy
def fft_in_range(audiomatrix, startindex, endindex, channel):
"""
Do an FFT in the specified range of indices
The audiomatrix should have the first index as its time domain and
    second index as the channel number. The startindex and endindex
select the time range to use, and the channel parameter selects
which channel to do the FFT on.
Returns a vector of data in the frequency domain
"""
n = endindex - startindex
indat = audiomatrix[startindex:endindex, channel]
outdat = (numpy.fft.fft(indat)[range(n//2)])/n
return outdat
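# Hedged sketch: a pure 50 Hz tone sampled at 1 kHz peaks at bin 50.
fs, n = 1000, 1000
t = numpy.arange(n) / fs
sig = numpy.sin(2 * numpy.pi * 50 * t)
mat = sig[:, None]   # single-channel "audiomatrix"
spectrum = fft_in_range(mat, 0, n, 0)
assert numpy.argmax(numpy.abs(spectrum[1:])) + 1 == 50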
|
30ce104795d0809f054439ba32f47d33528ecbff
| 18,297 |
import numpy as np

def drop_arrays_by_name(gt_names, used_classes):
"""Drop irrelevant ground truths by name.
Args:
gt_names (list[str]): Names of ground truths.
used_classes (list[str]): Classes of interest.
Returns:
np.ndarray: Indices of ground truths that will be dropped.
"""
inds = [i for i, x in enumerate(gt_names) if x not in used_classes]
inds = np.array(inds, dtype=np.int64)
return inds
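# Hedged sketch: names outside the class list are flagged for dropping.
names = ["Car", "Pedestrian", "Sign"]
assert drop_arrays_by_name(names, ["Car", "Pedestrian"]).tolist() == [2]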
|
67d711ae61f3c833fa9e8b33d4bf4bf6d99a34ad
| 18,298 |
def get_data_table_metas(data_table_name, data_table_namespace):
"""
Gets metas from meta table associated with table named `data_table_name` and namespaced `data_table_namespace`.
Parameters
---------
data_table_name : string
table name of this data table
data_table_namespace : string
        table namespace of this data table
Returns
-------
dict
metas
Examples
--------
>>> from common.python import session
>>> session.get_data_table_metas("meta", "readme") # {'model_id': 'a_id', 'used_framework': 'fate'}
"""
return RuntimeInstance.SESSION.get_data_table_metas(data_table_name=data_table_name,
data_table_namespace=data_table_namespace)
|
8b4ee249112d399c429a33fed82d9cb01404d441
| 18,299 |