| code (string, 75-104k chars) | docstring (string, 1-46.9k chars) | text (string, 164-112k chars) |
|---|---|---|
def makeStickyEdataFile(Economy,ignore_periods,description='',filename=None,save_data=False,calc_micro_stats=True,meas_err_base=None):
'''
Makes descriptive statistics and macroeconomic data file. Behaves slightly
differently for heterogeneous agents vs representative agent models.
Parameters
----------
Economy : Market or AgentType
A representation of the model economy. For heterogeneous agents specifications,
this will be an instance of a subclass of Market. For representative agent
specifications, this will be an instance of an AgentType subclass.
ignore_periods : int
Number of periods at the start of the simulation to throw out.
description : str
Description of the economy that is prepended on the output string.
filename : str
Name of the output log file, if any; .txt will be appended automatically.
save_data : bool
When True, save simulation data to filename + 'Data.txt' for use in Stata.
calc_micro_stats : bool
When True, calculate microeconomic statistics like in Table 2 of the
paper draft.
meas_err_base : float or None
Base value of measurement error standard deviation, which will be adjusted.
When None (default), value is calculated as stdev(DeltaLogC).
Returns
-------
None
'''
# Extract time series data from the economy
if hasattr(Economy,'agents'): # If this is a heterogeneous agent specification...
if len(Economy.agents) > 1:
pLvlAll_hist = np.concatenate([this_type.pLvlTrue_hist for this_type in Economy.agents],axis=1)
aLvlAll_hist = np.concatenate([this_type.aLvlNow_hist for this_type in Economy.agents],axis=1)
cLvlAll_hist = np.concatenate([this_type.cLvlNow_hist for this_type in Economy.agents],axis=1)
yLvlAll_hist = np.concatenate([this_type.yLvlNow_hist for this_type in Economy.agents],axis=1)
else: # Don't duplicate the data unless necessary (with one type, concatenating is useless)
pLvlAll_hist = Economy.agents[0].pLvlTrue_hist
aLvlAll_hist = Economy.agents[0].aLvlNow_hist
cLvlAll_hist = Economy.agents[0].cLvlNow_hist
yLvlAll_hist = Economy.agents[0].yLvlNow_hist
# PermShkAggHist needs to be shifted one period forward
PlvlAgg_hist = np.cumprod(np.concatenate(([1.0],Economy.PermShkAggHist[:-1]),axis=0))
AlvlAgg_hist = np.mean(aLvlAll_hist,axis=1) # Level of aggregate assets
AnrmAgg_hist = AlvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate assets
ClvlAgg_hist = np.mean(cLvlAll_hist,axis=1) # Level of aggregate consumption
CnrmAgg_hist = ClvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate consumption
YlvlAgg_hist = np.mean(yLvlAll_hist,axis=1) # Level of aggregate income
YnrmAgg_hist = YlvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate income
if calc_micro_stats: # Only calculate stats if requested. This is a memory hog with many simulated periods
micro_stat_periods = int((Economy.agents[0].T_sim-ignore_periods)*0.1)
not_newborns = (np.concatenate([this_type.t_age_hist[(ignore_periods+1):(ignore_periods+micro_stat_periods),:] for this_type in Economy.agents],axis=1) > 1).flatten()
Logc = np.log(cLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
DeltaLogc = (Logc[1:] - Logc[0:-1]).flatten()
DeltaLogc_trimmed = DeltaLogc[not_newborns]
Loga = np.log(aLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
DeltaLoga = (Loga[1:] - Loga[0:-1]).flatten()
DeltaLoga_trimmed = DeltaLoga[not_newborns]
Logp = np.log(pLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
DeltaLogp = (Logp[1:] - Logp[0:-1]).flatten()
DeltaLogp_trimmed = DeltaLogp[not_newborns]
Logy = np.log(yLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
Logy_trimmed = Logy
Logy_trimmed[np.isinf(Logy)] = np.nan
birth_events = np.concatenate([this_type.t_age_hist == 1 for this_type in Economy.agents],axis=1)
vBirth = calcValueAtBirth(cLvlAll_hist[ignore_periods:,:],birth_events[ignore_periods:,:],PlvlAgg_hist[ignore_periods:],Economy.MrkvNow_hist[ignore_periods:],Economy.agents[0].DiscFac,Economy.agents[0].CRRA)
BigTheta_hist = Economy.TranShkAggHist
if hasattr(Economy,'MrkvNow'):
Mrkv_hist = Economy.MrkvNow_hist
if not hasattr(Economy,'Rfree'): # If this is a markov DSGE specification...
# Find the expected interest rate - approximate by assuming growth = expected growth
ExpectedGrowth_hist = Economy.PermGroFacAgg[Mrkv_hist]
ExpectedKLRatio_hist = AnrmAgg_hist/ExpectedGrowth_hist
ExpectedR_hist = Economy.Rfunc(ExpectedKLRatio_hist)
else: # If this is a representative agent specification...
PlvlAgg_hist = Economy.pLvlTrue_hist.flatten()
ClvlAgg_hist = Economy.cLvlNow_hist.flatten()
CnrmAgg_hist = ClvlAgg_hist/PlvlAgg_hist.flatten()
YnrmAgg_hist = Economy.yNrmTrue_hist.flatten()
YlvlAgg_hist = YnrmAgg_hist*PlvlAgg_hist.flatten()
AlvlAgg_hist = Economy.aLvlNow_hist.flatten()
AnrmAgg_hist = AlvlAgg_hist/PlvlAgg_hist.flatten()
BigTheta_hist = Economy.TranShkNow_hist.flatten()
if hasattr(Economy,'MrkvNow'):
Mrkv_hist = Economy.MrkvNow_hist
# Process aggregate data into forms used by regressions
LogC = np.log(ClvlAgg_hist[ignore_periods:])
LogA = np.log(AlvlAgg_hist[ignore_periods:])
LogY = np.log(YlvlAgg_hist[ignore_periods:])
DeltaLogC = LogC[1:] - LogC[0:-1]
DeltaLogA = LogA[1:] - LogA[0:-1]
DeltaLogY = LogY[1:] - LogY[0:-1]
A = AnrmAgg_hist[(ignore_periods+1):] # This is a relabeling for the regression code
BigTheta = BigTheta_hist[(ignore_periods+1):]
if hasattr(Economy,'MrkvNow'):
Mrkv = Mrkv_hist[(ignore_periods+1):] # This is a relabeling for the regression code
if not hasattr(Economy,'Rfree') and hasattr(Economy,'agents'): # If this is a markov DSGE specification...
R = ExpectedR_hist[(ignore_periods+1):]
Delta8LogC = (np.log(ClvlAgg_hist[8:]) - np.log(ClvlAgg_hist[:-8]))[(ignore_periods-7):]
Delta8LogY = (np.log(YlvlAgg_hist[8:]) - np.log(YlvlAgg_hist[:-8]))[(ignore_periods-7):]
# Add measurement error to LogC
if meas_err_base is None:
meas_err_base = np.std(DeltaLogC)
sigma_meas_err = meas_err_base*0.375 # This approximately matches the change in IV vs OLS in U.S. empirical coefficients
np.random.seed(10)
Measurement_Error = sigma_meas_err*np.random.normal(0.,1.,LogC.size)
LogC_me = LogC + Measurement_Error
DeltaLogC_me = LogC_me[1:] - LogC_me[0:-1]
# Apply measurement error to long delta LogC
LogC_long = np.log(ClvlAgg_hist)
LogC_long_me = LogC_long + sigma_meas_err*np.random.normal(0.,1.,LogC_long.size)
Delta8LogC_me = (LogC_long_me[8:] - LogC_long_me[:-8])[(ignore_periods-7):]
# Make summary statistics for the results file
csv_output_string = str(np.mean(AnrmAgg_hist[ignore_periods:])) +","+ str(np.mean(CnrmAgg_hist[ignore_periods:]))+ ","+str(np.std(np.log(AnrmAgg_hist[ignore_periods:])))+ ","+str(np.std(DeltaLogC))+ ","+str(np.std(DeltaLogY)) +","+ str(np.std(DeltaLogA))
if hasattr(Economy,'agents') and calc_micro_stats: # This block only runs for heterogeneous agents specifications
csv_output_string += ","+str(np.mean(np.std(Loga,axis=1)))+ ","+str(np.mean(np.std(Logc,axis=1))) + ","+str(np.mean(np.std(Logp,axis=1))) +","+ str(np.mean(np.nanstd(Logy_trimmed,axis=1))) +","+ str(np.std(DeltaLoga_trimmed))+","+ str(np.std(DeltaLogc_trimmed))+ ","+str(np.std(DeltaLogp_trimmed))
# Save the results to a logfile if requested
if filename is not None:
with open(results_dir + filename + 'Results.csv','w') as f:
f.write(csv_output_string)
f.close()
if calc_micro_stats and hasattr(Economy,'agents'):
with open(results_dir + filename + 'BirthValue.csv','w') as f:
my_writer = csv.writer(f, delimiter = ',')
my_writer.writerow(vBirth)
f.close()
if save_data:
DataArray = (np.vstack((np.arange(DeltaLogC.size),DeltaLogC_me,DeltaLogC,DeltaLogY,A,BigTheta,Delta8LogC,Delta8LogY,Delta8LogC_me,Measurement_Error[1:]))).transpose()
VarNames = ['time_period','DeltaLogC_me','DeltaLogC','DeltaLogY','A','BigTheta','Delta8LogC','Delta8LogY','Delta8LogC_me','Measurement_Error']
if hasattr(Economy,'MrkvNow'):
DataArray = np.hstack((DataArray,np.reshape(Mrkv,(Mrkv.size,1))))
VarNames.append('MrkvState')
if hasattr(Economy,'MrkvNow') and not hasattr(Economy,'Rfree') and hasattr(Economy,'agents'):
DataArray = np.hstack((DataArray,np.reshape(R,(R.size,1))))
VarNames.append('R')
with open(results_dir + filename + 'Data.txt','w') as f:
my_writer = csv.writer(f, delimiter = '\t')
my_writer.writerow(VarNames)
for i in range(DataArray.shape[0]):
my_writer.writerow(DataArray[i,:])
f.close() | Makes descriptive statistics and macroeconomic data file. Behaves slightly
differently for heterogeneous agents vs representative agent models.
Parameters
----------
Economy : Market or AgentType
A representation of the model economy. For heterogeneous agents specifications,
this will be an instance of a subclass of Market. For representative agent
specifications, this will be an instance of an AgentType subclass.
ignore_periods : int
Number of periods at the start of the simulation to throw out.
description : str
Description of the economy that is prepended on the output string.
filename : str
Name of the output log file, if any; .txt will be appended automatically.
save_data : bool
When True, save simulation data to filename + 'Data.txt' for use in Stata.
calc_micro_stats : bool
When True, calculate microeconomic statistics like in Table 2 of the
paper draft.
meas_err_base : float or None
Base value of measurement error standard deviation, which will be adjusted.
When None (default), value is calculated as stdev(DeltaLogC).
Returns
-------
None | Below is the instruction that describes the task:
### Input:
Makes descriptive statistics and macroeconomic data file. Behaves slightly
differently for heterogeneous agents vs representative agent models.
Parameters
----------
Economy : Market or AgentType
A representation of the model economy. For heterogeneous agents specifications,
this will be an instance of a subclass of Market. For representative agent
specifications, this will be an instance of an AgentType subclass.
ignore_periods : int
Number of periods at the start of the simulation to throw out.
description : str
Description of the economy that is prepended on the output string.
filename : str
Name of the output log file, if any; .txt will be appended automatically.
save_data : bool
When True, save simulation data to filename + 'Data.txt' for use in Stata.
calc_micro_stats : bool
When True, calculate microeconomic statistics like in Table 2 of the
paper draft.
meas_err_base : float or None
Base value of measurement error standard deviation, which will be adjusted.
When None (default), value is calculated as stdev(DeltaLogC).
Returns
-------
None
### Response:
def makeStickyEdataFile(Economy,ignore_periods,description='',filename=None,save_data=False,calc_micro_stats=True,meas_err_base=None):
'''
Makes descriptive statistics and macroeconomic data file. Behaves slightly
differently for heterogeneous agents vs representative agent models.
Parameters
----------
Economy : Market or AgentType
A representation of the model economy. For heterogeneous agents specifications,
this will be an instance of a subclass of Market. For representative agent
specifications, this will be an instance of an AgentType subclass.
ignore_periods : int
Number of periods at the start of the simulation to throw out.
description : str
Description of the economy that is prepended on the output string.
filename : str
Name of the output log file, if any; .txt will be appended automatically.
save_data : bool
When True, save simulation data to filename + 'Data.txt' for use in Stata.
calc_micro_stats : bool
When True, calculate microeconomic statistics like in Table 2 of the
paper draft.
meas_err_base : float or None
Base value of measurement error standard deviation, which will be adjusted.
When None (default), value is calculated as stdev(DeltaLogC).
Returns
-------
None
'''
# Extract time series data from the economy
if hasattr(Economy,'agents'): # If this is a heterogeneous agent specification...
if len(Economy.agents) > 1:
pLvlAll_hist = np.concatenate([this_type.pLvlTrue_hist for this_type in Economy.agents],axis=1)
aLvlAll_hist = np.concatenate([this_type.aLvlNow_hist for this_type in Economy.agents],axis=1)
cLvlAll_hist = np.concatenate([this_type.cLvlNow_hist for this_type in Economy.agents],axis=1)
yLvlAll_hist = np.concatenate([this_type.yLvlNow_hist for this_type in Economy.agents],axis=1)
else: # Don't duplicate the data unless necessary (with one type, concatenating is useless)
pLvlAll_hist = Economy.agents[0].pLvlTrue_hist
aLvlAll_hist = Economy.agents[0].aLvlNow_hist
cLvlAll_hist = Economy.agents[0].cLvlNow_hist
yLvlAll_hist = Economy.agents[0].yLvlNow_hist
# PermShkAggHist needs to be shifted one period forward
PlvlAgg_hist = np.cumprod(np.concatenate(([1.0],Economy.PermShkAggHist[:-1]),axis=0))
AlvlAgg_hist = np.mean(aLvlAll_hist,axis=1) # Level of aggregate assets
AnrmAgg_hist = AlvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate assets
ClvlAgg_hist = np.mean(cLvlAll_hist,axis=1) # Level of aggregate consumption
CnrmAgg_hist = ClvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate consumption
YlvlAgg_hist = np.mean(yLvlAll_hist,axis=1) # Level of aggregate income
YnrmAgg_hist = YlvlAgg_hist/PlvlAgg_hist # Normalized level of aggregate income
if calc_micro_stats: # Only calculate stats if requested. This is a memory hog with many simulated periods
micro_stat_periods = int((Economy.agents[0].T_sim-ignore_periods)*0.1)
not_newborns = (np.concatenate([this_type.t_age_hist[(ignore_periods+1):(ignore_periods+micro_stat_periods),:] for this_type in Economy.agents],axis=1) > 1).flatten()
Logc = np.log(cLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
DeltaLogc = (Logc[1:] - Logc[0:-1]).flatten()
DeltaLogc_trimmed = DeltaLogc[not_newborns]
Loga = np.log(aLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
DeltaLoga = (Loga[1:] - Loga[0:-1]).flatten()
DeltaLoga_trimmed = DeltaLoga[not_newborns]
Logp = np.log(pLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
DeltaLogp = (Logp[1:] - Logp[0:-1]).flatten()
DeltaLogp_trimmed = DeltaLogp[not_newborns]
Logy = np.log(yLvlAll_hist[ignore_periods:(ignore_periods+micro_stat_periods),:])
Logy_trimmed = Logy
Logy_trimmed[np.isinf(Logy)] = np.nan
birth_events = np.concatenate([this_type.t_age_hist == 1 for this_type in Economy.agents],axis=1)
vBirth = calcValueAtBirth(cLvlAll_hist[ignore_periods:,:],birth_events[ignore_periods:,:],PlvlAgg_hist[ignore_periods:],Economy.MrkvNow_hist[ignore_periods:],Economy.agents[0].DiscFac,Economy.agents[0].CRRA)
BigTheta_hist = Economy.TranShkAggHist
if hasattr(Economy,'MrkvNow'):
Mrkv_hist = Economy.MrkvNow_hist
if not hasattr(Economy,'Rfree'): # If this is a markov DSGE specification...
# Find the expected interest rate - approximate by assuming growth = expected growth
ExpectedGrowth_hist = Economy.PermGroFacAgg[Mrkv_hist]
ExpectedKLRatio_hist = AnrmAgg_hist/ExpectedGrowth_hist
ExpectedR_hist = Economy.Rfunc(ExpectedKLRatio_hist)
else: # If this is a representative agent specification...
PlvlAgg_hist = Economy.pLvlTrue_hist.flatten()
ClvlAgg_hist = Economy.cLvlNow_hist.flatten()
CnrmAgg_hist = ClvlAgg_hist/PlvlAgg_hist.flatten()
YnrmAgg_hist = Economy.yNrmTrue_hist.flatten()
YlvlAgg_hist = YnrmAgg_hist*PlvlAgg_hist.flatten()
AlvlAgg_hist = Economy.aLvlNow_hist.flatten()
AnrmAgg_hist = AlvlAgg_hist/PlvlAgg_hist.flatten()
BigTheta_hist = Economy.TranShkNow_hist.flatten()
if hasattr(Economy,'MrkvNow'):
Mrkv_hist = Economy.MrkvNow_hist
# Process aggregate data into forms used by regressions
LogC = np.log(ClvlAgg_hist[ignore_periods:])
LogA = np.log(AlvlAgg_hist[ignore_periods:])
LogY = np.log(YlvlAgg_hist[ignore_periods:])
DeltaLogC = LogC[1:] - LogC[0:-1]
DeltaLogA = LogA[1:] - LogA[0:-1]
DeltaLogY = LogY[1:] - LogY[0:-1]
A = AnrmAgg_hist[(ignore_periods+1):] # This is a relabeling for the regression code
BigTheta = BigTheta_hist[(ignore_periods+1):]
if hasattr(Economy,'MrkvNow'):
Mrkv = Mrkv_hist[(ignore_periods+1):] # This is a relabeling for the regression code
if not hasattr(Economy,'Rfree') and hasattr(Economy,'agents'): # If this is a markov DSGE specification...
R = ExpectedR_hist[(ignore_periods+1):]
Delta8LogC = (np.log(ClvlAgg_hist[8:]) - np.log(ClvlAgg_hist[:-8]))[(ignore_periods-7):]
Delta8LogY = (np.log(YlvlAgg_hist[8:]) - np.log(YlvlAgg_hist[:-8]))[(ignore_periods-7):]
# Add measurement error to LogC
if meas_err_base is None:
meas_err_base = np.std(DeltaLogC)
sigma_meas_err = meas_err_base*0.375 # This approximately matches the change in IV vs OLS in U.S. empirical coefficients
np.random.seed(10)
Measurement_Error = sigma_meas_err*np.random.normal(0.,1.,LogC.size)
LogC_me = LogC + Measurement_Error
DeltaLogC_me = LogC_me[1:] - LogC_me[0:-1]
# Apply measurement error to long delta LogC
LogC_long = np.log(ClvlAgg_hist)
LogC_long_me = LogC_long + sigma_meas_err*np.random.normal(0.,1.,LogC_long.size)
Delta8LogC_me = (LogC_long_me[8:] - LogC_long_me[:-8])[(ignore_periods-7):]
# Make summary statistics for the results file
csv_output_string = str(np.mean(AnrmAgg_hist[ignore_periods:])) +","+ str(np.mean(CnrmAgg_hist[ignore_periods:]))+ ","+str(np.std(np.log(AnrmAgg_hist[ignore_periods:])))+ ","+str(np.std(DeltaLogC))+ ","+str(np.std(DeltaLogY)) +","+ str(np.std(DeltaLogA))
if hasattr(Economy,'agents') and calc_micro_stats: # This block only runs for heterogeneous agents specifications
csv_output_string += ","+str(np.mean(np.std(Loga,axis=1)))+ ","+str(np.mean(np.std(Logc,axis=1))) + ","+str(np.mean(np.std(Logp,axis=1))) +","+ str(np.mean(np.nanstd(Logy_trimmed,axis=1))) +","+ str(np.std(DeltaLoga_trimmed))+","+ str(np.std(DeltaLogc_trimmed))+ ","+str(np.std(DeltaLogp_trimmed))
# Save the results to a logfile if requested
if filename is not None:
with open(results_dir + filename + 'Results.csv','w') as f:
f.write(csv_output_string)
f.close()
if calc_micro_stats and hasattr(Economy,'agents'):
with open(results_dir + filename + 'BirthValue.csv','w') as f:
my_writer = csv.writer(f, delimiter = ',')
my_writer.writerow(vBirth)
f.close()
if save_data:
DataArray = (np.vstack((np.arange(DeltaLogC.size),DeltaLogC_me,DeltaLogC,DeltaLogY,A,BigTheta,Delta8LogC,Delta8LogY,Delta8LogC_me,Measurement_Error[1:]))).transpose()
VarNames = ['time_period','DeltaLogC_me','DeltaLogC','DeltaLogY','A','BigTheta','Delta8LogC','Delta8LogY','Delta8LogC_me','Measurement_Error']
if hasattr(Economy,'MrkvNow'):
DataArray = np.hstack((DataArray,np.reshape(Mrkv,(Mrkv.size,1))))
VarNames.append('MrkvState')
if hasattr(Economy,'MrkvNow') and not hasattr(Economy,'Rfree') and hasattr(Economy,'agents'):
DataArray = np.hstack((DataArray,np.reshape(R,(R.size,1))))
VarNames.append('R')
with open(results_dir + filename + 'Data.txt','w') as f:
my_writer = csv.writer(f, delimiter = '\t')
my_writer.writerow(VarNames)
for i in range(DataArray.shape[0]):
my_writer.writerow(DataArray[i,:])
f.close() |
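The measurement-error step in the middle of `makeStickyEdataFile` is easy to lose inside the larger block, so here is a minimal, self-contained sketch of just that step. The `np.linspace` series standing in for aggregate consumption is made up for illustration; everything else mirrors the function above.

```python
import numpy as np

# Stand-in for the simulated log aggregate consumption series.
LogC = np.log(np.linspace(1.0, 1.5, 50))
DeltaLogC = LogC[1:] - LogC[:-1]

# Default base when meas_err_base is None, then the same 0.375 scaling.
meas_err_base = np.std(DeltaLogC)
sigma_meas_err = meas_err_base * 0.375

np.random.seed(10)  # fixed seed, as in the function
Measurement_Error = sigma_meas_err * np.random.normal(0., 1., LogC.size)
LogC_me = LogC + Measurement_Error
DeltaLogC_me = LogC_me[1:] - LogC_me[:-1]
print(DeltaLogC_me[:3])
```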
def widgetForName(self, name):
"""Gets a widget with *name*
:param name: the widgets in this container should all have
a name() method. This is the string to match to that result
:type name: str
"""
for iwidget in range(len(self)):
if self.widget(iwidget).name() == name:
return self.widget(iwidget) | Gets a widget with *name*
:param name: the widgets in this container should all have
a name() method. This is the string to match to that result
:type name: str | Below is the instruction that describes the task:
### Input:
Gets a widget with *name*
:param name: the widgets in this container should all have
a name() method. This is the string to match to that result
:type name: str
### Response:
def widgetForName(self, name):
"""Gets a widget with *name*
:param name: the widgets in this container should all have
a name() method. This is the string to match to that result
:type name: str
"""
for iwidget in range(len(self)):
if self.widget(iwidget).name() == name:
return self.widget(iwidget) |
def SetValue(self, value=None, act=True):
" main method to set value "
if value is None:
value = wx.TextCtrl.GetValue(self).strip()
self.__CheckValid(value)
self.__GetMark()
if value is not None:
wx.TextCtrl.SetValue(self, self.format % set_float(value))
if self.is_valid and hasattr(self.__action, '__call__') and act:
self.__action(value=self.__val)
elif not self.is_valid and self.bell_on_invalid:
wx.Bell()
self.__SetMark() | main method to set value | Below is the instruction that describes the task:
### Input:
main method to set value
### Response:
def SetValue(self, value=None, act=True):
" main method to set value "
if value is None:
value = wx.TextCtrl.GetValue(self).strip()
self.__CheckValid(value)
self.__GetMark()
if value is not None:
wx.TextCtrl.SetValue(self, self.format % set_float(value))
if self.is_valid and hasattr(self.__action, '__call__') and act:
self.__action(value=self.__val)
elif not self.is_valid and self.bell_on_invalid:
wx.Bell()
self.__SetMark() |
def peer_ips(peer_relation='cluster', addr_key='private-address'):
'''Return a dict of peers and their private-address'''
peers = {}
for r_id in relation_ids(peer_relation):
for unit in relation_list(r_id):
peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
return peers | Return a dict of peers and their private-address | Below is the instruction that describes the task:
### Input:
Return a dict of peers and their private-address
### Response:
def peer_ips(peer_relation='cluster', addr_key='private-address'):
'''Return a dict of peers and their private-address'''
peers = {}
for r_id in relation_ids(peer_relation):
for unit in relation_list(r_id):
peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
return peers |
def vmdk_to_ami(args):
"""
Calls methods to perform vmdk import
:param args:
:return:
"""
aws_importer = AWSUtilities.AWSUtils(args.directory, args.aws_profile, args.s3_bucket,
args.aws_regions, args.ami_name, args.vmdk_upload_file)
aws_importer.import_vmdk() | Calls methods to perform vmdk import
:param args:
:return: | Below is the instruction that describes the task:
### Input:
Calls methods to perform vmdk import
:param args:
:return:
### Response:
def vmdk_to_ami(args):
"""
Calls methods to perform vmdk import
:param args:
:return:
"""
aws_importer = AWSUtilities.AWSUtils(args.directory, args.aws_profile, args.s3_bucket,
args.aws_regions, args.ami_name, args.vmdk_upload_file)
aws_importer.import_vmdk() |
def create_relationship(manager, handle_id, other_handle_id, rel_type):
"""
Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception.
"""
meta_type = get_node_meta_type(manager, handle_id)
if meta_type == 'Location':
return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Logical':
return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Relation':
return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Physical':
return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
other_meta_type = get_node_meta_type(manager, other_handle_id)
raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type) | Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception. | Below is the instruction that describes the task:
### Input:
Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception.
### Response:
def create_relationship(manager, handle_id, other_handle_id, rel_type):
"""
Makes a relationship from node to other_node depending on which
meta_type the nodes are. Returns the relationship or raises
NoRelationshipPossible exception.
"""
meta_type = get_node_meta_type(manager, handle_id)
if meta_type == 'Location':
return create_location_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Logical':
return create_logical_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Relation':
return create_relation_relationship(manager, handle_id, other_handle_id, rel_type)
elif meta_type == 'Physical':
return create_physical_relationship(manager, handle_id, other_handle_id, rel_type)
other_meta_type = get_node_meta_type(manager, other_handle_id)
raise exceptions.NoRelationshipPossible(handle_id, meta_type, other_handle_id, other_meta_type, rel_type) |
def isLoggedOn(rh, userid):
"""
Determine whether a virtual machine is logged on.
Input:
Request Handle:
userid being queried
Output:
Dictionary containing the following:
overallRC - overall return code, 0: success, non-zero: failure
rc - 0: if we got status. Otherwise, it is the
error return code from the commands issued.
rs - Based on rc value. For rc==0, rs is:
0: if we determined it is logged on.
1: if we determined it is logged off.
"""
rh.printSysLog("Enter vmUtils.isLoggedOn, userid: " + userid)
results = {
'overallRC': 0,
'rc': 0,
'rs': 0,
}
cmd = ["sudo", "/sbin/vmcp", "query", "user", userid]
strCmd = ' '.join(cmd)
rh.printSysLog("Invoking: " + strCmd)
try:
subprocess.check_output(
cmd,
close_fds=True,
stderr=subprocess.STDOUT)
except CalledProcessError as e:
search_pattern = '(^HCP\w\w\w045E|^HCP\w\w\w361E)'.encode()
match = re.search(search_pattern, e.output)
if match:
# Not logged on
results['rs'] = 1
else:
# Abnormal failure
rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
e.returncode, e.output))
results = msgs.msg['0415'][0]
results['rs'] = e.returncode
except Exception as e:
# All other exceptions.
results = msgs.msg['0421'][0]
rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
type(e).__name__, str(e)))
rh.printSysLog("Exit vmUtils.isLoggedOn, overallRC: " +
str(results['overallRC']) + " rc: " + str(results['rc']) +
" rs: " + str(results['rs']))
return results | Determine whether a virtual machine is logged on.
Input:
Request Handle:
userid being queried
Output:
Dictionary containing the following:
overallRC - overall return code, 0: success, non-zero: failure
rc - 0: if we got status. Otherwise, it is the
error return code from the commands issued.
rs - Based on rc value. For rc==0, rs is:
0: if we determined it is logged on.
1: if we determined it is logged off. | Below is the instruction that describes the task:
### Input:
Determine whether a virtual machine is logged on.
Input:
Request Handle:
userid being queried
Output:
Dictionary containing the following:
overallRC - overall return code, 0: success, non-zero: failure
rc - 0: if we got status. Otherwise, it is the
error return code from the commands issued.
rs - Based on rc value. For rc==0, rs is:
0: if we determined it is logged on.
1: if we determined it is logged off.
### Response:
def isLoggedOn(rh, userid):
"""
Determine whether a virtual machine is logged on.
Input:
Request Handle:
userid being queried
Output:
Dictionary containing the following:
overallRC - overall return code, 0: success, non-zero: failure
rc - 0: if we got status. Otherwise, it is the
error return code from the commands issued.
rs - Based on rc value. For rc==0, rs is:
0: if we determined it is logged on.
1: if we determined it is logged off.
"""
rh.printSysLog("Enter vmUtils.isLoggedOn, userid: " + userid)
results = {
'overallRC': 0,
'rc': 0,
'rs': 0,
}
cmd = ["sudo", "/sbin/vmcp", "query", "user", userid]
strCmd = ' '.join(cmd)
rh.printSysLog("Invoking: " + strCmd)
try:
subprocess.check_output(
cmd,
close_fds=True,
stderr=subprocess.STDOUT)
except CalledProcessError as e:
search_pattern = '(^HCP\w\w\w045E|^HCP\w\w\w361E)'.encode()
match = re.search(search_pattern, e.output)
if match:
# Not logged on
results['rs'] = 1
else:
# Abnormal failure
rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
e.returncode, e.output))
results = msgs.msg['0415'][0]
results['rs'] = e.returncode
except Exception as e:
# All other exceptions.
results = msgs.msg['0421'][0]
rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
type(e).__name__, str(e)))
rh.printSysLog("Exit vmUtils.isLoggedOn, overallRC: " +
str(results['overallRC']) + " rc: " + str(results['rc']) +
" rs: " + str(results['rs']))
return results |
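A standalone sketch of the same probe that `isLoggedOn` performs, stripped of the request-handle logging and message catalog. It assumes a Linux guest on z/VM with the `vmcp` tool installed; the userid in the comment is a placeholder.

```python
import re
import subprocess

def is_logged_on(userid):
    cmd = ["sudo", "/sbin/vmcp", "query", "user", userid]
    try:
        subprocess.check_output(cmd, close_fds=True, stderr=subprocess.STDOUT)
        return True  # query succeeded, so the guest is logged on
    except subprocess.CalledProcessError as e:
        # HCPxxx045E / HCPxxx361E indicate the guest is not logged on
        if re.search(br'(^HCP\w\w\w045E|^HCP\w\w\w361E)', e.output):
            return False
        raise  # any other failure is a real error

# is_logged_on('LINUX001')  # hypothetical userid
```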
def get_properties(self):
"""
Returns all the properties of the features layer (iterator)
@rtype: L{Cproperty}
@return: list of properties
"""
if self.features_layer is not None:
for property in self.features_layer.get_properties():
yield property | Returns all the properties of the features layer (iterator)
@rtype: L{Cproperty}
@return: list of properties | Below is the instruction that describes the task:
### Input:
Returns all the properties of the features layer (iterator)
@rtype: L{Cproperty}
@return: list of properties
### Response:
def get_properties(self):
"""
Returns all the properties of the features layer (iterator)
@rtype: L{Cproperty}
@return: list of properties
"""
if self.features_layer is not None:
for property in self.features_layer.get_properties():
yield property
def _on_close(self, socket):
"""
Called when the connection was closed.
"""
self.logger.debug('Connection closed.')
for subscription in self.subscriptions.values():
if subscription.state == 'subscribed':
subscription.state = 'connection_pending' | Called when the connection was closed. | Below is the instruction that describes the task:
### Input:
Called when the connection was closed.
### Response:
def _on_close(self, socket):
"""
Called when the connection was closed.
"""
self.logger.debug('Connection closed.')
for subscription in self.subscriptions.values():
if subscription.state == 'subscribed':
subscription.state = 'connection_pending' |
def xmlparser(xml, objectify=True):
""" Parse xml
:param xml: XML element
:type xml: Union[text_type, lxml.etree._Element]
:rtype: lxml.etree._Element
:returns: An element object
:raises: TypeError if element is not in accepted type
"""
doclose = None
if isinstance(xml, (etree._Element, ObjectifiedElement, etree._ElementTree)):
return xml
elif isinstance(xml, text_type):
xml = StringIO(xml)
doclose = True
elif not isinstance(xml, IOBase):
raise TypeError("Unsupported type of resource {}".format(type(xml)))
if objectify is False:
parsed = etree.parse(xml).getroot()
else:
parsed = parse(xml).getroot()
if doclose:
xml.close()
return parsed | Parse xml
:param xml: XML element
:type xml: Union[text_type, lxml.etree._Element]
:rtype: lxml.etree._Element
:returns: An element object
:raises: TypeError if element is not in accepted type | Below is the instruction that describes the task:
### Input:
Parse xml
:param xml: XML element
:type xml: Union[text_type, lxml.etree._Element]
:rtype: lxml.etree._Element
:returns: An element object
:raises: TypeError if element is not in accepted type
### Response:
def xmlparser(xml, objectify=True):
""" Parse xml
:param xml: XML element
:type xml: Union[text_type, lxml.etree._Element]
:rtype: lxml.etree._Element
:returns: An element object
:raises: TypeError if element is not in accepted type
"""
doclose = None
if isinstance(xml, (etree._Element, ObjectifiedElement, etree._ElementTree)):
return xml
elif isinstance(xml, text_type):
xml = StringIO(xml)
doclose = True
elif not isinstance(xml, IOBase):
raise TypeError("Unsupported type of resource {}".format(type(xml)))
if objectify is False:
parsed = etree.parse(xml).getroot()
else:
parsed = parse(xml).getroot()
if doclose:
xml.close()
return parsed |
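A quick usage sketch for `xmlparser`, assuming lxml is installed and the module's own imports (`etree`, `StringIO`, `parse`) are in scope; the XML snippet is invented for illustration.

```python
root = xmlparser("<TEI><text>hello</text></TEI>", objectify=False)
print(root.tag)               # 'TEI'
print(root.findtext("text"))  # 'hello'
```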
def software_breakpoint_set(self, addr, thumb=False, arm=False, flash=False, ram=False):
"""Sets a software breakpoint at the specified address.
If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if
``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a
normal breakpoint is set.
If ``flash`` is ``True``, the breakpoint is set in flash, otherwise if
``ram`` is ``True``, the breakpoint is set in RAM. If both are
``True`` or both are ``False``, then the best option is chosen for
setting the breakpoint in software.
Args:
self (JLink): the ``JLink`` instance
addr (int): the address where the breakpoint will be set
thumb (bool): boolean indicating to set the breakpoint in THUMB mode
arm (bool): boolean indicating to set the breakpoint in ARM mode
flash (bool): boolean indicating to set the breakpoint in flash
ram (bool): boolean indicating to set the breakpoint in RAM
Returns:
An integer specifying the breakpoint handle. This handle should be
retained for future breakpoint operations.
Raises:
TypeError: if the given address is not an integer.
JLinkException: if the breakpoint could not be set.
"""
if flash and not ram:
flags = enums.JLinkBreakpoint.SW_FLASH
elif not flash and ram:
flags = enums.JLinkBreakpoint.SW_RAM
else:
flags = enums.JLinkBreakpoint.SW
if thumb:
flags = flags | enums.JLinkBreakpoint.THUMB
elif arm:
flags = flags | enums.JLinkBreakpoint.ARM
handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)
if handle <= 0:
raise errors.JLinkException('Software breakpoint could not be set.')
return handle | Sets a software breakpoint at the specified address.
If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if
``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a
normal breakpoint is set.
If ``flash`` is ``True``, the breakpoint is set in flash, otherwise if
``ram`` is ``True``, the breakpoint is set in RAM. If both are
``True`` or both are ``False``, then the best option is chosen for
setting the breakpoint in software.
Args:
self (JLink): the ``JLink`` instance
addr (int): the address where the breakpoint will be set
thumb (bool): boolean indicating to set the breakpoint in THUMB mode
arm (bool): boolean indicating to set the breakpoint in ARM mode
flash (bool): boolean indicating to set the breakpoint in flash
ram (bool): boolean indicating to set the breakpoint in RAM
Returns:
An integer specifying the breakpoint handle. This handle should be
retained for future breakpoint operations.
Raises:
TypeError: if the given address is not an integer.
JLinkException: if the breakpoint could not be set. | Below is the instruction that describes the task:
### Input:
Sets a software breakpoint at the specified address.
If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if
``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a
normal breakpoint is set.
If ``flash`` is ``True``, the breakpoint is set in flash, otherwise if
``ram`` is ``True``, the breakpoint is set in RAM. If both are
``True`` or both are ``False``, then the best option is chosen for
setting the breakpoint in software.
Args:
self (JLink): the ``JLink`` instance
addr (int): the address where the breakpoint will be set
thumb (bool): boolean indicating to set the breakpoint in THUMB mode
arm (bool): boolean indicating to set the breakpoint in ARM mode
flash (bool): boolean indicating to set the breakpoint in flash
ram (bool): boolean indicating to set the breakpoint in RAM
Returns:
An integer specifying the breakpoint handle. This handle should be
retained for future breakpoint operations.
Raises:
TypeError: if the given address is not an integer.
JLinkException: if the breakpoint could not be set.
### Response:
def software_breakpoint_set(self, addr, thumb=False, arm=False, flash=False, ram=False):
"""Sets a software breakpoint at the specified address.
If ``thumb`` is ``True``, the breakpoint is set in THUMB-mode, while if
``arm`` is ``True``, the breakpoint is set in ARM-mode, otherwise a
normal breakpoint is set.
If ``flash`` is ``True``, the breakpoint is set in flash, otherwise if
``ram`` is ``True``, the breakpoint is set in RAM. If both are
``True`` or both are ``False``, then the best option is chosen for
setting the breakpoint in software.
Args:
self (JLink): the ``JLink`` instance
addr (int): the address where the breakpoint will be set
thumb (bool): boolean indicating to set the breakpoint in THUMB mode
arm (bool): boolean indicating to set the breakpoint in ARM mode
flash (bool): boolean indicating to set the breakpoint in flash
ram (bool): boolean indicating to set the breakpoint in RAM
Returns:
An integer specifying the breakpoint handle. This handle should be
retained for future breakpoint operations.
Raises:
TypeError: if the given address is not an integer.
JLinkException: if the breakpoint could not be set.
"""
if flash and not ram:
flags = enums.JLinkBreakpoint.SW_FLASH
elif not flash and ram:
flags = enums.JLinkBreakpoint.SW_RAM
else:
flags = enums.JLinkBreakpoint.SW
if thumb:
flags = flags | enums.JLinkBreakpoint.THUMB
elif arm:
flags = flags | enums.JLinkBreakpoint.ARM
handle = self._dll.JLINKARM_SetBPEx(int(addr), flags)
if handle <= 0:
raise errors.JLinkException('Software breakpoint could not be set.')
return handle |
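A hypothetical call through the pylink package, which is where this method lives; the device name and breakpoint address are placeholders for whatever target is actually attached.

```python
import pylink

jlink = pylink.JLink()
jlink.open()                        # opens the first attached J-Link
jlink.connect('STM32F407VE')        # device name is an assumption
handle = jlink.software_breakpoint_set(0x08000400, thumb=True, ram=True)
print('breakpoint handle:', handle)
```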
def find_differences(self, refindex: int):
"""
Search all differences between protocol messages regarding a reference message
:param refindex: index of reference message
:rtype: dict[int, set[int]]
"""
differences = defaultdict(set)
if refindex >= len(self.protocol.messages):
return differences
if self.proto_view == 0:
proto = self.protocol.decoded_proto_bits_str
elif self.proto_view == 1:
proto = self.protocol.decoded_hex_str
elif self.proto_view == 2:
proto = self.protocol.decoded_ascii_str
else:
return differences
ref_message = proto[refindex]
ref_offset = self.get_alignment_offset_at(refindex)
for i, message in enumerate(proto):
if i == refindex:
continue
msg_offset = self.get_alignment_offset_at(i)
short, long = sorted([len(ref_message) + ref_offset, len(message) + msg_offset])
differences[i] = {
j for j in range(max(msg_offset, ref_offset), long)
if j >= short or message[j - msg_offset] != ref_message[j - ref_offset]
}
return differences | Search all differences between protocol messages regarding a reference message
:param refindex: index of reference message
:rtype: dict[int, set[int]] | Below is the instruction that describes the task:
### Input:
Search all differences between protocol messages regarding a reference message
:param refindex: index of reference message
:rtype: dict[int, set[int]]
### Response:
def find_differences(self, refindex: int):
"""
Search all differences between protocol messages regarding a reference message
:param refindex: index of reference message
:rtype: dict[int, set[int]]
"""
differences = defaultdict(set)
if refindex >= len(self.protocol.messages):
return differences
if self.proto_view == 0:
proto = self.protocol.decoded_proto_bits_str
elif self.proto_view == 1:
proto = self.protocol.decoded_hex_str
elif self.proto_view == 2:
proto = self.protocol.decoded_ascii_str
else:
return differences
ref_message = proto[refindex]
ref_offset = self.get_alignment_offset_at(refindex)
for i, message in enumerate(proto):
if i == refindex:
continue
msg_offset = self.get_alignment_offset_at(i)
short, long = sorted([len(ref_message) + ref_offset, len(message) + msg_offset])
differences[i] = {
j for j in range(max(msg_offset, ref_offset), long)
if j >= short or message[j - msg_offset] != ref_message[j - ref_offset]
}
return differences |
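To make the returned sets concrete, here is a toy version of the per-position comparison using two plain strings and zero alignment offsets (a simplification of the real code, which also handles hex/ASCII views and per-message offsets).

```python
ref_message = "10110010"
message     = "101101101"   # bit 5 flipped, plus one extra symbol

short, long = sorted([len(ref_message), len(message)])
diff = {j for j in range(long) if j >= short or message[j] != ref_message[j]}
print(sorted(diff))          # [5, 8]: position 5 differs, position 8 exists only in `message`
```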
def execute(self, sql, parameters=None, bulk_parameters=None):
"""
Prepare and execute a database operation (query or command).
"""
if self.connection._closed:
raise ProgrammingError("Connection closed")
if self._closed:
raise ProgrammingError("Cursor closed")
self._result = self.connection.client.sql(sql, parameters,
bulk_parameters)
if "rows" in self._result:
self.rows = iter(self._result["rows"]) | Prepare and execute a database operation (query or command). | Below is the instruction that describes the task:
### Input:
Prepare and execute a database operation (query or command).
### Response:
def execute(self, sql, parameters=None, bulk_parameters=None):
"""
Prepare and execute a database operation (query or command).
"""
if self.connection._closed:
raise ProgrammingError("Connection closed")
if self._closed:
raise ProgrammingError("Cursor closed")
self._result = self.connection.client.sql(sql, parameters,
bulk_parameters)
if "rows" in self._result:
self.rows = iter(self._result["rows"]) |
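Assuming this `execute` is the CrateDB Python client's DB-API cursor (which matches the `connection.client.sql` call above), usage would look roughly like this; the host URL and query are placeholders.

```python
from crate import client

conn = client.connect('http://localhost:4200')  # hypothetical CrateDB endpoint
cursor = conn.cursor()
cursor.execute("SELECT name FROM sys.cluster")
print(cursor.fetchone())
conn.close()
```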
def sort_by(self, *ids):
"""Update files order.
:param ids: List of ids specifying the final status of the list.
"""
# Support sorting by file_ids or keys.
files = {str(f_.file_id): f_.key for f_ in self}
# self.record['_files'] = [{'key': files.get(id_, id_)} for id_ in ids]
self.filesmap = OrderedDict([
(files.get(id_, id_), self[files.get(id_, id_)].dumps())
for id_ in ids
])
self.flush() | Update files order.
:param ids: List of ids specifying the final status of the list. | Below is the instruction that describes the task:
### Input:
Update files order.
:param ids: List of ids specifying the final status of the list.
### Response:
def sort_by(self, *ids):
"""Update files order.
:param ids: List of ids specifying the final status of the list.
"""
# Support sorting by file_ids or keys.
files = {str(f_.file_id): f_.key for f_ in self}
# self.record['_files'] = [{'key': files.get(id_, id_)} for id_ in ids]
self.filesmap = OrderedDict([
(files.get(id_, id_), self[files.get(id_, id_)].dumps())
for id_ in ids
])
self.flush() |
def worker_task(work_item, config):
"""The celery task which performs a single mutation and runs a test suite.
This runs `cosmic-ray worker` in a subprocess and returns the results,
passing `config` to it via stdin.
Args:
work_item: A dict describing a WorkItem.
config: The configuration to use for the test execution.
Returns: An updated WorkItem
"""
global _workspace
_ensure_workspace(config)
result = worker(
work_item.module_path,
config.python_version,
work_item.operator_name,
work_item.occurrence,
config.test_command,
config.timeout)
return work_item.job_id, result | The celery task which performs a single mutation and runs a test suite.
This runs `cosmic-ray worker` in a subprocess and returns the results,
passing `config` to it via stdin.
Args:
work_item: A dict describing a WorkItem.
config: The configuration to use for the test execution.
Returns: An updated WorkItem | Below is the instruction that describes the task:
### Input:
The celery task which performs a single mutation and runs a test suite.
This runs `cosmic-ray worker` in a subprocess and returns the results,
passing `config` to it via stdin.
Args:
work_item: A dict describing a WorkItem.
config: The configuration to use for the test execution.
Returns: An updated WorkItem
### Response:
def worker_task(work_item, config):
"""The celery task which performs a single mutation and runs a test suite.
This runs `cosmic-ray worker` in a subprocess and returns the results,
passing `config` to it via stdin.
Args:
work_item: A dict describing a WorkItem.
config: The configuration to use for the test execution.
Returns: An updated WorkItem
"""
global _workspace
_ensure_workspace(config)
result = worker(
work_item.module_path,
config.python_version,
work_item.operator_name,
work_item.occurrence,
config.test_command,
config.timeout)
return work_item.job_id, result |
def determine_collections(self):
"""Try to determine which collections this record should belong to."""
for value in record_get_field_values(self.record, '980', code='a'):
if 'NOTE' in value.upper():
self.collections.add('NOTE')
if 'THESIS' in value.upper():
self.collections.add('THESIS')
if 'CONFERENCEPAPER' in value.upper():
self.collections.add('ConferencePaper')
if "HIDDEN" in value.upper():
self.hidden = True
if self.is_published():
self.collections.add("PUBLISHED")
self.collections.add("CITEABLE")
if 'NOTE' not in self.collections:
from itertools import product
# TODO: Move this to a KB
kb = ['ATLAS-CONF-', 'CMS-PAS-', 'ATL-', 'CMS-DP-',
'ALICE-INT-', 'LHCb-PUB-']
values = record_get_field_values(self.record, "088", code='a')
for val, rep in product(values, kb):
if val.startswith(rep):
self.collections.add('NOTE')
break
# 980 Arxiv tag
if record_get_field_values(self.record, '035',
filter_subfield_code="a",
filter_subfield_value="arXiv"):
self.collections.add("arXiv")
# 980 HEP && CORE
self.collections.add('HEP')
self.collections.add('CORE')
# 980 Conference Note
if 'ConferencePaper' not in self.collections:
for value in record_get_field_values(self.record,
tag='962',
code='n'):
if value[-2:].isdigit():
self.collections.add('ConferencePaper')
break
# Clear out any existing ones.
record_delete_fields(self.record, "980") | Try to determine which collections this record should belong to. | Below is the the instruction that describes the task:
### Input:
Try to determine which collections this record should belong to.
### Response:
def determine_collections(self):
"""Try to determine which collections this record should belong to."""
for value in record_get_field_values(self.record, '980', code='a'):
if 'NOTE' in value.upper():
self.collections.add('NOTE')
if 'THESIS' in value.upper():
self.collections.add('THESIS')
if 'CONFERENCEPAPER' in value.upper():
self.collections.add('ConferencePaper')
if "HIDDEN" in value.upper():
self.hidden = True
if self.is_published():
self.collections.add("PUBLISHED")
self.collections.add("CITEABLE")
if 'NOTE' not in self.collections:
from itertools import product
# TODO: Move this to a KB
kb = ['ATLAS-CONF-', 'CMS-PAS-', 'ATL-', 'CMS-DP-',
'ALICE-INT-', 'LHCb-PUB-']
values = record_get_field_values(self.record, "088", code='a')
for val, rep in product(values, kb):
if val.startswith(rep):
self.collections.add('NOTE')
break
# 980 Arxiv tag
if record_get_field_values(self.record, '035',
filter_subfield_code="a",
filter_subfield_value="arXiv"):
self.collections.add("arXiv")
# 980 HEP && CORE
self.collections.add('HEP')
self.collections.add('CORE')
# 980 Conference Note
if 'ConferencePaper' not in self.collections:
for value in record_get_field_values(self.record,
tag='962',
code='n'):
if value[-2:].isdigit():
self.collections.add('ConferencePaper')
break
# Clear out any existing ones.
record_delete_fields(self.record, "980") |
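The report-number prefix check buried in the middle of `determine_collections` can be read in isolation; the value below is invented to show the matching.

```python
from itertools import product

kb = ['ATLAS-CONF-', 'CMS-PAS-', 'ATL-', 'CMS-DP-', 'ALICE-INT-', 'LHCb-PUB-']
values = ['CMS-PAS-HIG-13-001']     # hypothetical 088__a report number
collections = set()
for val, rep in product(values, kb):
    if val.startswith(rep):
        collections.add('NOTE')
        break
print(collections)                  # {'NOTE'}
```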
def imshow(image, format, **kwargs):
"""Draw an image in the current context figure.
Parameters
----------
image: image data
Image data, depending on the passed format, can be one of:
- an instance of an ipywidgets Image
- a file name
- a raw byte string
format: {'widget', 'filename', ...}
Type of the input argument.
If not 'widget' or 'filename', must be a format supported by
the ipywidgets Image.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x' is
required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is required
for that mark, axes_options['x'] contains optional keyword arguments
for the constructor of the corresponding axis type.
"""
if format == 'widget':
ipyimage = image
elif format == 'filename':
with open(image, 'rb') as f:
data = f.read()
ipyimage = ipyImage(value=data)
else:
ipyimage = ipyImage(value=image, format=format)
kwargs['image'] = ipyimage
kwargs.setdefault('x', [0., 1.])
kwargs.setdefault('y', [0., 1.])
return _draw_mark(Image, **kwargs) | Draw an image in the current context figure.
Parameters
----------
image: image data
Image data, depending on the passed format, can be one of:
- an instance of an ipywidgets Image
- a file name
- a raw byte string
format: {'widget', 'filename', ...}
Type of the input argument.
If not 'widget' or 'filename', must be a format supported by
the ipywidgets Image.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x' is
required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is required
for that mark, axes_options['x'] contains optional keyword arguments
for the constructor of the corresponding axis type. | Below is the instruction that describes the task:
### Input:
Draw an image in the current context figure.
Parameters
----------
image: image data
Image data, depending on the passed format, can be one of:
- an instance of an ipywidgets Image
- a file name
- a raw byte string
format: {'widget', 'filename', ...}
Type of the input argument.
If not 'widget' or 'filename', must be a format supported by
the ipywidgets Image.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x' is
required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is required
for that mark, axes_options['x'] contains optional keyword arguments
for the constructor of the corresponding axis type.
### Response:
def imshow(image, format, **kwargs):
"""Draw an image in the current context figure.
Parameters
----------
image: image data
Image data, depending on the passed format, can be one of:
- an instance of an ipywidgets Image
- a file name
- a raw byte string
format: {'widget', 'filename', ...}
Type of the input argument.
If not 'widget' or 'filename', must be a format supported by
the ipywidgets Image.
options: dict (default: {})
Options for the scales to be created. If a scale labeled 'x' is
required for that mark, options['x'] contains optional keyword
arguments for the constructor of the corresponding scale type.
axes_options: dict (default: {})
Options for the axes to be created. If an axis labeled 'x' is required
for that mark, axes_options['x'] contains optional keyword arguments
for the constructor of the corresponding axis type.
"""
if format == 'widget':
ipyimage = image
elif format == 'filename':
with open(image, 'rb') as f:
data = f.read()
ipyimage = ipyImage(value=data)
else:
ipyimage = ipyImage(value=image, format=format)
kwargs['image'] = ipyimage
kwargs.setdefault('x', [0., 1.])
kwargs.setdefault('y', [0., 1.])
return _draw_mark(Image, **kwargs) |
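A possible notebook usage through bqplot's pyplot interface, which is where this function is defined; 'photo.png' is a placeholder filename.

```python
import bqplot.pyplot as plt

fig = plt.figure(title='Image example')
plt.imshow('photo.png', 'filename')   # or pass an ipywidgets Image with format='widget'
plt.show()
```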
def gauge(self, stats, value):
"""
Log gauges
>>> client = StatsdClient()
>>> client.gauge('example.gauge', 47)
>>> client.gauge(('example.gauge41', 'example.gauge43'), 47)
"""
self.update_stats(stats, value, self.SC_GAUGE) | Log gauges
>>> client = StatsdClient()
>>> client.gauge('example.gauge', 47)
>>> client.gauge(('example.gauge41', 'example.gauge43'), 47) | Below is the instruction that describes the task:
### Input:
Log gauges
>>> client = StatsdClient()
>>> client.gauge('example.gauge', 47)
>>> client.gauge(('example.gauge41', 'example.gauge43'), 47)
### Response:
def gauge(self, stats, value):
"""
Log gauges
>>> client = StatsdClient()
>>> client.gauge('example.gauge', 47)
>>> client.gauge(('example.gauge41', 'example.gauge43'), 47)
"""
self.update_stats(stats, value, self.SC_GAUGE) |
def convert_subject_ids(self, subject_ids):
"""
Convert subject ids to strings if they are integers
"""
# TODO: need to make this generalisable via a
# splitting+mapping function passed to the repository
if subject_ids is not None:
subject_ids = set(
('{:03d}'.format(s)
if isinstance(s, int) else s) for s in subject_ids)
return subject_ids | Convert subject ids to strings if they are integers | Below is the instruction that describes the task:
### Input:
Convert subject ids to strings if they are integers
### Response:
def convert_subject_ids(self, subject_ids):
"""
Convert subject ids to strings if they are integers
"""
# TODO: need to make this generalisable via a
# splitting+mapping function passed to the repository
if subject_ids is not None:
subject_ids = set(
('{:03d}'.format(s)
if isinstance(s, int) else s) for s in subject_ids)
return subject_ids |
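The zero-padding rule in isolation: integers become three-digit strings while existing string ids pass through unchanged. The ids below are made up.

```python
subject_ids = [1, 42, 'SUB103']
converted = set(('{:03d}'.format(s) if isinstance(s, int) else s) for s in subject_ids)
print(sorted(converted))   # ['001', '042', 'SUB103']
```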
def reopen(self):
"""Reopen the tough connection.
It will not complain if the connection cannot be reopened.
"""
try:
self._con.reopen()
except Exception:
if self._transaction:
self._transaction = False
try:
self._con.query('rollback')
except Exception:
pass
else:
self._transaction = False
self._closed = False
self._setsession()
self._usage = 0 | Reopen the tough connection.
It will not complain if the connection cannot be reopened. | Below is the instruction that describes the task:
### Input:
Reopen the tough connection.
It will not complain if the connection cannot be reopened.
### Response:
def reopen(self):
"""Reopen the tough connection.
It will not complain if the connection cannot be reopened.
"""
try:
self._con.reopen()
except Exception:
if self._transaction:
self._transaction = False
try:
self._con.query('rollback')
except Exception:
pass
else:
self._transaction = False
self._closed = False
self._setsession()
self._usage = 0 |
def pool_args(function, sequence, kwargs):
"""Return a single iterator of n elements of lists of length 3, given a sequence of len n."""
return zip(itertools.repeat(function), sequence, itertools.repeat(kwargs)) | Return a single iterator of n elements of lists of length 3, given a sequence of len n. | Below is the instruction that describes the task:
### Input:
Return a single iterator of n elements of lists of length 3, given a sequence of len n.
### Response:
def pool_args(function, sequence, kwargs):
"""Return a single iterator of n elements of lists of length 3, given a sequence of len n."""
return zip(itertools.repeat(function), sequence, itertools.repeat(kwargs)) |
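What `pool_args` produces for a two-element sequence (relying on the module's own `itertools` import); each tuple is meant to be unpacked later as `(function, item, kwargs)` by a worker.

```python
def work(x, scale=1):
    return x * scale

args = pool_args(work, [10, 20], {'scale': 3})
print(list(args))
# [(<function work ...>, 10, {'scale': 3}), (<function work ...>, 20, {'scale': 3})]
```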
def run(tpu_job_name,
tpu,
gcp_project,
tpu_zone,
model_dir,
model_type="bitransformer",
vocabulary=gin.REQUIRED,
train_dataset_fn=None,
eval_dataset_fn=None,
dataset_split="train",
autostack=True,
checkpoint_path="",
mode="train",
iterations_per_loop=100,
save_checkpoints_steps=1000,
eval_steps=10,
train_steps=1000000,
batch_size=auto_batch_size,
sequence_length=gin.REQUIRED,
mesh_shape=gin.REQUIRED,
layout_rules=gin.REQUIRED,
get_components_fn=None):
"""Run training/eval/inference.
Args:
tpu_job_name: string, name of TPU worker binary
tpu: string, the Cloud TPU to use for training
gcp_project: string, project name for the Cloud TPU-enabled project
tpu_zone: string, GCE zone where the Cloud TPU is located in
model_dir: string, estimator model_dir
model_type: a string - either "bitransformer", "lm" or "aligned"
vocabulary: a vocabulary.Vocabulary or
(inputs_vocabulary, targets_vocabulary) tuple.
train_dataset_fn: A function returning a tf.data.Dataset. Must be provided
for mode=train
eval_dataset_fn: A function returning a tf.data.Dataset. Must be provided
for model=eval
dataset_split: a string
autostack: boolean, internally combine variables
checkpoint_path: a string - which checkpoint to load for inference
mode: string, train/evaluate/infer
iterations_per_loop: integer, steps per train loop
save_checkpoints_steps: integer, steps per checkpoint
eval_steps: integer, number of evaluation steps
train_steps: Total number of training steps.
batch_size: An integer or a function with the same signature as
auto_batch_size(). Mini-batch size for the training. Note that this is
the global batch size and not the per-shard batch size.
sequence_length: an integer
mesh_shape: an input to mtf.convert_to_shape()
layout_rules: an input to mtf.convert_to_layout_rules()
get_components_fn: an optional function that gets a list of tuples of
(metric_names, component) for each component. Required if mode is
"continuous_eval"
"""
if not isinstance(batch_size, int):
batch_size = batch_size(sequence_length, mesh_shape, layout_rules)
tf.logging.info("mode=%s" % mode,)
tf.logging.info("batch_size=%s" % batch_size,)
tf.logging.info("sequence_length=%s" % sequence_length,)
tf.logging.info("mesh_shape=%s" % mesh_shape,)
tf.logging.info("layout_rules=%s" % layout_rules,)
if mode == "train" and dataset_split != "train":
raise ValueError("mode==\"train\" requires dataset_split==\"train\"")
mesh_shape = mtf.convert_to_shape(mesh_shape)
layout_rules = mtf.convert_to_layout_rules(layout_rules)
cluster = tf.contrib.cluster_resolver.TPUClusterResolver(
tpu if (tpu) else "", zone=tpu_zone, project=gcp_project)
tf.logging.info(
"Building TPUConfig with tpu_job_name={}".format(tpu_job_name)
)
my_tpu_config = tpu_config.TPUConfig(
tpu_job_name=tpu_job_name,
iterations_per_loop=iterations_per_loop,
num_cores_per_replica=1,
per_host_input_for_training=tpu_config.InputPipelineConfig.BROADCAST,
)
run_config = tpu_config.RunConfig(
cluster=cluster,
model_dir=model_dir,
save_checkpoints_steps=save_checkpoints_steps,
tpu_config=my_tpu_config)
transformer_model = build_model(
model_type=model_type,
input_vocab_size=inputs_vocabulary(vocabulary).vocab_size,
output_vocab_size=targets_vocabulary(vocabulary).vocab_size,
layout_rules=layout_rules,
mesh_shape=mesh_shape)
model_fn = tpu_estimator_model_fn(
model_type=model_type,
transformer_model=transformer_model,
model_dir=model_dir,
use_tpu=tpu,
mesh_shape=mesh_shape,
layout_rules=layout_rules,
batch_size=batch_size,
sequence_length=sequence_length,
autostack=autostack,
metric_names=None)
estimator = tpu_estimator.TPUEstimator(
model_fn=model_fn,
config=run_config,
train_batch_size=batch_size,
eval_batch_size=batch_size,
predict_batch_size=batch_size,
use_tpu=tpu,
export_to_tpu=False,
params={})
if mode == "train":
if train_dataset_fn is None:
raise ValueError("Must provide train_dataset_fn through gin for train.")
def input_fn(params):
del params
dataset = train_dataset_fn(batch_size=batch_size,
sequence_length=sequence_length,
vocabulary=vocabulary,
dataset_split=dataset_split)
return dataset
estimator.train(input_fn=input_fn, max_steps=train_steps)
elif mode == "continuous_eval":
if get_components_fn is None:
raise ValueError("Must provide get_components_fn through gin for eval.")
if eval_dataset_fn is None:
raise ValueError("Must provide eval_dataset_fn through gin for eval.")
metrics_inputs = get_components_fn()
for _ in tf.contrib.training.checkpoints_iterator(estimator.model_dir):
for metric_names, component in metrics_inputs:
tf.logging.info("Evaluating {}".format(component.__dict__))
tf.logging.info("on split {}".format(dataset_split))
# Prepend eval tag and split name to metric names
metric_names = [
"eval/{}/{}".format(dataset_split, n) for n in metric_names
]
# Regenerate the estimator
model_fn = tpu_estimator_model_fn(
model_type=model_type,
transformer_model=transformer_model,
model_dir=model_dir,
use_tpu=tpu,
mesh_shape=mesh_shape,
layout_rules=layout_rules,
batch_size=batch_size,
sequence_length=sequence_length,
autostack=autostack,
metric_names=metric_names)
estimator = tpu_estimator.TPUEstimator(
model_fn=model_fn,
config=run_config,
train_batch_size=batch_size,
eval_batch_size=batch_size,
predict_batch_size=batch_size,
use_tpu=tpu,
export_to_tpu=False,
params={})
def input_fn(params):
del params
dataset = eval_dataset_fn(component, # pylint: disable=cell-var-from-loop
batch_size=batch_size,
sequence_length=sequence_length,
vocabulary=vocabulary,
dataset_split=dataset_split,
pack=False)
return dataset
eval_args = {"eval": (input_fn, eval_steps)}
_ = evaluate(estimator, eval_args)
elif mode == "infer":
decode_from_file(
estimator,
vocabulary=vocabulary,
model_type=model_type,
batch_size=batch_size,
sequence_length=sequence_length,
checkpoint_path=checkpoint_path)
else:
raise ValueError(
"unknown mode %s - must be train/evaluate/continuous_eval/infer" % mode) | Run training/eval/inference.
Args:
tpu_job_name: string, name of TPU worker binary
tpu: string, the Cloud TPU to use for training
gcp_project: string, project name for the Cloud TPU-enabled project
tpu_zone: string, GCE zone where the Cloud TPU is located in
model_dir: string, estimator model_dir
model_type: a string - either "bitransformer", "lm" or "aligned"
vocabulary: a vocabulary.Vocabulary or
(inputs_vocabulary, targets_vocabulary) tuple.
train_dataset_fn: A function returning a tf.data.Dataset. Must be provided
for mode=train
eval_dataset_fn: A function returning a tf.data.Dataset. Must be provided
for mode=eval
dataset_split: a string
autostack: boolean, internally combine variables
checkpoint_path: a string - which checkpoint to load for inference
mode: string, train/evaluate/infer
iterations_per_loop: integer, steps per train loop
save_checkpoints_steps: integer, steps per checkpoint
eval_steps: integer, number of evaluation steps
train_steps: Total number of training steps.
batch_size: An integer or a function with the same signature as
auto_batch_size(). Mini-batch size for the training. Note that this is
the global batch size and not the per-shard batch size.
sequence_length: an integer
mesh_shape: an input to mtf.convert_to_shape()
layout_rules: an input to mtf.convert_to_layout_rules()
get_components_fn: an optional function that gets a list of tuples of
(metric_names, component) for each component. Required if mode is
"continuous_eval" | Below is the the instruction that describes the task:
### Input:
Run training/eval/inference.
Args:
tpu_job_name: string, name of TPU worker binary
tpu: string, the Cloud TPU to use for training
gcp_project: string, project name for the Cloud TPU-enabled project
tpu_zone: string, GCE zone where the Cloud TPU is located in
model_dir: string, estimator model_dir
model_type: a string - either "bitransformer", "lm" or "aligned"
vocabulary: a vocabulary.Vocabulary or
(inputs_vocabulary, targets_vocabulary) tuple.
train_dataset_fn: A function returning a tf.data.Dataset. Must be provided
for mode=train
eval_dataset_fn: A function returning a tf.data.Dataset. Must be provided
for mode=eval
dataset_split: a string
autostack: boolean, internally combine variables
checkpoint_path: a string - which checkpoint to load for inference
mode: string, train/evaluate/infer
iterations_per_loop: integer, steps per train loop
save_checkpoints_steps: integer, steps per checkpoint
eval_steps: integer, number of evaluation steps
train_steps: Total number of training steps.
batch_size: An integer or a function with the same signature as
auto_batch_size(). Mini-batch size for the training. Note that this is
the global batch size and not the per-shard batch size.
sequence_length: an integer
mesh_shape: an input to mtf.convert_to_shape()
layout_rules: an input to mtf.convert_to_layout_rules()
get_components_fn: an optional function that gets a list of tuples of
(metric_names, component) for each component. Required if mode is
"continuous_eval"
### Response:
def run(tpu_job_name,
tpu,
gcp_project,
tpu_zone,
model_dir,
model_type="bitransformer",
vocabulary=gin.REQUIRED,
train_dataset_fn=None,
eval_dataset_fn=None,
dataset_split="train",
autostack=True,
checkpoint_path="",
mode="train",
iterations_per_loop=100,
save_checkpoints_steps=1000,
eval_steps=10,
train_steps=1000000,
batch_size=auto_batch_size,
sequence_length=gin.REQUIRED,
mesh_shape=gin.REQUIRED,
layout_rules=gin.REQUIRED,
get_components_fn=None):
"""Run training/eval/inference.
Args:
tpu_job_name: string, name of TPU worker binary
tpu: string, the Cloud TPU to use for training
gcp_project: string, project name for the Cloud TPU-enabled project
tpu_zone: string, GCE zone where the Cloud TPU is located in
model_dir: string, estimator model_dir
model_type: a string - either "bitransformer", "lm" or "aligned"
vocabulary: a vocabulary.Vocabulary or
(inputs_vocabulary, targets_vocabulary) tuple.
train_dataset_fn: A function returning a tf.data.Dataset. Must be provided
for mode=train
eval_dataset_fn: A function returning a tf.data.Dataset. Must be provided
for mode=eval
dataset_split: a string
autostack: boolean, internally combine variables
checkpoint_path: a string - which checkpoint to load for inference
mode: string, train/evaluate/infer
iterations_per_loop: integer, steps per train loop
save_checkpoints_steps: integer, steps per checkpoint
eval_steps: integer, number of evaluation steps
train_steps: Total number of training steps.
batch_size: An integer or a function with the same signature as
auto_batch_size(). Mini-batch size for the training. Note that this is
the global batch size and not the per-shard batch size.
sequence_length: an integer
mesh_shape: an input to mtf.convert_to_shape()
layout_rules: an input to mtf.convert_to_layout_rules()
get_components_fn: an optional function that gets a list of tuples of
(metric_names, component) for each component. Required if mode is
"continuous_eval"
"""
if not isinstance(batch_size, int):
batch_size = batch_size(sequence_length, mesh_shape, layout_rules)
tf.logging.info("mode=%s" % mode,)
tf.logging.info("batch_size=%s" % batch_size,)
tf.logging.info("sequence_length=%s" % sequence_length,)
tf.logging.info("mesh_shape=%s" % mesh_shape,)
tf.logging.info("layout_rules=%s" % layout_rules,)
if mode == "train" and dataset_split != "train":
raise ValueError("mode==\"train\" requires dataset_split==\"train\"")
mesh_shape = mtf.convert_to_shape(mesh_shape)
layout_rules = mtf.convert_to_layout_rules(layout_rules)
cluster = tf.contrib.cluster_resolver.TPUClusterResolver(
tpu if (tpu) else "", zone=tpu_zone, project=gcp_project)
tf.logging.info(
"Building TPUConfig with tpu_job_name={}".format(tpu_job_name)
)
my_tpu_config = tpu_config.TPUConfig(
tpu_job_name=tpu_job_name,
iterations_per_loop=iterations_per_loop,
num_cores_per_replica=1,
per_host_input_for_training=tpu_config.InputPipelineConfig.BROADCAST,
)
run_config = tpu_config.RunConfig(
cluster=cluster,
model_dir=model_dir,
save_checkpoints_steps=save_checkpoints_steps,
tpu_config=my_tpu_config)
transformer_model = build_model(
model_type=model_type,
input_vocab_size=inputs_vocabulary(vocabulary).vocab_size,
output_vocab_size=targets_vocabulary(vocabulary).vocab_size,
layout_rules=layout_rules,
mesh_shape=mesh_shape)
model_fn = tpu_estimator_model_fn(
model_type=model_type,
transformer_model=transformer_model,
model_dir=model_dir,
use_tpu=tpu,
mesh_shape=mesh_shape,
layout_rules=layout_rules,
batch_size=batch_size,
sequence_length=sequence_length,
autostack=autostack,
metric_names=None)
estimator = tpu_estimator.TPUEstimator(
model_fn=model_fn,
config=run_config,
train_batch_size=batch_size,
eval_batch_size=batch_size,
predict_batch_size=batch_size,
use_tpu=tpu,
export_to_tpu=False,
params={})
if mode == "train":
if train_dataset_fn is None:
raise ValueError("Must provide train_dataset_fn through gin for train.")
def input_fn(params):
del params
dataset = train_dataset_fn(batch_size=batch_size,
sequence_length=sequence_length,
vocabulary=vocabulary,
dataset_split=dataset_split)
return dataset
estimator.train(input_fn=input_fn, max_steps=train_steps)
elif mode == "continuous_eval":
if get_components_fn is None:
raise ValueError("Must provide get_components_fn through gin for eval.")
if eval_dataset_fn is None:
raise ValueError("Must provide eval_dataset_fn through gin for eval.")
metrics_inputs = get_components_fn()
for _ in tf.contrib.training.checkpoints_iterator(estimator.model_dir):
for metric_names, component in metrics_inputs:
tf.logging.info("Evaluating {}".format(component.__dict__))
tf.logging.info("on split {}".format(dataset_split))
# Prepend eval tag and split name to metric names
metric_names = [
"eval/{}/{}".format(dataset_split, n) for n in metric_names
]
# Regenerate the estimator
model_fn = tpu_estimator_model_fn(
model_type=model_type,
transformer_model=transformer_model,
model_dir=model_dir,
use_tpu=tpu,
mesh_shape=mesh_shape,
layout_rules=layout_rules,
batch_size=batch_size,
sequence_length=sequence_length,
autostack=autostack,
metric_names=metric_names)
estimator = tpu_estimator.TPUEstimator(
model_fn=model_fn,
config=run_config,
train_batch_size=batch_size,
eval_batch_size=batch_size,
predict_batch_size=batch_size,
use_tpu=tpu,
export_to_tpu=False,
params={})
def input_fn(params):
del params
dataset = eval_dataset_fn(component, # pylint: disable=cell-var-from-loop
batch_size=batch_size,
sequence_length=sequence_length,
vocabulary=vocabulary,
dataset_split=dataset_split,
pack=False)
return dataset
eval_args = {"eval": (input_fn, eval_steps)}
_ = evaluate(estimator, eval_args)
elif mode == "infer":
decode_from_file(
estimator,
vocabulary=vocabulary,
model_type=model_type,
batch_size=batch_size,
sequence_length=sequence_length,
checkpoint_path=checkpoint_path)
else:
raise ValueError(
"unknown mode %s - must be train/evaluate/continuous_eval/infer" % mode) |
def request_doi_status_by_batch_id(self, doi_batch_id, data_type='result'):
"""
This method retrieves the DOI request status.
file_name: Used as unique ID to identify a deposit.
data_type: [contents, result]
contents - retrieve the XML submitted by the publisher
result - retrieve an XML with the status of the submission
"""
endpoint = self.get_endpoint('submissionDownload')
params = {
'usr': self.api_user,
'pwd': self.api_key,
'doi_batch_id': doi_batch_id,
'type': data_type
}
result = self.do_http_request(
'get',
endpoint,
data=params,
timeout=10,
custom_header=str(self.etiquette)
)
return result | This method retrieves the DOI request status.
file_name: Used as unique ID to identify a deposit.
data_type: [contents, result]
contents - retrieve the XML submitted by the publisher
result - retrieve an XML with the status of the submission | Below is the instruction that describes the task:
### Input:
This method retrieves the DOI request status.
file_name: Used as unique ID to identify a deposit.
data_type: [contents, result]
contents - retrieve the XML submitted by the publisher
result - retrieve an XML with the status of the submission
### Response:
def request_doi_status_by_batch_id(self, doi_batch_id, data_type='result'):
"""
This method retrieves the DOI request status.
file_name: Used as unique ID to identify a deposit.
data_type: [contents, result]
contents - retrieve the XML submitted by the publisher
result - retrieve an XML with the status of the submission
"""
endpoint = self.get_endpoint('submissionDownload')
params = {
'usr': self.api_user,
'pwd': self.api_key,
'doi_batch_id': doi_batch_id,
'type': data_type
}
result = self.do_http_request(
'get',
endpoint,
data=params,
timeout=10,
custom_header=str(self.etiquette)
)
return result |
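A usage sketch for the method above, assuming `client` is an instance of the depositor class that defines it, that do_http_request() returns a requests.Response-like object with a .text attribute, and that the batch id and polling interval are placeholders.

import time

def wait_for_submission(client, doi_batch_id, attempts=10, delay=30):
    """Poll the submission result until a non-empty XML body comes back."""
    for _ in range(attempts):
        result = client.request_doi_status_by_batch_id(doi_batch_id,
                                                       data_type='result')
        if result is not None and result.text:  # .text is an assumed attribute
            return result.text
        time.sleep(delay)
    return None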
def range(self, location, distance):
"""Test whether locations are within a given range of ``location``.
Args:
location (Point): Location to test range against
distance (float): Distance to test location is within
Returns:
list of list of Point: Groups of points in range per segment
"""
return (segment.range(location, distance) for segment in self) | Test whether locations are within a given range of ``location``.
Args:
location (Point): Location to test range against
distance (float): Distance to test location is within
Returns:
list of list of Point: Groups of points in range per segment | Below is the instruction that describes the task:
### Input:
Test whether locations are within a given range of ``location``.
Args:
location (Point): Location to test range against
distance (float): Distance to test location is within
Returns:
list of list of Point: Groups of points in range per segment
### Response:
def range(self, location, distance):
"""Test whether locations are within a given range of ``location``.
Args:
location (Point): Location to test range against
distance (float): Distance to test location is within
Returns:
list of list of Point: Groups of points in range per segment
"""
return (segment.range(location, distance) for segment in self) |
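Since range() above returns a generator of per-segment results, it usually needs to be materialised before reuse; a small sketch, assuming `track` is an instance of the container class above and `home` is a Point from the same package (both names are placeholders).

# Hypothetical usage: one list of in-range points per segment.
per_segment = [list(points) for points in track.range(home, 5.0)]
hits = sum(len(points) for points in per_segment)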
def convert(self, inp):
"""Converts a string representation of some quantity of units into a
quantities object.
Args:
inp (str): A textual representation of some quantity of units,
e.g., "fifty kilograms".
Returns:
A quantities object representing the described quantity and its
units.
"""
inp = self._preprocess(inp)
n = NumberService().longestNumber(inp)
units = self.extractUnits(inp)
# Convert to quantity object, attempt conversion
quantity = pq.Quantity(float(n), units[0])
quantity.units = units[1]
return quantity | Converts a string representation of some quantity of units into a
quantities object.
Args:
inp (str): A textual representation of some quantity of units,
e.g., "fifty kilograms".
Returns:
A quantities object representing the described quantity and its
units. | Below is the instruction that describes the task:
### Input:
Converts a string representation of some quantity of units into a
quantities object.
Args:
inp (str): A textual representation of some quantity of units,
e.g., "fifty kilograms".
Returns:
A quantities object representing the described quantity and its
units.
### Response:
def convert(self, inp):
"""Converts a string representation of some quantity of units into a
quantities object.
Args:
inp (str): A textual representation of some quantity of units,
e.g., "fifty kilograms".
Returns:
A quantities object representing the described quantity and its
units.
"""
inp = self._preprocess(inp)
n = NumberService().longestNumber(inp)
units = self.extractUnits(inp)
# Convert to quantity object, attempt conversion
quantity = pq.Quantity(float(n), units[0])
quantity.units = units[1]
return quantity |
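The last two lines of convert() reduce to the quantities calls below; a standalone sketch, assuming the parser yielded 50.0 and a ('kg', 'kg') unit pair for "fifty kilograms" (illustrative values, not the actual parser output).

import quantities as pq

quantity = pq.Quantity(50.0, 'kg')  # pq.Quantity(float(n), units[0])
quantity.units = 'kg'               # quantity.units = units[1]
print(quantity)                     # 50.0 kg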
def find_by_project(self, project, params={}, **options):
"""Returns the compact records for all sections in the specified project.
Parameters
----------
project : {Id} The project to get sections from.
[params] : {Object} Parameters for the request
"""
path = "/projects/%s/sections" % (project)
return self.client.get(path, params, **options) | Returns the compact records for all sections in the specified project.
Parameters
----------
project : {Id} The project to get sections from.
[params] : {Object} Parameters for the request | Below is the instruction that describes the task:
### Input:
Returns the compact records for all sections in the specified project.
Parameters
----------
project : {Id} The project to get sections from.
[params] : {Object} Parameters for the request
### Response:
def find_by_project(self, project, params={}, **options):
"""Returns the compact records for all sections in the specified project.
Parameters
----------
project : {Id} The project to get sections from.
[params] : {Object} Parameters for the request
"""
path = "/projects/%s/sections" % (project)
return self.client.get(path, params, **options) |
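A usage sketch, assuming `client.sections` exposes the resource class above (the /projects/<id>/sections path suggests an Asana-style client), that the response iterates as dicts, and that the project id is a placeholder.

# Hypothetical call; 1331 is a placeholder project id.
for section in client.sections.find_by_project(1331, {"limit": 50}):
    print(section["name"])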
def clean_previous_run(self):
"""Clean variables from previous configuration
:return: None
"""
# Execute the base class treatment...
super(Alignak, self).clean_previous_run()
# Clean all lists
self.pollers.clear()
self.reactionners.clear()
self.brokers.clear() | Clean variables from previous configuration
:return: None | Below is the instruction that describes the task:
### Input:
Clean variables from previous configuration
:return: None
### Response:
def clean_previous_run(self):
"""Clean variables from previous configuration
:return: None
"""
# Execute the base class treatment...
super(Alignak, self).clean_previous_run()
# Clean all lists
self.pollers.clear()
self.reactionners.clear()
self.brokers.clear() |
def open_spreadsheet(self, path, as_template=False):
"""
Opens an existing spreadsheet document on the local file system.
"""
desktop = self.cls(self.hostname, self.port)
return desktop.open_spreadsheet(path, as_template=as_template) | Opens an existing spreadsheet document on the local file system. | Below is the instruction that describes the task:
### Input:
Opens an existing spreadsheet document on the local file system.
### Response:
def open_spreadsheet(self, path, as_template=False):
"""
Opens an existing spreadsheet document on the local file system.
"""
desktop = self.cls(self.hostname, self.port)
return desktop.open_spreadsheet(path, as_template=as_template) |
def close(self):
""" Stop overwriting display, or update parent. """
if self.parent:
self.parent.update(self.parent.offset + self.offset)
return
self.output.write("\n")
self.output.flush() | Stop overwriting display, or update parent. | Below is the instruction that describes the task:
### Input:
Stop overwriting display, or update parent.
### Response:
def close(self):
""" Stop overwriting display, or update parent. """
if self.parent:
self.parent.update(self.parent.offset + self.offset)
return
self.output.write("\n")
self.output.flush() |
def create_singleplots(plotman, cov, mag, pha, pha_fpi, alpha, options):
'''Plot the data of the tomodir in individual plots.
'''
magunit = 'log_rho'
if not pha == []:
[real, imag] = calc_complex(mag, pha)
if not pha_fpi == []:
[real_fpi, imag_fpi] = calc_complex(mag, pha_fpi)
if options.cmaglin:
mag = np.power(10, mag)
magunit = 'rho'
data = np.column_stack((mag, cov, pha, real, imag,
pha_fpi, real_fpi, imag_fpi))
titles = ['Magnitude', 'Coverage',
'Phase', 'Real Part', 'Imaginary Part',
'FPI Phase', 'FPI Real Part', 'FPI Imaginary Part']
unites = [
magunit, 'cov',
'phi', 'log_real', 'log_imag',
'phi', 'log_real', 'log_imag'
]
vmins = [options.mag_vmin, options.cov_vmin,
options.pha_vmin, options.real_vmin, options.imag_vmin,
options.pha_vmin, options.real_vmin, options.imag_vmin]
vmaxs = [options.mag_vmax, options.cov_vmax,
options.pha_vmax, options.real_vmax, options.imag_vmax,
options.pha_vmax, options.real_vmax, options.imag_vmax]
cmaps = ['jet', 'GnBu',
'jet_r', 'jet_r', 'plasma_r',
'plasma', 'jet_r', 'plasma_r']
saves = ['rho', 'cov',
'phi', 'real', 'imag',
'fpi_phi', 'fpi_real', 'fpi_imag']
else:
if options.cmaglin:
mag = np.power(10, mag)
magunit = 'rho'
data = np.column_stack((mag, cov, pha, real, imag))
titles = ['Magnitude', 'Coverage',
'Phase', 'Real Part', 'Imaginary Part']
unites = [magunit, 'cov',
'phi', 'log_real', 'log_imag']
vmins = [options.mag_vmin, options.cov_vmin,
options.pha_vmin, options.real_vmin, options.imag_vmin]
vmaxs = [options.mag_vmax, options.cov_vmax,
options.pha_vmax, options.real_vmax, options.imag_vmax]
cmaps = ['jet', 'GnBu',
'jet_r', 'jet_r', 'plasma_r']
saves = ['rho', 'cov',
'phi', 'real', 'imag']
else:
data = np.column_stack((mag, cov))
titles = ['Magnitude', 'Coverage']
unites = [magunit, 'cov']
vmins = [options.mag_vmin, options.cov_vmin]
vmaxs = [options.mag_vmax, options.cov_vmax]
cmaps = ['jet', 'GnBu']
saves = ['rho', 'cov']
try:
mod_rho = np.genfromtxt('rho/rho.dat', skip_header=1, usecols=([0]))
mod_pha = np.genfromtxt('rho/rho.dat', skip_header=1, usecols=([1]))
data = np.column_stack((data, mod_rho, mod_pha))
titles.append('Model')
titles.append('Model')
unites.append('rho')
unites.append('phi')
vmins.append(options.mag_vmin)
vmins.append(options.pha_vmin)
vmaxs.append(options.mag_vmax)
vmaxs.append(options.pha_vmax)
cmaps.append('jet')
cmaps.append('plasma')
saves.append('rhomod')
saves.append('phamod')
except:
pass
for datum, title, unit, vmin, vmax, cm, save in zip(
np.transpose(data), titles, unites, vmins, vmaxs, cmaps, saves):
sizex, sizez = getfigsize(plotman)
f, ax = plt.subplots(1, figsize=(sizex, sizez))
cid = plotman.parman.add_data(datum)
# handle options
cblabel = units.get_label(unit)
if options.title is not None:
title = options.title
zlabel = 'z [' + options.unit + ']'
xlabel = 'x [' + options.unit + ']'
xmin, xmax, zmin, zmax, vmin, vmax = check_minmax(
plotman,
cid,
options.xmin, options.xmax,
options.zmin, options.zmax,
vmin, vmax
)
# plot
cmap = mpl_cm.get_cmap(cm)
fig, ax, cnorm, cmap, cb, scalarMap = plotman.plot_elements_to_ax(
cid=cid,
cid_alpha=alpha,
ax=ax,
xmin=xmin,
xmax=xmax,
zmin=zmin,
zmax=zmax,
cblabel=cblabel,
title=title,
zlabel=zlabel,
xlabel=xlabel,
plot_colorbar=True,
cmap_name=cm,
over=cmap(1.0),
under=cmap(0.0),
no_elecs=options.no_elecs,
cbmin=vmin,
cbmax=vmax,
)
f.tight_layout()
f.savefig(save + '.png', dpi=300) | Plot the data of the tomodir in individual plots. | Below is the instruction that describes the task:
### Input:
Plot the data of the tomodir in individual plots.
### Response:
def create_singleplots(plotman, cov, mag, pha, pha_fpi, alpha, options):
'''Plot the data of the tomodir in individual plots.
'''
magunit = 'log_rho'
if not pha == []:
[real, imag] = calc_complex(mag, pha)
if not pha_fpi == []:
[real_fpi, imag_fpi] = calc_complex(mag, pha_fpi)
if options.cmaglin:
mag = np.power(10, mag)
magunit = 'rho'
data = np.column_stack((mag, cov, pha, real, imag,
pha_fpi, real_fpi, imag_fpi))
titles = ['Magnitude', 'Coverage',
'Phase', 'Real Part', 'Imaginary Part',
'FPI Phase', 'FPI Real Part', 'FPI Imaginary Part']
unites = [
magunit, 'cov',
'phi', 'log_real', 'log_imag',
'phi', 'log_real', 'log_imag'
]
vmins = [options.mag_vmin, options.cov_vmin,
options.pha_vmin, options.real_vmin, options.imag_vmin,
options.pha_vmin, options.real_vmin, options.imag_vmin]
vmaxs = [options.mag_vmax, options.cov_vmax,
options.pha_vmax, options.real_vmax, options.imag_vmax,
options.pha_vmax, options.real_vmax, options.imag_vmax]
cmaps = ['jet', 'GnBu',
'jet_r', 'jet_r', 'plasma_r',
'plasma', 'jet_r', 'plasma_r']
saves = ['rho', 'cov',
'phi', 'real', 'imag',
'fpi_phi', 'fpi_real', 'fpi_imag']
else:
if options.cmaglin:
mag = np.power(10, mag)
magunit = 'rho'
data = np.column_stack((mag, cov, pha, real, imag))
titles = ['Magnitude', 'Coverage',
'Phase', 'Real Part', 'Imaginary Part']
unites = [magunit, 'cov',
'phi', 'log_real', 'log_imag']
vmins = [options.mag_vmin, options.cov_vmin,
options.pha_vmin, options.real_vmin, options.imag_vmin]
vmaxs = [options.mag_vmax, options.cov_vmax,
options.pha_vmax, options.real_vmax, options.imag_vmax]
cmaps = ['jet', 'GnBu',
'jet_r', 'jet_r', 'plasma_r']
saves = ['rho', 'cov',
'phi', 'real', 'imag']
else:
data = np.column_stack((mag, cov))
titles = ['Magnitude', 'Coverage']
unites = [magunit, 'cov']
vmins = [options.mag_vmin, options.cov_vmin]
vmaxs = [options.mag_vmax, options.cov_vmax]
cmaps = ['jet', 'GnBu']
saves = ['rho', 'cov']
try:
mod_rho = np.genfromtxt('rho/rho.dat', skip_header=1, usecols=([0]))
mod_pha = np.genfromtxt('rho/rho.dat', skip_header=1, usecols=([1]))
data = np.column_stack((data, mod_rho, mod_pha))
titles.append('Model')
titles.append('Model')
unites.append('rho')
unites.append('phi')
vmins.append(options.mag_vmin)
vmins.append(options.pha_vmin)
vmaxs.append(options.mag_vmax)
vmaxs.append(options.pha_vmax)
cmaps.append('jet')
cmaps.append('plasma')
saves.append('rhomod')
saves.append('phamod')
except:
pass
for datum, title, unit, vmin, vmax, cm, save in zip(
np.transpose(data), titles, unites, vmins, vmaxs, cmaps, saves):
sizex, sizez = getfigsize(plotman)
f, ax = plt.subplots(1, figsize=(sizex, sizez))
cid = plotman.parman.add_data(datum)
# handle options
cblabel = units.get_label(unit)
if options.title is not None:
title = options.title
zlabel = 'z [' + options.unit + ']'
xlabel = 'x [' + options.unit + ']'
xmin, xmax, zmin, zmax, vmin, vmax = check_minmax(
plotman,
cid,
options.xmin, options.xmax,
options.zmin, options.zmax,
vmin, vmax
)
# plot
cmap = mpl_cm.get_cmap(cm)
fig, ax, cnorm, cmap, cb, scalarMap = plotman.plot_elements_to_ax(
cid=cid,
cid_alpha=alpha,
ax=ax,
xmin=xmin,
xmax=xmax,
zmin=zmin,
zmax=zmax,
cblabel=cblabel,
title=title,
zlabel=zlabel,
xlabel=xlabel,
plot_colorbar=True,
cmap_name=cm,
over=cmap(1.0),
under=cmap(0.0),
no_elecs=options.no_elecs,
cbmin=vmin,
cbmax=vmax,
)
f.tight_layout()
f.savefig(save + '.png', dpi=300) |
def _mean_prediction(self, lmda, Y, scores, h, t_params):
""" Creates a h-step ahead mean prediction
Parameters
----------
lmda : np.array
The past predicted values
Y : np.array
The past data
scores : np.array
The past scores
h : int
How many steps ahead for the prediction
t_params : np.array
A vector of (transformed) latent variables
Returns
----------
h-length vector of mean predictions
"""
# Create arrays to iterate over
lmda_exp = lmda.copy()
scores_exp = scores.copy()
Y_exp = Y.copy()
m1 = (np.sqrt(t_params[-2])*sp.gamma((t_params[-2]-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(t_params[-2]/2.0))
temp_theta = t_params[-1] + (t_params[-3] - (1.0/t_params[-3]))*np.exp(lmda_exp[-1]/2.0)*m1
# Loop over h time periods
for t in range(0,h):
new_value = t_params[0]
if self.p != 0:
for j in range(1,self.p+1):
new_value += t_params[j]*lmda_exp[-j]
if self.q != 0:
for k in range(1,self.q+1):
new_value += t_params[k+self.p]*scores_exp[-k]
if self.leverage is True:
m1 = (np.sqrt(t_params[-2])*sp.gamma((t_params[-2]-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(t_params[-2]/2.0))
new_value += t_params[1+self.p+self.q]*np.sign(-(Y_exp[-1]-temp_theta))*(scores_exp[-1]+1)
temp_theta = t_params[-1] + (t_params[-3] - (1.0/t_params[-3]))*np.exp(new_value/2.0)*m1
lmda_exp = np.append(lmda_exp,[new_value]) # For indexing consistency
scores_exp = np.append(scores_exp,[0]) # expectation of score is zero
Y_exp = np.append(Y_exp,[temp_theta])
return lmda_exp | Creates a h-step ahead mean prediction
Parameters
----------
lmda : np.array
The past predicted values
Y : np.array
The past data
scores : np.array
The past scores
h : int
How many steps ahead for the prediction
t_params : np.array
A vector of (transformed) latent variables
Returns
----------
h-length vector of mean predictions | Below is the instruction that describes the task:
### Input:
Creates a h-step ahead mean prediction
Parameters
----------
lmda : np.array
The past predicted values
Y : np.array
The past data
scores : np.array
The past scores
h : int
How many steps ahead for the prediction
t_params : np.array
A vector of (transformed) latent variables
Returns
----------
h-length vector of mean predictions
### Response:
def _mean_prediction(self, lmda, Y, scores, h, t_params):
""" Creates a h-step ahead mean prediction
Parameters
----------
lmda : np.array
The past predicted values
Y : np.array
The past data
scores : np.array
The past scores
h : int
How many steps ahead for the prediction
t_params : np.array
A vector of (transformed) latent variables
Returns
----------
h-length vector of mean predictions
"""
# Create arrays to iterate over
lmda_exp = lmda.copy()
scores_exp = scores.copy()
Y_exp = Y.copy()
m1 = (np.sqrt(t_params[-2])*sp.gamma((t_params[-2]-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(t_params[-2]/2.0))
temp_theta = t_params[-1] + (t_params[-3] - (1.0/t_params[-3]))*np.exp(lmda_exp[-1]/2.0)*m1
# Loop over h time periods
for t in range(0,h):
new_value = t_params[0]
if self.p != 0:
for j in range(1,self.p+1):
new_value += t_params[j]*lmda_exp[-j]
if self.q != 0:
for k in range(1,self.q+1):
new_value += t_params[k+self.p]*scores_exp[-k]
if self.leverage is True:
m1 = (np.sqrt(t_params[-2])*sp.gamma((t_params[-2]-1.0)/2.0))/(np.sqrt(np.pi)*sp.gamma(t_params[-2]/2.0))
new_value += t_params[1+self.p+self.q]*np.sign(-(Y_exp[-1]-temp_theta))*(scores_exp[-1]+1)
temp_theta = t_params[-1] + (t_params[-3] - (1.0/t_params[-3]))*np.exp(new_value/2.0)*m1
lmda_exp = np.append(lmda_exp,[new_value]) # For indexing consistency
scores_exp = np.append(scores_exp,[0]) # expectation of score is zero
Y_exp = np.append(Y_exp,[temp_theta])
return lmda_exp |
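The scaling factor m1 used above, recomputed on its own; a sketch assuming scipy.special as sp and numpy as np exactly as in the method, with t_params[-2] (here nu) treated as the degrees-of-freedom-style parameter.

import numpy as np
import scipy.special as sp

def mean_scale_factor(nu):
    """m1 = sqrt(nu) * Gamma((nu - 1) / 2) / (sqrt(pi) * Gamma(nu / 2))."""
    return (np.sqrt(nu) * sp.gamma((nu - 1.0) / 2.0)) / (np.sqrt(np.pi) * sp.gamma(nu / 2.0))

print(mean_scale_factor(5.0))  # ~0.95; multiplies exp(lmda / 2) inside temp_theta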
def better_print(self, printer=None):
"""
Print the value using a *printer*.
:param printer: Callable used to print the value, by default: :func:`pprint.pprint`
"""
printer = printer or pprint.pprint
printer(self.value) | Print the value using a *printer*.
:param printer: Callable used to print the value, by default: :func:`pprint.pprint` | Below is the instruction that describes the task:
### Input:
Print the value using a *printer*.
:param printer: Callable used to print the value, by default: :func:`pprint.pprint`
### Response:
def better_print(self, printer=None):
"""
Print the value using a *printer*.
:param printer: Callable used to print the value, by default: :func:`pprint.pprint`
"""
printer = printer or pprint.pprint
printer(self.value) |
def to_json(self, extras=None):
"""
Convert a model into a json using the playhouse shortcut.
"""
extras = extras or {}
to_dict = model_to_dict(self)
to_dict.update(extras)
return json.dumps(to_dict, cls=sel.serializers.JsonEncoder) | Convert a model into a json using the playhouse shortcut. | Below is the instruction that describes the task:
### Input:
Convert a model into a json using the playhouse shortcut.
### Response:
def to_json(self, extras=None):
"""
Convert a model into a json using the playhouse shortcut.
"""
extras = extras or {}
to_dict = model_to_dict(self)
to_dict.update(extras)
return json.dumps(to_dict, cls=sel.serializers.JsonEncoder) |
def create(cls, parent, child, relation_type, index=None):
"""Create a PID relation for given parent and child."""
try:
with db.session.begin_nested():
obj = cls(parent_id=parent.id,
child_id=child.id,
relation_type=relation_type,
index=index)
db.session.add(obj)
except IntegrityError:
raise Exception("PID Relation already exists.")
# msg = "PIDRelation already exists: " \
# "{0} -> {1} ({2})".format(
# parent_pid, child_pid, relation_type)
# logger.exception(msg)
# raise Exception(msg)
return obj | Create a PID relation for given parent and child. | Below is the instruction that describes the task:
### Input:
Create a PID relation for given parent and child.
### Response:
def create(cls, parent, child, relation_type, index=None):
"""Create a PID relation for given parent and child."""
try:
with db.session.begin_nested():
obj = cls(parent_id=parent.id,
child_id=child.id,
relation_type=relation_type,
index=index)
db.session.add(obj)
except IntegrityError:
raise Exception("PID Relation already exists.")
# msg = "PIDRelation already exists: " \
# "{0} -> {1} ({2})".format(
# parent_pid, child_pid, relation_type)
# logger.exception(msg)
# raise Exception(msg)
return obj |
def read_mutating_webhook_configuration(self, name, **kwargs): # noqa: E501
"""read_mutating_webhook_configuration # noqa: E501
read the specified MutatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_mutating_webhook_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the MutatingWebhookConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1MutatingWebhookConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_mutating_webhook_configuration_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.read_mutating_webhook_configuration_with_http_info(name, **kwargs) # noqa: E501
return data | read_mutating_webhook_configuration # noqa: E501
read the specified MutatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_mutating_webhook_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the MutatingWebhookConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1MutatingWebhookConfiguration
If the method is called asynchronously,
returns the request thread. | Below is the instruction that describes the task:
### Input:
read_mutating_webhook_configuration # noqa: E501
read the specified MutatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_mutating_webhook_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the MutatingWebhookConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1MutatingWebhookConfiguration
If the method is called asynchronously,
returns the request thread.
### Response:
def read_mutating_webhook_configuration(self, name, **kwargs): # noqa: E501
"""read_mutating_webhook_configuration # noqa: E501
read the specified MutatingWebhookConfiguration # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_mutating_webhook_configuration(name, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the MutatingWebhookConfiguration (required)
:param str pretty: If 'true', then the output is pretty printed.
:param bool exact: Should the export be exact. Exact export maintains cluster-specific fields like 'Namespace'.
:param bool export: Should this value be exported. Export strips fields that a user can not specify.
:return: V1beta1MutatingWebhookConfiguration
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_mutating_webhook_configuration_with_http_info(name, **kwargs) # noqa: E501
else:
(data) = self.read_mutating_webhook_configuration_with_http_info(name, **kwargs) # noqa: E501
return data |
def get_locations(self):
'''
a method to retrieve all the locations tracked by the model
:return: dictionary with location id keys
NOTE: results are added to self.locations property
{
'location.id': {
'
}
}
'''
import requests
url = self.endpoint + '/locations'
params = {
'group': self.group_name
}
response = requests.get(url, params=params)
response_details = response.json()
if 'locations' in response_details.keys():
self.locations = response_details['locations']
return self.locations | a method to retrieve all the locations tracked by the model
:return: dictionary with location id keys
NOTE: results are added to self.locations property
{
'location.id': {
'
}
} | Below is the instruction that describes the task:
### Input:
a method to retrieve all the locations tracked by the model
:return: dictionary with location id keys
NOTE: results are added to self.locations property
{
'location.id': {
'
}
}
### Response:
def get_locations(self):
'''
a method to retrieve all the locations tracked by the model
:return: dictionary with location id keys
NOTE: results are added to self.locations property
{
'location.id': {
'
}
}
'''
import requests
url = self.endpoint + '/locations'
params = {
'group': self.group_name
}
response = requests.get(url, params=params)
response_details = response.json()
if 'locations' in response_details.keys():
self.locations = response_details['locations']
return self.locations |
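A standalone sketch of the HTTP exchange get_locations() performs, mirroring the requests call in the method body; the endpoint URL and group name are parameters here rather than values from the original class.

import requests

def fetch_locations(endpoint, group_name):
    """Return the 'locations' mapping from the tracking service, or {} if absent."""
    response = requests.get(endpoint + '/locations', params={'group': group_name})
    details = response.json()
    return details.get('locations', {})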
def save_file(self, filename = 'StockChart'):
""" save htmlcontent as .html file """
filename = filename + '.html'
with open(filename, 'w') as f:
#self.buildhtml()
f.write(self.htmlcontent)
f.closed | save htmlcontent as .html file | Below is the instruction that describes the task:
### Input:
save htmlcontent as .html file
### Response:
def save_file(self, filename = 'StockChart'):
""" save htmlcontent as .html file """
filename = filename + '.html'
with open(filename, 'w') as f:
#self.buildhtml()
f.write(self.htmlcontent)
f.closed |
def setup_build_path(build_path):
"""
Create build directory. If this already exists, print informative
error message and quit.
"""
if os.path.isdir(build_path):
fname = os.path.join(build_path, 'CMakeCache.txt')
if os.path.exists(fname):
sys.stderr.write('aborting setup\n')
sys.stderr.write(
'build directory {0} which contains CMakeCache.txt already exists\n'.
format(build_path))
sys.stderr.write(
'remove the build directory and then rerun setup\n')
sys.exit(1)
else:
os.makedirs(build_path, 0o755) | Create build directory. If this already exists, print informative
error message and quit. | Below is the instruction that describes the task:
### Input:
Create build directory. If this already exists, print informative
error message and quit.
### Response:
def setup_build_path(build_path):
"""
Create build directory. If this already exists, print informative
error message and quit.
"""
if os.path.isdir(build_path):
fname = os.path.join(build_path, 'CMakeCache.txt')
if os.path.exists(fname):
sys.stderr.write('aborting setup\n')
sys.stderr.write(
'build directory {0} which contains CMakeCache.txt already exists\n'.
format(build_path))
sys.stderr.write(
'remove the build directory and then rerun setup\n')
sys.exit(1)
else:
os.makedirs(build_path, 0o755) |
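A short invocation sketch; the build directory name is a placeholder, and the behaviour relies only on what setup_build_path() itself does above.

import os

build_path = os.path.join(os.getcwd(), 'build')
setup_build_path(build_path)  # exits with a message if build/CMakeCache.txt already exists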
def where(cls, **kwargs):
"""
Returns a generator which yields instances matching the given query
arguments.
For example, this would yield all :py:class:`.Project`::
Project.where()
And this would yield all launch approved :py:class:`.Project`::
Project.where(launch_approved=True)
"""
_id = kwargs.pop('id', '')
return cls.paginated_results(*cls.http_get(_id, params=kwargs)) | Returns a generator which yields instances matching the given query
arguments.
For example, this would yield all :py:class:`.Project`::
Project.where()
And this would yield all launch approved :py:class:`.Project`::
Project.where(launch_approved=True) | Below is the instruction that describes the task:
### Input:
Returns a generator which yields instances matching the given query
arguments.
For example, this would yield all :py:class:`.Project`::
Project.where()
And this would yield all launch approved :py:class:`.Project`::
Project.where(launch_approved=True)
### Response:
def where(cls, **kwargs):
"""
Returns a generator which yields instances matching the given query
arguments.
For example, this would yield all :py:class:`.Project`::
Project.where()
And this would yield all launch approved :py:class:`.Project`::
Project.where(launch_approved=True)
"""
_id = kwargs.pop('id', '')
return cls.paginated_results(*cls.http_get(_id, params=kwargs)) |
def get_iso_packet_buffer_list(transfer_p):
"""
Python-specific helper extracting a list of iso packet buffers.
"""
transfer = transfer_p.contents
offset = 0
result = []
append = result.append
for iso_transfer in _get_iso_packet_list(transfer):
length = iso_transfer.length
append(_get_iso_packet_buffer(transfer, offset, length))
offset += length
return result | Python-specific helper extracting a list of iso packet buffers. | Below is the instruction that describes the task:
### Input:
Python-specific helper extracting a list of iso packet buffers.
### Response:
def get_iso_packet_buffer_list(transfer_p):
"""
Python-specific helper extracting a list of iso packet buffers.
"""
transfer = transfer_p.contents
offset = 0
result = []
append = result.append
for iso_transfer in _get_iso_packet_list(transfer):
length = iso_transfer.length
append(_get_iso_packet_buffer(transfer, offset, length))
offset += length
return result |
def ackermann_naive(m: int, n: int) -> int:
"""Ackermann number.
"""
if m == 0:
return n + 1
elif n == 0:
return ackermann(m - 1, 1)
else:
return ackermann(m - 1, ackermann(m, n - 1)) | Ackermann number. | Below is the instruction that describes the task:
### Input:
Ackermann number.
### Response:
def ackermann_naive(m: int, n: int) -> int:
"""Ackermann number.
"""
if m == 0:
return n + 1
elif n == 0:
return ackermann(m - 1, 1)
else:
return ackermann(m - 1, ackermann(m, n - 1)) |
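A quick check of the recursion above, assuming the `ackermann` name it calls resolves to this same naive definition (e.g. via the alias below); the expected values follow the standard closed forms A(1, n) = n + 2, A(2, n) = 2n + 3 and A(3, n) = 2**(n + 3) - 3.

ackermann = ackermann_naive  # assumed alias so the recursive calls resolve

assert ackermann_naive(1, 5) == 7
assert ackermann_naive(2, 4) == 11
assert ackermann_naive(3, 3) == 61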
def to_0d_array(value: Any) -> np.ndarray:
"""Given a value, wrap it in a 0-D numpy.ndarray.
"""
if np.isscalar(value) or (isinstance(value, np.ndarray) and
value.ndim == 0):
return np.array(value)
else:
return to_0d_object_array(value) | Given a value, wrap it in a 0-D numpy.ndarray. | Below is the instruction that describes the task:
### Input:
Given a value, wrap it in a 0-D numpy.ndarray.
### Response:
def to_0d_array(value: Any) -> np.ndarray:
"""Given a value, wrap it in a 0-D numpy.ndarray.
"""
if np.isscalar(value) or (isinstance(value, np.ndarray) and
value.ndim == 0):
return np.array(value)
else:
return to_0d_object_array(value) |
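A short demonstration of the wrapping behaviour, assuming numpy is imported as np as in the function body; non-scalar inputs are handed to the companion helper to_0d_object_array().

import numpy as np

assert to_0d_array(3.5).shape == ()          # scalar -> 0-d numeric array
assert to_0d_array(np.array(7)).ndim == 0    # 0-d array stays 0-d
# Lists, 1-d arrays, etc. go through to_0d_object_array() instead.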
def pcolor_axes(array, px_to_units=px_to_units):
"""
Return axes :code:`x, y` for *array* to be used with :func:`matplotlib.pyplot.pcolor`.
*px_to_units* is a function to convert pixels to units. By default, returns pixels.
"""
# ======================================
# Coords need to be +1 larger than array
# ======================================
x_size = array.shape[0]+1
y_size = array.shape[1]+1
x = _np.empty((x_size, y_size))
y = _np.empty((x_size, y_size))
for i in range(x_size):
for j in range(y_size):
x[i, j], y[i, j] = px_to_units(i-0.5, j-0.5)
return x, y | Return axes :code:`x, y` for *array* to be used with :func:`matplotlib.pyplot.pcolor`.
*px_to_units* is a function to convert pixels to units. By default, returns pixels. | Below is the instruction that describes the task:
### Input:
Return axes :code:`x, y` for *array* to be used with :func:`matplotlib.pyplot.pcolor`.
*px_to_units* is a function to convert pixels to units. By default, returns pixels.
### Response:
def pcolor_axes(array, px_to_units=px_to_units):
"""
Return axes :code:`x, y` for *array* to be used with :func:`matplotlib.pyplot.pcolor`.
*px_to_units* is a function to convert pixels to units. By default, returns pixels.
"""
# ======================================
# Coords need to be +1 larger than array
# ======================================
x_size = array.shape[0]+1
y_size = array.shape[1]+1
x = _np.empty((x_size, y_size))
y = _np.empty((x_size, y_size))
for i in range(x_size):
for j in range(y_size):
x[i, j], y[i, j] = px_to_units(i-0.5, j-0.5)
return x, y |
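A plotting sketch pairing the returned axes with matplotlib's pcolor, assuming the module-level px_to_units default used above (identity, i.e. pixel coordinates) and random placeholder image values.

import numpy as _np
import matplotlib.pyplot as plt

image = _np.random.rand(32, 24)
x, y = pcolor_axes(image)      # shapes (33, 25), one larger than the image
plt.pcolor(x, y, image)
plt.xlabel('x [px]')
plt.ylabel('y [px]')
plt.show()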
def umode(self, nick, modes=''):
"""
Sets/gets user modes.
Required arguments:
* nick - Nick to set/get user modes for.
Optional arguments:
* modes='' - Sets these user modes on a nick.
"""
with self.lock:
if not modes:
self.send('MODE %s' % nick)
if self.readable():
msg = self._recv(expected_replies=('221',))
if msg[0] == '221':
modes = msg[2].replace('+', '').replace(':', '', 1)
return modes
self.send('MODE %s %s' % (nick, modes))
if self.readable():
msg = self._recv(expected_replies=('MODE',))
if msg[0] == 'MODE':
if not self.hide_called_events:
self.stepback()
return msg[2].replace(':', '', 1) | Sets/gets user modes.
Required arguments:
* nick - Nick to set/get user modes for.
Optional arguments:
* modes='' - Sets these user modes on a nick. | Below is the instruction that describes the task:
### Input:
Sets/gets user modes.
Required arguments:
* nick - Nick to set/get user modes for.
Optional arguments:
* modes='' - Sets these user modes on a nick.
### Response:
def umode(self, nick, modes=''):
"""
Sets/gets user modes.
Required arguments:
* nick - Nick to set/get user modes for.
Optional arguments:
* modes='' - Sets these user modes on a nick.
"""
with self.lock:
if not modes:
self.send('MODE %s' % nick)
if self.readable():
msg = self._recv(expected_replies=('221',))
if msg[0] == '221':
modes = msg[2].replace('+', '').replace(':', '', 1)
return modes
self.send('MODE %s %s' % (nick, modes))
if self.readable():
msg = self._recv(expected_replies=('MODE',))
if msg[0] == 'MODE':
if not self.hide_called_events:
self.stepback()
return msg[2].replace(':', '', 1) |
def _colorize_single_line(line, regexp, color_def):
"""Print single line to console with ability to colorize parts of it."""
match = regexp.match(line)
groupdict = match.groupdict()
groups = match.groups()
if not groupdict:
# no named groups, just colorize whole line
color = color_def[0]
dark = color_def[1]
cprint("%s\n" % line, color, fg_dark=dark)
else:
rev_groups = {v: k for k, v in groupdict.items()}
for part in groups:
if part in rev_groups and rev_groups[part] in color_def:
group_name = rev_groups[part]
cprint(
part,
color_def[group_name][0],
fg_dark=color_def[group_name][1],
)
else:
cprint(part)
cprint("\n") | Print single line to console with ability to colorize parts of it. | Below is the the instruction that describes the task:
### Input:
Print single line to console with ability to colorize parts of it.
### Response:
def _colorize_single_line(line, regexp, color_def):
"""Print single line to console with ability to colorize parts of it."""
match = regexp.match(line)
groupdict = match.groupdict()
groups = match.groups()
if not groupdict:
# no named groups, just colorize whole line
color = color_def[0]
dark = color_def[1]
cprint("%s\n" % line, color, fg_dark=dark)
else:
rev_groups = {v: k for k, v in groupdict.items()}
for part in groups:
if part in rev_groups and rev_groups[part] in color_def:
group_name = rev_groups[part]
cprint(
part,
color_def[group_name][0],
fg_dark=color_def[group_name][1],
)
else:
cprint(part)
cprint("\n") |
def namedb_get_name_preorder( db, preorder_hash, current_block ):
"""
Get a (singular) name preorder record outstanding at the given block, given the preorder hash.
NOTE: returns expired preorders.
Return the preorder record on success.
Return None if not found.
"""
select_query = "SELECT * FROM preorders WHERE preorder_hash = ? AND op = ? AND block_number < ?;"
args = (preorder_hash, NAME_PREORDER, current_block + NAME_PREORDER_EXPIRE)
cur = db.cursor()
preorder_rows = namedb_query_execute( cur, select_query, args )
preorder_row = preorder_rows.fetchone()
if preorder_row is None:
# no such preorder
return None
preorder_rec = {}
preorder_rec.update( preorder_row )
unexpired_query, unexpired_args = namedb_select_where_unexpired_names( current_block )
# make sure that the name doesn't already exist
select_query = "SELECT name_records.preorder_hash " + \
"FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
"WHERE name_records.preorder_hash = ? AND " + \
unexpired_query + ";"
args = (preorder_hash,) + unexpired_args
cur = db.cursor()
nm_rows = namedb_query_execute( cur, select_query, args )
nm_row = nm_rows.fetchone()
if nm_row is not None:
# name with this preorder exists
return None
return preorder_rec | Get a (singular) name preorder record outstanding at the given block, given the preorder hash.
NOTE: returns expired preorders.
Return the preorder record on success.
Return None if not found. | Below is the instruction that describes the task:
### Input:
Get a (singular) name preorder record outstanding at the given block, given the preorder hash.
NOTE: returns expired preorders.
Return the preorder record on success.
Return None if not found.
### Response:
def namedb_get_name_preorder( db, preorder_hash, current_block ):
"""
Get a (singular) name preorder record outstanding at the given block, given the preorder hash.
NOTE: returns expired preorders.
Return the preorder record on success.
Return None if not found.
"""
select_query = "SELECT * FROM preorders WHERE preorder_hash = ? AND op = ? AND block_number < ?;"
args = (preorder_hash, NAME_PREORDER, current_block + NAME_PREORDER_EXPIRE)
cur = db.cursor()
preorder_rows = namedb_query_execute( cur, select_query, args )
preorder_row = preorder_rows.fetchone()
if preorder_row is None:
# no such preorder
return None
preorder_rec = {}
preorder_rec.update( preorder_row )
unexpired_query, unexpired_args = namedb_select_where_unexpired_names( current_block )
# make sure that the name doesn't already exist
select_query = "SELECT name_records.preorder_hash " + \
"FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \
"WHERE name_records.preorder_hash = ? AND " + \
unexpired_query + ";"
args = (preorder_hash,) + unexpired_args
cur = db.cursor()
nm_rows = namedb_query_execute( cur, select_query, args )
nm_row = nm_rows.fetchone()
if nm_row is not None:
# name with this preorder exists
return None
return preorder_rec |
def _parse_tree_structmap(self, tree, parent_elem, normative_parent_elem=None):
"""Recursively parse all the children of parent_elem, including amdSecs
and dmdSecs.
:param lxml._ElementTree tree: encodes the entire METS file.
:param lxml._Element parent_elem: the element whose children we are
parsing.
:param lxml._Element normative_parent_elem: the normative
counterpart of ``parent_elem`` taken from the logical structMap
labelled "Normative Directory Structure".
"""
siblings = []
el_to_normative = self._get_el_to_normative(parent_elem, normative_parent_elem)
for elem, normative_elem in el_to_normative.items():
if elem.tag != utils.lxmlns("mets") + "div":
continue # Only handle divs, not fptrs
entry_type = elem.get("TYPE")
label = elem.get("LABEL")
fptr_elems = elem.findall("mets:fptr", namespaces=utils.NAMESPACES)
# Directories are walked recursively. Additionally, they may
# contain direct fptrs.
if entry_type.lower() == "directory":
children = self._parse_tree_structmap(
tree, elem, normative_parent_elem=normative_elem
)
fs_entry = fsentry.FSEntry.dir(label, children)
self._add_dmdsecs_to_fs_entry(elem, fs_entry, tree)
siblings.append(fs_entry)
for fptr_elem in fptr_elems:
fptr = self._analyze_fptr(fptr_elem, tree, entry_type)
fs_entry = fsentry.FSEntry.from_fptr(
label=None, type_=u"Item", fptr=fptr
)
self._add_amdsecs_to_fs_entry(fptr.amdids, fs_entry, tree)
siblings.append(fs_entry)
continue
# Other types, e.g.: items, aips...
if not len(fptr_elems):
continue
fptr = self._analyze_fptr(fptr_elems[0], tree, entry_type)
fs_entry = fsentry.FSEntry.from_fptr(label, entry_type, fptr)
self._add_dmdsecs_to_fs_entry(elem, fs_entry, tree)
self._add_amdsecs_to_fs_entry(fptr.amdids, fs_entry, tree)
siblings.append(fs_entry)
return siblings | Recursively parse all the children of parent_elem, including amdSecs
and dmdSecs.
:param lxml._ElementTree tree: encodes the entire METS file.
:param lxml._Element parent_elem: the element whose children we are
parsing.
:param lxml._Element normative_parent_elem: the normative
counterpart of ``parent_elem`` taken from the logical structMap
labelled "Normative Directory Structure". | Below is the the instruction that describes the task:
### Input:
Recursively parse all the children of parent_elem, including amdSecs
and dmdSecs.
:param lxml._ElementTree tree: encodes the entire METS file.
:param lxml._Element parent_elem: the element whose children we are
parsing.
:param lxml._Element normative_parent_elem: the normative
counterpart of ``parent_elem`` taken from the logical structMap
labelled "Normative Directory Structure".
### Response:
def _parse_tree_structmap(self, tree, parent_elem, normative_parent_elem=None):
"""Recursively parse all the children of parent_elem, including amdSecs
and dmdSecs.
:param lxml._ElementTree tree: encodes the entire METS file.
:param lxml._Element parent_elem: the element whose children we are
parsing.
:param lxml._Element normative_parent_elem: the normative
counterpart of ``parent_elem`` taken from the logical structMap
labelled "Normative Directory Structure".
"""
siblings = []
el_to_normative = self._get_el_to_normative(parent_elem, normative_parent_elem)
for elem, normative_elem in el_to_normative.items():
if elem.tag != utils.lxmlns("mets") + "div":
continue # Only handle divs, not fptrs
entry_type = elem.get("TYPE")
label = elem.get("LABEL")
fptr_elems = elem.findall("mets:fptr", namespaces=utils.NAMESPACES)
# Directories are walked recursively. Additionally, they may
# contain direct fptrs.
if entry_type.lower() == "directory":
children = self._parse_tree_structmap(
tree, elem, normative_parent_elem=normative_elem
)
fs_entry = fsentry.FSEntry.dir(label, children)
self._add_dmdsecs_to_fs_entry(elem, fs_entry, tree)
siblings.append(fs_entry)
for fptr_elem in fptr_elems:
fptr = self._analyze_fptr(fptr_elem, tree, entry_type)
fs_entry = fsentry.FSEntry.from_fptr(
label=None, type_=u"Item", fptr=fptr
)
self._add_amdsecs_to_fs_entry(fptr.amdids, fs_entry, tree)
siblings.append(fs_entry)
continue
# Other types, e.g.: items, aips...
if not len(fptr_elems):
continue
fptr = self._analyze_fptr(fptr_elems[0], tree, entry_type)
fs_entry = fsentry.FSEntry.from_fptr(label, entry_type, fptr)
self._add_dmdsecs_to_fs_entry(elem, fs_entry, tree)
self._add_amdsecs_to_fs_entry(fptr.amdids, fs_entry, tree)
siblings.append(fs_entry)
return siblings |
def get_fastq_dir(fc_dir):
"""Retrieve the fastq directory within Solexa flowcell output.
"""
full_goat_bc = glob.glob(os.path.join(fc_dir, "Data", "*Firecrest*", "Bustard*"))
bustard_bc = glob.glob(os.path.join(fc_dir, "Data", "Intensities", "*Bustard*"))
machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls")
if os.path.exists(machine_bc):
return os.path.join(machine_bc, "fastq")
elif len(full_goat_bc) > 0:
return os.path.join(full_goat_bc[0], "fastq")
elif len(bustard_bc) > 0:
return os.path.join(bustard_bc[0], "fastq")
# otherwise assume we are in the fastq directory
# XXX What other cases can we end up with here?
else:
return fc_dir | Retrieve the fastq directory within Solexa flowcell output. | Below is the instruction that describes the task:
### Input:
Retrieve the fastq directory within Solexa flowcell output.
### Response:
def get_fastq_dir(fc_dir):
"""Retrieve the fastq directory within Solexa flowcell output.
"""
full_goat_bc = glob.glob(os.path.join(fc_dir, "Data", "*Firecrest*", "Bustard*"))
bustard_bc = glob.glob(os.path.join(fc_dir, "Data", "Intensities", "*Bustard*"))
machine_bc = os.path.join(fc_dir, "Data", "Intensities", "BaseCalls")
if os.path.exists(machine_bc):
return os.path.join(machine_bc, "fastq")
elif len(full_goat_bc) > 0:
return os.path.join(full_goat_bc[0], "fastq")
elif len(bustard_bc) > 0:
return os.path.join(bustard_bc[0], "fastq")
# otherwise assume we are in the fastq directory
# XXX What other cases can we end up with here?
else:
return fc_dir |
def Dir_anis_corr(InDir, AniSpec):
"""
takes the 6 element 's' vector and the Dec,Inc 'InDir' data,
performs simple anisotropy correction. returns corrected Dec, Inc
"""
Dir = np.zeros((3), 'f')
Dir[0] = InDir[0]
Dir[1] = InDir[1]
Dir[2] = 1.
chi, chi_inv = check_F(AniSpec)
if chi[0][0] == 1.:
return Dir # isotropic
X = dir2cart(Dir)
M = np.array(X)
H = np.dot(M, chi_inv)
return cart2dir(H) | takes the 6 element 's' vector and the Dec,Inc 'InDir' data,
performs simple anisotropy correction. returns corrected Dec, Inc | Below is the instruction that describes the task:
### Input:
takes the 6 element 's' vector and the Dec,Inc 'InDir' data,
performs simple anisotropy correction. returns corrected Dec, Inc
### Response:
def Dir_anis_corr(InDir, AniSpec):
"""
takes the 6 element 's' vector and the Dec,Inc 'InDir' data,
performs simple anisotropy correction. returns corrected Dec, Inc
"""
Dir = np.zeros((3), 'f')
Dir[0] = InDir[0]
Dir[1] = InDir[1]
Dir[2] = 1.
chi, chi_inv = check_F(AniSpec)
if chi[0][0] == 1.:
return Dir # isotropic
X = dir2cart(Dir)
M = np.array(X)
H = np.dot(M, chi_inv)
return cart2dir(H) |
def _read_master_branch_resource(self, fn, is_json=False):
"""This will force the current branch to master! """
with self._master_branch_repo_lock:
ga = self._create_git_action_for_global_resource()
with ga.lock():
ga.checkout_master()
if os.path.exists(fn):
if is_json:
return read_as_json(fn)
with codecs.open(fn, 'rU', encoding='utf-8') as f:
ret = f.read()
return ret
return None | This will force the current branch to master! | Below is the instruction that describes the task:
### Input:
This will force the current branch to master!
### Response:
def _read_master_branch_resource(self, fn, is_json=False):
"""This will force the current branch to master! """
with self._master_branch_repo_lock:
ga = self._create_git_action_for_global_resource()
with ga.lock():
ga.checkout_master()
if os.path.exists(fn):
if is_json:
return read_as_json(fn)
with codecs.open(fn, 'rU', encoding='utf-8') as f:
ret = f.read()
return ret
return None |
def change_and_save(self, update_only_changed_fields=False, **changed_fields):
"""
Changes a given `changed_fields` on each object in the queryset, saves objects
and returns the changed objects in the queryset.
"""
bulk_change_and_save(self, update_only_changed_fields=update_only_changed_fields, **changed_fields)
return self.filter() | Changes a given `changed_fields` on each object in the queryset, saves objects
and returns the changed objects in the queryset. | Below is the instruction that describes the task:
### Input:
Changes a given `changed_fields` on each object in the queryset, saves objects
and returns the changed objects in the queryset.
### Response:
def change_and_save(self, update_only_changed_fields=False, **changed_fields):
"""
Changes a given `changed_fields` on each object in the queryset, saves objects
and returns the changed objects in the queryset.
"""
bulk_change_and_save(self, update_only_changed_fields=update_only_changed_fields, **changed_fields)
return self.filter() |
def log_file(self):
"""The path to the log file for this job.
"""
log_file = self.get('log')
if not log_file:
log_file = '%s.log' % (self.name)
self.set('log', log_file)
return os.path.join(self.initial_dir, self.get('log')) | The path to the log file for this job. | Below is the instruction that describes the task:
### Input:
The path to the log file for this job.
### Response:
def log_file(self):
"""The path to the log file for this job.
"""
log_file = self.get('log')
if not log_file:
log_file = '%s.log' % (self.name)
self.set('log', log_file)
return os.path.join(self.initial_dir, self.get('log')) |
def avg_bp_from_range(self, bp):
""" Helper function - FastQC often gives base pair ranges (eg. 10-15)
which are not helpful when plotting. This returns the average from such
ranges as an int, which is helpful. If not a range, just returns the int """
try:
if '-' in bp:
maxlen = float(bp.split("-",1)[1])
minlen = float(bp.split("-",1)[0])
bp = ((maxlen - minlen)/2) + minlen
except TypeError:
pass
return(int(bp)) | Helper function - FastQC often gives base pair ranges (eg. 10-15)
which are not helpful when plotting. This returns the average from such
ranges as an int, which is helpful. If not a range, just returns the int | Below is the instruction that describes the task:
### Input:
Helper function - FastQC often gives base pair ranges (eg. 10-15)
which are not helpful when plotting. This returns the average from such
ranges as an int, which is helpful. If not a range, just returns the int
### Response:
def avg_bp_from_range(self, bp):
""" Helper function - FastQC often gives base pair ranges (eg. 10-15)
which are not helpful when plotting. This returns the average from such
ranges as an int, which is helpful. If not a range, just returns the int """
try:
if '-' in bp:
maxlen = float(bp.split("-",1)[1])
minlen = float(bp.split("-",1)[0])
bp = ((maxlen - minlen)/2) + minlen
except TypeError:
pass
return(int(bp)) |
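A standalone, illustrative adaptation of the range-averaging logic above (the original is a method, so `self` is dropped here purely so the sketch can run on its own; the helper name is not part of the source module):

def avg_bp_midpoint(bp):
    # Mirrors avg_bp_from_range: '10-15' -> midpoint 12.5 -> int 12; non-range input passes through.
    try:
        if '-' in bp:
            maxlen = float(bp.split('-', 1)[1])
            minlen = float(bp.split('-', 1)[0])
            bp = ((maxlen - minlen) / 2) + minlen
    except TypeError:
        pass
    return int(bp)

print(avg_bp_midpoint('10-15'))  # 12
print(avg_bp_midpoint(300))      # 300 ('-' in an int raises TypeError, which is swallowed)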
def human2bytes(s):
"""
>>> human2bytes('1M')
1048576
>>> human2bytes('1G')
1073741824
"""
symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
letter = s[-1:].strip().upper()
num = s[:-1]
assert num.isdigit() and letter in symbols, s
num = float(num)
prefix = {symbols[0]: 1}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
return int(num * prefix[letter]) | >>> human2bytes('1M')
1048576
>>> human2bytes('1G')
1073741824 | Below is the instruction that describes the task:
### Input:
>>> human2bytes('1M')
1048576
>>> human2bytes('1G')
1073741824
### Response:
def human2bytes(s):
"""
>>> human2bytes('1M')
1048576
>>> human2bytes('1G')
1073741824
"""
symbols = ('B', 'K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
letter = s[-1:].strip().upper()
num = s[:-1]
assert num.isdigit() and letter in symbols, s
num = float(num)
prefix = {symbols[0]: 1}
for i, s in enumerate(symbols[1:]):
prefix[s] = 1 << (i + 1) * 10
return int(num * prefix[letter]) |
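A brief usage sketch, assuming the human2bytes function defined above is in scope; the expected values follow directly from its 1 << (i + 1) * 10 prefix table.

# human2bytes (above) is assumed to be importable/defined in this scope.
assert human2bytes('1B') == 1
assert human2bytes('1K') == 1024
assert human2bytes('1M') == 1024 ** 2
assert human2bytes('3G') == 3 * 1024 ** 3
print('conversions match the prefix table')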
def parseDateText(self, dateString):
"""
Parse long-form date strings::
'May 31st, 2006'
'Jan 1st'
'July 2006'
@type dateString: string
@param dateString: text to convert to a datetime
@rtype: struct_time
@return: calculated C{struct_time} value of dateString
"""
yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
currentMth = mth
currentDy = dy
s = dateString.lower()
m = self.ptc.CRE_DATE3.search(s)
mth = m.group('mthname')
mth = self.ptc.MonthOffsets[mth]
if m.group('day') != None:
dy = int(m.group('day'))
else:
dy = 1
if m.group('year') != None:
yr = int(m.group('year'))
# birthday epoch constraint
if yr < self.ptc.BirthdayEpoch:
yr += 2000
elif yr < 100:
yr += 1900
elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
# if that day and month have already passed in this year,
# then increment the year by 1
yr += 1
if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
else:
# Return current time if date string is invalid
self.dateFlag = 0
self.timeFlag = 0
sourceTime = time.localtime()
return sourceTime | Parse long-form date strings::
'May 31st, 2006'
'Jan 1st'
'July 2006'
@type dateString: string
@param dateString: text to convert to a datetime
@rtype: struct_time
@return: calculated C{struct_time} value of dateString | Below is the instruction that describes the task:
### Input:
Parse long-form date strings::
'May 31st, 2006'
'Jan 1st'
'July 2006'
@type dateString: string
@param dateString: text to convert to a datetime
@rtype: struct_time
@return: calculated C{struct_time} value of dateString
### Response:
def parseDateText(self, dateString):
"""
Parse long-form date strings::
'May 31st, 2006'
'Jan 1st'
'July 2006'
@type dateString: string
@param dateString: text to convert to a datetime
@rtype: struct_time
@return: calculated C{struct_time} value of dateString
"""
yr, mth, dy, hr, mn, sec, wd, yd, isdst = time.localtime()
currentMth = mth
currentDy = dy
s = dateString.lower()
m = self.ptc.CRE_DATE3.search(s)
mth = m.group('mthname')
mth = self.ptc.MonthOffsets[mth]
if m.group('day') != None:
dy = int(m.group('day'))
else:
dy = 1
if m.group('year') != None:
yr = int(m.group('year'))
# birthday epoch constraint
if yr < self.ptc.BirthdayEpoch:
yr += 2000
elif yr < 100:
yr += 1900
elif (mth < currentMth) or (mth == currentMth and dy < currentDy):
# if that day and month have already passed in this year,
# then increment the year by 1
yr += 1
if dy > 0 and dy <= self.ptc.daysInMonth(mth, yr):
sourceTime = (yr, mth, dy, hr, mn, sec, wd, yd, isdst)
else:
# Return current time if date string is invalid
self.dateFlag = 0
self.timeFlag = 0
sourceTime = time.localtime()
return sourceTime |
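A hedged usage sketch through the parsedatetime package's public Calendar API (this method appears to be parsedatetime internals; parse() is assumed to dispatch long-form dates such as those in the docstring to it):

import parsedatetime

cal = parsedatetime.Calendar()
result, parse_status = cal.parse('May 31st, 2006')
print(result[:3], parse_status)  # expected (2006, 5, 31) with a non-zero status for a recognised date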
def generic_ref_formatter(view, context, model, name, lazy=False):
"""
For GenericReferenceField and LazyGenericReferenceField
See Also
--------
diff_formatter
"""
try:
if lazy:
rel_model = getattr(model, name).fetch()
else:
rel_model = getattr(model, name)
except (mongoengine.DoesNotExist, AttributeError) as e:
# custom_field_type_formatters seems to fix the issue of stale references
# crashing pages, since it intercepts the display of all ReferenceField's.
return Markup(
'<span class="label label-danger">Error</span> <small>%s</small>' % e
)
if rel_model is None:
return ''
try:
return Markup(
'<a href="%s">%s</a>'
% (
url_for(
# Flask-Admin creates URL's namespaced w/ model class name, lowercase.
'%s.details_view' % rel_model.__class__.__name__.lower(),
id=rel_model.id,
),
rel_model,
)
)
except werkzeug.routing.BuildError as e:
return Markup(
'<span class="label label-danger">Error</span> <small>%s</small>' % e
) | For GenericReferenceField and LazyGenericReferenceField
See Also
--------
diff_formatter | Below is the instruction that describes the task:
### Input:
For GenericReferenceField and LazyGenericReferenceField
See Also
--------
diff_formatter
### Response:
def generic_ref_formatter(view, context, model, name, lazy=False):
"""
For GenericReferenceField and LazyGenericReferenceField
See Also
--------
diff_formatter
"""
try:
if lazy:
rel_model = getattr(model, name).fetch()
else:
rel_model = getattr(model, name)
except (mongoengine.DoesNotExist, AttributeError) as e:
# custom_field_type_formatters seems to fix the issue of stale references
# crashing pages, since it intercepts the display of all ReferenceField's.
return Markup(
'<span class="label label-danger">Error</span> <small>%s</small>' % e
)
if rel_model is None:
return ''
try:
return Markup(
'<a href="%s">%s</a>'
% (
url_for(
# Flask-Admin creates URL's namespaced w/ model class name, lowercase.
'%s.details_view' % rel_model.__class__.__name__.lower(),
id=rel_model.id,
),
rel_model,
)
)
except werkzeug.routing.BuildError as e:
return Markup(
'<span class="label label-danger">Error</span> <small>%s</small>' % e
) |
def compose(self, *args, **kwargs):
"""
Compose layer and masks (mask, vector mask, and clipping layers).
:return: :py:class:`PIL.Image`, or `None` if the layer has no pixel.
"""
from psd_tools.api.composer import compose_layer
if self.bbox == (0, 0, 0, 0):
return None
return compose_layer(self, *args, **kwargs) | Compose layer and masks (mask, vector mask, and clipping layers).
:return: :py:class:`PIL.Image`, or `None` if the layer has no pixel. | Below is the instruction that describes the task:
### Input:
Compose layer and masks (mask, vector mask, and clipping layers).
:return: :py:class:`PIL.Image`, or `None` if the layer has no pixel.
### Response:
def compose(self, *args, **kwargs):
"""
Compose layer and masks (mask, vector mask, and clipping layers).
:return: :py:class:`PIL.Image`, or `None` if the layer has no pixel.
"""
from psd_tools.api.composer import compose_layer
if self.bbox == (0, 0, 0, 0):
return None
return compose_layer(self, *args, **kwargs) |
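A hedged usage sketch with psd-tools (the file names are placeholders; PSDImage.open and layer indexing are the package's documented entry points, and compose() is the method shown above):

from psd_tools import PSDImage

psd = PSDImage.open('example.psd')   # placeholder input file
layer = psd[0]                       # first top-level layer
image = layer.compose()              # PIL.Image, or None when the layer has no pixels
if image is not None:
    image.save('layer0.png')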
def doc(inherit=None, **kwargs):
"""Annotate the decorated view function or class with the specified Swagger
attributes.
Usage:
.. code-block:: python
@doc(tags=['pet'], description='a pet store')
def get_pet(pet_id):
return Pet.query.filter(Pet.id == pet_id).one()
:param inherit: Inherit Swagger documentation from parent classes
"""
def wrapper(func):
annotate(func, 'docs', [kwargs], inherit=inherit)
return activate(func)
return wrapper | Annotate the decorated view function or class with the specified Swagger
attributes.
Usage:
.. code-block:: python
@doc(tags=['pet'], description='a pet store')
def get_pet(pet_id):
return Pet.query.filter(Pet.id == pet_id).one()
:param inherit: Inherit Swagger documentation from parent classes | Below is the instruction that describes the task:
### Input:
Annotate the decorated view function or class with the specified Swagger
attributes.
Usage:
.. code-block:: python
@doc(tags=['pet'], description='a pet store')
def get_pet(pet_id):
return Pet.query.filter(Pet.id == pet_id).one()
:param inherit: Inherit Swagger documentation from parent classes
### Response:
def doc(inherit=None, **kwargs):
"""Annotate the decorated view function or class with the specified Swagger
attributes.
Usage:
.. code-block:: python
@doc(tags=['pet'], description='a pet store')
def get_pet(pet_id):
return Pet.query.filter(Pet.id == pet_id).one()
:param inherit: Inherit Swagger documentation from parent classes
"""
def wrapper(func):
annotate(func, 'docs', [kwargs], inherit=inherit)
return activate(func)
return wrapper |
def upload(ctx, release, rebuild):
""" Uploads distribuition files to pypi or pypitest. """
dist_path = Path(DIST_PATH)
if rebuild is False:
if not dist_path.exists() or not list(dist_path.glob('*')):
print("No distribution files found. Please run 'build' command first")
return
else:
ctx.invoke(build, force=True)
if release:
args = ['twine', 'upload', 'dist/*']
else:
repository = 'https://test.pypi.org/legacy/'
args = ['twine', 'upload', '--repository-url', repository, 'dist/*']
env = os.environ.copy()
p = subprocess.Popen(args, env=env)
p.wait() | Uploads distribution files to pypi or pypitest. | Below is the instruction that describes the task:
### Input:
Uploads distribution files to pypi or pypitest.
### Response:
def upload(ctx, release, rebuild):
""" Uploads distribuition files to pypi or pypitest. """
dist_path = Path(DIST_PATH)
if rebuild is False:
if not dist_path.exists() or not list(dist_path.glob('*')):
print("No distribution files found. Please run 'build' command first")
return
else:
ctx.invoke(build, force=True)
if release:
args = ['twine', 'upload', 'dist/*']
else:
repository = 'https://test.pypi.org/legacy/'
args = ['twine', 'upload', '--repository-url', repository, 'dist/*']
env = os.environ.copy()
p = subprocess.Popen(args, env=env)
p.wait() |
def get_fig_data_attrs(self, delimiter=None):
"""Join the data attributes with other plotters in the project
This method joins the attributes of the
:class:`~psyplot.InteractiveBase` instances in the project that
draw on the same figure as this instance does.
Parameters
----------
delimiter: str
Specifies the delimiter with which the attributes are joined. If
None, the :attr:`delimiter` attribute of this instance or (if the
latter is also None), the rcParams['texts.delimiter'] item is used.
Returns
-------
dict
A dictionary with all the meta attributes joined by the specified
`delimiter`"""
if self.project is not None:
delimiter = next(filter(lambda d: d is not None, [
delimiter, self.delimiter, self.rc['delimiter']]))
figs = self.project.figs
fig = self.ax.get_figure()
if self.plotter._initialized and fig in figs:
ret = figs[fig].joined_attrs(delimiter=delimiter,
plot_data=True)
else:
ret = self.get_enhanced_attrs(self.plotter.plot_data)
self.logger.debug(
'Can not get the figure attributes because plot has not '
'yet been initialized!')
return ret
else:
return self.get_enhanced_attrs(self.plotter.plot_data) | Join the data attributes with other plotters in the project
This method joins the attributes of the
:class:`~psyplot.InteractiveBase` instances in the project that
draw on the same figure as this instance does.
Parameters
----------
delimiter: str
Specifies the delimiter with which the attributes are joined. If
None, the :attr:`delimiter` attribute of this instance or (if the
latter is also None), the rcParams['texts.delimiter'] item is used.
Returns
-------
dict
A dictionary with all the meta attributes joined by the specified
`delimiter` | Below is the instruction that describes the task:
### Input:
Join the data attributes with other plotters in the project
This method joins the attributes of the
:class:`~psyplot.InteractiveBase` instances in the project that
draw on the same figure as this instance does.
Parameters
----------
delimiter: str
Specifies the delimiter with which the attributes are joined. If
None, the :attr:`delimiter` attribute of this instance or (if the
latter is also None), the rcParams['texts.delimiter'] item is used.
Returns
-------
dict
A dictionary with all the meta attributes joined by the specified
`delimiter`
### Response:
def get_fig_data_attrs(self, delimiter=None):
"""Join the data attributes with other plotters in the project
This method joins the attributes of the
:class:`~psyplot.InteractiveBase` instances in the project that
draw on the same figure as this instance does.
Parameters
----------
delimiter: str
Specifies the delimiter with which the attributes are joined. If
None, the :attr:`delimiter` attribute of this instance or (if the
latter is also None), the rcParams['texts.delimiter'] item is used.
Returns
-------
dict
A dictionary with all the meta attributes joined by the specified
`delimiter`"""
if self.project is not None:
delimiter = next(filter(lambda d: d is not None, [
delimiter, self.delimiter, self.rc['delimiter']]))
figs = self.project.figs
fig = self.ax.get_figure()
if self.plotter._initialized and fig in figs:
ret = figs[fig].joined_attrs(delimiter=delimiter,
plot_data=True)
else:
ret = self.get_enhanced_attrs(self.plotter.plot_data)
self.logger.debug(
'Can not get the figure attributes because plot has not '
'yet been initialized!')
return ret
else:
return self.get_enhanced_attrs(self.plotter.plot_data) |
def docoptcfg(doc, argv=None, env_prefix=None, config_option=None, ignore=None, *args, **kwargs):
"""Pass most args/kwargs to docopt. Handle `env_prefix` and `config_option`.
:raise DocoptcfgError: If `config_option` isn't found in docstring.
:raise DocoptcfgFileError: On any error while trying to read and parse config file (if enabled).
:param str doc: Docstring passed to docopt.
:param iter argv: sys.argv[1:] passed to docopt.
:param str env_prefix: Enable environment variable support, prefix of said variables.
:param str config_option: Enable config file support, docopt option defining path to config file.
:param iter ignore: Options to ignore. Default is --help and --version.
:param iter args: Additional positional arguments passed to docopt.
:param dict kwargs: Additional keyword arguments passed to docopt.
:return: Dictionary constructed by docopt and updated by docoptcfg.
:rtype: dict
"""
docopt_dict = docopt.docopt(doc, argv, *args, **kwargs)
if env_prefix is None and config_option is None:
return docopt_dict # Nothing to do.
if argv is None:
argv = sys.argv[1:]
if ignore is None:
ignore = ('--help', '--version')
settable, booleans, repeatable, short_map = settable_options(doc, argv, ignore, kwargs.get('options_first', False))
if not settable:
return docopt_dict # Nothing to do.
# Handle environment variables defaults.
if env_prefix is not None:
defaults = values_from_env(env_prefix, settable, booleans, repeatable)
settable -= set(defaults.keys()) # No longer settable by values_from_file().
docopt_dict.update(defaults)
# Handle config file defaults.
if config_option is not None:
defaults = values_from_file(
docopt_dict,
short_map.get(config_option, config_option),
settable,
booleans,
repeatable,
)
docopt_dict.update(defaults)
return docopt_dict | Pass most args/kwargs to docopt. Handle `env_prefix` and `config_option`.
:raise DocoptcfgError: If `config_option` isn't found in docstring.
:raise DocoptcfgFileError: On any error while trying to read and parse config file (if enabled).
:param str doc: Docstring passed to docopt.
:param iter argv: sys.argv[1:] passed to docopt.
:param str env_prefix: Enable environment variable support, prefix of said variables.
:param str config_option: Enable config file support, docopt option defining path to config file.
:param iter ignore: Options to ignore. Default is --help and --version.
:param iter args: Additional positional arguments passed to docopt.
:param dict kwargs: Additional keyword arguments passed to docopt.
:return: Dictionary constructed by docopt and updated by docoptcfg.
:rtype: dict | Below is the instruction that describes the task:
### Input:
Pass most args/kwargs to docopt. Handle `env_prefix` and `config_option`.
:raise DocoptcfgError: If `config_option` isn't found in docstring.
:raise DocoptcfgFileError: On any error while trying to read and parse config file (if enabled).
:param str doc: Docstring passed to docopt.
:param iter argv: sys.argv[1:] passed to docopt.
:param str env_prefix: Enable environment variable support, prefix of said variables.
:param str config_option: Enable config file support, docopt option defining path to config file.
:param iter ignore: Options to ignore. Default is --help and --version.
:param iter args: Additional positional arguments passed to docopt.
:param dict kwargs: Additional keyword arguments passed to docopt.
:return: Dictionary constructed by docopt and updated by docoptcfg.
:rtype: dict
### Response:
def docoptcfg(doc, argv=None, env_prefix=None, config_option=None, ignore=None, *args, **kwargs):
"""Pass most args/kwargs to docopt. Handle `env_prefix` and `config_option`.
:raise DocoptcfgError: If `config_option` isn't found in docstring.
:raise DocoptcfgFileError: On any error while trying to read and parse config file (if enabled).
:param str doc: Docstring passed to docopt.
:param iter argv: sys.argv[1:] passed to docopt.
:param str env_prefix: Enable environment variable support, prefix of said variables.
:param str config_option: Enable config file support, docopt option defining path to config file.
:param iter ignore: Options to ignore. Default is --help and --version.
:param iter args: Additional positional arguments passed to docopt.
:param dict kwargs: Additional keyword arguments passed to docopt.
:return: Dictionary constructed by docopt and updated by docoptcfg.
:rtype: dict
"""
docopt_dict = docopt.docopt(doc, argv, *args, **kwargs)
if env_prefix is None and config_option is None:
return docopt_dict # Nothing to do.
if argv is None:
argv = sys.argv[1:]
if ignore is None:
ignore = ('--help', '--version')
settable, booleans, repeatable, short_map = settable_options(doc, argv, ignore, kwargs.get('options_first', False))
if not settable:
return docopt_dict # Nothing to do.
# Handle environment variables defaults.
if env_prefix is not None:
defaults = values_from_env(env_prefix, settable, booleans, repeatable)
settable -= set(defaults.keys()) # No longer settable by values_from_file().
docopt_dict.update(defaults)
# Handle config file defaults.
if config_option is not None:
defaults = values_from_file(
docopt_dict,
short_map.get(config_option, config_option),
settable,
booleans,
repeatable,
)
docopt_dict.update(defaults)
return docopt_dict |
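A hedged usage sketch for docoptcfg; the usage string, option names, and environment prefix below are illustrative and not taken from any original project:

from docoptcfg import docoptcfg

DOC = """Usage: myapp [options]

Options:
    --config=FILE   Path to an INI-style config file.
    --workers=N     Number of worker processes [default: 1].
"""

# --workers may be set on the command line, via the environment (prefix 'MYAPP'),
# or in the file named by --config; the command line takes precedence.
options = docoptcfg(DOC, argv=['--workers', '4'], env_prefix='MYAPP', config_option='--config')
print(options['--workers'])  # '4'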
def without_global_scope(self, scope):
"""
Remove a registered global scope.
:param scope: The scope to remove
:type scope: Scope or str
:rtype: Builder
"""
if isinstance(scope, basestring):
del self._scopes[scope]
return self
keys = []
for key, value in self._scopes.items():
if scope == value.__class__ or isinstance(scope, value.__class__):
keys.append(key)
for key in keys:
del self._scopes[key]
return self | Remove a registered global scope.
:param scope: The scope to remove
:type scope: Scope or str
:rtype: Builder | Below is the instruction that describes the task:
### Input:
Remove a registered global scope.
:param scope: The scope to remove
:type scope: Scope or str
:rtype: Builder
### Response:
def without_global_scope(self, scope):
"""
Remove a registered global scope.
:param scope: The scope to remove
:type scope: Scope or str
:rtype: Builder
"""
if isinstance(scope, basestring):
del self._scopes[scope]
return self
keys = []
for key, value in self._scopes.items():
if scope == value.__class__ or isinstance(scope, value.__class__):
keys.append(key)
for key in keys:
del self._scopes[key]
return self |
def setup(self, loop):
"""Start the watcher, registering new watches if any."""
self._loop = loop
self._fd = LibC.inotify_init()
for alias, (path, flags) in self.requests.items():
self._setup_watch(alias, path, flags)
# We pass ownership of the fd to the transport; it will close it.
self._stream, self._transport = yield from aioutils.stream_from_fd(self._fd, loop) | Start the watcher, registering new watches if any. | Below is the instruction that describes the task:
### Input:
Start the watcher, registering new watches if any.
### Response:
def setup(self, loop):
"""Start the watcher, registering new watches if any."""
self._loop = loop
self._fd = LibC.inotify_init()
for alias, (path, flags) in self.requests.items():
self._setup_watch(alias, path, flags)
# We pass ownership of the fd to the transport; it will close it.
self._stream, self._transport = yield from aioutils.stream_from_fd(self._fd, loop) |
def _get_types(self):
""" extracts the needed types from the configspace for faster retrival later
type = 0 - numerical (continuous or integer) parameter
type >=1 - categorical parameter
TODO: figure out a way to properly handle ordinal parameters
"""
types = []
num_values = []
for hp in self.configspace.get_hyperparameters():
#print(hp)
if isinstance(hp, CS.CategoricalHyperparameter):
types.append('U')
num_values.append(len(hp.choices))
elif isinstance(hp, CS.UniformIntegerHyperparameter):
types.append('I')
num_values.append((hp.upper - hp.lower + 1))
elif isinstance(hp, CS.UniformFloatHyperparameter):
types.append('C')
num_values.append(np.inf)
elif isinstance(hp, CS.OrdinalHyperparameter):
types.append('O')
num_values.append(len(hp.sequence))
else:
raise ValueError('Unsupported Parametertype %s'%type(hp))
return(types, num_values) | extracts the needed types from the configspace for faster retrieval later
type = 0 - numerical (continuous or integer) parameter
type >=1 - categorical parameter
TODO: figure out a way to properly handle ordinal parameters | Below is the instruction that describes the task:
### Input:
extracts the needed types from the configspace for faster retrieval later
type = 0 - numerical (continuous or integer) parameter
type >=1 - categorical parameter
TODO: figure out a way to properly handle ordinal parameters
### Response:
def _get_types(self):
""" extracts the needed types from the configspace for faster retrival later
type = 0 - numerical (continuous or integer) parameter
type >=1 - categorical parameter
TODO: figure out a way to properly handle ordinal parameters
"""
types = []
num_values = []
for hp in self.configspace.get_hyperparameters():
#print(hp)
if isinstance(hp, CS.CategoricalHyperparameter):
types.append('U')
num_values.append(len(hp.choices))
elif isinstance(hp, CS.UniformIntegerHyperparameter):
types.append('I')
num_values.append((hp.upper - hp.lower + 1))
elif isinstance(hp, CS.UniformFloatHyperparameter):
types.append('C')
num_values.append(np.inf)
elif isinstance(hp, CS.OrdinalHyperparameter):
types.append('O')
num_values.append(len(hp.sequence))
else:
raise ValueError('Unsupported Parametertype %s'%type(hp))
return(types, num_values) |
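An illustrative ConfigSpace setup that exercises each branch of _get_types above (it assumes the ConfigSpace package imported as CS, exactly as in the original module; the hyperparameter names are placeholders):

import ConfigSpace as CS

cs = CS.ConfigurationSpace()
cs.add_hyperparameter(CS.CategoricalHyperparameter('optimizer', ['adam', 'sgd']))  # -> 'U', 2 choices
cs.add_hyperparameter(CS.UniformIntegerHyperparameter('num_layers', 1, 4))         # -> 'I', 4 values
cs.add_hyperparameter(CS.UniformFloatHyperparameter('lr', 1e-4, 1e-1))             # -> 'C', inf
cs.add_hyperparameter(CS.OrdinalHyperparameter('batch_size', [32, 64, 128]))       # -> 'O', 3 steps
# An object whose self.configspace is `cs` would get one (type, size) pair per
# hyperparameter from _get_types(), in whatever order get_hyperparameters() returns them.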
def tensors_to(tensors, *args, **kwargs):
""" Apply ``torch.Tensor.to`` to tensors in a generic data structure.
Inspired by:
https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31
Args:
tensors (tensor, dict, list, namedtuple or tuple): Data structure with tensor values to
move.
*args: Arguments passed to ``torch.Tensor.to``.
**kwargs: Keyword arguments passed to ``torch.Tensor.to``.
Example use case:
This is useful as a complementary function to ``collate_tensors``. Following collating,
it's important to move your tensors to the appropriate device.
Returns:
The inputted ``tensors`` with ``torch.Tensor.to`` applied.
Example:
>>> import torch
>>> batch = [
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... ]
>>> tensors_to(batch, torch.device('cpu')) # doctest: +ELLIPSIS
[{'column_a': tensor(...}]
"""
if torch.is_tensor(tensors):
return tensors.to(*args, **kwargs)
elif isinstance(tensors, dict):
return {k: tensors_to(v, *args, **kwargs) for k, v in tensors.items()}
elif hasattr(tensors, '_asdict') and isinstance(tensors, tuple): # Handle ``namedtuple``
return tensors.__class__(**tensors_to(tensors._asdict(), *args, **kwargs))
elif isinstance(tensors, list):
return [tensors_to(t, *args, **kwargs) for t in tensors]
elif isinstance(tensors, tuple):
return tuple([tensors_to(t, *args, **kwargs) for t in tensors])
else:
return tensors | Apply ``torch.Tensor.to`` to tensors in a generic data structure.
Inspired by:
https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31
Args:
tensors (tensor, dict, list, namedtuple or tuple): Data structure with tensor values to
move.
*args: Arguments passed to ``torch.Tensor.to``.
**kwargs: Keyword arguments passed to ``torch.Tensor.to``.
Example use case:
This is useful as a complementary function to ``collate_tensors``. Following collating,
it's important to move your tensors to the appropriate device.
Returns:
The inputted ``tensors`` with ``torch.Tensor.to`` applied.
Example:
>>> import torch
>>> batch = [
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... ]
>>> tensors_to(batch, torch.device('cpu')) # doctest: +ELLIPSIS
[{'column_a': tensor(...}] | Below is the instruction that describes the task:
### Input:
Apply ``torch.Tensor.to`` to tensors in a generic data structure.
Inspired by:
https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31
Args:
tensors (tensor, dict, list, namedtuple or tuple): Data structure with tensor values to
move.
*args: Arguments passed to ``torch.Tensor.to``.
**kwargs: Keyword arguments passed to ``torch.Tensor.to``.
Example use case:
This is useful as a complementary function to ``collate_tensors``. Following collating,
it's important to move your tensors to the appropriate device.
Returns:
The inputted ``tensors`` with ``torch.Tensor.to`` applied.
Example:
>>> import torch
>>> batch = [
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... ]
>>> tensors_to(batch, torch.device('cpu')) # doctest: +ELLIPSIS
[{'column_a': tensor(...}]
### Response:
def tensors_to(tensors, *args, **kwargs):
""" Apply ``torch.Tensor.to`` to tensors in a generic data structure.
Inspired by:
https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py#L31
Args:
tensors (tensor, dict, list, namedtuple or tuple): Data structure with tensor values to
move.
*args: Arguments passed to ``torch.Tensor.to``.
**kwargs: Keyword arguments passed to ``torch.Tensor.to``.
Example use case:
This is useful as a complementary function to ``collate_tensors``. Following collating,
it's important to move your tensors to the appropriate device.
Returns:
The inputted ``tensors`` with ``torch.Tensor.to`` applied.
Example:
>>> import torch
>>> batch = [
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... { 'column_a': torch.randn(5), 'column_b': torch.randn(5) },
... ]
>>> tensors_to(batch, torch.device('cpu')) # doctest: +ELLIPSIS
[{'column_a': tensor(...}]
"""
if torch.is_tensor(tensors):
return tensors.to(*args, **kwargs)
elif isinstance(tensors, dict):
return {k: tensors_to(v, *args, **kwargs) for k, v in tensors.items()}
elif hasattr(tensors, '_asdict') and isinstance(tensors, tuple): # Handle ``namedtuple``
return tensors.__class__(**tensors_to(tensors._asdict(), *args, **kwargs))
elif isinstance(tensors, list):
return [tensors_to(t, *args, **kwargs) for t in tensors]
elif isinstance(tensors, tuple):
return tuple([tensors_to(t, *args, **kwargs) for t in tensors])
else:
return tensors |
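A short usage sketch extending the doctest above; it assumes PyTorch is installed and tensors_to (defined above) is in scope, and picks the device defensively so it also runs on CPU-only machines:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
batch = {
    'tokens': torch.randint(0, 100, (2, 5)),
    'lengths': [torch.tensor(5), torch.tensor(5)],
}
moved = tensors_to(batch, device)  # dicts and lists are walked recursively
print(moved['tokens'].device, moved['lengths'][0].device)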
def submitEntry(self):
"""Process user inputs and subit logbook entry when user clicks Submit button"""
# logType = self.logui.logType.currentText()
mcclogs, physlogs = self.selectedLogs()
success = True
if mcclogs != []:
if not self.acceptedUser("MCC"):
QMessageBox().warning(self, "Invalid User", "Please enter a valid user name!")
return
fileName = self.xmlSetup("MCC", mcclogs)
if fileName is None:
return
if not self.imagePixmap.isNull():
self.prepareImages(fileName, "MCC")
success = self.sendToLogbook(fileName, "MCC")
if physlogs != []:
for i in range(len(physlogs)):
fileName = self.xmlSetup("Physics", physlogs[i])
if fileName is None:
return
if not self.imagePixmap.isNull():
self.prepareImages(fileName, "Physics")
success_phys = self.sendToLogbook(fileName, "Physics", physlogs[i])
success = success and success_phys
self.done(success) | Process user inputs and submit logbook entry when user clicks Submit button | Below is the instruction that describes the task:
### Input:
Process user inputs and submit logbook entry when user clicks Submit button
### Response:
def submitEntry(self):
"""Process user inputs and subit logbook entry when user clicks Submit button"""
# logType = self.logui.logType.currentText()
mcclogs, physlogs = self.selectedLogs()
success = True
if mcclogs != []:
if not self.acceptedUser("MCC"):
QMessageBox().warning(self, "Invalid User", "Please enter a valid user name!")
return
fileName = self.xmlSetup("MCC", mcclogs)
if fileName is None:
return
if not self.imagePixmap.isNull():
self.prepareImages(fileName, "MCC")
success = self.sendToLogbook(fileName, "MCC")
if physlogs != []:
for i in range(len(physlogs)):
fileName = self.xmlSetup("Physics", physlogs[i])
if fileName is None:
return
if not self.imagePixmap.isNull():
self.prepareImages(fileName, "Physics")
success_phys = self.sendToLogbook(fileName, "Physics", physlogs[i])
success = success and success_phys
self.done(success) |
def n_cap(self, n_cap='acetyl', cap_dihedral=None):
"""Adds an N-terminal acetamide cap.
Notes
-----
Default behaviour is to duplicate the dihedral angle of the
succeeding residues such that the orientation of the carbonyl
of the acetyl will resemble that of the first residue. This
can be adjusted by supplying a cap_dihedral value. Currently
only acetyl cap is supported, but this structure should work
for other caps.
Parameters
----------
cap : str, optional
Type of cap to be added. Options: 'acetyl'
cap_dihedral : bool
Alternate psi angle to be used when adding the cap.
"""
if n_cap == 'acetyl':
methylacetamide = Ligand(
atoms=None, mol_code='UNK', is_hetero=True)
atoms = OrderedDict()
atoms['C'] = Atom([0.9500, -0.2290, 0.5090], 'C', res_label='C')
atoms['CA'] = Atom([0.7450, -0.9430, 1.8040], 'C', res_label='CA')
atoms['O'] = Atom([0.1660, -2.0230, 1.8130], 'O', res_label='O')
atoms['N'] = Atom([1.2540, -0.2750, 2.9010], 'N', res_label='N')
atoms['CME'] = Atom([1.1630, -0.7870, 4.2500],
'C', res_label='CME')
# these coordinates seem ok, but could review
# and use a different fragment if necessary
methylacetamide.atoms = atoms
s1, e1, s2, e2 = [
x._vector for x in [methylacetamide['N'],
methylacetamide['CME'],
self._monomers[0]['N'],
self._monomers[0]['CA']]]
translation, angle, axis, point = find_transformations(
s1, e1, s2, e2, radians=False)
methylacetamide.rotate(
angle=angle, axis=axis, point=point, radians=False)
methylacetamide.translate(vector=translation)
start_angle = dihedral(
methylacetamide['C'], self._monomers[0]['N'],
self._monomers[0]['CA'], self._monomers[0]['C'])
ref_angle = dihedral(
self._monomers[0]['C'], self._monomers[1]['N'],
self._monomers[1]['CA'], self._monomers[1]['C'])
if cap_dihedral is not None:
methylacetamide.rotate(ref_angle - start_angle + cap_dihedral,
axis=methylacetamide['N']._vector -
self._monomers[0]['CA']._vector,
point=methylacetamide['N']._vector)
else:
methylacetamide.rotate(ref_angle - start_angle,
axis=methylacetamide['N']._vector -
self._monomers[0]['CA']._vector,
point=methylacetamide['N']._vector)
if self.ligands is None:
self.ligands = LigandGroup(ampal_parent=self)
acetamide = Ligand(mol_code='ACM', ampal_parent=self.ligands)
acetamide_atoms = OrderedDict()
acetamide_atoms['C'] = atoms['C']
acetamide_atoms['CA'] = atoms['CA']
acetamide_atoms['O'] = atoms['O']
for atom in acetamide_atoms.values():
atom.ampal_parent = acetamide
acetamide.atoms = acetamide_atoms
self.ligands.append(acetamide)
else:
pass # just in case we want to build different caps in later
self.tags['assigned_ff'] = False
return | Adds an N-terminal acetamide cap.
Notes
-----
Default behaviour is to duplicate the dihedral angle of the
succeeding residues such that the orientation of the carbonyl
of the acetyl will resemble that of the first residue. This
can be adjusted by supplying a cap_dihedral value. Currently
only acetyl cap is supported, but this structure should work
for other caps.
Parameters
----------
cap : str, optional
Type of cap to be added. Options: 'acetyl'
cap_dihedral : bool
Alternate psi angle to be used when adding the cap. | Below is the instruction that describes the task:
### Input:
Adds an N-terminal acetamide cap.
Notes
-----
Default behaviour is to duplicate the dihedral angle of the
succeeding residues such that the orientation of the carbonyl
of the acetyl will resemble that of the first residue. This
can be adjusted by supplying a cap_dihedral value. Currently
only acetyl cap is supported, but this structure should work
for other caps.
Parameters
----------
cap : str, optional
Type of cap to be added. Options: 'acetyl'
cap_dihedral : bool
Alternate psi angle to be used when adding the cap.
### Response:
def n_cap(self, n_cap='acetyl', cap_dihedral=None):
"""Adds an N-terminal acetamide cap.
Notes
-----
Default behaviour is to duplicate the dihedral angle of the
succeeding residues such that the orientation of the carbonyl
of the acetyl will resemble that of the first residue. This
can be adjusted by supplying a cap_dihedral value. Currently
only acetyl cap is supported, but this structure should work
for other caps.
Parameters
----------
cap : str, optional
Type of cap to be added. Options: 'acetyl'
cap_dihedral : bool
Alternate psi angle to be used when adding the cap.
"""
if n_cap == 'acetyl':
methylacetamide = Ligand(
atoms=None, mol_code='UNK', is_hetero=True)
atoms = OrderedDict()
atoms['C'] = Atom([0.9500, -0.2290, 0.5090], 'C', res_label='C')
atoms['CA'] = Atom([0.7450, -0.9430, 1.8040], 'C', res_label='CA')
atoms['O'] = Atom([0.1660, -2.0230, 1.8130], 'O', res_label='O')
atoms['N'] = Atom([1.2540, -0.2750, 2.9010], 'N', res_label='N')
atoms['CME'] = Atom([1.1630, -0.7870, 4.2500],
'C', res_label='CME')
# these coordinates seem ok, but could review
# and use a different fragment if necessary
methylacetamide.atoms = atoms
s1, e1, s2, e2 = [
x._vector for x in [methylacetamide['N'],
methylacetamide['CME'],
self._monomers[0]['N'],
self._monomers[0]['CA']]]
translation, angle, axis, point = find_transformations(
s1, e1, s2, e2, radians=False)
methylacetamide.rotate(
angle=angle, axis=axis, point=point, radians=False)
methylacetamide.translate(vector=translation)
start_angle = dihedral(
methylacetamide['C'], self._monomers[0]['N'],
self._monomers[0]['CA'], self._monomers[0]['C'])
ref_angle = dihedral(
self._monomers[0]['C'], self._monomers[1]['N'],
self._monomers[1]['CA'], self._monomers[1]['C'])
if cap_dihedral is not None:
methylacetamide.rotate(ref_angle - start_angle + cap_dihedral,
axis=methylacetamide['N']._vector -
self._monomers[0]['CA']._vector,
point=methylacetamide['N']._vector)
else:
methylacetamide.rotate(ref_angle - start_angle,
axis=methylacetamide['N']._vector -
self._monomers[0]['CA']._vector,
point=methylacetamide['N']._vector)
if self.ligands is None:
self.ligands = LigandGroup(ampal_parent=self)
acetamide = Ligand(mol_code='ACM', ampal_parent=self.ligands)
acetamide_atoms = OrderedDict()
acetamide_atoms['C'] = atoms['C']
acetamide_atoms['CA'] = atoms['CA']
acetamide_atoms['O'] = atoms['O']
for atom in acetamide_atoms.values():
atom.ampal_parent = acetamide
acetamide.atoms = acetamide_atoms
self.ligands.append(acetamide)
else:
pass # just in case we want to build different caps in later
self.tags['assigned_ff'] = False
return |
def get_bytes(self, bridge):
"""
Gets the full command as bytes.
:param bridge: The bridge, to which the command should be sent.
"""
if self.cmd_2 is not None:
cmd = [self.cmd_1, self.cmd_2]
else:
cmd = [self.cmd_1, self.SUFFIX_BYTE]
if bridge.version < self.BRIDGE_SHORT_VERSION_MIN:
cmd.append(self.BRIDGE_LONG_BYTE)
return bytearray(cmd) | Gets the full command as bytes.
:param bridge: The bridge, to which the command should be sent. | Below is the instruction that describes the task:
### Input:
Gets the full command as bytes.
:param bridge: The bridge, to which the command should be sent.
### Response:
def get_bytes(self, bridge):
"""
Gets the full command as bytes.
:param bridge: The bridge, to which the command should be sent.
"""
if self.cmd_2 is not None:
cmd = [self.cmd_1, self.cmd_2]
else:
cmd = [self.cmd_1, self.SUFFIX_BYTE]
if bridge.version < self.BRIDGE_SHORT_VERSION_MIN:
cmd.append(self.BRIDGE_LONG_BYTE)
return bytearray(cmd) |
def new(project_name):
"""Creates a new project"""
try:
locale.setlocale(locale.LC_ALL, '')
except:
print("Warning: Unable to set locale. Expect encoding problems.")
config = utils.get_config()
config['new_project']['project_name'] = project_name
values = new_project_ui(config)
if type(values) is not str:
print('New project options:')
pprint.pprint(values)
project_dir = render.render_project(**values)
git.init_repo(project_dir, **values)
else:
print(values) | Creates a new project | Below is the instruction that describes the task:
### Input:
Creates a new project
### Response:
def new(project_name):
"""Creates a new project"""
try:
locale.setlocale(locale.LC_ALL, '')
except:
print("Warning: Unable to set locale. Expect encoding problems.")
config = utils.get_config()
config['new_project']['project_name'] = project_name
values = new_project_ui(config)
if type(values) is not str:
print('New project options:')
pprint.pprint(values)
project_dir = render.render_project(**values)
git.init_repo(project_dir, **values)
else:
print(values) |
def word_under_mouse_cursor(self):
"""
Selects the word under the **mouse** cursor.
:return: A QTextCursor with the word under mouse cursor selected.
"""
editor = self._editor
text_cursor = editor.cursorForPosition(editor._last_mouse_pos)
text_cursor = self.word_under_cursor(True, text_cursor)
return text_cursor | Selects the word under the **mouse** cursor.
:return: A QTextCursor with the word under mouse cursor selected. | Below is the instruction that describes the task:
### Input:
Selects the word under the **mouse** cursor.
:return: A QTextCursor with the word under mouse cursor selected.
### Response:
def word_under_mouse_cursor(self):
"""
Selects the word under the **mouse** cursor.
:return: A QTextCursor with the word under mouse cursor selected.
"""
editor = self._editor
text_cursor = editor.cursorForPosition(editor._last_mouse_pos)
text_cursor = self.word_under_cursor(True, text_cursor)
return text_cursor |
def comments(self, ticket, include_inline_images=False):
"""
Retrieve the comments for a ticket.
:param ticket: Ticket object or id
:param include_inline_images: Boolean. If `True`, inline image attachments will be
returned in each comments' `attachments` field alongside non-inline attachments
"""
return self._query_zendesk(self.endpoint.comments, 'comment', id=ticket, include_inline_images=repr(include_inline_images).lower()) | Retrieve the comments for a ticket.
:param ticket: Ticket object or id
:param include_inline_images: Boolean. If `True`, inline image attachments will be
returned in each comments' `attachments` field alongside non-inline attachments | Below is the instruction that describes the task:
### Input:
Retrieve the comments for a ticket.
:param ticket: Ticket object or id
:param include_inline_images: Boolean. If `True`, inline image attachments will be
returned in each comments' `attachments` field alongside non-inline attachments
### Response:
def comments(self, ticket, include_inline_images=False):
"""
Retrieve the comments for a ticket.
:param ticket: Ticket object or id
:param include_inline_images: Boolean. If `True`, inline image attachments will be
returned in each comments' `attachments` field alongside non-inline attachments
"""
return self._query_zendesk(self.endpoint.comments, 'comment', id=ticket, include_inline_images=repr(include_inline_images).lower()) |
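A hedged usage sketch with the Zenpy client (subdomain, credentials, and the ticket id are placeholders):

from zenpy import Zenpy

client = Zenpy(subdomain='example', email='agent@example.com', token='api-token')
for comment in client.tickets.comments(ticket=123, include_inline_images=True):
    print(comment.id, comment.body)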
def add_granule(self, data, store, workspace=None):
'''Harvest/add a granule into an existing imagemosaic'''
ext = os.path.splitext(data)[-1]
if ext == ".zip":
type = "file.imagemosaic"
upload_data = open(data, 'rb')
headers = {
"Content-type": "application/zip",
"Accept": "application/xml"
}
else:
type = "external.imagemosaic"
upload_data = data if data.startswith("file:") else "file:{data}".format(data=data)
headers = {
"Content-type": "text/plain",
"Accept": "application/xml"
}
params = dict()
workspace_name = workspace
if isinstance(store, basestring):
store_name = store
else:
store_name = store.name
workspace_name = store.workspace.name
if workspace_name is None:
raise ValueError("Must specify workspace")
url = build_url(
self.service_url,
[
"workspaces",
workspace_name,
"coveragestores",
store_name,
type
],
params
)
try:
resp = self.http_request(url, method='post', data=upload_data, headers=headers)
if resp.status_code != 202:
FailedRequestError('Failed to add granule to mosaic {} : {}, {}'.format(store, resp.status_code, resp.text))
self._cache.clear()
finally:
if hasattr(upload_data, "close"):
upload_data.close()
# maybe return a list of all granules?
return None | Harvest/add a granule into an existing imagemosaic | Below is the instruction that describes the task:
### Input:
Harvest/add a granule into an existing imagemosaic
### Response:
def add_granule(self, data, store, workspace=None):
'''Harvest/add a granule into an existing imagemosaic'''
ext = os.path.splitext(data)[-1]
if ext == ".zip":
type = "file.imagemosaic"
upload_data = open(data, 'rb')
headers = {
"Content-type": "application/zip",
"Accept": "application/xml"
}
else:
type = "external.imagemosaic"
upload_data = data if data.startswith("file:") else "file:{data}".format(data=data)
headers = {
"Content-type": "text/plain",
"Accept": "application/xml"
}
params = dict()
workspace_name = workspace
if isinstance(store, basestring):
store_name = store
else:
store_name = store.name
workspace_name = store.workspace.name
if workspace_name is None:
raise ValueError("Must specify workspace")
url = build_url(
self.service_url,
[
"workspaces",
workspace_name,
"coveragestores",
store_name,
type
],
params
)
try:
resp = self.http_request(url, method='post', data=upload_data, headers=headers)
if resp.status_code != 202:
FailedRequestError('Failed to add granule to mosaic {} : {}, {}'.format(store, resp.status_code, resp.text))
self._cache.clear()
finally:
if hasattr(upload_data, "close"):
upload_data.close()
# maybe return a list of all granules?
return None |
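A hedged usage sketch with gsconfig's Catalog (the GeoServer URL, credentials, store and workspace names, and file paths are placeholders):

from geoserver.catalog import Catalog

cat = Catalog('http://localhost:8080/geoserver/rest', username='admin', password='geoserver')
# A granule already on the GeoServer host is harvested in place (external.imagemosaic) ...
cat.add_granule('file:///data/mosaic/tile_2019.tif', 'landsat_mosaic', workspace='topp')
# ... while a local .zip is opened and uploaded (file.imagemosaic), per the branches above.
cat.add_granule('/tmp/tile_2020.zip', 'landsat_mosaic', workspace='topp')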
def set_user_session(user):
"""
Set user session
:param user: user object should be model instance or dict
:return:
"""
from uliweb import settings, request
user_fieldname = settings.get_var('AUTH/GET_AUTH_USER_FIELDNAME', 'id')
share_session = settings.get_var('AUTH/AUTH_SHARE_USER_SESSION', False)
if isinstance(user, dict):
user_id = user[user_fieldname]
else:
user_id = getattr(user, user_fieldname)
if share_session:
cache = functions.get_cache()
key = get_user_session_key(user_id)
session_id = cache.get(key, None)
log.debug('Auth: user session user_id={}, session_id={}, key={}'.format(user_id, session_id, key))
if not session_id:
request.session.save()
log.debug('Auth: set user session mapping userid={}, '
'session_id={}, expiry time={}'.format(user_id,
request.session.key,
request.session.expiry_time))
cache.set(key, request.session.key, expire=request.session.expiry_time)
elif session_id != request.session.key:
log.debug('Auth: load oldkey={}, key={}'.format(request.session.key, session_id))
request.session.delete()
request.session.load(session_id)
if isinstance(user, dict):
request.session[_get_auth_key()] = user
else:
request.session[_get_auth_key()] = user_id
request.user = user | Set user session
:param user: user object should be model instance or dict
:return: | Below is the instruction that describes the task:
### Input:
Set user session
:param user: user object should be model instance or dict
:return:
### Response:
def set_user_session(user):
"""
Set user session
:param user: user object should be model instance or dict
:return:
"""
from uliweb import settings, request
user_fieldname = settings.get_var('AUTH/GET_AUTH_USER_FIELDNAME', 'id')
share_session = settings.get_var('AUTH/AUTH_SHARE_USER_SESSION', False)
if isinstance(user, dict):
user_id = user[user_fieldname]
else:
user_id = getattr(user, user_fieldname)
if share_session:
cache = functions.get_cache()
key = get_user_session_key(user_id)
session_id = cache.get(key, None)
log.debug('Auth: user session user_id={}, session_id={}, key={}'.format(user_id, session_id, key))
if not session_id:
request.session.save()
log.debug('Auth: set user session mapping userid={}, '
'session_id={}, expiry time={}'.format(user_id,
request.session.key,
request.session.expiry_time))
cache.set(key, request.session.key, expire=request.session.expiry_time)
elif session_id != request.session.key:
log.debug('Auth: load oldkey={}, key={}'.format(request.session.key, session_id))
request.session.delete()
request.session.load(session_id)
if isinstance(user, dict):
request.session[_get_auth_key()] = user
else:
request.session[_get_auth_key()] = user_id
request.user = user |
def read_examples(input_files, batch_size, shuffle, num_epochs=None):
"""Creates readers and queues for reading example protos."""
files = []
for e in input_files:
for path in e.split(','):
files.extend(file_io.get_matching_files(path))
thread_count = multiprocessing.cpu_count()
# The minimum number of instances in a queue from which examples are drawn
# randomly. The larger this number, the more randomness at the expense of
# higher memory requirements.
min_after_dequeue = 1000
# When batching data, the queue's capacity will be larger than the batch_size
# by some factor. The recommended formula is (num_threads + a small safety
# margin). For now, we use a single thread for reading, so this can be small.
queue_size_multiplier = thread_count + 3
# Convert num_epochs == 0 -> num_epochs is None, if necessary
num_epochs = num_epochs or None
# Build a queue of the filenames to be read.
filename_queue = tf.train.string_input_producer(files, num_epochs, shuffle)
example_id, encoded_example = tf.TextLineReader().read_up_to(
filename_queue, batch_size)
if shuffle:
capacity = min_after_dequeue + queue_size_multiplier * batch_size
return tf.train.shuffle_batch(
[example_id, encoded_example],
batch_size,
capacity,
min_after_dequeue,
enqueue_many=True,
num_threads=thread_count)
else:
capacity = queue_size_multiplier * batch_size
return tf.train.batch(
[example_id, encoded_example],
batch_size,
capacity=capacity,
enqueue_many=True,
num_threads=thread_count) | Creates readers and queues for reading example protos. | Below is the instruction that describes the task:
### Input:
Creates readers and queues for reading example protos.
### Response:
def read_examples(input_files, batch_size, shuffle, num_epochs=None):
"""Creates readers and queues for reading example protos."""
files = []
for e in input_files:
for path in e.split(','):
files.extend(file_io.get_matching_files(path))
thread_count = multiprocessing.cpu_count()
# The minimum number of instances in a queue from which examples are drawn
# randomly. The larger this number, the more randomness at the expense of
# higher memory requirements.
min_after_dequeue = 1000
# When batching data, the queue's capacity will be larger than the batch_size
# by some factor. The recommended formula is (num_threads + a small safety
# margin). For now, we use a single thread for reading, so this can be small.
queue_size_multiplier = thread_count + 3
# Convert num_epochs == 0 -> num_epochs is None, if necessary
num_epochs = num_epochs or None
# Build a queue of the filenames to be read.
filename_queue = tf.train.string_input_producer(files, num_epochs, shuffle)
example_id, encoded_example = tf.TextLineReader().read_up_to(
filename_queue, batch_size)
if shuffle:
capacity = min_after_dequeue + queue_size_multiplier * batch_size
return tf.train.shuffle_batch(
[example_id, encoded_example],
batch_size,
capacity,
min_after_dequeue,
enqueue_many=True,
num_threads=thread_count)
else:
capacity = queue_size_multiplier * batch_size
return tf.train.batch(
[example_id, encoded_example],
batch_size,
capacity=capacity,
enqueue_many=True,
num_threads=thread_count) |
def _check_pillar_exact_minions(self, expr, delimiter, greedy):
'''
Return the minions found by looking via pillar
'''
return self._check_cache_minions(expr,
delimiter,
greedy,
'pillar',
exact_match=True) | Return the minions found by looking via pillar | Below is the instruction that describes the task:
### Input:
Return the minions found by looking via pillar
### Response:
def _check_pillar_exact_minions(self, expr, delimiter, greedy):
'''
Return the minions found by looking via pillar
'''
return self._check_cache_minions(expr,
delimiter,
greedy,
'pillar',
exact_match=True) |
def run_qaml(self):
"""
Create and run the GenomeQAML system call
"""
logging.info('Running GenomeQAML quality assessment')
qaml_call = 'classify.py -t {tf} -r {rf}'\
.format(tf=self.qaml_path,
rf=self.qaml_report)
make_path(self.reportpath)
# Only attempt to assess assemblies if the report doesn't already exist
if not os.path.isfile(self.qaml_report):
# Run the system calls
out, err = run_subprocess(qaml_call)
# Acquire thread lock, and write the logs to file
self.threadlock.acquire()
write_to_logfile(qaml_call, qaml_call, self.logfile)
write_to_logfile(out, err, self.logfile)
self.threadlock.release() | Create and run the GenomeQAML system call | Below is the instruction that describes the task:
### Input:
Create and run the GenomeQAML system call
### Response:
def run_qaml(self):
"""
Create and run the GenomeQAML system call
"""
logging.info('Running GenomeQAML quality assessment')
qaml_call = 'classify.py -t {tf} -r {rf}'\
.format(tf=self.qaml_path,
rf=self.qaml_report)
make_path(self.reportpath)
# Only attempt to assess assemblies if the report doesn't already exist
if not os.path.isfile(self.qaml_report):
# Run the system calls
out, err = run_subprocess(qaml_call)
# Acquire thread lock, and write the logs to file
self.threadlock.acquire()
write_to_logfile(qaml_call, qaml_call, self.logfile)
write_to_logfile(out, err, self.logfile)
self.threadlock.release() |
def _rule_option(self):
""" Parses the production rule::
option : NAME value ';'
Returns list (name, value_list).
"""
name = self._get_token(self.RE_NAME)
value = self._rule_value()
self._expect_token(';')
return [name, value] | Parses the production rule::
option : NAME value ';'
Returns list (name, value_list). | Below is the instruction that describes the task:
### Input:
Parses the production rule::
option : NAME value ';'
Returns list (name, value_list).
### Response:
def _rule_option(self):
""" Parses the production rule::
option : NAME value ';'
Returns list (name, value_list).
"""
name = self._get_token(self.RE_NAME)
value = self._rule_value()
self._expect_token(';')
return [name, value] |
def compare_schemas(one, two):
"""Compare two structures that represents JSON schemas.
For comparison you can't use normal comparison, because in JSON schema
lists DO NOT keep order (and Python lists do), so this must be taken into
account during comparison.
Note this won't check all configurations, only the first one that seems to
match, which can lead to wrong results.
:param one: First schema to compare.
:param two: Second schema to compare.
:rtype: `bool`
"""
one = _normalize_string_type(one)
two = _normalize_string_type(two)
_assert_same_types(one, two)
if isinstance(one, list):
return _compare_lists(one, two)
elif isinstance(one, dict):
return _compare_dicts(one, two)
elif isinstance(one, SCALAR_TYPES):
return one == two
elif one is None:
return one is two
else:
raise RuntimeError('Not allowed type "{type}"'.format(
type=type(one).__name__)) | Compare two structures that represent JSON schemas.
For comparison you can't use normal comparison, because in JSON schema
lists DO NOT keep order (and Python lists do), so this must be taken into
account during comparison.
Note this won't check all configurations, only the first one that seems to
match, which can lead to wrong results.
:param one: First schema to compare.
:param two: Second schema to compare.
:rtype: `bool` | Below is the instruction that describes the task:
### Input:
Compare two structures that represent JSON schemas.
For comparison you can't use normal comparison, because in JSON schema
lists DO NOT keep order (and Python lists do), so this must be taken into
account during comparison.
Note this wont check all configurations, only first one that seems to
match, which can lead to wrong results.
:param one: First schema to compare.
:param two: Second schema to compare.
:rtype: `bool`
### Response:
def compare_schemas(one, two):
"""Compare two structures that represents JSON schemas.
For comparison you can't use normal comparison, because in JSON schema
lists DO NOT keep order (and Python lists do), so this must be taken into
account during comparison.
Note this wont check all configurations, only first one that seems to
match, which can lead to wrong results.
:param one: First schema to compare.
:param two: Second schema to compare.
:rtype: `bool`
"""
one = _normalize_string_type(one)
two = _normalize_string_type(two)
_assert_same_types(one, two)
if isinstance(one, list):
return _compare_lists(one, two)
elif isinstance(one, dict):
return _compare_dicts(one, two)
elif isinstance(one, SCALAR_TYPES):
return one == two
elif one is None:
return one is two
else:
raise RuntimeError('Not allowed type "{type}"'.format(
type=type(one).__name__)) |
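A minimal usage sketch for compare_schemas; the import path below is an assumption (it resembles the jsonmodels utilities module), and the private helpers the function calls must come along with it:
# Assumed import path -- adjust to wherever compare_schemas actually lives.
from jsonmodels.utilities import compare_schemas
left = {'type': 'object', 'required': ['id', 'name']}
right = {'type': 'object', 'required': ['name', 'id']}  # same items, different list order
print(compare_schemas(left, right))  # expected True, since list order is ignored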
def pathstrip(path, n):
""" Strip n leading components from the given path """
pathlist = [path]
while os.path.dirname(pathlist[0]) != b'':
pathlist[0:1] = os.path.split(pathlist[0])
    return b'/'.join(pathlist[n:]) | Strip n leading components from the given path | Below is the instruction that describes the task:
### Input:
Strip n leading components from the given path
### Response:
def pathstrip(path, n):
""" Strip n leading components from the given path """
pathlist = [path]
while os.path.dirname(pathlist[0]) != b'':
pathlist[0:1] = os.path.split(pathlist[0])
return b'/'.join(pathlist[n:]) |
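A quick sketch of pathstrip; note that it works on byte strings, matching the b'' literals in the body:
print(pathstrip(b'a/b/c/patch.txt', 1))  # b'b/c/patch.txt'
print(pathstrip(b'a/b/c/patch.txt', 2))  # b'c/patch.txt'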
def Main(url):
'''
Entry Point.
Args:
url: target url.
'''
# The object of Web-Scraping.
web_scrape = WebScraping()
# Execute Web-Scraping.
document = web_scrape.scrape(url)
# The object of automatic summarization with N-gram.
auto_abstractor = NgramAutoAbstractor()
# n-gram object
auto_abstractor.n_gram = Ngram()
# n of n-gram
auto_abstractor.n = 3
# Set tokenizer. This is japanese tokenizer with MeCab.
auto_abstractor.tokenizable_doc = MeCabTokenizer()
# Object of abstracting and filtering document.
abstractable_doc = TopNRankAbstractor()
# Execute summarization.
result_dict = auto_abstractor.summarize(document, abstractable_doc)
# Output 3 summarized sentences.
limit = 3
i = 1
for sentence in result_dict["summarize_result"]:
print(sentence)
if i >= limit:
break
i += 1 | Entry Point.
Args:
        url: target url. | Below is the instruction that describes the task:
### Input:
Entry Point.
Args:
url: target url.
### Response:
def Main(url):
'''
Entry Point.
Args:
url: target url.
'''
# The object of Web-Scraping.
web_scrape = WebScraping()
# Execute Web-Scraping.
document = web_scrape.scrape(url)
# The object of automatic summarization with N-gram.
auto_abstractor = NgramAutoAbstractor()
# n-gram object
auto_abstractor.n_gram = Ngram()
# n of n-gram
auto_abstractor.n = 3
# Set tokenizer. This is japanese tokenizer with MeCab.
auto_abstractor.tokenizable_doc = MeCabTokenizer()
# Object of abstracting and filtering document.
abstractable_doc = TopNRankAbstractor()
# Execute summarization.
result_dict = auto_abstractor.summarize(document, abstractable_doc)
# Output 3 summarized sentences.
limit = 3
i = 1
for sentence in result_dict["summarize_result"]:
print(sentence)
if i >= limit:
break
i += 1 |
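The entry point above only needs a URL; running it assumes the pysummarization classes imported by the script plus a working MeCab installation:
# The URL is a placeholder -- any article page the WebScraping class can fetch will do.
Main('https://example.com/some-japanese-article')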
def write(filename, data):
"""
Create a new BibTeX file.
:param filename: The name of the BibTeX file to write.
:param data: A ``bibtexparser.BibDatabase`` object.
"""
with open(filename, 'w') as fh:
fh.write(bibdatabase2bibtex(data)) | Create a new BibTeX file.
:param filename: The name of the BibTeX file to write.
    :param data: A ``bibtexparser.BibDatabase`` object. | Below is the instruction that describes the task:
### Input:
Create a new BibTeX file.
:param filename: The name of the BibTeX file to write.
:param data: A ``bibtexparser.BibDatabase`` object.
### Response:
def write(filename, data):
"""
Create a new BibTeX file.
:param filename: The name of the BibTeX file to write.
:param data: A ``bibtexparser.BibDatabase`` object.
"""
with open(filename, 'w') as fh:
fh.write(bibdatabase2bibtex(data)) |
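A minimal round-trip sketch; it assumes the surrounding module also provides bibdatabase2bibtex, which write() calls internally:
import bibtexparser
entry = "@article{doe2020, title = {A Demo}, author = {Doe, Jane}, year = {2020}}"
database = bibtexparser.loads(entry)  # a bibtexparser BibDatabase instance
write('demo.bib', database)           # serialises the database to demo.bib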
def getmoduleinfo(path):
"""Get the module name, suffix, mode, and module type for a given file."""
filename = os.path.basename(path)
suffixes = map(lambda (suffix, mode, mtype):
(-len(suffix), suffix, mode, mtype), imp.get_suffixes())
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix, mode, mtype in suffixes:
if filename[neglen:] == suffix:
            return filename[:neglen], suffix, mode, mtype | Get the module name, suffix, mode, and module type for a given file. | Below is the instruction that describes the task:
### Input:
Get the module name, suffix, mode, and module type for a given file.
### Response:
def getmoduleinfo(path):
"""Get the module name, suffix, mode, and module type for a given file."""
filename = os.path.basename(path)
suffixes = map(lambda (suffix, mode, mtype):
(-len(suffix), suffix, mode, mtype), imp.get_suffixes())
suffixes.sort() # try longest suffixes first, in case they overlap
for neglen, suffix, mode, mtype in suffixes:
if filename[neglen:] == suffix:
return filename[:neglen], suffix, mode, mtype |
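Worth noting: the tuple-unpacking lambda makes this Python 2-only code. A sketch of how it was called (the exact mode/type values come from imp.get_suffixes() and may vary by build):
print(getmoduleinfo('/usr/lib/python2.7/os.py'))
# expected something like ('os', '.py', 'U', 1), where 1 == imp.PY_SOURCE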
def spawn_managed_host(config_file, manager, connect_on_start=True):
"""
Spawns a managed host, if it is not already running
"""
data = manager.request_host_status(config_file)
is_running = data['started']
# Managed hosts run as persistent processes, so it may already be running
if is_running:
host_status = json.loads(data['host']['output'])
logfile = data['host']['logfile']
else:
data = manager.start_host(config_file)
host_status = json.loads(data['output'])
logfile = data['logfile']
host = JSHost(
status=host_status,
logfile=logfile,
config_file=config_file,
manager=manager
)
if not is_running and settings.VERBOSITY >= verbosity.PROCESS_START:
print('Started {}'.format(host.get_name()))
if connect_on_start:
host.connect()
    return host | Spawns a managed host, if it is not already running | Below is the instruction that describes the task:
### Input:
Spawns a managed host, if it is not already running
### Response:
def spawn_managed_host(config_file, manager, connect_on_start=True):
"""
Spawns a managed host, if it is not already running
"""
data = manager.request_host_status(config_file)
is_running = data['started']
# Managed hosts run as persistent processes, so it may already be running
if is_running:
host_status = json.loads(data['host']['output'])
logfile = data['host']['logfile']
else:
data = manager.start_host(config_file)
host_status = json.loads(data['output'])
logfile = data['logfile']
host = JSHost(
status=host_status,
logfile=logfile,
config_file=config_file,
manager=manager
)
if not is_running and settings.VERBOSITY >= verbosity.PROCESS_START:
print('Started {}'.format(host.get_name()))
if connect_on_start:
host.connect()
return host |
def decodeTagAttributes(self, text):
"""docstring for decodeTagAttributes"""
attribs = {}
if text.strip() == u'':
return attribs
scanner = _attributePat.scanner(text)
match = scanner.search()
while match:
key, val1, val2, val3, val4 = match.groups()
value = val1 or val2 or val3 or val4
if value:
value = _space.sub(u' ', value).strip()
else:
value = ''
attribs[key] = self.decodeCharReferences(value)
match = scanner.search()
        return attribs | docstring for decodeTagAttributes | Below is the instruction that describes the task:
### Input:
docstring for decodeTagAttributes
### Response:
def decodeTagAttributes(self, text):
"""docstring for decodeTagAttributes"""
attribs = {}
if text.strip() == u'':
return attribs
scanner = _attributePat.scanner(text)
match = scanner.search()
while match:
key, val1, val2, val3, val4 = match.groups()
value = val1 or val2 or val3 or val4
if value:
value = _space.sub(u' ', value).strip()
else:
value = ''
attribs[key] = self.decodeCharReferences(value)
match = scanner.search()
return attribs |
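A hedged usage sketch; parser stands in for the wiki-parser instance this method belongs to, which must also supply _attributePat, _space and decodeCharReferences:
attrs = parser.decodeTagAttributes('class="wikitable" width=\'300\'')
# expected something like {'class': 'wikitable', 'width': '300'}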
def find_button(browser, value):
"""
Find a button with the given value.
Searches for the following different kinds of buttons:
<input type="submit">
<input type="reset">
<input type="button">
<input type="image">
<button>
<{a,p,div,span,...} role="button">
Returns: an :class:`ElementSelector`
"""
field_types = (
'submit',
'reset',
'button-element',
'button',
'image',
'button-role',
)
return reduce(
operator.add,
(find_field_with_value(browser, field_type, value)
for field_type in field_types)
) | Find a button with the given value.
Searches for the following different kinds of buttons:
<input type="submit">
<input type="reset">
<input type="button">
<input type="image">
<button>
<{a,p,div,span,...} role="button">
    Returns: an :class:`ElementSelector` | Below is the instruction that describes the task:
### Input:
Find a button with the given value.
Searches for the following different kinds of buttons:
<input type="submit">
<input type="reset">
<input type="button">
<input type="image">
<button>
<{a,p,div,span,...} role="button">
Returns: an :class:`ElementSelector`
### Response:
def find_button(browser, value):
"""
Find a button with the given value.
Searches for the following different kinds of buttons:
<input type="submit">
<input type="reset">
<input type="button">
<input type="image">
<button>
<{a,p,div,span,...} role="button">
Returns: an :class:`ElementSelector`
"""
field_types = (
'submit',
'reset',
'button-element',
'button',
'image',
'button-role',
)
return reduce(
operator.add,
(find_field_with_value(browser, field_type, value)
for field_type in field_types)
) |
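A hedged sketch of calling find_button from a browser step; browser is assumed to be a Selenium WebDriver handle, and the returned ElementSelector is assumed to be iterable over matching WebElements:
buttons = list(find_button(browser, 'Save changes'))
assert buttons, 'no button labelled "Save changes" was found'
buttons[0].click()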
def to_record_per_alt(self):
'''Returns list of vcf_records. One per variant
in the ALT column. Does not change INFO/FORMAT etc columns, which
means that they are now broken'''
record_list = []
for alt in self.ALT:
record_list.append(copy.copy(self))
record_list[-1].ALT = [alt]
return record_list | Returns list of vcf_records. One per variant
in the ALT column. Does not change INFO/FORMAT etc columns, which
    means that they are now broken | Below is the instruction that describes the task:
### Input:
Returns list of vcf_records. One per variant
in the ALT column. Does not change INFO/FORMAT etc columns, which
means that they are now broken
### Response:
def to_record_per_alt(self):
'''Returns list of vcf_records. One per variant
in the ALT column. Does not change INFO/FORMAT etc columns, which
means that they are now broken'''
record_list = []
for alt in self.ALT:
record_list.append(copy.copy(self))
record_list[-1].ALT = [alt]
return record_list |
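Sketch of splitting a multi-allelic record; record stands for an instance of the VCF-record class this method is defined on:
# e.g. record.ALT == ['A', 'T']
for single in record.to_record_per_alt():
    print(single.ALT)  # ['A'], then ['T']; INFO/FORMAT columns are copied unchanged (and so may be stale)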
def from_config(config, **options):
"""Instantiate an `SyncedRotationEventStores` from config.
Parameters:
config -- the configuration file options read from file(s).
**options -- various options given to the specific event store. Shall
not be used with this event store. Warning will be logged
for every extra non-recognized option. The only required
key to this function is 'path'.
returns -- a newly instantiated `SyncedRotationEventStores`.
"""
required_args = ('storage-backends',)
optional_args = {'events_per_batch': 25000}
rconfig.check_config_options("SyncedRotationEventStores",
required_args,
tuple(optional_args.keys()), options)
if "events_per_batch" in options:
events_per_batch = int(options["events_per_batch"])
else:
events_per_batch = optional_args["events_per_batch"]
estore = SyncedRotationEventStores(events_per_batch)
for section in options['storage-backends'].split(' '):
try:
substore = rconfig.construct_eventstore(config, section)
estore.add_rotated_store(substore)
except Exception as e:
_logger.exception('Could not instantiate substore from'
' section %s', section)
estore.close()
raise
return estore | Instantiate an `SyncedRotationEventStores` from config.
Parameters:
config -- the configuration file options read from file(s).
**options -- various options given to the specific event store. Shall
not be used with this event store. Warning will be logged
for every extra non-recognized option. The only required
key to this function is 'path'.
    returns -- a newly instantiated `SyncedRotationEventStores`. | Below is the instruction that describes the task:
### Input:
Instantiate an `SyncedRotationEventStores` from config.
Parameters:
config -- the configuration file options read from file(s).
**options -- various options given to the specific event store. Shall
not be used with this event store. Warning will be logged
for every extra non-recognized option. The only required
key to this function is 'path'.
returns -- a newly instantiated `SyncedRotationEventStores`.
### Response:
def from_config(config, **options):
"""Instantiate an `SyncedRotationEventStores` from config.
Parameters:
config -- the configuration file options read from file(s).
**options -- various options given to the specific event store. Shall
not be used with this event store. Warning will be logged
for every extra non-recognized option. The only required
key to this function is 'path'.
returns -- a newly instantiated `SyncedRotationEventStores`.
"""
required_args = ('storage-backends',)
optional_args = {'events_per_batch': 25000}
rconfig.check_config_options("SyncedRotationEventStores",
required_args,
tuple(optional_args.keys()), options)
if "events_per_batch" in options:
events_per_batch = int(options["events_per_batch"])
else:
events_per_batch = optional_args["events_per_batch"]
estore = SyncedRotationEventStores(events_per_batch)
for section in options['storage-backends'].split(' '):
try:
substore = rconfig.construct_eventstore(config, section)
estore.add_rotated_store(substore)
except Exception as e:
_logger.exception('Could not instantiate substore from'
' section %s', section)
estore.close()
raise
return estore |
def refund_order(self, request, pk):
"""Refund the order specified by the pk
"""
order = Order.objects.get(id=pk)
order.refund()
        return Response(status=status.HTTP_204_NO_CONTENT) | Refund the order specified by the pk | Below is the instruction that describes the task:
### Input:
Refund the order specified by the pk
### Response:
def refund_order(self, request, pk):
"""Refund the order specified by the pk
"""
order = Order.objects.get(id=pk)
order.refund()
return Response(status=status.HTTP_204_NO_CONTENT) |
def PopupGetFolder(message, title=None, default_path='', no_window=False, size=(None, None), button_color=None,
background_color=None, text_color=None, icon=DEFAULT_WINDOW_ICON, font=None, no_titlebar=False,
grab_anywhere=False, keep_on_top=False, location=(None, None), initial_folder=None):
"""
Display popup with text entry field and browse button. Browse for folder
:param message:
:param default_path:
:param no_window:
:param size:
:param button_color:
:param background_color:
:param text_color:
:param icon:
:param font:
:param no_titlebar:
:param grab_anywhere:
:param keep_on_top:
:param location:
:return: Contents of text field. None if closed using X or cancelled
"""
if no_window:
app = wx.App(False)
frame = wx.Frame()
if initial_folder:
dialog = wx.DirDialog(frame, style=wx.FD_OPEN)
else:
dialog = wx.DirDialog(frame)
folder_name = ''
if dialog.ShowModal() == wx.ID_OK:
folder_name = dialog.GetPath()
return folder_name
layout = [[Text(message, auto_size_text=True, text_color=text_color, background_color=background_color)],
[InputText(default_text=default_path, size=size), FolderBrowse(initial_folder=initial_folder)],
[Button('Ok', size=(60, 20), bind_return_key=True), Button('Cancel', size=(60, 20))]]
_title = title if title is not None else message
window = Window(title=_title, icon=icon, auto_size_text=True, button_color=button_color,
background_color=background_color,
font=font, no_titlebar=no_titlebar, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top,
location=location)
(button, input_values) = window.Layout(layout).Read()
window.Close()
if button != 'Ok':
return None
else:
path = input_values[0]
return path | Display popup with text entry field and browse button. Browse for folder
:param message:
:param default_path:
:param no_window:
:param size:
:param button_color:
:param background_color:
:param text_color:
:param icon:
:param font:
:param no_titlebar:
:param grab_anywhere:
:param keep_on_top:
:param location:
    :return: Contents of text field. None if closed using X or cancelled | Below is the instruction that describes the task:
### Input:
Display popup with text entry field and browse button. Browse for folder
:param message:
:param default_path:
:param no_window:
:param size:
:param button_color:
:param background_color:
:param text_color:
:param icon:
:param font:
:param no_titlebar:
:param grab_anywhere:
:param keep_on_top:
:param location:
:return: Contents of text field. None if closed using X or cancelled
### Response:
def PopupGetFolder(message, title=None, default_path='', no_window=False, size=(None, None), button_color=None,
background_color=None, text_color=None, icon=DEFAULT_WINDOW_ICON, font=None, no_titlebar=False,
grab_anywhere=False, keep_on_top=False, location=(None, None), initial_folder=None):
"""
Display popup with text entry field and browse button. Browse for folder
:param message:
:param default_path:
:param no_window:
:param size:
:param button_color:
:param background_color:
:param text_color:
:param icon:
:param font:
:param no_titlebar:
:param grab_anywhere:
:param keep_on_top:
:param location:
:return: Contents of text field. None if closed using X or cancelled
"""
if no_window:
app = wx.App(False)
frame = wx.Frame()
if initial_folder:
dialog = wx.DirDialog(frame, style=wx.FD_OPEN)
else:
dialog = wx.DirDialog(frame)
folder_name = ''
if dialog.ShowModal() == wx.ID_OK:
folder_name = dialog.GetPath()
return folder_name
layout = [[Text(message, auto_size_text=True, text_color=text_color, background_color=background_color)],
[InputText(default_text=default_path, size=size), FolderBrowse(initial_folder=initial_folder)],
[Button('Ok', size=(60, 20), bind_return_key=True), Button('Cancel', size=(60, 20))]]
_title = title if title is not None else message
window = Window(title=_title, icon=icon, auto_size_text=True, button_color=button_color,
background_color=background_color,
font=font, no_titlebar=no_titlebar, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top,
location=location)
(button, input_values) = window.Layout(layout).Read()
window.Close()
if button != 'Ok':
return None
else:
path = input_values[0]
return path |
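Typical call, sketched; the popup returns None when the window is cancelled or closed with the X:
folder = PopupGetFolder('Pick an output folder', default_path='/tmp')
if folder is None:
    print('Cancelled')
else:
    print('Selected:', folder)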
def create_friendship(self, access_token,
user_id=None, user_name=None):
"""doc: http://open.youku.com/docs/doc?id=28
"""
url = 'https://openapi.youku.com/v2/users/friendship/create.json'
data = {
'client_id': self.client_id,
'access_token': access_token,
'user_id': user_id,
'user_name': user_name
}
data = remove_none_value(data)
r = requests.post(url, data=data)
check_error(r)
        return r.json() | doc: http://open.youku.com/docs/doc?id=28 | Below is the instruction that describes the task:
### Input:
doc: http://open.youku.com/docs/doc?id=28
### Response:
def create_friendship(self, access_token,
user_id=None, user_name=None):
"""doc: http://open.youku.com/docs/doc?id=28
"""
url = 'https://openapi.youku.com/v2/users/friendship/create.json'
data = {
'client_id': self.client_id,
'access_token': access_token,
'user_id': user_id,
'user_name': user_name
}
data = remove_none_value(data)
r = requests.post(url, data=data)
check_error(r)
return r.json() |
def _rename(self):
"""
Called during a PUT request where the action specifies
a rename operation. Returns resource URI of the renamed file.
"""
newname = self.action['newname']
try:
newpath = self.fs.rename(self.fp,newname)
except OSError:
raise tornado.web.HTTPError(400)
return newpath | Called during a PUT request where the action specifies
        a rename operation. Returns resource URI of the renamed file. | Below is the instruction that describes the task:
### Input:
Called during a PUT request where the action specifies
a rename operation. Returns resource URI of the renamed file.
### Response:
def _rename(self):
"""
Called during a PUT request where the action specifies
a rename operation. Returns resource URI of the renamed file.
"""
newname = self.action['newname']
try:
newpath = self.fs.rename(self.fp,newname)
except OSError:
raise tornado.web.HTTPError(400)
return newpath |
def main(arguments=None):
"""
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
"""
# setup the command-line util settings
su = tools(
arguments=arguments,
docString=__doc__,
logLevel="WARNING",
options_first=False,
projectName="qubits"
)
arguments, settings, log, dbConn = su.setup()
# unpack remaining cl arguments using `exec` to setup the variable names
# automatically
for arg, val in arguments.iteritems():
if arg[0] == "-":
varname = arg.replace("-", "") + "Flag"
else:
varname = arg.replace("<", "").replace(">", "")
if varname == "import":
varname = "iimport"
if isinstance(val, str) or isinstance(val, unicode):
exec(varname + " = '%s'" % (val,))
else:
exec(varname + " = %s" % (val,))
if arg == "--dbConn":
dbConn = val
log.debug('%s = %s' % (varname, val,))
## START LOGGING ##
startTime = times.get_now_sql_datetime()
log.info(
'--- STARTING TO RUN THE cl_utils.py AT %s' %
(startTime,))
if init:
from . import workspace
ws = workspace(
log=log,
pathToWorkspace=pathToWorkspace
)
ws.setup()
return
# IMPORT THE SIMULATION SETTINGS
(allSettings,
programSettings,
limitingMags,
sampleNumber,
peakMagnitudeDistributions,
explosionDaysFromSettings,
extendLightCurveTail,
relativeSNRates,
lowerRedshiftLimit,
upperRedshiftLimit,
redshiftResolution,
restFrameFilter,
kCorrectionTemporalResolution,
kCorPolyOrder,
kCorMinimumDataPoints,
extinctionType,
extinctionConstant,
hostExtinctionDistributions,
galacticExtinctionDistribution,
surveyCadenceSettings,
snLightCurves,
surveyArea,
CCSNRateFraction,
transientToCCSNRateFraction,
extraSurveyConstraints,
lightCurvePolyOrder,
logLevel) = cu.read_in_survey_parameters(
log,
pathToSettingsFile=pathToSettingsFile
)
logFilePath = pathToOutputDirectory + "/qubits.log"
del log
log = _set_up_command_line_tool(
level=str(logLevel),
logFilePath=logFilePath
)
# dbConn, log = cu.settings(
# pathToSettingsFile=pathToSettingsFile,
# dbConn=False,
# log=True
# )
## START LOGGING ##
startTime = dcu.get_now_sql_datetime()
log.info('--- STARTING TO RUN THE qubits AT %s' % (startTime,))
resultsDict = {}
pathToOutputPlotDirectory = pathToOutputDirectory + "/plots/"
dcu.dryx_mkdir(
log,
directoryPath=pathToOutputPlotDirectory
)
pathToResultsFolder = pathToOutputDirectory + "/results/"
dcu.dryx_mkdir(
log,
directoryPath=pathToResultsFolder
)
if not programSettings['Extract Lightcurves from Spectra'] and not programSettings['Generate KCorrection Database'] and not programSettings['Run the Simulation'] and not programSettings['Compile and Plot Results']:
print "All stages of the simulatation have been switched off. Please switch on at least one stage of the simulation under the 'Programming Settings' in the settings file `%(pathToSettingsFile)s`" % locals()
# GENERATE THE DATA FOR SIMULATIONS
if programSettings['Extract Lightcurves from Spectra']:
log.info('generating the Lightcurves')
dg.generate_model_lightcurves(
log=log,
pathToSpectralDatabase=pathToSpectralDatabase,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
explosionDaysFromSettings=explosionDaysFromSettings,
extendLightCurveTail=extendLightCurveTail,
polyOrder=lightCurvePolyOrder
)
print "The lightcurve file can be found here: %(pathToOutputDirectory)stransient_light_curves.yaml" % locals()
print "The lightcurve plots can be found in %(pathToOutputPlotDirectory)s" % locals()
if programSettings['Generate KCorrection Database']:
log.info('generating the kcorrection data')
dg.generate_kcorrection_listing_database(
log,
pathToOutputDirectory=pathToOutputDirectory,
pathToSpectralDatabase=pathToSpectralDatabase,
restFrameFilter=restFrameFilter,
temporalResolution=kCorrectionTemporalResolution,
redshiftResolution=redshiftResolution,
redshiftLower=lowerRedshiftLimit,
redshiftUpper=upperRedshiftLimit + redshiftResolution)
log.info('generating the kcorrection polynomials')
dg.generate_kcorrection_polynomial_database(
log,
pathToOutputDirectory=pathToOutputDirectory,
restFrameFilter=restFrameFilter,
kCorPolyOrder=kCorPolyOrder, # ORDER OF THE POLYNOMIAL TO FIT
kCorMinimumDataPoints=kCorMinimumDataPoints,
redshiftResolution=redshiftResolution,
redshiftLower=lowerRedshiftLimit,
redshiftUpper=upperRedshiftLimit + redshiftResolution,
plot=programSettings['Generate KCorrection Plots'])
print "The k-correction database has been generated here: %(pathToOutputDirectory)sk_corrections" % locals()
if programSettings['Generate KCorrection Plots']:
print "The k-correction polynomial plots can also be found in %(pathToOutputDirectory)sk_corrections" % locals()
if programSettings['Run the Simulation']:
# CREATE THE OBSERVABLE UNIVERSE!
log.info('generating the redshift array')
redshiftArray = u.random_redshift_array(
log,
sampleNumber,
lowerRedshiftLimit,
upperRedshiftLimit,
redshiftResolution=redshiftResolution,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings['Plot Simulation Helper Plots'])
resultsDict['Redshifts'] = redshiftArray.tolist()
log.info('generating the SN type array')
snTypesArray = u.random_sn_types_array(
log,
sampleNumber,
relativeSNRates,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings['Plot Simulation Helper Plots'])
resultsDict['SN Types'] = snTypesArray.tolist()
log.info('generating peak magnitudes for the SNe')
peakMagnitudesArray = u.random_peak_magnitudes(
log,
peakMagnitudeDistributions,
snTypesArray,
plot=programSettings['Plot Simulation Helper Plots'])
log.info('generating the SN host extictions array')
hostExtinctionArray = u.random_host_extinction(
log,
sampleNumber,
extinctionType,
extinctionConstant,
hostExtinctionDistributions,
plot=programSettings['Plot Simulation Helper Plots'])
log.info('generating the SN galactic extictions array')
galacticExtinctionArray = u.random_galactic_extinction(
log,
sampleNumber,
extinctionType,
extinctionConstant,
galacticExtinctionDistribution,
plot=programSettings['Plot Simulation Helper Plots'])
log.info('generating the raw lightcurves for the SNe')
rawLightCurveDict = u.generate_numpy_polynomial_lightcurves(
log,
snLightCurves=snLightCurves,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings['Plot Simulation Helper Plots'])
log.info('generating the k-correction array for the SNe')
kCorrectionArray = u.build_kcorrection_array(
log,
redshiftArray,
snTypesArray,
snLightCurves,
pathToOutputDirectory=pathToOutputDirectory,
plot=programSettings['Plot Simulation Helper Plots'])
log.info('generating the observed lightcurves for the SNe')
observedFrameLightCurveInfo, peakAppMagList = u.convert_lightcurves_to_observered_frame(
log,
snLightCurves=snLightCurves,
rawLightCurveDict=rawLightCurveDict,
redshiftArray=redshiftArray,
snTypesArray=snTypesArray,
peakMagnitudesArray=peakMagnitudesArray,
kCorrectionArray=kCorrectionArray,
hostExtinctionArray=hostExtinctionArray,
galacticExtinctionArray=galacticExtinctionArray,
restFrameFilter=restFrameFilter,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
polyOrder=lightCurvePolyOrder,
plot=programSettings['Plot Simulation Helper Plots'])
log.info('generating the survey observation cadence')
cadenceDictionary = ss.survey_cadence_arrays(
log,
surveyCadenceSettings,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings['Plot Simulation Helper Plots'])
log.info('determining if the SNe are discoverable by the survey')
discoverableList = ss.determine_if_sne_are_discoverable(
log,
redshiftArray=redshiftArray,
limitingMags=limitingMags,
observedFrameLightCurveInfo=observedFrameLightCurveInfo,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings['Plot Simulation Helper Plots'])
log.info(
'determining the day (if and) when each SN is first discoverable by the survey')
ripeDayList = ss.determine_when_sne_are_ripe_for_discovery(
log,
redshiftArray=redshiftArray,
limitingMags=limitingMags,
discoverableList=discoverableList,
observedFrameLightCurveInfo=observedFrameLightCurveInfo,
plot=programSettings['Plot Simulation Helper Plots'])
# log.info('determining the day when each SN is disappears fainter than the survey limiting mags')
# disappearDayList = determine_when_discovered_sne_disappear(
# log,
# redshiftArray=redshiftArray,
# limitingMags=limitingMags,
# ripeDayList=ripeDayList,
# observedFrameLightCurveInfo=observedFrameLightCurveInfo,
# plot=programSettings['Plot Simulation Helper Plots'])
log.info('determining if and when each SN is discovered by the survey')
lightCurveDiscoveryDayList, surveyDiscoveryDayList, snCampaignLengthList = ss.determine_if_sne_are_discovered(
log,
limitingMags=limitingMags,
ripeDayList=ripeDayList,
cadenceDictionary=cadenceDictionary,
observedFrameLightCurveInfo=observedFrameLightCurveInfo,
extraSurveyConstraints=extraSurveyConstraints,
plot=programSettings['Plot Simulation Helper Plots'])
resultsDict[
'Discoveries Relative to Peak Magnitudes'] = lightCurveDiscoveryDayList
resultsDict[
'Discoveries Relative to Survey Year'] = surveyDiscoveryDayList
resultsDict['Campaign Length'] = snCampaignLengthList
resultsDict['Cadence Dictionary'] = cadenceDictionary
resultsDict['Peak Apparent Magnitudes'] = peakAppMagList
now = datetime.now()
now = now.strftime("%Y%m%dt%H%M%S")
fileName = pathToOutputDirectory + \
"/simulation_results_%s.yaml" % (now,)
stream = file(fileName, 'w')
yamlContent = dict(allSettings.items() + resultsDict.items())
yaml.dump(yamlContent, stream, default_flow_style=False)
stream.close()
print "The simulation output file can be found here: %(fileName)s. Remember to update your settings file 'Simulation Results File Used for Plots' parameter with this filename before compiling the results." % locals()
if programSettings['Plot Simulation Helper Plots']:
print "The simulation helper-plots found in %(pathToOutputPlotDirectory)s" % locals()
# COMPILE AND PLOT THE RESULTS
if programSettings['Compile and Plot Results']:
pathToYamlFile = pathToOutputDirectory + "/" + \
programSettings['Simulation Results File Used for Plots']
result_log = r.log_the_survey_settings(log, pathToYamlFile)
snSurveyDiscoveryTimes, lightCurveDiscoveryTimes, snTypes, redshifts, cadenceDictionary, peakAppMagList, snCampaignLengthList = r.import_results(
log, pathToYamlFile)
snRatePlotLink, totalRate, tooFaintRate, shortCampaignRate = r.determine_sn_rate(
log,
lightCurveDiscoveryTimes,
snSurveyDiscoveryTimes,
redshifts,
surveyCadenceSettings=surveyCadenceSettings,
lowerRedshiftLimit=lowerRedshiftLimit,
upperRedshiftLimit=upperRedshiftLimit,
redshiftResolution=redshiftResolution,
surveyArea=surveyArea,
CCSNRateFraction=CCSNRateFraction,
transientToCCSNRateFraction=transientToCCSNRateFraction,
peakAppMagList=peakAppMagList,
snCampaignLengthList=snCampaignLengthList,
extraSurveyConstraints=extraSurveyConstraints,
pathToOutputPlotFolder=pathToOutputPlotDirectory)
result_log += """
## Results ##
This simulated survey discovered a total of **%s** transients per year. An extra **%s** transients were detected but deemed too faint to constrain a positive transient identification and a further **%s** transients where detected but an observational campaign of more than **%s** days could not be completed to ensure identification. See below for the various output plots.
""" % (totalRate, tooFaintRate, shortCampaignRate, extraSurveyConstraints["Observable for at least ? number of days"])
cadenceWheelLink = r.plot_cadence_wheel(
log,
cadenceDictionary,
pathToOutputPlotFolder=pathToOutputPlotDirectory)
result_log += """%s""" % (cadenceWheelLink,)
discoveryMapLink = r.plot_sn_discovery_map(
log,
snSurveyDiscoveryTimes,
peakAppMagList,
snCampaignLengthList,
redshifts,
extraSurveyConstraints,
pathToOutputPlotFolder=pathToOutputPlotDirectory)
result_log += """%s""" % (discoveryMapLink,)
ratioMapLink = r.plot_sn_discovery_ratio_map(
log,
snSurveyDiscoveryTimes,
redshifts,
peakAppMagList,
snCampaignLengthList,
extraSurveyConstraints,
pathToOutputPlotFolder=pathToOutputPlotDirectory)
result_log += """%s""" % (ratioMapLink,)
result_log += """%s""" % (snRatePlotLink,)
now = datetime.now()
now = now.strftime("%Y%m%dt%H%M%S")
mdLogPath = pathToResultsFolder + \
"simulation_result_log_%s.md" % (now,)
mdLog = open(mdLogPath, 'w')
mdLog.write(result_log)
mdLog.close()
dmd.convert_to_html(
log=log,
pathToMMDFile=mdLogPath,
css="amblin"
)
print "Results can be found here: %(pathToResultsFolder)s" % locals()
html = mdLogPath.replace(".md", ".html")
print "Open this file in your browser: %(html)s" % locals()
if "dbConn" in locals() and dbConn:
dbConn.commit()
dbConn.close()
## FINISH LOGGING ##
endTime = times.get_now_sql_datetime()
runningTime = times.calculate_time_difference(startTime, endTime)
log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %
(endTime, runningTime, ))
    return | *The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command* | Below is the instruction that describes the task:
### Input:
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
### Response:
def main(arguments=None):
"""
*The main function used when ``cl_utils.py`` is run as a single script from the cl, or when installed as a cl command*
"""
# setup the command-line util settings
su = tools(
arguments=arguments,
docString=__doc__,
logLevel="WARNING",
options_first=False,
projectName="qubits"
)
arguments, settings, log, dbConn = su.setup()
# unpack remaining cl arguments using `exec` to setup the variable names
# automatically
for arg, val in arguments.iteritems():
if arg[0] == "-":
varname = arg.replace("-", "") + "Flag"
else:
varname = arg.replace("<", "").replace(">", "")
if varname == "import":
varname = "iimport"
if isinstance(val, str) or isinstance(val, unicode):
exec(varname + " = '%s'" % (val,))
else:
exec(varname + " = %s" % (val,))
if arg == "--dbConn":
dbConn = val
log.debug('%s = %s' % (varname, val,))
## START LOGGING ##
startTime = times.get_now_sql_datetime()
log.info(
'--- STARTING TO RUN THE cl_utils.py AT %s' %
(startTime,))
if init:
from . import workspace
ws = workspace(
log=log,
pathToWorkspace=pathToWorkspace
)
ws.setup()
return
# IMPORT THE SIMULATION SETTINGS
(allSettings,
programSettings,
limitingMags,
sampleNumber,
peakMagnitudeDistributions,
explosionDaysFromSettings,
extendLightCurveTail,
relativeSNRates,
lowerRedshiftLimit,
upperRedshiftLimit,
redshiftResolution,
restFrameFilter,
kCorrectionTemporalResolution,
kCorPolyOrder,
kCorMinimumDataPoints,
extinctionType,
extinctionConstant,
hostExtinctionDistributions,
galacticExtinctionDistribution,
surveyCadenceSettings,
snLightCurves,
surveyArea,
CCSNRateFraction,
transientToCCSNRateFraction,
extraSurveyConstraints,
lightCurvePolyOrder,
logLevel) = cu.read_in_survey_parameters(
log,
pathToSettingsFile=pathToSettingsFile
)
logFilePath = pathToOutputDirectory + "/qubits.log"
del log
log = _set_up_command_line_tool(
level=str(logLevel),
logFilePath=logFilePath
)
# dbConn, log = cu.settings(
# pathToSettingsFile=pathToSettingsFile,
# dbConn=False,
# log=True
# )
## START LOGGING ##
startTime = dcu.get_now_sql_datetime()
log.info('--- STARTING TO RUN THE qubits AT %s' % (startTime,))
resultsDict = {}
pathToOutputPlotDirectory = pathToOutputDirectory + "/plots/"
dcu.dryx_mkdir(
log,
directoryPath=pathToOutputPlotDirectory
)
pathToResultsFolder = pathToOutputDirectory + "/results/"
dcu.dryx_mkdir(
log,
directoryPath=pathToResultsFolder
)
if not programSettings['Extract Lightcurves from Spectra'] and not programSettings['Generate KCorrection Database'] and not programSettings['Run the Simulation'] and not programSettings['Compile and Plot Results']:
print "All stages of the simulatation have been switched off. Please switch on at least one stage of the simulation under the 'Programming Settings' in the settings file `%(pathToSettingsFile)s`" % locals()
# GENERATE THE DATA FOR SIMULATIONS
if programSettings['Extract Lightcurves from Spectra']:
log.info('generating the Lightcurves')
dg.generate_model_lightcurves(
log=log,
pathToSpectralDatabase=pathToSpectralDatabase,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
explosionDaysFromSettings=explosionDaysFromSettings,
extendLightCurveTail=extendLightCurveTail,
polyOrder=lightCurvePolyOrder
)
print "The lightcurve file can be found here: %(pathToOutputDirectory)stransient_light_curves.yaml" % locals()
print "The lightcurve plots can be found in %(pathToOutputPlotDirectory)s" % locals()
if programSettings['Generate KCorrection Database']:
log.info('generating the kcorrection data')
dg.generate_kcorrection_listing_database(
log,
pathToOutputDirectory=pathToOutputDirectory,
pathToSpectralDatabase=pathToSpectralDatabase,
restFrameFilter=restFrameFilter,
temporalResolution=kCorrectionTemporalResolution,
redshiftResolution=redshiftResolution,
redshiftLower=lowerRedshiftLimit,
redshiftUpper=upperRedshiftLimit + redshiftResolution)
log.info('generating the kcorrection polynomials')
dg.generate_kcorrection_polynomial_database(
log,
pathToOutputDirectory=pathToOutputDirectory,
restFrameFilter=restFrameFilter,
kCorPolyOrder=kCorPolyOrder, # ORDER OF THE POLYNOMIAL TO FIT
kCorMinimumDataPoints=kCorMinimumDataPoints,
redshiftResolution=redshiftResolution,
redshiftLower=lowerRedshiftLimit,
redshiftUpper=upperRedshiftLimit + redshiftResolution,
plot=programSettings['Generate KCorrection Plots'])
print "The k-correction database has been generated here: %(pathToOutputDirectory)sk_corrections" % locals()
if programSettings['Generate KCorrection Plots']:
print "The k-correction polynomial plots can also be found in %(pathToOutputDirectory)sk_corrections" % locals()
if programSettings['Run the Simulation']:
# CREATE THE OBSERVABLE UNIVERSE!
log.info('generating the redshift array')
redshiftArray = u.random_redshift_array(
log,
sampleNumber,
lowerRedshiftLimit,
upperRedshiftLimit,
redshiftResolution=redshiftResolution,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings['Plot Simulation Helper Plots'])
resultsDict['Redshifts'] = redshiftArray.tolist()
log.info('generating the SN type array')
snTypesArray = u.random_sn_types_array(
log,
sampleNumber,
relativeSNRates,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings['Plot Simulation Helper Plots'])
resultsDict['SN Types'] = snTypesArray.tolist()
log.info('generating peak magnitudes for the SNe')
peakMagnitudesArray = u.random_peak_magnitudes(
log,
peakMagnitudeDistributions,
snTypesArray,
plot=programSettings['Plot Simulation Helper Plots'])
log.info('generating the SN host extictions array')
hostExtinctionArray = u.random_host_extinction(
log,
sampleNumber,
extinctionType,
extinctionConstant,
hostExtinctionDistributions,
plot=programSettings['Plot Simulation Helper Plots'])
log.info('generating the SN galactic extictions array')
galacticExtinctionArray = u.random_galactic_extinction(
log,
sampleNumber,
extinctionType,
extinctionConstant,
galacticExtinctionDistribution,
plot=programSettings['Plot Simulation Helper Plots'])
log.info('generating the raw lightcurves for the SNe')
rawLightCurveDict = u.generate_numpy_polynomial_lightcurves(
log,
snLightCurves=snLightCurves,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings['Plot Simulation Helper Plots'])
log.info('generating the k-correction array for the SNe')
kCorrectionArray = u.build_kcorrection_array(
log,
redshiftArray,
snTypesArray,
snLightCurves,
pathToOutputDirectory=pathToOutputDirectory,
plot=programSettings['Plot Simulation Helper Plots'])
log.info('generating the observed lightcurves for the SNe')
observedFrameLightCurveInfo, peakAppMagList = u.convert_lightcurves_to_observered_frame(
log,
snLightCurves=snLightCurves,
rawLightCurveDict=rawLightCurveDict,
redshiftArray=redshiftArray,
snTypesArray=snTypesArray,
peakMagnitudesArray=peakMagnitudesArray,
kCorrectionArray=kCorrectionArray,
hostExtinctionArray=hostExtinctionArray,
galacticExtinctionArray=galacticExtinctionArray,
restFrameFilter=restFrameFilter,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
polyOrder=lightCurvePolyOrder,
plot=programSettings['Plot Simulation Helper Plots'])
log.info('generating the survey observation cadence')
cadenceDictionary = ss.survey_cadence_arrays(
log,
surveyCadenceSettings,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings['Plot Simulation Helper Plots'])
log.info('determining if the SNe are discoverable by the survey')
discoverableList = ss.determine_if_sne_are_discoverable(
log,
redshiftArray=redshiftArray,
limitingMags=limitingMags,
observedFrameLightCurveInfo=observedFrameLightCurveInfo,
pathToOutputDirectory=pathToOutputDirectory,
pathToOutputPlotDirectory=pathToOutputPlotDirectory,
plot=programSettings['Plot Simulation Helper Plots'])
log.info(
'determining the day (if and) when each SN is first discoverable by the survey')
ripeDayList = ss.determine_when_sne_are_ripe_for_discovery(
log,
redshiftArray=redshiftArray,
limitingMags=limitingMags,
discoverableList=discoverableList,
observedFrameLightCurveInfo=observedFrameLightCurveInfo,
plot=programSettings['Plot Simulation Helper Plots'])
# log.info('determining the day when each SN is disappears fainter than the survey limiting mags')
# disappearDayList = determine_when_discovered_sne_disappear(
# log,
# redshiftArray=redshiftArray,
# limitingMags=limitingMags,
# ripeDayList=ripeDayList,
# observedFrameLightCurveInfo=observedFrameLightCurveInfo,
# plot=programSettings['Plot Simulation Helper Plots'])
log.info('determining if and when each SN is discovered by the survey')
lightCurveDiscoveryDayList, surveyDiscoveryDayList, snCampaignLengthList = ss.determine_if_sne_are_discovered(
log,
limitingMags=limitingMags,
ripeDayList=ripeDayList,
cadenceDictionary=cadenceDictionary,
observedFrameLightCurveInfo=observedFrameLightCurveInfo,
extraSurveyConstraints=extraSurveyConstraints,
plot=programSettings['Plot Simulation Helper Plots'])
resultsDict[
'Discoveries Relative to Peak Magnitudes'] = lightCurveDiscoveryDayList
resultsDict[
'Discoveries Relative to Survey Year'] = surveyDiscoveryDayList
resultsDict['Campaign Length'] = snCampaignLengthList
resultsDict['Cadence Dictionary'] = cadenceDictionary
resultsDict['Peak Apparent Magnitudes'] = peakAppMagList
now = datetime.now()
now = now.strftime("%Y%m%dt%H%M%S")
fileName = pathToOutputDirectory + \
"/simulation_results_%s.yaml" % (now,)
stream = file(fileName, 'w')
yamlContent = dict(allSettings.items() + resultsDict.items())
yaml.dump(yamlContent, stream, default_flow_style=False)
stream.close()
print "The simulation output file can be found here: %(fileName)s. Remember to update your settings file 'Simulation Results File Used for Plots' parameter with this filename before compiling the results." % locals()
if programSettings['Plot Simulation Helper Plots']:
print "The simulation helper-plots found in %(pathToOutputPlotDirectory)s" % locals()
# COMPILE AND PLOT THE RESULTS
if programSettings['Compile and Plot Results']:
pathToYamlFile = pathToOutputDirectory + "/" + \
programSettings['Simulation Results File Used for Plots']
result_log = r.log_the_survey_settings(log, pathToYamlFile)
snSurveyDiscoveryTimes, lightCurveDiscoveryTimes, snTypes, redshifts, cadenceDictionary, peakAppMagList, snCampaignLengthList = r.import_results(
log, pathToYamlFile)
snRatePlotLink, totalRate, tooFaintRate, shortCampaignRate = r.determine_sn_rate(
log,
lightCurveDiscoveryTimes,
snSurveyDiscoveryTimes,
redshifts,
surveyCadenceSettings=surveyCadenceSettings,
lowerRedshiftLimit=lowerRedshiftLimit,
upperRedshiftLimit=upperRedshiftLimit,
redshiftResolution=redshiftResolution,
surveyArea=surveyArea,
CCSNRateFraction=CCSNRateFraction,
transientToCCSNRateFraction=transientToCCSNRateFraction,
peakAppMagList=peakAppMagList,
snCampaignLengthList=snCampaignLengthList,
extraSurveyConstraints=extraSurveyConstraints,
pathToOutputPlotFolder=pathToOutputPlotDirectory)
result_log += """
## Results ##
This simulated survey discovered a total of **%s** transients per year. An extra **%s** transients were detected but deemed too faint to constrain a positive transient identification and a further **%s** transients where detected but an observational campaign of more than **%s** days could not be completed to ensure identification. See below for the various output plots.
""" % (totalRate, tooFaintRate, shortCampaignRate, extraSurveyConstraints["Observable for at least ? number of days"])
cadenceWheelLink = r.plot_cadence_wheel(
log,
cadenceDictionary,
pathToOutputPlotFolder=pathToOutputPlotDirectory)
result_log += """%s""" % (cadenceWheelLink,)
discoveryMapLink = r.plot_sn_discovery_map(
log,
snSurveyDiscoveryTimes,
peakAppMagList,
snCampaignLengthList,
redshifts,
extraSurveyConstraints,
pathToOutputPlotFolder=pathToOutputPlotDirectory)
result_log += """%s""" % (discoveryMapLink,)
ratioMapLink = r.plot_sn_discovery_ratio_map(
log,
snSurveyDiscoveryTimes,
redshifts,
peakAppMagList,
snCampaignLengthList,
extraSurveyConstraints,
pathToOutputPlotFolder=pathToOutputPlotDirectory)
result_log += """%s""" % (ratioMapLink,)
result_log += """%s""" % (snRatePlotLink,)
now = datetime.now()
now = now.strftime("%Y%m%dt%H%M%S")
mdLogPath = pathToResultsFolder + \
"simulation_result_log_%s.md" % (now,)
mdLog = open(mdLogPath, 'w')
mdLog.write(result_log)
mdLog.close()
dmd.convert_to_html(
log=log,
pathToMMDFile=mdLogPath,
css="amblin"
)
print "Results can be found here: %(pathToResultsFolder)s" % locals()
html = mdLogPath.replace(".md", ".html")
print "Open this file in your browser: %(html)s" % locals()
if "dbConn" in locals() and dbConn:
dbConn.commit()
dbConn.close()
## FINISH LOGGING ##
endTime = times.get_now_sql_datetime()
runningTime = times.calculate_time_difference(startTime, endTime)
log.info('-- FINISHED ATTEMPT TO RUN THE cl_utils.py AT %s (RUNTIME: %s) --' %
(endTime, runningTime, ))
return |
def wr_txt_section_hdrgos(self, fout_txt, sortby=None, prt_section=True):
"""Write high GO IDs that are actually used to group current set of GO IDs."""
sec2d_go = self.grprobj.get_sections_2d() # lists of GO IDs
sec2d_nt = self.get_sections_2dnt(sec2d_go) # lists of GO Grouper namedtuples
if sortby is None:
sortby = self.fncsortnt
with open(fout_txt, 'w') as prt:
self.prt_ver(prt)
prt.write("# GROUP NAME: {NAME}\n".format(NAME=self.grprobj.grpname))
for section_name, nthdrgos_actual in sec2d_nt:
if prt_section:
prt.write("# SECTION: {SECTION}\n".format(SECTION=section_name))
self.prt_ntgos(prt, nthdrgos_actual)
if prt_section:
prt.write("\n")
dat = SummarySec2dHdrGos().summarize_sec2hdrgos(sec2d_go)
sys.stdout.write(self.grprobj.fmtsum.format(
GO_DESC='hdr', SECs=len(dat['S']), GOs=len(dat['G']),
UNGRP=len(dat['U']), undesc="unused",
ACTION="WROTE:", FILE=fout_txt))
        return sec2d_nt | Write high GO IDs that are actually used to group current set of GO IDs. | Below is the instruction that describes the task:
### Input:
Write high GO IDs that are actually used to group current set of GO IDs.
### Response:
def wr_txt_section_hdrgos(self, fout_txt, sortby=None, prt_section=True):
"""Write high GO IDs that are actually used to group current set of GO IDs."""
sec2d_go = self.grprobj.get_sections_2d() # lists of GO IDs
sec2d_nt = self.get_sections_2dnt(sec2d_go) # lists of GO Grouper namedtuples
if sortby is None:
sortby = self.fncsortnt
with open(fout_txt, 'w') as prt:
self.prt_ver(prt)
prt.write("# GROUP NAME: {NAME}\n".format(NAME=self.grprobj.grpname))
for section_name, nthdrgos_actual in sec2d_nt:
if prt_section:
prt.write("# SECTION: {SECTION}\n".format(SECTION=section_name))
self.prt_ntgos(prt, nthdrgos_actual)
if prt_section:
prt.write("\n")
dat = SummarySec2dHdrGos().summarize_sec2hdrgos(sec2d_go)
sys.stdout.write(self.grprobj.fmtsum.format(
GO_DESC='hdr', SECs=len(dat['S']), GOs=len(dat['G']),
UNGRP=len(dat['U']), undesc="unused",
ACTION="WROTE:", FILE=fout_txt))
return sec2d_nt |
def replaceChild(self, child, content):
"""
Replace I{child} with the specified I{content}.
@param child: A child element.
@type child: L{Element}
@param content: An element or collection of elements.
@type content: L{Element} or [L{Element},]
"""
if child not in self.children:
raise Exception('child not-found')
index = self.children.index(child)
self.remove(child)
if not isinstance(content, (list, tuple)):
content = (content,)
for node in content:
self.children.insert(index, node.detach())
node.parent = self
index += 1 | Replace I{child} with the specified I{content}.
@param child: A child element.
@type child: L{Element}
@param content: An element or collection of elements.
        @type content: L{Element} or [L{Element},] | Below is the instruction that describes the task:
### Input:
Replace I{child} with the specified I{content}.
@param child: A child element.
@type child: L{Element}
@param content: An element or collection of elements.
@type content: L{Element} or [L{Element},]
### Response:
def replaceChild(self, child, content):
"""
Replace I{child} with the specified I{content}.
@param child: A child element.
@type child: L{Element}
@param content: An element or collection of elements.
@type content: L{Element} or [L{Element},]
"""
if child not in self.children:
raise Exception('child not-found')
index = self.children.index(child)
self.remove(child)
if not isinstance(content, (list, tuple)):
content = (content,)
for node in content:
self.children.insert(index, node.detach())
node.parent = self
index += 1 |
def get_space_information(self, space_key, expand=None, callback=None):
"""
Returns information about a space.
:param space_key (string): A string containing the key of the space.
:param expand (string): OPTIONAL: A comma separated list of properties to expand on the space. Default: Empty.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the space/{spaceKey} endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
"""
params = {}
if expand:
params["expand"] = expand
return self._service_get_request("rest/api/space/{key}".format(key=space_key),
params=params, callback=callback) | Returns information about a space.
:param space_key (string): A string containing the key of the space.
:param expand (string): OPTIONAL: A comma separated list of properties to expand on the space. Default: Empty.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the space/{spaceKey} endpoint,
        or the results of the callback. Will raise requests.HTTPError on bad input, potentially. | Below is the instruction that describes the task:
### Input:
Returns information about a space.
:param space_key (string): A string containing the key of the space.
:param expand (string): OPTIONAL: A comma separated list of properties to expand on the space. Default: Empty.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the space/{spaceKey} endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
### Response:
def get_space_information(self, space_key, expand=None, callback=None):
"""
Returns information about a space.
:param space_key (string): A string containing the key of the space.
:param expand (string): OPTIONAL: A comma separated list of properties to expand on the space. Default: Empty.
:param callback: OPTIONAL: The callback to execute on the resulting data, before the method returns.
Default: None (no callback, raw data returned).
:return: The JSON data returned from the space/{spaceKey} endpoint,
or the results of the callback. Will raise requests.HTTPError on bad input, potentially.
"""
params = {}
if expand:
params["expand"] = expand
return self._service_get_request("rest/api/space/{key}".format(key=space_key),
params=params, callback=callback) |
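A hedged usage sketch; client stands for an instance of whatever Confluence-API wrapper class defines this method and _service_get_request:
space = client.get_space_information('DEV', expand='description.plain,homepage')
print(space['name'])  # raw JSON dict from the space/{spaceKey} endpoint unless a callback reshapes it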
def _handle_default(value, script_name):
""" There are two potential variants of these scripts,
the Bash scripts that are meant to be run within PULSAR_ROOT
for older-style installs and the binaries created by setup.py
as part of a proper pulsar installation.
This method first looks for the newer style variant of these
scripts and returns the full path to them if needed and falls
back to the bash scripts if these cannot be found.
"""
if value:
return value
installed_script = which("pulsar-%s" % script_name.replace("_", "-"))
if installed_script:
return installed_script
else:
return "scripts/%s.bash" % script_name | There are two potential variants of these scripts,
the Bash scripts that are meant to be run within PULSAR_ROOT
for older-style installs and the binaries created by setup.py
as part of a proper pulsar installation.
This method first looks for the newer style variant of these
scripts and returns the full path to them if needed and falls
    back to the bash scripts if these cannot be found. | Below is the instruction that describes the task:
### Input:
There are two potential variants of these scripts,
the Bash scripts that are meant to be run within PULSAR_ROOT
for older-style installs and the binaries created by setup.py
as part of a proper pulsar installation.
This method first looks for the newer style variant of these
scripts and returns the full path to them if needed and falls
back to the bash scripts if these cannot be found.
### Response:
def _handle_default(value, script_name):
""" There are two potential variants of these scripts,
the Bash scripts that are meant to be run within PULSAR_ROOT
for older-style installs and the binaries created by setup.py
as part of a proper pulsar installation.
This method first looks for the newer style variant of these
scripts and returns the full path to them if needed and falls
back to the bash scripts if these cannot be found.
"""
if value:
return value
installed_script = which("pulsar-%s" % script_name.replace("_", "-"))
if installed_script:
return installed_script
else:
return "scripts/%s.bash" % script_name |
def returner(load):
'''
Return data to a postgres server
'''
conn = _get_conn()
if conn is None:
return None
cur = conn.cursor()
sql = '''INSERT INTO salt_returns
(fun, jid, return, id, success)
VALUES (%s, %s, %s, %s, %s)'''
try:
ret = six.text_type(load['return'])
except UnicodeDecodeError:
ret = str(load['return'])
job_ret = {'return': ret}
if 'retcode' in load:
job_ret['retcode'] = load['retcode']
if 'success' in load:
job_ret['success'] = load['success']
cur.execute(
sql, (
load['fun'],
load['jid'],
salt.utils.json.dumps(job_ret),
load['id'],
load.get('success'),
)
)
    _close_conn(conn) | Return data to a postgres server | Below is the instruction that describes the task:
### Input:
Return data to a postgres server
### Response:
def returner(load):
'''
Return data to a postgres server
'''
conn = _get_conn()
if conn is None:
return None
cur = conn.cursor()
sql = '''INSERT INTO salt_returns
(fun, jid, return, id, success)
VALUES (%s, %s, %s, %s, %s)'''
try:
ret = six.text_type(load['return'])
except UnicodeDecodeError:
ret = str(load['return'])
job_ret = {'return': ret}
if 'retcode' in load:
job_ret['retcode'] = load['retcode']
if 'success' in load:
job_ret['success'] = load['success']
cur.execute(
sql, (
load['fun'],
load['jid'],
salt.utils.json.dumps(job_ret),
load['id'],
load.get('success'),
)
)
_close_conn(conn) |
def load_stubs(self, log_mem=False):
"""Load all events in their `stub` (name, alias, etc only) form.
Used in `update` mode.
"""
# Initialize parameter related to diagnostic output of memory usage
if log_mem:
import psutil
process = psutil.Process(os.getpid())
rss = process.memory_info().rss
LOG_MEMORY_INT = 1000
MEMORY_LIMIT = 1000.0
def _add_stub_manually(_fname):
"""Create and add a 'stub' by manually loading parameters from
JSON files.
Previously this was done by creating a full `Entry` instance, then
using the `Entry.get_stub()` method to trim it down. This was very
slow and memory intensive, hence this improved approach.
"""
# FIX: should this be ``fi.endswith(``.gz')`` ?
fname = uncompress_gz(_fname) if '.gz' in _fname else _fname
stub = None
stub_name = None
with codecs.open(fname, 'r') as jfil:
# Load the full JSON file
data = json.load(jfil, object_pairs_hook=OrderedDict)
# Extract the top-level keys (should just be the name of the
# entry)
stub_name = list(data.keys())
# Make sure there is only a single top-level entry
if len(stub_name) != 1:
err = "json file '{}' has multiple keys: {}".format(
fname, list(stub_name))
self._log.error(err)
raise ValueError(err)
stub_name = stub_name[0]
# Make sure a non-stub entry doesnt already exist with this
# name
if stub_name in self.entries and not self.entries[
stub_name]._stub:
err_str = (
"ERROR: non-stub entry already exists with name '{}'"
.format(stub_name))
self.log.error(err_str)
raise RuntimeError(err_str)
# Remove the outmost dict level
data = data[stub_name]
# Create a new `Entry` (subclass) instance
proto = self.proto
stub = proto(catalog=self, name=stub_name, stub=True)
# Add stub parameters if they are available
if proto._KEYS.ALIAS in data:
stub[proto._KEYS.ALIAS] = data[proto._KEYS.ALIAS]
if proto._KEYS.DISTINCT_FROM in data:
stub[proto._KEYS.DISTINCT_FROM] = data[
proto._KEYS.DISTINCT_FROM]
if proto._KEYS.RA in data:
stub[proto._KEYS.RA] = data[proto._KEYS.RA]
if proto._KEYS.DEC in data:
stub[proto._KEYS.DEC] = data[proto._KEYS.DEC]
if proto._KEYS.DISCOVER_DATE in data:
stub[proto._KEYS.DISCOVER_DATE] = data[
proto._KEYS.DISCOVER_DATE]
if proto._KEYS.SOURCES in data:
stub[proto._KEYS.SOURCES] = data[
proto._KEYS.SOURCES]
# Store the stub
self.entries[stub_name] = stub
self.log.debug("Added stub for '{}'".format(stub_name))
currenttask = 'Loading entry stubs'
files = self.PATHS.get_repo_output_file_list()
for ii, _fname in enumerate(pbar(files, currenttask)):
# Run normally
# _add_stub(_fname)
# Run 'manually' (extract stub parameters directly from JSON)
_add_stub_manually(_fname)
if log_mem:
rss = process.memory_info().rss / 1024 / 1024
if ii % LOG_MEMORY_INT == 0 or rss > MEMORY_LIMIT:
log_memory(self.log, "\nLoaded stub {}".format(ii),
logging.INFO)
if rss > MEMORY_LIMIT:
err = (
"Memory usage {}, has exceeded {} on file {} '{}'".
format(rss, MEMORY_LIMIT, ii, _fname))
self.log.error(err)
raise RuntimeError(err)
return self.entries | Load all events in their `stub` (name, alias, etc only) form.
Used in `update` mode. | Below is the the instruction that describes the task:
### Input:
Load all events in their `stub` (name, alias, etc only) form.
Used in `update` mode.
### Response:
def load_stubs(self, log_mem=False):
"""Load all events in their `stub` (name, alias, etc only) form.
Used in `update` mode.
"""
# Initialize parameter related to diagnostic output of memory usage
if log_mem:
import psutil
process = psutil.Process(os.getpid())
rss = process.memory_info().rss
LOG_MEMORY_INT = 1000
MEMORY_LIMIT = 1000.0
def _add_stub_manually(_fname):
"""Create and add a 'stub' by manually loading parameters from
JSON files.
Previously this was done by creating a full `Entry` instance, then
using the `Entry.get_stub()` method to trim it down. This was very
slow and memory intensive, hence this improved approach.
"""
# FIX: should this be ``fi.endswith(``.gz')`` ?
fname = uncompress_gz(_fname) if '.gz' in _fname else _fname
stub = None
stub_name = None
with codecs.open(fname, 'r') as jfil:
# Load the full JSON file
data = json.load(jfil, object_pairs_hook=OrderedDict)
# Extract the top-level keys (should just be the name of the
# entry)
stub_name = list(data.keys())
# Make sure there is only a single top-level entry
if len(stub_name) != 1:
err = "json file '{}' has multiple keys: {}".format(
fname, list(stub_name))
self._log.error(err)
raise ValueError(err)
stub_name = stub_name[0]
# Make sure a non-stub entry doesnt already exist with this
# name
if stub_name in self.entries and not self.entries[
stub_name]._stub:
err_str = (
"ERROR: non-stub entry already exists with name '{}'"
.format(stub_name))
self.log.error(err_str)
raise RuntimeError(err_str)
# Remove the outmost dict level
data = data[stub_name]
# Create a new `Entry` (subclass) instance
proto = self.proto
stub = proto(catalog=self, name=stub_name, stub=True)
# Add stub parameters if they are available
if proto._KEYS.ALIAS in data:
stub[proto._KEYS.ALIAS] = data[proto._KEYS.ALIAS]
if proto._KEYS.DISTINCT_FROM in data:
stub[proto._KEYS.DISTINCT_FROM] = data[
proto._KEYS.DISTINCT_FROM]
if proto._KEYS.RA in data:
stub[proto._KEYS.RA] = data[proto._KEYS.RA]
if proto._KEYS.DEC in data:
stub[proto._KEYS.DEC] = data[proto._KEYS.DEC]
if proto._KEYS.DISCOVER_DATE in data:
stub[proto._KEYS.DISCOVER_DATE] = data[
proto._KEYS.DISCOVER_DATE]
if proto._KEYS.SOURCES in data:
stub[proto._KEYS.SOURCES] = data[
proto._KEYS.SOURCES]
# Store the stub
self.entries[stub_name] = stub
self.log.debug("Added stub for '{}'".format(stub_name))
currenttask = 'Loading entry stubs'
files = self.PATHS.get_repo_output_file_list()
for ii, _fname in enumerate(pbar(files, currenttask)):
# Run normally
# _add_stub(_fname)
# Run 'manually' (extract stub parameters directly from JSON)
_add_stub_manually(_fname)
if log_mem:
rss = process.memory_info().rss / 1024 / 1024
if ii % LOG_MEMORY_INT == 0 or rss > MEMORY_LIMIT:
log_memory(self.log, "\nLoaded stub {}".format(ii),
logging.INFO)
if rss > MEMORY_LIMIT:
err = (
"Memory usage {}, has exceeded {} on file {} '{}'".
format(rss, MEMORY_LIMIT, ii, _fname))
self.log.error(err)
raise RuntimeError(err)
return self.entries |
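
A minimal, self-contained sketch of the "single top-level key" convention the stub loader above relies on; the event name and fields are made up for the example:

import json
from collections import OrderedDict

# Made-up JSON blob in the one-entry-per-file layout the loader expects.
raw = '{"SN2011fe": {"alias": [{"value": "SN2011fe"}], "ra": [{"value": "14:03:05.81"}]}}'
data = json.loads(raw, object_pairs_hook=OrderedDict)

names = list(data.keys())
if len(names) != 1:
    raise ValueError("json blob has multiple top-level keys: {}".format(names))
stub_name = names[0]
fields = data[stub_name]              # drop the outermost level, as above
print(stub_name, sorted(fields))      # SN2011fe ['alias', 'ra']
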
def display_reports(self, layout):
"""Issues the final PyLint score as a TeamCity build statistic value"""
try:
score = self.linter.stats['global_note']
except (AttributeError, KeyError):
pass
else:
self.tc.message('buildStatisticValue', key='PyLintScore', value=str(score)) | Issues the final PyLint score as a TeamCity build statistic value | Below is the the instruction that describes the task:
### Input:
Issues the final PyLint score as a TeamCity build statistic value
### Response:
def display_reports(self, layout):
"""Issues the final PyLint score as a TeamCity build statistic value"""
try:
score = self.linter.stats['global_note']
except (AttributeError, KeyError):
pass
else:
self.tc.message('buildStatisticValue', key='PyLintScore', value=str(score)) |
def _contextkey(jail=None, chroot=None, root=None, prefix='pkg.list_pkgs'):
'''
As this module is designed to manipulate packages in jails and chroots, use
the passed jail/chroot to ensure that a key in the __context__ dict that is
unique to that jail/chroot is used.
'''
if jail:
return six.text_type(prefix) + '.jail_{0}'.format(jail)
elif chroot:
return six.text_type(prefix) + '.chroot_{0}'.format(chroot)
elif root:
return six.text_type(prefix) + '.root_{0}'.format(root)
return prefix | As this module is designed to manipulate packages in jails and chroots, use
the passed jail/chroot to ensure that a key in the __context__ dict that is
unique to that jail/chroot is used. | Below is the the instruction that describes the task:
### Input:
As this module is designed to manipulate packages in jails and chroots, use
the passed jail/chroot to ensure that a key in the __context__ dict that is
unique to that jail/chroot is used.
### Response:
def _contextkey(jail=None, chroot=None, root=None, prefix='pkg.list_pkgs'):
'''
As this module is designed to manipulate packages in jails and chroots, use
the passed jail/chroot to ensure that a key in the __context__ dict that is
unique to that jail/chroot is used.
'''
if jail:
return six.text_type(prefix) + '.jail_{0}'.format(jail)
elif chroot:
return six.text_type(prefix) + '.chroot_{0}'.format(chroot)
elif root:
return six.text_type(prefix) + '.root_{0}'.format(root)
return prefix |
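
A standalone sketch of the cache-key scheme, reimplemented so it runs without Salt; the jail and chroot names are invented:

# Hypothetical standalone reimplementation, for illustration only.
def contextkey(jail=None, chroot=None, root=None, prefix='pkg.list_pkgs'):
    if jail:
        return '{0}.jail_{1}'.format(prefix, jail)
    elif chroot:
        return '{0}.chroot_{1}'.format(prefix, chroot)
    elif root:
        return '{0}.root_{1}'.format(prefix, root)
    return prefix

print(contextkey())                     # pkg.list_pkgs
print(contextkey(jail='webjail'))       # pkg.list_pkgs.jail_webjail
print(contextkey(chroot='/mnt/build'))  # pkg.list_pkgs.chroot_/mnt/build
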