def read_fakelc(fakelcfile):
'''
This just reads a pickled fake LC.
Parameters
----------
fakelcfile : str
The fake LC file to read.
Returns
-------
dict
This returns an lcdict.
'''
try:
with open(fakelcfile,'rb') as infd:
lcdict = pickle.load(infd)
except UnicodeDecodeError:
with open(fakelcfile,'rb') as infd:
lcdict = pickle.load(infd, encoding='latin1')
return lcdict


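# A minimal usage sketch for `read_fakelc`; the path below is hypothetical
# (fake LC pickles are written with a '*-fakelc.pkl' name pattern under the
# simbasedir's 'lightcurves' subdirectory):
#
#     lcdict = read_fakelc('/data/simbasedir/lightcurves/FAKE-1-fakelc.pkl')
#     print(lcdict['objectid'], lcdict['actual_vartype'])
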
def get_varfeatures(simbasedir,
mindet=1000,
nworkers=None):
'''This runs `lcproc.lcvfeatures.parallel_varfeatures` on fake LCs in
`simbasedir`.
Parameters
----------
simbasedir : str
The directory containing the fake LCs to process.
mindet : int
The minimum number of detections needed to accept an LC and process it.
nworkers : int or None
The number of parallel workers to use when extracting variability
features from the input light curves.
Returns
-------
str
The path to the `varfeatures` pickle created after running the
`lcproc.lcvfeatures.parallel_varfeatures` function.
'''
# get the info from the simbasedir
with open(os.path.join(simbasedir, 'fakelcs-info.pkl'),'rb') as infd:
siminfo = pickle.load(infd)
lcfpaths = siminfo['lcfpath']
varfeaturedir = os.path.join(simbasedir,'varfeatures')
# get the column defs for the fakelcs
timecols = siminfo['timecols']
magcols = siminfo['magcols']
errcols = siminfo['errcols']
# register the fakelc pklc as a custom lcproc format
# now we should be able to use all lcproc functions correctly
fakelc_formatkey = 'fake-%s' % siminfo['lcformat']
lcproc.register_lcformat(
fakelc_formatkey,
'*-fakelc.pkl',
timecols,
magcols,
errcols,
'astrobase.lcproc',
'_read_pklc',
magsarefluxes=siminfo['magsarefluxes']
)
# now we can use lcproc.parallel_varfeatures directly
varinfo = lcvfeatures.parallel_varfeatures(lcfpaths,
varfeaturedir,
lcformat=fakelc_formatkey,
mindet=mindet,
nworkers=nworkers)
with open(os.path.join(simbasedir,'fakelc-varfeatures.pkl'),'wb') as outfd:
pickle.dump(varinfo, outfd, pickle.HIGHEST_PROTOCOL)
return os.path.join(simbasedir,'fakelc-varfeatures.pkl')


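# Usage sketch for `get_varfeatures`; the simbasedir path is hypothetical and
# must already contain the fake LCs plus a fakelcs-info.pkl:
#
#     varfeatures_pkl = get_varfeatures('/data/simbasedir',
#                                       mindet=1000,
#                                       nworkers=4)
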
def precision(ntp, nfp):
'''
This calculates precision.
https://en.wikipedia.org/wiki/Precision_and_recall
Parameters
----------
ntp : int
The number of true positives.
nfp : int
The number of false positives.
Returns
-------
float
The precision calculated using `ntp/(ntp + nfp)`.
'''
if (ntp+nfp) > 0:
return ntp/(ntp+nfp)
else:
return np.nan


def recall(ntp, nfn):
'''
This calculates recall.
https://en.wikipedia.org/wiki/Precision_and_recall
Parameters
----------
ntp : int
The number of true positives.
nfn : int
The number of false negatives.
Returns
-------
float
        The recall calculated using `ntp/(ntp + nfn)`.
'''
if (ntp+nfn) > 0:
return ntp/(ntp+nfn)
else:
return np.nan


def matthews_correl_coeff(ntp, ntn, nfp, nfn):
'''
    This calculates the Matthews correlation coefficient.
https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
Parameters
----------
ntp : int
The number of true positives.
ntn : int
        The number of true negatives.
nfp : int
The number of false positives.
nfn : int
The number of false negatives.
Returns
-------
float
The Matthews correlation coefficient.
'''
mcc_top = (ntp*ntn - nfp*nfn)
mcc_bot = msqrt((ntp + nfp)*(ntp + nfn)*(ntn + nfp)*(ntn + nfn))
if mcc_bot > 0:
return mcc_top/mcc_bot
else:
return np.nan


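# The three metric helpers above compose directly from a confusion matrix.
# The sketch below uses toy numbers (not from any simulation) for a
# 200-object sample containing 105 actual variables.
def _example_precision_recall_mcc():
    '''Sanity-check sketch for the precision/recall/MCC helpers above.'''
    ntp, ntn, nfp, nfn = 90, 85, 10, 15
    # precision = 90/(90 + 10) = 0.9
    print('precision:', precision(ntp, nfp))
    # recall = 90/(90 + 15) ~ 0.857
    print('recall:', recall(ntp, nfn))
    # MCC = (90*85 - 10*15)/sqrt(100*105*95*100) ~ 0.751
    print('MCC:', matthews_correl_coeff(ntp, ntn, nfp, nfn))
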
def get_recovered_variables_for_magbin(simbasedir,
magbinmedian,
stetson_stdev_min=2.0,
inveta_stdev_min=2.0,
iqr_stdev_min=2.0,
statsonly=True):
    '''This runs variability selection for the given magbinmedian.
    To generate a full recovery matrix over all magnitude bins, run this
    function for each magbin over the specified `stetson_stdev_min`,
    `inveta_stdev_min`, and `iqr_stdev_min` grids.
Parameters
----------
simbasedir : str
The input directory of fake LCs.
    magbinmedian : float
        The magbin to run the variable recovery for. This is an item from the
        `fakelcinfo['magrms'][magcol]['binned_sdssr_median']` list in the dict
        loaded from `simbasedir/fakelcs-info.pkl`, and designates which magbin
        to get the recovery stats for.
    stetson_stdev_min : float
        The minimum sigma above the trend in the Stetson J variability index
        distribution for this magbin, above which objects are considered
        variable.
    inveta_stdev_min : float
        The minimum sigma above the trend in the 1/eta variability index
        distribution for this magbin, above which objects are considered
        variable.
    iqr_stdev_min : float
        The minimum sigma above the trend in the IQR variability index
        distribution for this magbin, above which objects are considered
        variable.
statsonly : bool
If this is True, only the final stats will be returned. If False, the
full arrays used to generate the stats will also be returned.
Returns
-------
dict
The returned dict contains statistics for this magbin and if requested,
the full arrays used to calculate the statistics.
'''
# get the info from the simbasedir
with open(os.path.join(simbasedir, 'fakelcs-info.pkl'),'rb') as infd:
siminfo = pickle.load(infd)
objectids = siminfo['objectid']
varflags = siminfo['isvariable']
sdssr = siminfo['sdssr']
# get the column defs for the fakelcs
timecols = siminfo['timecols']
magcols = siminfo['magcols']
errcols = siminfo['errcols']
# register the fakelc pklc as a custom lcproc format
# now we should be able to use all lcproc functions correctly
fakelc_formatkey = 'fake-%s' % siminfo['lcformat']
lcproc.register_lcformat(
fakelc_formatkey,
'*-fakelc.pkl',
timecols,
magcols,
errcols,
'astrobase.lcproc',
'_read_pklc',
magsarefluxes=siminfo['magsarefluxes']
)
    # make the output directory if it doesn't exist
outdir = os.path.join(simbasedir, 'recvar-threshold-pkls')
if not os.path.exists(outdir):
os.mkdir(outdir)
# run the variability search
varfeaturedir = os.path.join(simbasedir, 'varfeatures')
    varthreshinfof = os.path.join(
        outdir,
        'varthresh-magbinmed%.2f-stet%.2f-inveta%.2f-iqr%.2f.pkl' %
        (magbinmedian, stetson_stdev_min, inveta_stdev_min, iqr_stdev_min)
    )
varthresh = varthreshold.variability_threshold(
varfeaturedir,
varthreshinfof,
lcformat=fakelc_formatkey,
min_stetj_stdev=stetson_stdev_min,
min_inveta_stdev=inveta_stdev_min,
min_iqr_stdev=iqr_stdev_min,
verbose=False
)
# get the magbins from the varthresh info
magbins = varthresh['magbins']
# get the magbininds
magbininds = np.digitize(sdssr, magbins)
# bin the objects according to these magbins
binned_objectids = []
binned_actualvars = []
binned_actualnotvars = []
# go through all the mag bins and bin up the objectids, actual variables,
# and actual not-variables
for mbinind, _magi in zip(np.unique(magbininds),
range(len(magbins)-1)):
thisbinind = np.where(magbininds == mbinind)
thisbin_objectids = objectids[thisbinind]
thisbin_varflags = varflags[thisbinind]
thisbin_actualvars = thisbin_objectids[thisbin_varflags]
thisbin_actualnotvars = thisbin_objectids[~thisbin_varflags]
binned_objectids.append(thisbin_objectids)
binned_actualvars.append(thisbin_actualvars)
binned_actualnotvars.append(thisbin_actualnotvars)
# this is the output dict
recdict = {
'simbasedir':simbasedir,
'timecols':timecols,
'magcols':magcols,
'errcols':errcols,
'magsarefluxes':siminfo['magsarefluxes'],
'stetj_min_stdev':stetson_stdev_min,
'inveta_min_stdev':inveta_stdev_min,
'iqr_min_stdev':iqr_stdev_min,
'magbinmedian':magbinmedian,
}
# now, for each magcol, find the magbin corresponding to magbinmedian, and
# get its stats
for magcol in magcols:
# this is the index of the matching magnitude bin for the magbinmedian
# provided
magbinind = np.where(
np.array(varthresh[magcol]['binned_sdssr_median']) == magbinmedian
)
        # np.asscalar is removed in recent NumPy; .item() is the equivalent
        magbinind = magbinind[0].item()
# get the objectids, actual vars and actual notvars in this magbin
thisbin_objectids = binned_objectids[magbinind]
thisbin_actualvars = binned_actualvars[magbinind]
thisbin_actualnotvars = binned_actualnotvars[magbinind]
# stetson recovered variables in this magbin
stet_recoveredvars = varthresh[magcol][
'binned_objectids_thresh_stetsonj'
][magbinind]
# calculate TP, FP, TN, FN
stet_recoverednotvars = np.setdiff1d(thisbin_objectids,
stet_recoveredvars)
stet_truepositives = np.intersect1d(stet_recoveredvars,
thisbin_actualvars)
stet_falsepositives = np.intersect1d(stet_recoveredvars,
thisbin_actualnotvars)
stet_truenegatives = np.intersect1d(stet_recoverednotvars,
thisbin_actualnotvars)
stet_falsenegatives = np.intersect1d(stet_recoverednotvars,
thisbin_actualvars)
# calculate stetson recall, precision, Matthews correl coeff
stet_recall = recall(stet_truepositives.size,
stet_falsenegatives.size)
stet_precision = precision(stet_truepositives.size,
stet_falsepositives.size)
stet_mcc = matthews_correl_coeff(stet_truepositives.size,
stet_truenegatives.size,
stet_falsepositives.size,
stet_falsenegatives.size)
# inveta recovered variables in this magbin
inveta_recoveredvars = varthresh[magcol][
'binned_objectids_thresh_inveta'
][magbinind]
inveta_recoverednotvars = np.setdiff1d(thisbin_objectids,
inveta_recoveredvars)
inveta_truepositives = np.intersect1d(inveta_recoveredvars,
thisbin_actualvars)
inveta_falsepositives = np.intersect1d(inveta_recoveredvars,
thisbin_actualnotvars)
inveta_truenegatives = np.intersect1d(inveta_recoverednotvars,
thisbin_actualnotvars)
inveta_falsenegatives = np.intersect1d(inveta_recoverednotvars,
thisbin_actualvars)
# calculate inveta recall, precision, Matthews correl coeff
inveta_recall = recall(inveta_truepositives.size,
inveta_falsenegatives.size)
inveta_precision = precision(inveta_truepositives.size,
inveta_falsepositives.size)
inveta_mcc = matthews_correl_coeff(inveta_truepositives.size,
inveta_truenegatives.size,
inveta_falsepositives.size,
inveta_falsenegatives.size)
# iqr recovered variables in this magbin
iqr_recoveredvars = varthresh[magcol][
'binned_objectids_thresh_iqr'
][magbinind]
iqr_recoverednotvars = np.setdiff1d(thisbin_objectids,
iqr_recoveredvars)
iqr_truepositives = np.intersect1d(iqr_recoveredvars,
thisbin_actualvars)
iqr_falsepositives = np.intersect1d(iqr_recoveredvars,
thisbin_actualnotvars)
iqr_truenegatives = np.intersect1d(iqr_recoverednotvars,
thisbin_actualnotvars)
iqr_falsenegatives = np.intersect1d(iqr_recoverednotvars,
thisbin_actualvars)
# calculate iqr recall, precision, Matthews correl coeff
iqr_recall = recall(iqr_truepositives.size,
iqr_falsenegatives.size)
iqr_precision = precision(iqr_truepositives.size,
iqr_falsepositives.size)
iqr_mcc = matthews_correl_coeff(iqr_truepositives.size,
iqr_truenegatives.size,
iqr_falsepositives.size,
iqr_falsenegatives.size)
# calculate the items missed by one method but found by the other
# methods
stet_missed_inveta_found = np.setdiff1d(inveta_truepositives,
stet_truepositives)
stet_missed_iqr_found = np.setdiff1d(iqr_truepositives,
stet_truepositives)
inveta_missed_stet_found = np.setdiff1d(stet_truepositives,
inveta_truepositives)
inveta_missed_iqr_found = np.setdiff1d(iqr_truepositives,
inveta_truepositives)
iqr_missed_stet_found = np.setdiff1d(stet_truepositives,
iqr_truepositives)
iqr_missed_inveta_found = np.setdiff1d(inveta_truepositives,
iqr_truepositives)
if not statsonly:
recdict[magcol] = {
# stetson J alone
'stet_recoveredvars':stet_recoveredvars,
'stet_truepositives':stet_truepositives,
'stet_falsepositives':stet_falsepositives,
'stet_truenegatives':stet_truenegatives,
'stet_falsenegatives':stet_falsenegatives,
'stet_precision':stet_precision,
'stet_recall':stet_recall,
'stet_mcc':stet_mcc,
# inveta alone
'inveta_recoveredvars':inveta_recoveredvars,
'inveta_truepositives':inveta_truepositives,
'inveta_falsepositives':inveta_falsepositives,
'inveta_truenegatives':inveta_truenegatives,
'inveta_falsenegatives':inveta_falsenegatives,
'inveta_precision':inveta_precision,
'inveta_recall':inveta_recall,
'inveta_mcc':inveta_mcc,
# iqr alone
'iqr_recoveredvars':iqr_recoveredvars,
'iqr_truepositives':iqr_truepositives,
'iqr_falsepositives':iqr_falsepositives,
'iqr_truenegatives':iqr_truenegatives,
'iqr_falsenegatives':iqr_falsenegatives,
'iqr_precision':iqr_precision,
'iqr_recall':iqr_recall,
'iqr_mcc':iqr_mcc,
# true positive variables missed by one method but picked up by
# the others
'stet_missed_inveta_found':stet_missed_inveta_found,
'stet_missed_iqr_found':stet_missed_iqr_found,
'inveta_missed_stet_found':inveta_missed_stet_found,
'inveta_missed_iqr_found':inveta_missed_iqr_found,
'iqr_missed_stet_found':iqr_missed_stet_found,
'iqr_missed_inveta_found':iqr_missed_inveta_found,
# bin info
'actual_variables':thisbin_actualvars,
'actual_nonvariables':thisbin_actualnotvars,
'all_objectids':thisbin_objectids,
'magbinind':magbinind,
}
# if statsonly is set, then we only return the numbers but not the
# arrays themselves
else:
recdict[magcol] = {
# stetson J alone
'stet_recoveredvars':stet_recoveredvars.size,
'stet_truepositives':stet_truepositives.size,
'stet_falsepositives':stet_falsepositives.size,
'stet_truenegatives':stet_truenegatives.size,
'stet_falsenegatives':stet_falsenegatives.size,
'stet_precision':stet_precision,
'stet_recall':stet_recall,
'stet_mcc':stet_mcc,
# inveta alone
'inveta_recoveredvars':inveta_recoveredvars.size,
'inveta_truepositives':inveta_truepositives.size,
'inveta_falsepositives':inveta_falsepositives.size,
'inveta_truenegatives':inveta_truenegatives.size,
'inveta_falsenegatives':inveta_falsenegatives.size,
'inveta_precision':inveta_precision,
'inveta_recall':inveta_recall,
'inveta_mcc':inveta_mcc,
# iqr alone
'iqr_recoveredvars':iqr_recoveredvars.size,
'iqr_truepositives':iqr_truepositives.size,
'iqr_falsepositives':iqr_falsepositives.size,
'iqr_truenegatives':iqr_truenegatives.size,
'iqr_falsenegatives':iqr_falsenegatives.size,
'iqr_precision':iqr_precision,
'iqr_recall':iqr_recall,
'iqr_mcc':iqr_mcc,
# true positive variables missed by one method but picked up by
# the others
'stet_missed_inveta_found':stet_missed_inveta_found.size,
'stet_missed_iqr_found':stet_missed_iqr_found.size,
'inveta_missed_stet_found':inveta_missed_stet_found.size,
'inveta_missed_iqr_found':inveta_missed_iqr_found.size,
'iqr_missed_stet_found':iqr_missed_stet_found.size,
'iqr_missed_inveta_found':iqr_missed_inveta_found.size,
# bin info
'actual_variables':thisbin_actualvars.size,
'actual_nonvariables':thisbin_actualnotvars.size,
'all_objectids':thisbin_objectids.size,
'magbinind':magbinind,
}
#
# done with per magcol
#
return recdict


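# Usage sketch for a single magbin; the path and magbinmedian value are
# hypothetical (magbinmedian must be one of the
# `fakelcinfo['magrms'][magcol]['binned_sdssr_median']` values from
# simbasedir/fakelcs-info.pkl):
#
#     recdict = get_recovered_variables_for_magbin('/data/simbasedir',
#                                                  12.25,
#                                                  stetson_stdev_min=2.0,
#                                                  inveta_stdev_min=2.0,
#                                                  iqr_stdev_min=2.0,
#                                                  statsonly=True)
#     print(recdict[recdict['magcols'][0]]['stet_mcc'])
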
def magbin_varind_gridsearch_worker(task):
'''
This is a parallel grid search worker for the function below.
'''
simbasedir, gridpoint, magbinmedian = task
try:
res = get_recovered_variables_for_magbin(simbasedir,
magbinmedian,
stetson_stdev_min=gridpoint[0],
inveta_stdev_min=gridpoint[1],
iqr_stdev_min=gridpoint[2],
statsonly=True)
return res
except Exception as e:
LOGEXCEPTION('failed to get info for %s' % gridpoint)
return None


def variable_index_gridsearch_magbin(simbasedir,
stetson_stdev_range=(1.0,20.0),
inveta_stdev_range=(1.0,20.0),
iqr_stdev_range=(1.0,20.0),
ngridpoints=32,
ngridworkers=None):
'''This runs a variable index grid search per magbin.
    For each magbin, this does a grid search using the stetson, inveta, and
    IQR ranges provided and tries to optimize the Matthews Correlation
    Coefficient (best value is +1.0), indicating the best possible separation
    of variables vs. nonvariables. The thresholds on these variability indices
    that produce the largest coefficient for the collection of fake LCs will
    probably be the ones that work best for actual variable classification on
    the real LCs.
https://en.wikipedia.org/wiki/Matthews_correlation_coefficient
For each grid-point, calculates the true positives, false positives, true
negatives, false negatives. Then gets the precision and recall, confusion
matrix, and the ROC curve for variable vs. nonvariable.
Once we've identified the best thresholds to use, we can then calculate
variable object numbers:
- as a function of magnitude
- as a function of period
- as a function of number of detections
- as a function of amplitude of variability
Writes everything back to `simbasedir/fakevar-recovery.pkl`. Use the
plotting function below to make plots for the results.
Parameters
----------
simbasedir : str
The directory where the fake LCs are located.
    stetson_stdev_range : sequence of 2 floats
        The min and max values of the Stetson J stdev-multiplier threshold. A
        grid over this range is searched for the value that produces the
        'best' recovery rate for the injected variable stars.
    inveta_stdev_range : sequence of 2 floats
        The min and max values of the 1/eta stdev-multiplier threshold. A
        grid over this range is searched for the value that produces the
        'best' recovery rate for the injected variable stars.
    iqr_stdev_range : sequence of 2 floats
        The min and max values of the IQR stdev-multiplier threshold. A grid
        over this range is searched for the value that produces the 'best'
        recovery rate for the injected variable stars.
ngridpoints : int
The number of grid points for each variability index grid. Remember that
this function will be searching in 3D and will require lots of time to
run if ngridpoints is too large.
For the default number of grid points and 25000 simulated light curves,
this takes about 3 days to run on a 40 (effective) core machine with 2 x
Xeon E5-2650v3 CPUs.
ngridworkers : int or None
The number of parallel grid search workers that will be launched.
Returns
-------
dict
The returned dict contains a list of recovery stats for each magbin and
each grid point in the variability index grids that were used. This dict
can be passed to the plotting function below to plot the results.
'''
# make the output directory where all the pkls from the variability
# threshold runs will go
outdir = os.path.join(simbasedir,'recvar-threshold-pkls')
if not os.path.exists(outdir):
os.mkdir(outdir)
# get the info from the simbasedir
with open(os.path.join(simbasedir, 'fakelcs-info.pkl'),'rb') as infd:
siminfo = pickle.load(infd)
# get the column defs for the fakelcs
timecols = siminfo['timecols']
magcols = siminfo['magcols']
errcols = siminfo['errcols']
# get the magbinmedians to use for the recovery processing
magbinmedians = siminfo['magrms'][magcols[0]]['binned_sdssr_median']
# generate the grids for stetson and inveta
stetson_grid = np.linspace(stetson_stdev_range[0],
stetson_stdev_range[1],
num=ngridpoints)
inveta_grid = np.linspace(inveta_stdev_range[0],
inveta_stdev_range[1],
num=ngridpoints)
iqr_grid = np.linspace(iqr_stdev_range[0],
iqr_stdev_range[1],
num=ngridpoints)
# generate the grid
stet_inveta_iqr_grid = []
for stet in stetson_grid:
for inveta in inveta_grid:
for iqr in iqr_grid:
grid_point = [stet, inveta, iqr]
stet_inveta_iqr_grid.append(grid_point)
# the output dict
grid_results = {'stetson_grid':stetson_grid,
'inveta_grid':inveta_grid,
'iqr_grid':iqr_grid,
'stet_inveta_iqr_grid':stet_inveta_iqr_grid,
'magbinmedians':magbinmedians,
'timecols':timecols,
'magcols':magcols,
'errcols':errcols,
'simbasedir':os.path.abspath(simbasedir),
'recovery':[]}
# set up the pool
pool = mp.Pool(ngridworkers)
# run the grid search per magbinmedian
for magbinmedian in magbinmedians:
        LOGINFO('running stetson J-inveta-IQR grid-search '
                'for magbinmedian = %.3f...' % magbinmedian)
tasks = [(simbasedir, gp, magbinmedian) for gp in stet_inveta_iqr_grid]
thisbin_results = pool.map(magbin_varind_gridsearch_worker, tasks)
grid_results['recovery'].append(thisbin_results)
pool.close()
pool.join()
LOGINFO('done.')
with open(os.path.join(simbasedir,
'fakevar-recovery-per-magbin.pkl'),'wb') as outfd:
pickle.dump(grid_results,outfd,pickle.HIGHEST_PROTOCOL)
return grid_results


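# Usage sketch with a hypothetical path and a deliberately small grid (see
# the runtime warning in the docstring -- a full 32^3 grid is expensive):
#
#     grid_results = variable_index_gridsearch_magbin(
#         '/data/simbasedir',
#         stetson_stdev_range=(1.0, 20.0),
#         inveta_stdev_range=(1.0, 20.0),
#         iqr_stdev_range=(1.0, 20.0),
#         ngridpoints=8,
#         ngridworkers=4,
#     )
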
def plot_varind_gridsearch_magbin_results(gridsearch_results):
'''This plots the gridsearch results from `variable_index_gridsearch_magbin`.
Parameters
----------
gridsearch_results : dict
This is the dict produced by `variable_index_gridsearch_magbin` above.
Returns
-------
dict
The returned dict contains filenames of the recovery rate plots made for
each variability index. These include plots of the precision, recall,
and Matthews Correlation Coefficient over each magbin and a heatmap of
these values over the grid points of the variability index stdev values
arrays used.
'''
# get the result pickle/dict
if (isinstance(gridsearch_results, str) and
os.path.exists(gridsearch_results)):
with open(gridsearch_results,'rb') as infd:
gridresults = pickle.load(infd)
elif isinstance(gridsearch_results, dict):
gridresults = gridsearch_results
else:
LOGERROR('could not understand the input '
'variable index grid-search result dict/pickle')
return None
plotres = {'simbasedir':gridresults['simbasedir']}
recgrid = gridresults['recovery']
simbasedir = gridresults['simbasedir']
for magcol in gridresults['magcols']:
plotres[magcol] = {'best_stetsonj':[],
'best_inveta':[],
'best_iqr':[],
'magbinmedians':gridresults['magbinmedians']}
# go through all the magbins
for magbinind, magbinmedian in enumerate(gridresults['magbinmedians']):
LOGINFO('plotting results for %s: magbin: %.3f' %
(magcol, magbinmedian))
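            # NOTE on the slicing below: `stet_inveta_iqr_grid` was built by
            # nested loops ordered stet (slowest) -> inveta -> iqr (fastest),
            # so the flattened index of a grid point is
            # stet_i*(n_inveta*n_iqr) + inveta_i*n_iqr + iqr_i. The slices
            # extract 1-D cuts along one index axis with the other two held
            # at 0 (their smallest grid values). The stride arithmetic mixes
            # the grids' `.size` attributes interchangeably, which is valid
            # only because all three grids share the same `ngridpoints`.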
stet_mcc = np.array(
[x[magcol]['stet_mcc']
for x in recgrid[magbinind]]
)[::(gridresults['inveta_grid'].size *
gridresults['stetson_grid'].size)]
stet_precision = np.array(
[x[magcol]['stet_precision']
for x in recgrid[magbinind]]
)[::(gridresults['inveta_grid'].size *
gridresults['stetson_grid'].size)]
stet_recall = np.array(
[x[magcol]['stet_recall']
for x in recgrid[magbinind]]
)[::(gridresults['inveta_grid'].size *
gridresults['stetson_grid'].size)]
stet_missed_inveta_found = np.array(
[x[magcol]['stet_missed_inveta_found']
for x in recgrid[magbinind]]
)[::(gridresults['inveta_grid'].size *
gridresults['stetson_grid'].size)]
stet_missed_iqr_found = np.array(
[x[magcol]['stet_missed_iqr_found']
for x in recgrid[magbinind]]
)[::(gridresults['inveta_grid'].size *
gridresults['stetson_grid'].size)]
inveta_mcc = np.array(
[x[magcol]['inveta_mcc']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
::gridresults['inveta_grid'].size
]
inveta_precision = np.array(
[x[magcol]['inveta_precision']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
::gridresults['inveta_grid'].size
]
inveta_recall = np.array(
[x[magcol]['inveta_recall']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
::gridresults['inveta_grid'].size
]
inveta_missed_stet_found = np.array(
[x[magcol]['inveta_missed_stet_found']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
::gridresults['inveta_grid'].size
]
inveta_missed_iqr_found = np.array(
[x[magcol]['inveta_missed_iqr_found']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
::gridresults['inveta_grid'].size
]
iqr_mcc = np.array(
[x[magcol]['iqr_mcc']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
:gridresults['inveta_grid'].size
]
iqr_precision = np.array(
[x[magcol]['iqr_precision']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
:gridresults['inveta_grid'].size
]
iqr_recall = np.array(
[x[magcol]['iqr_recall']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
:gridresults['inveta_grid'].size
]
iqr_missed_stet_found = np.array(
[x[magcol]['iqr_missed_stet_found']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
:gridresults['inveta_grid'].size
]
iqr_missed_inveta_found = np.array(
[x[magcol]['iqr_missed_inveta_found']
for x in recgrid[magbinind]]
)[:(gridresults['iqr_grid'].size *
gridresults['stetson_grid'].size)][
:gridresults['inveta_grid'].size
]
fig = plt.figure(figsize=(6.4*5, 4.8*3))
# FIRST ROW: stetson J plot
plt.subplot(3,5,1)
if np.any(np.isfinite(stet_mcc)):
plt.plot(gridresults['stetson_grid'],
stet_mcc)
plt.xlabel('stetson J stdev multiplier threshold')
plt.ylabel('MCC')
plt.title('MCC for stetson J')
else:
plt.text(0.5,0.5,
'stet MCC values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,2)
if np.any(np.isfinite(stet_precision)):
plt.plot(gridresults['stetson_grid'],
stet_precision)
plt.xlabel('stetson J stdev multiplier threshold')
plt.ylabel('precision')
plt.title('precision for stetson J')
else:
plt.text(0.5,0.5,
'stet precision values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,3)
if np.any(np.isfinite(stet_recall)):
plt.plot(gridresults['stetson_grid'],
stet_recall)
plt.xlabel('stetson J stdev multiplier threshold')
plt.ylabel('recall')
plt.title('recall for stetson J')
else:
plt.text(0.5,0.5,
'stet recall values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,4)
if np.any(np.isfinite(stet_missed_inveta_found)):
plt.plot(gridresults['stetson_grid'],
stet_missed_inveta_found)
plt.xlabel('stetson J stdev multiplier threshold')
plt.ylabel('# objects stetson missed but inveta found')
plt.title('stetson J missed, inveta found')
else:
plt.text(0.5,0.5,
'stet-missed/inveta-found values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,5)
if np.any(np.isfinite(stet_missed_iqr_found)):
plt.plot(gridresults['stetson_grid'],
stet_missed_iqr_found)
plt.xlabel('stetson J stdev multiplier threshold')
plt.ylabel('# objects stetson missed but IQR found')
plt.title('stetson J missed, IQR found')
else:
plt.text(0.5,0.5,
'stet-missed/IQR-found values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
# SECOND ROW: inveta plots
plt.subplot(3,5,6)
if np.any(np.isfinite(inveta_mcc)):
plt.plot(gridresults['inveta_grid'],
inveta_mcc)
plt.xlabel('inveta stdev multiplier threshold')
plt.ylabel('MCC')
plt.title('MCC for inveta')
else:
plt.text(0.5,0.5,
'inveta MCC values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,7)
if np.any(np.isfinite(inveta_precision)):
plt.plot(gridresults['inveta_grid'],
inveta_precision)
plt.xlabel('inveta stdev multiplier threshold')
plt.ylabel('precision')
plt.title('precision for inveta')
else:
plt.text(0.5,0.5,
'inveta precision values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,8)
if np.any(np.isfinite(inveta_recall)):
plt.plot(gridresults['inveta_grid'],
inveta_recall)
plt.xlabel('inveta stdev multiplier threshold')
plt.ylabel('recall')
plt.title('recall for inveta')
else:
plt.text(0.5,0.5,
'inveta recall values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,9)
if np.any(np.isfinite(inveta_missed_stet_found)):
plt.plot(gridresults['inveta_grid'],
inveta_missed_stet_found)
plt.xlabel('inveta stdev multiplier threshold')
plt.ylabel('# objects inveta missed but stetson found')
plt.title('inveta missed, stetson J found')
else:
plt.text(0.5,0.5,
'inveta-missed-stet-found values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,10)
if np.any(np.isfinite(inveta_missed_iqr_found)):
plt.plot(gridresults['inveta_grid'],
inveta_missed_iqr_found)
plt.xlabel('inveta stdev multiplier threshold')
plt.ylabel('# objects inveta missed but IQR found')
plt.title('inveta missed, IQR found')
else:
plt.text(0.5,0.5,
'inveta-missed-iqr-found values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
            # THIRD ROW: IQR plots
plt.subplot(3,5,11)
if np.any(np.isfinite(iqr_mcc)):
plt.plot(gridresults['iqr_grid'],
iqr_mcc)
plt.xlabel('IQR stdev multiplier threshold')
plt.ylabel('MCC')
plt.title('MCC for IQR')
else:
plt.text(0.5,0.5,
'IQR MCC values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,12)
if np.any(np.isfinite(iqr_precision)):
plt.plot(gridresults['iqr_grid'],
iqr_precision)
plt.xlabel('IQR stdev multiplier threshold')
plt.ylabel('precision')
plt.title('precision for IQR')
else:
plt.text(0.5,0.5,
'IQR precision values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,13)
if np.any(np.isfinite(iqr_recall)):
plt.plot(gridresults['iqr_grid'],
iqr_recall)
plt.xlabel('IQR stdev multiplier threshold')
plt.ylabel('recall')
plt.title('recall for IQR')
else:
plt.text(0.5,0.5,
'IQR recall values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,14)
if np.any(np.isfinite(iqr_missed_stet_found)):
plt.plot(gridresults['iqr_grid'],
iqr_missed_stet_found)
plt.xlabel('IQR stdev multiplier threshold')
plt.ylabel('# objects IQR missed but stetson found')
plt.title('IQR missed, stetson J found')
else:
plt.text(0.5,0.5,
'iqr-missed-stet-found values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplot(3,5,15)
if np.any(np.isfinite(iqr_missed_inveta_found)):
plt.plot(gridresults['iqr_grid'],
iqr_missed_inveta_found)
plt.xlabel('IQR stdev multiplier threshold')
plt.ylabel('# objects IQR missed but inveta found')
plt.title('IQR missed, inveta found')
else:
plt.text(0.5,0.5,
'iqr-missed-inveta-found values are all nan '
'for this magbin',
transform=plt.gca().transAxes,
horizontalalignment='center',
verticalalignment='center')
plt.xticks([])
plt.yticks([])
plt.subplots_adjust(hspace=0.25,wspace=0.25)
plt.suptitle('magcol: %s, magbin: %.3f' % (magcol, magbinmedian))
plotdir = os.path.join(gridresults['simbasedir'],
'varindex-gridsearch-plots')
if not os.path.exists(plotdir):
os.mkdir(plotdir)
gridplotf = os.path.join(
plotdir,
'%s-magbin-%.3f-var-recoverygrid-permagbin.png' %
(magcol, magbinmedian)
)
plt.savefig(gridplotf,dpi=100,bbox_inches='tight')
plt.close('all')
# get the best values of MCC, recall, precision and their associated
# stet, inveta
stet_mcc_maxind = np.where(stet_mcc == np.max(stet_mcc))
stet_precision_maxind = np.where(
stet_precision == np.max(stet_precision)
)
stet_recall_maxind = np.where(stet_recall == np.max(stet_recall))
best_stet_mcc = stet_mcc[stet_mcc_maxind]
            best_stet_precision = stet_precision[stet_precision_maxind]
            best_stet_recall = stet_recall[stet_recall_maxind]
stet_with_best_mcc = gridresults['stetson_grid'][stet_mcc_maxind]
stet_with_best_precision = gridresults['stetson_grid'][
stet_precision_maxind
]
stet_with_best_recall = (
gridresults['stetson_grid'][stet_recall_maxind]
)
inveta_mcc_maxind = np.where(inveta_mcc == np.max(inveta_mcc))
inveta_precision_maxind = np.where(
inveta_precision == np.max(inveta_precision)
)
inveta_recall_maxind = (
np.where(inveta_recall == np.max(inveta_recall))
)
best_inveta_mcc = inveta_mcc[inveta_mcc_maxind]
            best_inveta_precision = inveta_precision[inveta_precision_maxind]
            best_inveta_recall = inveta_recall[inveta_recall_maxind]
inveta_with_best_mcc = gridresults['inveta_grid'][inveta_mcc_maxind]
inveta_with_best_precision = gridresults['inveta_grid'][
inveta_precision_maxind
]
inveta_with_best_recall = gridresults['inveta_grid'][
inveta_recall_maxind
]
iqr_mcc_maxind = np.where(iqr_mcc == np.max(iqr_mcc))
iqr_precision_maxind = np.where(
iqr_precision == np.max(iqr_precision)
)
iqr_recall_maxind = (
np.where(iqr_recall == np.max(iqr_recall))
)
best_iqr_mcc = iqr_mcc[iqr_mcc_maxind]
            best_iqr_precision = iqr_precision[iqr_precision_maxind]
            best_iqr_recall = iqr_recall[iqr_recall_maxind]
iqr_with_best_mcc = gridresults['iqr_grid'][iqr_mcc_maxind]
iqr_with_best_precision = gridresults['iqr_grid'][
iqr_precision_maxind
]
iqr_with_best_recall = gridresults['iqr_grid'][
iqr_recall_maxind
]
plotres[magcol][magbinmedian] = {
# stetson
'stet_grid':gridresults['stetson_grid'],
'stet_mcc':stet_mcc,
'stet_precision':stet_precision,
'stet_recall':stet_recall,
'stet_missed_inveta_found':stet_missed_inveta_found,
'best_stet_mcc':best_stet_mcc,
'stet_with_best_mcc':stet_with_best_mcc,
'best_stet_precision':best_stet_precision,
'stet_with_best_precision':stet_with_best_precision,
'best_stet_recall':best_stet_recall,
'stet_with_best_recall':stet_with_best_recall,
# inveta
'inveta_grid':gridresults['inveta_grid'],
'inveta_mcc':inveta_mcc,
'inveta_precision':inveta_precision,
'inveta_recall':inveta_recall,
'inveta_missed_stet_found':inveta_missed_stet_found,
'best_inveta_mcc':best_inveta_mcc,
'inveta_with_best_mcc':inveta_with_best_mcc,
'best_inveta_precision':best_inveta_precision,
'inveta_with_best_precision':inveta_with_best_precision,
'best_inveta_recall':best_inveta_recall,
'inveta_with_best_recall':inveta_with_best_recall,
# iqr
'iqr_grid':gridresults['iqr_grid'],
'iqr_mcc':iqr_mcc,
'iqr_precision':iqr_precision,
'iqr_recall':iqr_recall,
'iqr_missed_stet_found':iqr_missed_stet_found,
'best_iqr_mcc':best_iqr_mcc,
'iqr_with_best_mcc':iqr_with_best_mcc,
'best_iqr_precision':best_iqr_precision,
'iqr_with_best_precision':iqr_with_best_precision,
'best_iqr_recall':best_iqr_recall,
'iqr_with_best_recall':iqr_with_best_recall,
# plot info
'recoveryplot':gridplotf
}
            # recommend stetson J, inveta, and IQR thresholds for this magbin;
            # if there are multiple best values, choose the smallest one
            if stet_with_best_mcc.size > 0:
                plotres[magcol]['best_stetsonj'].append(stet_with_best_mcc[0])
            else:
                plotres[magcol]['best_stetsonj'].append(np.nan)
            if inveta_with_best_mcc.size > 0:
                plotres[magcol]['best_inveta'].append(inveta_with_best_mcc[0])
            else:
                plotres[magcol]['best_inveta'].append(np.nan)
            if iqr_with_best_mcc.size > 0:
                plotres[magcol]['best_iqr'].append(iqr_with_best_mcc[0])
            else:
                plotres[magcol]['best_iqr'].append(np.nan)
# write the plotresults to a pickle
plotrespicklef = os.path.join(simbasedir,
'varindex-gridsearch-magbin-results.pkl')
with open(plotrespicklef, 'wb') as outfd:
pickle.dump(plotres, outfd, pickle.HIGHEST_PROTOCOL)
# recommend the values of stetson J and inveta to use
for magcol in gridresults['magcols']:
LOGINFO('best stdev multipliers for each %s magbin:' % magcol)
LOGINFO('magbin inveta stetson J IQR')
for magbin, inveta, stet, iqr in zip(
plotres[magcol]['magbinmedians'],
plotres[magcol]['best_inveta'],
plotres[magcol]['best_stetsonj'],
plotres[magcol]['best_iqr']):
LOGINFO('%.3f %.3f %.3f %.3f' % (magbin,
inveta,
stet,
iqr))
return plotres


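# Usage sketch (hypothetical path; this is the pickle written by
# `variable_index_gridsearch_magbin` above):
#
#     plotres = plot_varind_gridsearch_magbin_results(
#         '/data/simbasedir/fakevar-recovery-per-magbin.pkl'
#     )
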
def run_periodfinding(simbasedir,
pfmethods=('gls','pdm','bls'),
pfkwargs=({},{},{'startp':1.0,'maxtransitduration':0.3}),
getblssnr=False,
sigclip=5.0,
nperiodworkers=10,
ncontrolworkers=4,
liststartindex=None,
listmaxobjects=None):
'''This runs periodfinding using several period-finders on a collection of
fake LCs.
    As a rough benchmark, 25000 fake LCs with 10000--50000 points per LC take
    about 26 days in total to run with GLS+PDM+BLS, 10 period-workers, and 4
    control-workers (so all 40 'cores') on a 2 x Xeon E5-2660v3 machine.
Parameters
----------
    simbasedir : str
        The directory containing the fake LCs to process.
    pfmethods : sequence of str
        This is used to specify which periodfinders to run. These must be in
        the `lcproc.periodsearch.PFMETHODS` dict.
pfkwargs : sequence of dict
This is used to provide optional kwargs to the period-finders.
getblssnr : bool
If this is True, will run BLS SNR calculations for each object and
magcol. This takes a while to run, so it's disabled (False) by default.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
nperiodworkers : int
This is the number of parallel period-finding worker processes to use.
ncontrolworkers : int
This is the number of parallel period-finding control workers to
use. Each control worker will launch `nperiodworkers` worker processes.
liststartindex : int
The starting index of processing. This refers to the filename list
generated by running `glob.glob` on the fake LCs in `simbasedir`.
    listmaxobjects : int
The maximum number of objects to process in this run. Use this with
`liststartindex` to effectively distribute working on a large list of
input light curves over several sessions or machines.
Returns
-------
str
The path to the output summary pickle produced by
        `lcproc.periodsearch.parallel_pf`.
'''
# get the info from the simbasedir
with open(os.path.join(simbasedir, 'fakelcs-info.pkl'),'rb') as infd:
siminfo = pickle.load(infd)
lcfpaths = siminfo['lcfpath']
pfdir = os.path.join(simbasedir,'periodfinding')
# get the column defs for the fakelcs
timecols = siminfo['timecols']
magcols = siminfo['magcols']
errcols = siminfo['errcols']
# register the fakelc pklc as a custom lcproc format
# now we should be able to use all lcproc functions correctly
fakelc_formatkey = 'fake-%s' % siminfo['lcformat']
lcproc.register_lcformat(
fakelc_formatkey,
'*-fakelc.pkl',
timecols,
magcols,
errcols,
'astrobase.lcproc',
'_read_pklc',
magsarefluxes=siminfo['magsarefluxes']
)
if liststartindex:
lcfpaths = lcfpaths[liststartindex:]
if listmaxobjects:
lcfpaths = lcfpaths[:listmaxobjects]
pfinfo = periodsearch.parallel_pf(lcfpaths,
pfdir,
lcformat=fakelc_formatkey,
pfmethods=pfmethods,
pfkwargs=pfkwargs,
getblssnr=getblssnr,
sigclip=sigclip,
nperiodworkers=nperiodworkers,
ncontrolworkers=ncontrolworkers)
with open(os.path.join(simbasedir,
'fakelc-periodsearch.pkl'),'wb') as outfd:
pickle.dump(pfinfo, outfd, pickle.HIGHEST_PROTOCOL)
return os.path.join(simbasedir,'fakelc-periodsearch.pkl')


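# Usage sketch (hypothetical path); this mirrors the defaults: GLS+PDM+BLS,
# with the BLS kwargs restricting the search to periods above 1 day and
# transit durations below 0.3 in phase units:
#
#     pf_pkl = run_periodfinding('/data/simbasedir',
#                                pfmethods=('gls', 'pdm', 'bls'),
#                                pfkwargs=({}, {},
#                                          {'startp': 1.0,
#                                           'maxtransitduration': 0.3}),
#                                nperiodworkers=10,
#                                ncontrolworkers=4)
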
def check_periodrec_alias(actualperiod,
recoveredperiod,
tolerance=1.0e-3):
'''This determines what kind of aliasing (if any) exists between
`recoveredperiod` and `actualperiod`.
Parameters
----------
actualperiod : float
The actual period of the object.
recoveredperiod : float
The recovered period of the object.
    tolerance : float
        The maximum absolute difference allowed between the input periods for
        the recovered period to be considered close to the actual period.
Returns
-------
str
The type of alias determined for the input combination of periods. This
will be CSV string with values taken from the following list, based on
the types of alias found::
['actual',
'twice',
'half',
'ratio_over_1plus',
'ratio_over_1minus',
'ratio_over_1plus_twice',
'ratio_over_1minus_twice',
'ratio_over_1plus_thrice',
'ratio_over_1minus_thrice',
'ratio_over_minus1',
'ratio_over_twice_minus1']
'''
if not (np.isfinite(actualperiod) and np.isfinite(recoveredperiod)):
LOGERROR("can't compare nan values for actual/recovered periods")
return 'unknown'
else:
#################
## ALIAS TYPES ##
#################
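        # These alias forms come from frequency aliasing: for a sampling
        # frequency f_s (~1 cycle/day for typical ground-based cadences),
        # spurious peaks appear at f_rec = f + n*f_s or |n*f_s - f|. With
        # f_s = 1 and f = 1/P, these map to P_rec = P/(1 +/- n*P) and
        # P_rec = P/(n*P - 1) in period space, which are the forms below.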
# simple ratios
twotimes_p = actualperiod*2.0
half_p = actualperiod*0.5
# first kind of alias
alias_1a = actualperiod/(1.0+actualperiod)
alias_1b = actualperiod/(1.0-actualperiod)
# second kind of alias
alias_2a = actualperiod/(1.0+2.0*actualperiod)
alias_2b = actualperiod/(1.0-2.0*actualperiod)
# third kind of alias
alias_3a = actualperiod/(1.0+3.0*actualperiod)
alias_3b = actualperiod/(1.0-3.0*actualperiod)
# fourth kind of alias
alias_4a = actualperiod/(actualperiod - 1.0)
alias_4b = actualperiod/(2.0*actualperiod - 1.0)
aliases = np.ravel(np.array([
actualperiod,
twotimes_p,
half_p,
alias_1a,
alias_1b,
alias_2a,
alias_2b,
alias_3a,
alias_3b,
alias_4a,
alias_4b]
))
alias_labels = np.array(ALIAS_TYPES)
# check type of alias
closest_alias = np.isclose(recoveredperiod, aliases, atol=tolerance)
if np.any(closest_alias):
closest_alias_type = alias_labels[closest_alias]
return ','.join(closest_alias_type.tolist())
else:
return 'other'


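# Quick illustrative checks for `check_periodrec_alias`; the toy periods are
# chosen so that only one alias type matches within the default tolerance.
def _example_periodrec_alias():
    '''Sketch showing typical alias classifications.'''
    # recovered period is exactly twice the actual period
    print(check_periodrec_alias(1.2345, 2.4690))  # -> 'twice'
    # recovered period is half the actual period
    print(check_periodrec_alias(3.7, 1.85))       # -> 'half'
    # recovered period matches neither the actual period nor any alias
    print(check_periodrec_alias(3.7, 11.0))       # -> 'other'
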
def periodicvar_recovery(fakepfpkl,
simbasedir,
period_tolerance=1.0e-3):
'''Recovers the periodic variable status/info for the simulated PF result.
- Uses simbasedir and the lcfbasename stored in fakepfpkl to figure out
where the LC for this object is.
- Gets the actual_varparams, actual_varperiod, actual_vartype,
actual_varamplitude elements from the LC.
- Figures out if the current objectid is a periodic variable (using
actual_vartype).
- If it is a periodic variable, gets the canonical period assigned to it.
- Checks if the period was recovered in any of the five best periods
reported by any of the period-finders, checks if the period recovered was
a harmonic of the period.
- Returns the objectid, actual period and vartype, recovered period, and
recovery status.
Parameters
----------
fakepfpkl : str
This is a periodfinding-<objectid>.pkl[.gz] file produced in the
`simbasedir/periodfinding` subdirectory after `run_periodfinding` above
is done.
simbasedir : str
The base directory where all of the fake LCs and period-finding results
are.
period_tolerance : float
        The maximum difference that this function will consider between an
        actual period (or its aliases) and a recovered period to consider it
        a 'recovered' period.
Returns
-------
dict
Returns a dict of period-recovery results.
'''
if fakepfpkl.endswith('.gz'):
infd = gzip.open(fakepfpkl,'rb')
else:
infd = open(fakepfpkl,'rb')
fakepf = pickle.load(infd)
infd.close()
# get info from the fakepf dict
objectid, lcfbasename = fakepf['objectid'], fakepf['lcfbasename']
lcfpath = os.path.join(simbasedir,'lightcurves',lcfbasename)
# if the LC doesn't exist, bail out
if not os.path.exists(lcfpath):
LOGERROR('light curve for %s does not exist at: %s' % (objectid,
lcfpath))
return None
# now, open the fakelc
fakelc = lcproc._read_pklc(lcfpath)
# get the actual_varparams, actual_varperiod, actual_varamplitude
actual_varparams, actual_varperiod, actual_varamplitude, actual_vartype = (
fakelc['actual_varparams'],
fakelc['actual_varperiod'],
fakelc['actual_varamplitude'],
fakelc['actual_vartype']
)
# get the moments too so we can track LC noise, etc.
actual_moments = fakelc['moments']
# get the magcols for this LC
magcols = fakelc['magcols']
# get the recovered info from each of the available methods
pfres = {
'objectid':objectid,
'simbasedir':simbasedir,
'magcols':magcols,
'fakelc':os.path.abspath(lcfpath),
'fakepf':os.path.abspath(fakepfpkl),
'actual_vartype':actual_vartype,
'actual_varperiod':actual_varperiod,
'actual_varamplitude':actual_varamplitude,
'actual_varparams':actual_varparams,
'actual_moments':actual_moments,
'recovery_periods':[],
'recovery_lspvals':[],
'recovery_pfmethods':[],
'recovery_magcols':[],
'recovery_status':[],
'recovery_pdiff':[],
}
# populate the pfres dict with the periods, pfmethods, and magcols
for magcol in magcols:
for pfm in lcproc.PFMETHODS:
if pfm in fakepf[magcol]:
# only get the unique recovered periods by using
# period_tolerance
for rpi, rp in enumerate(
fakepf[magcol][pfm]['nbestperiods']
):
if ((not np.any(np.isclose(
rp,
np.array(pfres['recovery_periods']),
rtol=period_tolerance
))) and np.isfinite(rp)):
# populate the recovery periods, pfmethods, and magcols
pfres['recovery_periods'].append(rp)
pfres['recovery_pfmethods'].append(pfm)
pfres['recovery_magcols'].append(magcol)
# normalize the periodogram peak value to between
# 0 and 1 so we can put in the results of multiple
# periodfinders on one scale
if pfm == 'pdm':
this_lspval = (
np.max(fakepf[magcol][pfm]['lspvals']) -
fakepf[magcol][pfm]['nbestlspvals'][rpi]
)
else:
this_lspval = (
fakepf[magcol][pfm]['nbestlspvals'][rpi] /
np.max(fakepf[magcol][pfm]['lspvals'])
)
# add the normalized lspval to the outdict for
# this object as well. later, we'll use this to
# construct a periodogram for objects that were actually
# not variables
pfres['recovery_lspvals'].append(this_lspval)
# convert the recovery_* lists to arrays
pfres['recovery_periods'] = np.array(pfres['recovery_periods'])
pfres['recovery_lspvals'] = np.array(pfres['recovery_lspvals'])
pfres['recovery_pfmethods'] = np.array(pfres['recovery_pfmethods'])
pfres['recovery_magcols'] = np.array(pfres['recovery_magcols'])
#
# now figure out recovery status
#
# if this is an actual periodic variable, characterize the recovery
if (actual_vartype and
actual_vartype in PERIODIC_VARTYPES and
np.isfinite(actual_varperiod)):
if pfres['recovery_periods'].size > 0:
for ri in range(pfres['recovery_periods'].size):
                # float() replaces the removed np.asscalar here
                pfres['recovery_pdiff'].append(pfres['recovery_periods'][ri] -
                                               float(actual_varperiod))
# get the alias types
pfres['recovery_status'].append(
check_periodrec_alias(actual_varperiod,
pfres['recovery_periods'][ri],
tolerance=period_tolerance)
)
# turn the recovery_pdiff/status lists into arrays
pfres['recovery_status'] = np.array(pfres['recovery_status'])
pfres['recovery_pdiff'] = np.array(pfres['recovery_pdiff'])
# find the best recovered period and its status
rec_absdiff = np.abs(pfres['recovery_pdiff'])
best_recp_ind = rec_absdiff == rec_absdiff.min()
pfres['best_recovered_period'] = (
pfres['recovery_periods'][best_recp_ind]
)
pfres['best_recovered_pfmethod'] = (
pfres['recovery_pfmethods'][best_recp_ind]
)
pfres['best_recovered_magcol'] = (
pfres['recovery_magcols'][best_recp_ind]
)
pfres['best_recovered_status'] = (
pfres['recovery_status'][best_recp_ind]
)
pfres['best_recovered_pdiff'] = (
pfres['recovery_pdiff'][best_recp_ind]
)
else:
LOGWARNING(
'no finite periods recovered from period-finding for %s' %
fakepfpkl
)
pfres['recovery_status'] = np.array(['no_finite_periods_recovered'])
pfres['recovery_pdiff'] = np.array([np.nan])
pfres['best_recovered_period'] = np.array([np.nan])
pfres['best_recovered_pfmethod'] = np.array([],dtype=np.unicode_)
pfres['best_recovered_magcol'] = np.array([],dtype=np.unicode_)
pfres['best_recovered_status'] = np.array([],dtype=np.unicode_)
pfres['best_recovered_pdiff'] = np.array([np.nan])
# if this is not actually a variable, get the recovered period,
# etc. anyway. this way, we can see what we need to look out for and avoid
# when getting these values for actual objects
else:
pfres['recovery_status'] = np.array(
['not_variable']*pfres['recovery_periods'].size
)
pfres['recovery_pdiff'] = np.zeros(pfres['recovery_periods'].size)
pfres['best_recovered_period'] = np.array([np.nan])
pfres['best_recovered_pfmethod'] = np.array([],dtype=np.unicode_)
pfres['best_recovered_magcol'] = np.array([],dtype=np.unicode_)
pfres['best_recovered_status'] = np.array(['not_variable'])
pfres['best_recovered_pdiff'] = np.array([np.nan])
return pfres


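# Usage sketch (hypothetical pickle path; this is normally called via
# `periodrec_worker`/`parallel_periodicvar_recovery` below):
#
#     pfres = periodicvar_recovery(
#         '/data/simbasedir/periodfinding/periodfinding-FAKE-1.pkl',
#         '/data/simbasedir',
#         period_tolerance=1.0e-3
#     )
#     print(pfres['best_recovered_status'])
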
def periodrec_worker(task):
'''This is a parallel worker for running period-recovery.
Parameters
----------
task : tuple
This is used to pass args to the `periodicvar_recovery` function::
task[0] = period-finding result pickle to work on
task[1] = simbasedir
task[2] = period_tolerance
Returns
-------
dict
This is the dict produced by the `periodicvar_recovery` function for the
input period-finding result pickle.
'''
pfpkl, simbasedir, period_tolerance = task
try:
return periodicvar_recovery(pfpkl,
simbasedir,
period_tolerance=period_tolerance)
except Exception as e:
LOGEXCEPTION('periodic var recovery failed for %s' % repr(task))
return None


def parallel_periodicvar_recovery(simbasedir,
period_tolerance=1.0e-3,
liststartind=None,
listmaxobjects=None,
nworkers=None):
'''This is a parallel driver for `periodicvar_recovery`.
Parameters
----------
simbasedir : str
The base directory where all of the fake LCs and period-finding results
are.
period_tolerance : float
        The maximum difference that this function will consider between an
        actual period (or its aliases) and a recovered period to consider it
        a 'recovered' period.
    liststartind : int
The starting index of processing. This refers to the filename list
generated by running `glob.glob` on the period-finding result pickles in
`simbasedir/periodfinding`.
listmaxobjects : int
The maximum number of objects to process in this run. Use this with
`liststartindex` to effectively distribute working on a large list of
input period-finding result pickles over several sessions or machines.
    nworkers : int
        This is the number of parallel period-recovery worker processes to
        use.
Returns
-------
str
Returns the filename of the pickle produced containing all of the period
recovery results.
'''
# figure out the periodfinding pickles directory
pfpkldir = os.path.join(simbasedir,'periodfinding')
if not os.path.exists(pfpkldir):
LOGERROR('no "periodfinding" subdirectory in %s, can\'t continue' %
simbasedir)
return None
# find all the periodfinding pickles
pfpkl_list = glob.glob(os.path.join(pfpkldir,'*periodfinding*pkl*'))
if len(pfpkl_list) > 0:
if liststartind:
pfpkl_list = pfpkl_list[liststartind:]
if listmaxobjects:
pfpkl_list = pfpkl_list[:listmaxobjects]
tasks = [(x, simbasedir, period_tolerance) for x in pfpkl_list]
pool = mp.Pool(nworkers)
results = pool.map(periodrec_worker, tasks)
pool.close()
pool.join()
resdict = {x['objectid']:x for x in results if x is not None}
actual_periodicvars = np.array(
[x['objectid'] for x in results
if (x is not None and x['actual_vartype'] in PERIODIC_VARTYPES)],
dtype=np.unicode_
)
recovered_periodicvars = np.array(
[x['objectid'] for x in results
if (x is not None and 'actual' in x['best_recovered_status'])],
dtype=np.unicode_
)
alias_twice_periodicvars = np.array(
[x['objectid'] for x in results
if (x is not None and 'twice' in x['best_recovered_status'])],
dtype=np.unicode_
)
alias_half_periodicvars = np.array(
[x['objectid'] for x in results
if (x is not None and 'half' in x['best_recovered_status'])],
dtype=np.unicode_
)
all_objectids = [x['objectid'] for x in results]
outdict = {'simbasedir':os.path.abspath(simbasedir),
'objectids':all_objectids,
'period_tolerance':period_tolerance,
'actual_periodicvars':actual_periodicvars,
'recovered_periodicvars':recovered_periodicvars,
'alias_twice_periodicvars':alias_twice_periodicvars,
'alias_half_periodicvars':alias_half_periodicvars,
'details':resdict}
outfile = os.path.join(simbasedir,'periodicvar-recovery.pkl')
with open(outfile, 'wb') as outfd:
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)
return outdict
else:
LOGERROR(
'no periodfinding result pickles found in %s, can\'t continue' %
pfpkldir
)
return None


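# Usage sketch (hypothetical path); `run_periodfinding` must have been run on
# this simbasedir first so that period-finding result pickles exist:
#
#     recovery = parallel_periodicvar_recovery('/data/simbasedir',
#                                              period_tolerance=1.0e-3,
#                                              nworkers=8)
#     print(recovery['recovered_periodicvars'].size,
#           recovery['actual_periodicvars'].size)
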
def plot_periodicvar_recovery_results(
precvar_results,
aliases_count_as_recovered=None,
magbins=None,
periodbins=None,
amplitudebins=None,
ndetbins=None,
minbinsize=1,
plotfile_ext='png',
):
'''This plots the results of periodic var recovery.
This function makes plots for periodicvar recovered fraction as a function
of:
- magbin
- periodbin
- amplitude of variability
- ndet
with plot lines broken down by:
- magcol
- periodfinder
- vartype
- recovery status
The kwargs `magbins`, `periodbins`, `amplitudebins`, and `ndetbins` can be
used to set the bin lists as needed. The kwarg `minbinsize` controls how
many elements per bin are required to accept a bin in processing its
recovery characteristics for mags, periods, amplitudes, and ndets.
Parameters
----------
precvar_results : dict or str
This is either a dict returned by parallel_periodicvar_recovery or the
pickle created by that function.
aliases_count_as_recovered : list of str or 'all'
This is used to set which kinds of aliases this function considers as
'recovered' objects. Normally, we require that recovered objects have a
recovery status of 'actual' to indicate the actual period was
recovered. To change this default behavior, aliases_count_as_recovered
can be set to a list of alias status strings that should be considered
as 'recovered' objects as well. Choose from the following alias types::
'twice' recovered_p = 2.0*actual_p
'half' recovered_p = 0.5*actual_p
'ratio_over_1plus' recovered_p = actual_p/(1.0+actual_p)
'ratio_over_1minus' recovered_p = actual_p/(1.0-actual_p)
'ratio_over_1plus_twice' recovered_p = actual_p/(1.0+2.0*actual_p)
'ratio_over_1minus_twice' recovered_p = actual_p/(1.0-2.0*actual_p)
'ratio_over_1plus_thrice' recovered_p = actual_p/(1.0+3.0*actual_p)
'ratio_over_1minus_thrice' recovered_p = actual_p/(1.0-3.0*actual_p)
'ratio_over_minus1' recovered_p = actual_p/(actual_p - 1.0)
'ratio_over_twice_minus1' recovered_p = actual_p/(2.0*actual_p - 1.0)
or set `aliases_count_as_recovered='all'` to include all of the above in
the 'recovered' periodic var list.
magbins : np.array
The magnitude bins to plot the recovery rate results over. If None, the
default mag bins will be used: `np.arange(8.0,16.25,0.25)`.
periodbins : np.array
The period bins to plot the recovery rate results over. If None, the
default period bins will be used: `np.arange(0.0,500.0,0.5)`.
amplitudebins : np.array
The variability amplitude bins to plot the recovery rate results
over. If None, the default amplitude bins will be used:
`np.arange(0.0,2.0,0.05)`.
ndetbins : np.array
The ndet bins to plot the recovery rate results over. If None, the
default ndet bins will be used: `np.arange(0.0,60000.0,1000.0)`.
minbinsize : int
The minimum number of objects per bin required to plot a bin and its
recovery fraction on the plot.
plotfile_ext : {'png','pdf'}
Sets the plot output files' extension.
Returns
-------
dict
A dict containing recovery fraction statistics and the paths to each of
the plots made.
'''
# get the result pickle/dict
if isinstance(precvar_results, str) and os.path.exists(precvar_results):
with open(precvar_results,'rb') as infd:
precvar = pickle.load(infd)
elif isinstance(precvar_results, dict):
precvar = precvar_results
else:
LOGERROR('could not understand the input '
'periodic var recovery dict/pickle')
return None
    # get the simbasedir and open fakelcs-info.pkl. we'll need the magbins
    # definition from here.
simbasedir = precvar['simbasedir']
lcinfof = os.path.join(simbasedir,'fakelcs-info.pkl')
if not os.path.exists(lcinfof):
LOGERROR('fakelcs-info.pkl does not exist in %s, can\'t continue' %
simbasedir)
return None
with open(lcinfof,'rb') as infd:
lcinfo = pickle.load(infd)
# get the magcols, vartypes, sdssr, isvariable flags
magcols = lcinfo['magcols']
objectid = lcinfo['objectid']
ndet = lcinfo['ndet']
sdssr = lcinfo['sdssr']
# get the actual periodic vars
actual_periodicvars = precvar['actual_periodicvars']
# generate lists of objects binned by magbins and periodbins
LOGINFO('getting sdssr and ndet for actual periodic vars...')
# get the sdssr and ndet for all periodic vars
periodicvar_sdssr = []
periodicvar_ndet = []
periodicvar_objectids = []
for pobj in actual_periodicvars:
pobjind = objectid == pobj
periodicvar_objectids.append(pobj)
periodicvar_sdssr.append(sdssr[pobjind])
periodicvar_ndet.append(ndet[pobjind])
periodicvar_sdssr = np.array(periodicvar_sdssr)
periodicvar_objectids = np.array(periodicvar_objectids)
periodicvar_ndet = np.array(periodicvar_ndet)
LOGINFO('getting periods, vartypes, '
'amplitudes, ndet for actual periodic vars...')
# get the periods, vartypes, amplitudes for the actual periodic vars
    periodicvar_periods = [
        float(precvar['details'][x]['actual_varperiod'])
        for x in periodicvar_objectids
    ]
    periodicvar_amplitudes = [
        float(precvar['details'][x]['actual_varamplitude'])
        for x in periodicvar_objectids
    ]
periodicvar_vartypes = [
precvar['details'][x]['actual_vartype'] for x in periodicvar_objectids
]
#
# do the binning
#
# bin by mag
LOGINFO('binning actual periodic vars by magnitude...')
magbinned_sdssr = []
magbinned_periodicvars = []
    if magbins is None:
magbins = PERIODREC_DEFAULT_MAGBINS
magbininds = np.digitize(np.ravel(periodicvar_sdssr), magbins)
for mbinind, magi in zip(np.unique(magbininds),
range(len(magbins)-1)):
thisbin_periodicvars = periodicvar_objectids[magbininds == mbinind]
if (thisbin_periodicvars.size > (minbinsize-1)):
magbinned_sdssr.append((magbins[magi] + magbins[magi+1])/2.0)
magbinned_periodicvars.append(thisbin_periodicvars)
# bin by period
LOGINFO('binning actual periodic vars by period...')
periodbinned_periods = []
periodbinned_periodicvars = []
    if periodbins is None:
periodbins = PERIODREC_DEFAULT_PERIODBINS
periodbininds = np.digitize(np.ravel(periodicvar_periods), periodbins)
for pbinind, peri in zip(np.unique(periodbininds),
range(len(periodbins)-1)):
thisbin_periodicvars = periodicvar_objectids[periodbininds == pbinind]
if (thisbin_periodicvars.size > (minbinsize-1)):
periodbinned_periods.append((periodbins[peri] +
periodbins[peri+1])/2.0)
periodbinned_periodicvars.append(thisbin_periodicvars)
# bin by amplitude of variability
LOGINFO('binning actual periodic vars by variability amplitude...')
amplitudebinned_amplitudes = []
amplitudebinned_periodicvars = []
    if amplitudebins is None:
amplitudebins = PERIODREC_DEFAULT_AMPBINS
amplitudebininds = np.digitize(np.ravel(np.abs(periodicvar_amplitudes)),
amplitudebins)
for abinind, ampi in zip(np.unique(amplitudebininds),
range(len(amplitudebins)-1)):
thisbin_periodicvars = periodicvar_objectids[
amplitudebininds == abinind
]
if (thisbin_periodicvars.size > (minbinsize-1)):
amplitudebinned_amplitudes.append(
(amplitudebins[ampi] +
amplitudebins[ampi+1])/2.0
)
amplitudebinned_periodicvars.append(thisbin_periodicvars)
# bin by ndet
LOGINFO('binning actual periodic vars by ndet...')
ndetbinned_ndets = []
ndetbinned_periodicvars = []
    if ndetbins is None:
ndetbins = PERIODREC_DEFAULT_NDETBINS
ndetbininds = np.digitize(np.ravel(periodicvar_ndet), ndetbins)
for nbinind, ndeti in zip(np.unique(ndetbininds),
range(len(ndetbins)-1)):
thisbin_periodicvars = periodicvar_objectids[ndetbininds == nbinind]
if (thisbin_periodicvars.size > (minbinsize-1)):
ndetbinned_ndets.append(
(ndetbins[ndeti] +
ndetbins[ndeti+1])/2.0
)
ndetbinned_periodicvars.append(thisbin_periodicvars)
# now figure out what 'recovered' means using the provided
# aliases_count_as_recovered kwarg
recovered_status = ['actual']
if isinstance(aliases_count_as_recovered, list):
for atype in aliases_count_as_recovered:
if atype in ALIAS_TYPES:
recovered_status.append(atype)
else:
LOGWARNING('unknown alias type: %s, skipping' % atype)
    elif aliases_count_as_recovered == 'all':
for atype in ALIAS_TYPES[1:]:
recovered_status.append(atype)
# find all the matching objects for these recovered statuses
recovered_periodicvars = np.array(
[precvar['details'][x]['objectid'] for x in precvar['details']
if (precvar['details'][x] is not None and
precvar['details'][x]['best_recovered_status']
in recovered_status)],
        dtype=np.str_
)
LOGINFO('recovered %s/%s periodic variables (frac: %.3f) with '
'period recovery status: %s' %
(recovered_periodicvars.size,
actual_periodicvars.size,
float(recovered_periodicvars.size/actual_periodicvars.size),
', '.join(recovered_status)))
# get the objects recovered per bin and overall recovery fractions per bin
magbinned_recovered_objects = [
np.intersect1d(x,recovered_periodicvars)
for x in magbinned_periodicvars
]
magbinned_recfrac = np.array([float(x.size/y.size) for x,y
in zip(magbinned_recovered_objects,
magbinned_periodicvars)])
periodbinned_recovered_objects = [
np.intersect1d(x,recovered_periodicvars)
for x in periodbinned_periodicvars
]
periodbinned_recfrac = np.array([float(x.size/y.size) for x,y
in zip(periodbinned_recovered_objects,
periodbinned_periodicvars)])
amplitudebinned_recovered_objects = [
np.intersect1d(x,recovered_periodicvars)
for x in amplitudebinned_periodicvars
]
amplitudebinned_recfrac = np.array(
[float(x.size/y.size) for x,y
in zip(amplitudebinned_recovered_objects,
amplitudebinned_periodicvars)]
)
ndetbinned_recovered_objects = [
np.intersect1d(x,recovered_periodicvars)
for x in ndetbinned_periodicvars
]
ndetbinned_recfrac = np.array([float(x.size/y.size) for x,y
in zip(ndetbinned_recovered_objects,
ndetbinned_periodicvars)])
# convert the bin medians to arrays
magbinned_sdssr = np.array(magbinned_sdssr)
periodbinned_periods = np.array(periodbinned_periods)
amplitudebinned_amplitudes = np.array(amplitudebinned_amplitudes)
ndetbinned_ndets = np.array(ndetbinned_ndets)
# this is the initial output dict
outdict = {
'simbasedir':simbasedir,
'precvar_results':precvar,
'magcols':magcols,
'objectids':objectid,
'ndet':ndet,
'sdssr':sdssr,
'actual_periodicvars':actual_periodicvars,
'recovered_periodicvars':recovered_periodicvars,
'recovery_definition':recovered_status,
# mag binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
'magbins':magbins,
'magbinned_mags':magbinned_sdssr,
'magbinned_periodicvars':magbinned_periodicvars,
'magbinned_recoveredvars':magbinned_recovered_objects,
'magbinned_recfrac':magbinned_recfrac,
# period binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
'periodbins':periodbins,
'periodbinned_periods':periodbinned_periods,
'periodbinned_periodicvars':periodbinned_periodicvars,
'periodbinned_recoveredvars':periodbinned_recovered_objects,
'periodbinned_recfrac':periodbinned_recfrac,
# amplitude binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
'amplitudebins':amplitudebins,
'amplitudebinned_amplitudes':amplitudebinned_amplitudes,
'amplitudebinned_periodicvars':amplitudebinned_periodicvars,
'amplitudebinned_recoveredvars':amplitudebinned_recovered_objects,
'amplitudebinned_recfrac':amplitudebinned_recfrac,
# ndet binned actual periodicvars
# note that only bins with nobjects > minbinsize are included
'ndetbins':ndetbins,
'ndetbinned_ndets':ndetbinned_ndets,
'ndetbinned_periodicvars':ndetbinned_periodicvars,
'ndetbinned_recoveredvars':ndetbinned_recovered_objects,
'ndetbinned_recfrac':ndetbinned_recfrac,
}
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is not None)]
)
# figure out all alias types
all_aliastypes = recovered_status
# add these to the outdict
outdict['aliastypes'] = all_aliastypes
outdict['pfmethods'] = all_pfmethods
outdict['vartypes'] = all_vartypes
# these are recfracs per-magcol, -vartype, -periodfinder, -aliastype
# binned appropriately by mags, periods, amplitudes, and ndet
    # all of these have the same shape as the magcols, aliastypes, pfmethods,
    # and vartypes lists above.
magbinned_per_magcol_recfracs = []
magbinned_per_vartype_recfracs = []
magbinned_per_pfmethod_recfracs = []
magbinned_per_aliastype_recfracs = []
periodbinned_per_magcol_recfracs = []
periodbinned_per_vartype_recfracs = []
periodbinned_per_pfmethod_recfracs = []
periodbinned_per_aliastype_recfracs = []
amplitudebinned_per_magcol_recfracs = []
amplitudebinned_per_vartype_recfracs = []
amplitudebinned_per_pfmethod_recfracs = []
amplitudebinned_per_aliastype_recfracs = []
ndetbinned_per_magcol_recfracs = []
ndetbinned_per_vartype_recfracs = []
ndetbinned_per_pfmethod_recfracs = []
ndetbinned_per_aliastype_recfracs = []
#
# finally, we do stuff for the plots!
#
recplotdir = os.path.join(simbasedir, 'periodic-variable-recovery-plots')
if not os.path.exists(recplotdir):
os.mkdir(recplotdir)
# 1. recovery-rate by magbin
# 1a. plot of overall recovery rate per magbin
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(magbinned_sdssr, magbinned_recfrac,marker='.',ms=0.0)
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-overall.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 1b. plot of recovery rate per magbin per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
for magcol in magcols:
thismagcol_recfracs = []
for magbin_pv, magbin_rv in zip(magbinned_periodicvars,
magbinned_recovered_objects):
thisbin_thismagcol_recvars = [
x for x in magbin_rv
if (precvar['details'][x]['best_recovered_magcol'] == magcol)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thismagcol_recvars).size /
magbin_pv.size
)
thismagcol_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(magbinned_sdssr,
np.array(thismagcol_recfracs),
marker='.',
label='magcol: %s' % magcol,
ms=0.0)
# add this to the outdict array
magbinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs))
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-magcols.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 1c. plot of recovery rate per magbin per periodfinder
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
for pfm in all_pfmethods:
thispf_recfracs = []
for magbin_pv, magbin_rv in zip(magbinned_periodicvars,
magbinned_recovered_objects):
thisbin_thispf_recvars = [
x for x in magbin_rv
if (precvar['details'][x]['best_recovered_pfmethod'] == pfm)
]
            thisbin_thispf_recfrac = (
                np.array(thisbin_thispf_recvars).size /
                magbin_pv.size
            )
            thispf_recfracs.append(thisbin_thispf_recfrac)
        # now that we have per period-finder recfracs, plot them
plt.plot(magbinned_sdssr,
np.array(thispf_recfracs),
marker='.',
label='%s' % pfm.upper(),
ms=0.0)
# add this to the outdict array
magbinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs))
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 1d. plot of recovery rate per magbin per variable type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is not None)]
)
for vt in all_vartypes:
thisvt_recfracs = []
for magbin_pv, magbin_rv in zip(magbinned_periodicvars,
magbinned_recovered_objects):
thisbin_thisvt_recvars = [
x for x in magbin_rv
if (precvar['details'][x]['actual_vartype'] == vt)
]
            thisbin_thisvt_recfrac = (
                np.array(thisbin_thisvt_recvars).size /
                magbin_pv.size
            )
            thisvt_recfracs.append(thisbin_thisvt_recfrac)
        # now that we have per vartype recfracs, plot them
plt.plot(magbinned_sdssr,
np.array(thisvt_recfracs),
marker='.',
label='%s' % vt,
ms=0.0)
# add this to the outdict array
magbinned_per_vartype_recfracs.append(np.array(thisvt_recfracs))
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 1e. plot of recovery rate per magbin per alias type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all alias types
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for magbin_pv, magbin_rv in zip(magbinned_periodicvars,
magbinned_recovered_objects):
thisbin_thisat_recvars = [
x for x in magbin_rv
if (precvar['details'][x]['best_recovered_status'][0] == at)
]
            thisbin_thisat_recfrac = (
                np.array(thisbin_thisat_recvars).size /
                magbin_pv.size
            )
            thisat_recfracs.append(thisbin_thisat_recfrac)
        # now that we have per alias-type recfracs, plot them
plt.plot(magbinned_sdssr,
np.array(thisat_recfracs),
marker='.',
label='%s' % at,
ms=0.0)
# add this to the outdict array
magbinned_per_aliastype_recfracs.append(np.array(thisat_recfracs))
# finish up the plot
plt.plot(magbinned_sdssr, magbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
plt.xlabel(r'SDSS $r$ magnitude')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var magnitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-magnitudes-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2. recovery-rate by periodbin
# 2a. plot of overall recovery rate per periodbin
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0)
plt.xlabel('periodic variable period [days]')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var periods')
plt.ylim((0,1))
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-overall.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2b. plot of recovery rate per periodbin per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
for magcol in magcols:
thismagcol_recfracs = []
for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars,
periodbinned_recovered_objects):
thisbin_thismagcol_recvars = [
x for x in periodbin_rv
if (precvar['details'][x]['best_recovered_magcol'] == magcol)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thismagcol_recvars).size /
periodbin_pv.size
)
thismagcol_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(periodbinned_periods,
np.array(thismagcol_recfracs),
marker='.',
label='magcol: %s' % magcol,
ms=0.0)
# add this to the outdict array
periodbinned_per_magcol_recfracs.append(np.array(thismagcol_recfracs))
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
    plt.xlabel('periodic variable period [days]')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var periods')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-magcols.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2c. plot of recovery rate per periodbin per periodfinder
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
for pfm in all_pfmethods:
thispf_recfracs = []
for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars,
periodbinned_recovered_objects):
thisbin_thispf_recvars = [
x for x in periodbin_rv
if (precvar['details'][x]['best_recovered_pfmethod'] == pfm)
]
            thisbin_thispf_recfrac = (
                np.array(thisbin_thispf_recvars).size /
                periodbin_pv.size
            )
            thispf_recfracs.append(thisbin_thispf_recfrac)
        # now that we have per period-finder recfracs, plot them
plt.plot(periodbinned_periods,
np.array(thispf_recfracs),
marker='.',
label='%s' % pfm.upper(),
ms=0.0)
# add this to the outdict array
periodbinned_per_pfmethod_recfracs.append(np.array(thispf_recfracs))
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
    plt.xlabel('periodic variable period [days]')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var periods')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2d. plot of recovery rate per periodbin per variable type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is not None)]
)
for vt in all_vartypes:
thisvt_recfracs = []
for periodbin_pv, periodbin_rv in zip(periodbinned_periodicvars,
periodbinned_recovered_objects):
thisbin_thisvt_recvars = [
x for x in periodbin_rv
if (precvar['details'][x]['actual_vartype'] == vt)
]
            thisbin_thisvt_recfrac = (
                np.array(thisbin_thisvt_recvars).size /
                periodbin_pv.size
            )
            thisvt_recfracs.append(thisbin_thisvt_recfrac)
        # now that we have per vartype recfracs, plot them
plt.plot(periodbinned_periods,
np.array(thisvt_recfracs),
marker='.',
label='%s' % vt,
ms=0.0)
# add this to the outdict array
periodbinned_per_vartype_recfracs.append(np.array(thisvt_recfracs))
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
    plt.xlabel('periodic variable period [days]')
    plt.ylabel('recovered fraction of periodic variables')
    plt.title('per vartype recovery fraction by periodic var periods')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 2e. plot of recovery rate per periodbin per alias type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
    # figure out all alias types
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for periodbin_pv, periodbin_rv in zip(
periodbinned_periodicvars,
periodbinned_recovered_objects
):
thisbin_thisat_recvars = [
x for x in periodbin_rv
if (precvar['details'][x]['best_recovered_status'][0] == at)
]
            thisbin_thisat_recfrac = (
                np.array(thisbin_thisat_recvars).size /
                periodbin_pv.size
            )
            thisat_recfracs.append(thisbin_thisat_recfrac)
        # now that we have per alias-type recfracs, plot them
plt.plot(periodbinned_periods,
np.array(thisat_recfracs),
marker='.',
label='%s' % at,
ms=0.0)
# add this to the outdict array
periodbinned_per_aliastype_recfracs.append(np.array(thisat_recfracs))
# finish up the plot
plt.plot(periodbinned_periods, periodbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
    plt.xlabel('periodic variable period [days]')
    plt.ylabel('recovered fraction of periodic variables')
    plt.title('per alias-type recovery fraction by periodic var periods')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-periods-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3. recovery-rate by amplitude bin
# 3a. plot of overall recovery rate per amplitude bin
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0)
plt.xlabel('periodic variable amplitude [mag]')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-overall.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3b. plot of recovery rate per amplitude bin per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
for magcol in magcols:
thismagcol_recfracs = []
for amplitudebin_pv, amplitudebin_rv in zip(
amplitudebinned_periodicvars,
amplitudebinned_recovered_objects
):
thisbin_thismagcol_recvars = [
x for x in amplitudebin_rv
if (precvar['details'][x]['best_recovered_magcol'] == magcol)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thismagcol_recvars).size /
amplitudebin_pv.size
)
thismagcol_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(amplitudebinned_amplitudes,
np.array(thismagcol_recfracs),
marker='.',
label='magcol: %s' % magcol,
ms=0.0)
# add this to the outdict array
amplitudebinned_per_magcol_recfracs.append(
np.array(thismagcol_recfracs)
)
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
    plt.xlabel('periodic variable amplitude [mag]')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-magcols.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3c. plot of recovery rate per amplitude bin per periodfinder
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
for pfm in all_pfmethods:
thispf_recfracs = []
for amplitudebin_pv, amplitudebin_rv in zip(
amplitudebinned_periodicvars,
amplitudebinned_recovered_objects
):
thisbin_thispf_recvars = [
x for x in amplitudebin_rv
if (precvar['details'][x]['best_recovered_pfmethod'] == pfm)
]
            thisbin_thispf_recfrac = (
                np.array(thisbin_thispf_recvars).size /
                amplitudebin_pv.size
            )
            thispf_recfracs.append(thisbin_thispf_recfrac)
        # now that we have per period-finder recfracs, plot them
plt.plot(amplitudebinned_amplitudes,
np.array(thispf_recfracs),
marker='.',
label='%s' % pfm.upper(),
ms=0.0)
# add this to the outdict array
amplitudebinned_per_pfmethod_recfracs.append(
np.array(thispf_recfracs)
)
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
    plt.xlabel('periodic variable amplitude [mag]')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3d. plot of recovery rate per amplitude bin per variable type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is not None)]
)
for vt in all_vartypes:
thisvt_recfracs = []
for amplitudebin_pv, amplitudebin_rv in zip(
amplitudebinned_periodicvars,
amplitudebinned_recovered_objects
):
thisbin_thisvt_recvars = [
x for x in amplitudebin_rv
if (precvar['details'][x]['actual_vartype'] == vt)
]
            thisbin_thisvt_recfrac = (
                np.array(thisbin_thisvt_recvars).size /
                amplitudebin_pv.size
            )
            thisvt_recfracs.append(thisbin_thisvt_recfrac)
        # now that we have per vartype recfracs, plot them
plt.plot(amplitudebinned_amplitudes,
np.array(thisvt_recfracs),
marker='.',
label='%s' % vt,
ms=0.0)
# add this to the outdict array
amplitudebinned_per_vartype_recfracs.append(
np.array(thisvt_recfracs)
)
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
    plt.xlabel('periodic variable amplitude [mag]')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 3e. plot of recovery rate per amplitude bin per alias type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
    # figure out all alias types
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for amplitudebin_pv, amplitudebin_rv in zip(
amplitudebinned_periodicvars,
amplitudebinned_recovered_objects
):
thisbin_thisat_recvars = [
x for x in amplitudebin_rv
if (precvar['details'][x]['best_recovered_status'][0] == at)
]
            thisbin_thisat_recfrac = (
                np.array(thisbin_thisat_recvars).size /
                amplitudebin_pv.size
            )
            thisat_recfracs.append(thisbin_thisat_recfrac)
        # now that we have per alias-type recfracs, plot them
plt.plot(amplitudebinned_amplitudes,
np.array(thisat_recfracs),
marker='.',
label='%s' % at,
ms=0.0)
# add this to the outdict array
amplitudebinned_per_aliastype_recfracs.append(
np.array(thisat_recfracs)
)
# finish up the plot
plt.plot(amplitudebinned_amplitudes, amplitudebinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
    plt.xlabel('periodic variable amplitude [mag]')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var amplitudes')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-amplitudes-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4. recovery-rate by ndet bin
# 4a. plot of overall recovery rate per ndet bin
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0)
plt.xlabel('periodic variable light curve points')
plt.ylabel('recovered fraction of periodic variables')
plt.title('overall recovery fraction by periodic var ndet')
plt.ylim((0,1))
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-overall.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4b. plot of recovery rate per ndet bin per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
for magcol in magcols:
thismagcol_recfracs = []
for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,
ndetbinned_recovered_objects):
thisbin_thismagcol_recvars = [
x for x in ndetbin_rv
if (precvar['details'][x]['best_recovered_magcol'] == magcol)
]
thisbin_thismagcol_recfrac = (
np.array(thisbin_thismagcol_recvars).size /
ndetbin_pv.size
)
thismagcol_recfracs.append(thisbin_thismagcol_recfrac)
# now that we have per magcol recfracs, plot them
plt.plot(ndetbinned_ndets,
np.array(thismagcol_recfracs),
marker='.',
label='magcol: %s' % magcol,
ms=0.0)
# add this to the outdict array
ndetbinned_per_magcol_recfracs.append(
np.array(thismagcol_recfracs)
)
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
    plt.xlabel('periodic variable light curve points')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per magcol recovery fraction by periodic var ndets')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-magcols.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4c. plot of recovery rate per ndet bin per periodfinder
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out which pfmethods were used
all_pfmethods = np.unique(
np.concatenate(
[np.unique(precvar['details'][x]['recovery_pfmethods'])
for x in precvar['details']]
)
)
for pfm in all_pfmethods:
thispf_recfracs = []
for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,
ndetbinned_recovered_objects):
thisbin_thispf_recvars = [
x for x in ndetbin_rv
if (precvar['details'][x]['best_recovered_pfmethod'] == pfm)
]
            thisbin_thispf_recfrac = (
                np.array(thisbin_thispf_recvars).size /
                ndetbin_pv.size
            )
            thispf_recfracs.append(thisbin_thispf_recfrac)
        # now that we have per period-finder recfracs, plot them
plt.plot(ndetbinned_ndets,
np.array(thispf_recfracs),
marker='.',
label='%s' % pfm.upper(),
ms=0.0)
# add this to the outdict array
ndetbinned_per_pfmethod_recfracs.append(
np.array(thispf_recfracs)
)
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
    plt.xlabel('periodic variable light curve points')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per period-finder recovery fraction by periodic var ndets')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4d. plot of recovery rate per ndet bin per variable type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
# figure out all vartypes
all_vartypes = np.unique(
[(precvar['details'][x]['actual_vartype'])
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] in PERIODIC_VARTYPES)]
)
for vt in all_vartypes:
thisvt_recfracs = []
for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,
ndetbinned_recovered_objects):
thisbin_thisvt_recvars = [
x for x in ndetbin_rv
if (precvar['details'][x]['actual_vartype'] == vt)
]
            thisbin_thisvt_recfrac = (
                np.array(thisbin_thisvt_recvars).size /
                ndetbin_pv.size
            )
            thisvt_recfracs.append(thisbin_thisvt_recfrac)
        # now that we have per vartype recfracs, plot them
plt.plot(ndetbinned_ndets,
np.array(thisvt_recfracs),
marker='.',
label='%s' % vt,
ms=0.0)
# add this to the outdict array
ndetbinned_per_vartype_recfracs.append(
np.array(thisvt_recfracs)
)
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
    plt.xlabel('periodic variable light curve points')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per vartype recovery fraction by periodic var ndets')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 4e. plot of recovery rate per ndet bin per alias type
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
    # figure out all alias types
all_aliastypes = recovered_status
for at in all_aliastypes:
thisat_recfracs = []
for ndetbin_pv, ndetbin_rv in zip(ndetbinned_periodicvars,
ndetbinned_recovered_objects):
thisbin_thisat_recvars = [
x for x in ndetbin_rv
if (precvar['details'][x]['best_recovered_status'][0] == at)
]
            thisbin_thisat_recfrac = (
                np.array(thisbin_thisat_recvars).size /
                ndetbin_pv.size
            )
            thisat_recfracs.append(thisbin_thisat_recfrac)
        # now that we have per alias-type recfracs, plot them
plt.plot(ndetbinned_ndets,
np.array(thisat_recfracs),
marker='.',
label='%s' % at,
ms=0.0)
# add this to the outdict array
ndetbinned_per_aliastype_recfracs.append(
np.array(thisat_recfracs)
)
# finish up the plot
plt.plot(ndetbinned_ndets, ndetbinned_recfrac,
marker='.',ms=0.0, label='overall', color='k')
    plt.xlabel('periodic variable light curve points')
plt.ylabel('recovered fraction of periodic variables')
plt.title('per alias-type recovery fraction by periodic var ndets')
plt.ylim((0,1))
plt.legend(markerscale=10.0)
plt.savefig(
os.path.join(recplotdir,
'recfrac-binned-ndet-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# update the lists in the outdict
outdict['magbinned_per_magcol_recfracs'] = (
magbinned_per_magcol_recfracs
)
outdict['magbinned_per_pfmethod_recfracs'] = (
magbinned_per_pfmethod_recfracs
)
outdict['magbinned_per_vartype_recfracs'] = (
magbinned_per_vartype_recfracs
)
outdict['magbinned_per_aliastype_recfracs'] = (
magbinned_per_aliastype_recfracs
)
outdict['periodbinned_per_magcol_recfracs'] = (
periodbinned_per_magcol_recfracs
)
outdict['periodbinned_per_pfmethod_recfracs'] = (
periodbinned_per_pfmethod_recfracs
)
outdict['periodbinned_per_vartype_recfracs'] = (
periodbinned_per_vartype_recfracs
)
outdict['periodbinned_per_aliastype_recfracs'] = (
periodbinned_per_aliastype_recfracs
)
outdict['amplitudebinned_per_magcol_recfracs'] = (
amplitudebinned_per_magcol_recfracs
)
outdict['amplitudebinned_per_pfmethod_recfracs'] = (
amplitudebinned_per_pfmethod_recfracs
)
outdict['amplitudebinned_per_vartype_recfracs'] = (
amplitudebinned_per_vartype_recfracs
)
outdict['amplitudebinned_per_aliastype_recfracs'] = (
amplitudebinned_per_aliastype_recfracs
)
outdict['ndetbinned_per_magcol_recfracs'] = (
ndetbinned_per_magcol_recfracs
)
outdict['ndetbinned_per_pfmethod_recfracs'] = (
ndetbinned_per_pfmethod_recfracs
)
outdict['ndetbinned_per_vartype_recfracs'] = (
ndetbinned_per_vartype_recfracs
)
outdict['ndetbinned_per_aliastype_recfracs'] = (
ndetbinned_per_aliastype_recfracs
)
# get the overall recovered vars per pfmethod
overall_recvars_per_pfmethod = []
for pfm in all_pfmethods:
thispfm_recvars = np.array([
x for x in precvar['details'] if
((x in recovered_periodicvars) and
(precvar['details'][x]['best_recovered_pfmethod'] == pfm))
])
overall_recvars_per_pfmethod.append(thispfm_recvars)
# get the overall recovered vars per vartype
overall_recvars_per_vartype = []
for vt in all_vartypes:
thisvt_recvars = np.array([
x for x in precvar['details'] if
((x in recovered_periodicvars) and
(precvar['details'][x]['actual_vartype'] == vt))
])
overall_recvars_per_vartype.append(thisvt_recvars)
# get the overall recovered vars per magcol
overall_recvars_per_magcol = []
for mc in magcols:
thismc_recvars = np.array([
x for x in precvar['details'] if
((x in recovered_periodicvars) and
(precvar['details'][x]['best_recovered_magcol'] == mc))
])
overall_recvars_per_magcol.append(thismc_recvars)
# get the overall recovered vars per aliastype
overall_recvars_per_aliastype = []
for at in all_aliastypes:
thisat_recvars = np.array([
x for x in precvar['details'] if
((x in recovered_periodicvars) and
(precvar['details'][x]['best_recovered_status'] == at))
])
overall_recvars_per_aliastype.append(thisat_recvars)
# update the outdict with these
outdict['overall_recfrac_per_pfmethod'] = np.array([
x.size/actual_periodicvars.size for x in overall_recvars_per_pfmethod
])
outdict['overall_recfrac_per_vartype'] = np.array([
x.size/actual_periodicvars.size for x in overall_recvars_per_vartype
])
outdict['overall_recfrac_per_magcol'] = np.array([
x.size/actual_periodicvars.size for x in overall_recvars_per_magcol
])
outdict['overall_recfrac_per_aliastype'] = np.array([
x.size/actual_periodicvars.size for x in overall_recvars_per_aliastype
])
# 5. bar plot of overall recovery rate per pfmethod
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
xt = np.arange(len(all_pfmethods))
xl = all_pfmethods
plt.barh(xt, outdict['overall_recfrac_per_pfmethod'], 0.50)
plt.yticks(xt, xl)
    plt.xlabel('overall recovery rate')
    plt.ylabel('period-finding method')
plt.title('overall recovery rate per period-finding method')
plt.savefig(
os.path.join(recplotdir,
'recfrac-overall-pfmethod.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 6. bar plot of overall recovery rate per magcol
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
xt = np.arange(len(magcols))
xl = magcols
plt.barh(xt, outdict['overall_recfrac_per_magcol'], 0.50)
plt.yticks(xt, xl)
    plt.xlabel('overall recovery rate')
    plt.ylabel('light curve magnitude column')
plt.title('overall recovery rate per light curve magcol')
plt.savefig(
os.path.join(recplotdir,
'recfrac-overall-magcol.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 7. bar plot of overall recovery rate per aliastype
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
xt = np.arange(len(all_aliastypes))
xl = all_aliastypes
plt.barh(xt, outdict['overall_recfrac_per_aliastype'], 0.50)
plt.yticks(xt, xl)
    plt.xlabel('overall recovery rate')
    plt.ylabel('period recovery status')
plt.title('overall recovery rate per period recovery status')
plt.savefig(
os.path.join(recplotdir,
'recfrac-overall-aliastype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 8. bar plot of overall recovery rate per vartype
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
xt = np.arange(len(all_vartypes))
xl = all_vartypes
plt.barh(xt, outdict['overall_recfrac_per_vartype'], 0.50)
plt.yticks(xt, xl)
    plt.xlabel('overall recovery rate')
    plt.ylabel('periodic variable type')
plt.title('overall recovery rate per periodic variable type')
plt.savefig(
os.path.join(recplotdir,
'recfrac-overall-vartype.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 9. overall recovered period periodogram for objects that aren't actual
# periodic variables. this effectively should give us the window function of
# the observations
notvariable_recovered_periods = np.concatenate([
precvar['details'][x]['recovery_periods']
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is None)
])
notvariable_recovered_lspvals = np.concatenate([
precvar['details'][x]['recovery_lspvals']
for x in precvar['details'] if
(precvar['details'][x]['actual_vartype'] is None)
])
sortind = np.argsort(notvariable_recovered_periods)
notvariable_recovered_periods = notvariable_recovered_periods[sortind]
notvariable_recovered_lspvals = notvariable_recovered_lspvals[sortind]
outdict['notvariable_recovered_periods'] = notvariable_recovered_periods
outdict['notvariable_recovered_lspvals'] = notvariable_recovered_lspvals
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.plot(notvariable_recovered_periods,
notvariable_recovered_lspvals,
ms=1.0,linestyle='none',marker='.')
plt.xscale('log')
plt.xlabel('recovered periods [days]')
plt.ylabel('recovered normalized periodogram power')
plt.title('periodogram for actual not-variable objects')
plt.savefig(
os.path.join(recplotdir,
'recovered-periodogram-nonvariables.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# 10. overall recovered period histogram for objects marked
# not-variable. this gives us the most common periods
fig = plt.figure(figsize=(6.4*1.5,4.8*1.5))
plt.hist(notvariable_recovered_periods,bins=np.arange(0.02,300.0,1.0e-3),
histtype='step')
plt.xscale('log')
plt.xlabel('recovered periods [days]')
plt.ylabel('number of times periods recovered')
plt.title('recovered period histogram for non-variable objects')
plt.savefig(
os.path.join(recplotdir,
'recovered-period-hist-nonvariables.%s' % plotfile_ext),
dpi=100,
bbox_inches='tight'
)
plt.close('all')
# at the end, write the outdict to a pickle and return it
outfile = os.path.join(simbasedir, 'periodicvar-recovery-plotresults.pkl')
with open(outfile,'wb') as outfd:
pickle.dump(outdict, outfd, pickle.HIGHEST_PROTOCOL)
return outdict
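
# A minimal sketch of re-reading the results pickle written above; the
# simbasedir path here is a hypothetical placeholder.
import os
import pickle
plotres_pkl = os.path.join('/path/to/simbasedir',  # hypothetical location
                           'periodicvar-recovery-plotresults.pkl')
with open(plotres_pkl, 'rb') as infd:
    plotres = pickle.load(infd)
# overall recovery fraction in each magnitude bin
print(plotres['magbinned_mags'], plotres['magbinned_recfrac'])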
|
def mast_query(service,
params,
data=None,
apiversion='v0',
forcefetch=False,
cachedir='~/.astrobase/mast-cache',
verbose=True,
timeout=10.0,
refresh=5.0,
maxtimeout=90.0,
maxtries=3,
raiseonfail=False,
jitter=5.0):
'''This queries the STScI MAST service for catalog data.
All results are downloaded as JSON files that are written to `cachedir`.
Parameters
----------
service : str
This is the name of the service to use. See
https://mast.stsci.edu/api/v0/_services.html for a list of all available
services.
params : dict
This is a dict containing the input params to the service as described
on its details page linked in the `service description page on MAST
<https://mast.stsci.edu/api/v0/_services.html>`_.
data : dict or None
This contains optional data to upload to the service.
apiversion : str
The API version of the MAST service to use. This sets the URL that this
function will call, using `apiversion` as key into the `MAST_URLS` dict
above.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
raiseonfail : bool
If this is True, the function will raise an Exception if something goes
wrong, instead of returning None.
jitter : float
This is used to control the scale of the random wait in seconds before
starting the query. Useful in parallelized situations.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
         'cachefname':path to the file on disk with the downloaded data table}
'''
# this matches:
# https://mast.stsci.edu/api/v0/class_mashup_1_1_mashup_request.html
inputparams = {
'format':'json',
'params':params,
'service':service,
'timeout':timeout,
}
if data is not None:
inputparams['data'] = data
# see if the cachedir exists
if '~' in cachedir:
cachedir = os.path.expanduser(cachedir)
if not os.path.exists(cachedir):
os.makedirs(cachedir)
# generate the cachefname and look for it
xcachekey = '-'.join([repr(inputparams[x])
for x in sorted(inputparams.keys())])
cachekey = hashlib.sha256(xcachekey.encode()).hexdigest()
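    # the cache key is the SHA256 hex digest of the repr of every input param
    # (in sorted key order), so identical queries always map to the same
    # cache file below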
cachefname = os.path.join(
cachedir,
'%s.json' % (cachekey,)
)
provenance = 'cache'
#####################
## RUN A NEW QUERY ##
#####################
    # run a new query if forcefetch is set, or if no cached result exists yet
    if forcefetch or (not os.path.exists(cachefname)):
        time.sleep(random.uniform(1.0, jitter))
provenance = 'new download'
waitdone = False
timeelapsed = 0.0
ntries = 1
url = MAST_URLS[apiversion]['url']
formdata = {'request':json.dumps(inputparams)}
        while (not waitdone) and (ntries < maxtries):
if timeelapsed > maxtimeout:
retdict = None
break
try:
resp = requests.post(url,
data=formdata,
# we'll let the service time us out first
# if that fails, we'll timeout ourselves
timeout=timeout+1.0)
resp.raise_for_status()
respjson = resp.json()
if respjson['status'] == 'COMPLETE':
                    respdata = respjson['data']
                    nrows = len(respdata)
if nrows > 0:
with open(cachefname, 'w') as outfd:
json.dump(respjson, outfd)
retdict = {
'params':inputparams,
'provenance':provenance,
'cachefname':cachefname
}
waitdone = True
if verbose:
LOGINFO('query successful. nmatches: %s' % nrows)
break
else:
LOGERROR(
'no matching objects found for inputparams: %r' %
inputparams
)
retdict = None
waitdone = True
break
# if we're still executing after the initial timeout is done
elif respjson['status'] == 'EXECUTING':
if verbose:
LOGINFO('query is still executing, '
'waiting %s seconds to retry...' % refresh)
waitdone = False
time.sleep(refresh)
timeelapsed = timeelapsed + refresh
retdict = None
else:
LOGERROR('Query failed! Message from service: %s' %
respjson['msg'])
retdict = None
waitdone = True
break
except requests.exceptions.Timeout as e:
if verbose:
LOGWARNING('MAST query try timed out, '
'site is probably down. '
'Waiting for %s seconds to try again...' %
refresh)
waitdone = False
time.sleep(refresh)
timeelapsed = timeelapsed + refresh
retdict = None
except KeyboardInterrupt as e:
LOGERROR('MAST request wait aborted for '
'%s' % repr(inputparams))
return None
except Exception as e:
LOGEXCEPTION('MAST query failed!')
if raiseonfail:
raise
return None
#
# increment number of tries at the bottom of the loop
#
ntries = ntries + 1
#
# done with waiting for completion
#
if retdict is None:
LOGERROR('Timed out, errored out, or reached maximum number '
'of tries with no response. Query was: %r' % inputparams)
return None
else:
return retdict
# otherwise, get the file from the cache
else:
if verbose:
LOGINFO('getting cached MAST query result for '
'request: %s' %
(repr(inputparams)))
retdict = {
'params':inputparams,
'provenance':provenance,
'cachefname':cachefname
}
return retdict
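
# An illustrative usage sketch (the coordinates are made up): this calls the
# same TIC cone-search service that tic_conesearch below wraps, with the
# search radius given in degrees.
mast_result = mast_query('Mast.Catalogs.Tic.Cone',
                         {'ra':120.0, 'dec':45.0, 'radius':5.0/60.0})
if mast_result is not None:
    print(mast_result['provenance'], mast_result['cachefname'])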
|
def tic_conesearch(
ra,
decl,
radius_arcmin=5.0,
apiversion='v0',
forcefetch=False,
cachedir='~/.astrobase/mast-cache',
verbose=True,
timeout=10.0,
refresh=5.0,
maxtimeout=90.0,
maxtries=3,
jitter=5.0,
raiseonfail=False
):
'''This runs a TESS Input Catalog cone search on MAST.
If you use this, please cite the TIC paper (Stassun et al 2018;
http://adsabs.harvard.edu/abs/2018AJ....156..102S). Also see the "living"
TESS input catalog docs:
https://docs.google.com/document/d/1zdiKMs4Ld4cXZ2DW4lMX-fuxAF6hPHTjqjIwGqnfjqI
Also see: https://mast.stsci.edu/api/v0/_t_i_cfields.html for the fields
returned by the service and present in the result JSON file.
Parameters
----------
ra,decl : float
The center coordinates of the cone-search in decimal degrees.
radius_arcmin : float
The cone-search radius in arcminutes.
apiversion : str
The API version of the MAST service to use. This sets the URL that this
function will call, using `apiversion` as key into the `MAST_URLS` dict
above.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
jitter : float
This is used to control the scale of the random wait in seconds before
starting the query. Useful in parallelized situations.
raiseonfail : bool
If this is True, the function will raise an Exception if something goes
wrong, instead of returning None.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
         'cachefname':path to the file on disk with the downloaded data table}
'''
params = {'ra':ra,
'dec':decl,
'radius':radius_arcmin/60.0}
service = 'Mast.Catalogs.Tic.Cone'
return mast_query(service,
params,
jitter=jitter,
apiversion=apiversion,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
raiseonfail=raiseonfail)
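
# Example call (coordinates are hypothetical): a 2 arcmin TIC cone search.
# The cached JSON file has the same 'data' key as the raw MAST response.
tic_cone_result = tic_conesearch(120.0, 45.0, radius_arcmin=2.0)
if tic_cone_result is not None:
    import json
    with open(tic_cone_result['cachefname'], 'r') as infd:
        tic_rows = json.load(infd)['data']
    print('matched TIC objects: %s' % len(tic_rows))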
|
def tic_xmatch(
ra,
decl,
radius_arcsec=5.0,
apiversion='v0',
forcefetch=False,
cachedir='~/.astrobase/mast-cache',
verbose=True,
timeout=90.0,
refresh=5.0,
maxtimeout=180.0,
maxtries=3,
jitter=5.0,
raiseonfail=False
):
'''This does a cross-match with TIC.
Parameters
----------
ra,decl : np.arrays or lists of floats
The coordinates that will be cross-matched against the TIC.
radius_arcsec : float
The cross-match radius in arcseconds.
apiversion : str
The API version of the MAST service to use. This sets the URL that this
function will call, using `apiversion` as key into the `MAST_URLS` dict
above.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
jitter : float
This is used to control the scale of the random wait in seconds before
starting the query. Useful in parallelized situations.
raiseonfail : bool
If this is True, the function will raise an Exception if something goes
wrong, instead of returning None.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
         'cachefname':path to the file on disk with the downloaded data table}
'''
service = 'Mast.Tic.Crossmatch'
xmatch_input = {'fields':[{'name':'ra','type':'float'},
{'name':'dec','type':'float'}]}
xmatch_input['data'] = [{'ra':x, 'dec':y} for (x,y) in zip(ra, decl)]
params = {'raColumn':'ra',
'decColumn':'dec',
'radius':radius_arcsec/3600.0}
return mast_query(service,
params,
data=xmatch_input,
jitter=jitter,
apiversion=apiversion,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
raiseonfail=raiseonfail)
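
# Example cross-match call for two hypothetical targets; ra and decl are
# array-like, matching the docstring above.
import numpy as np
xmatch_result = tic_xmatch(np.array([120.0, 121.5]),
                           np.array([45.0, 44.2]),
                           radius_arcsec=3.0)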
|
def tic_objectsearch(
objectid,
idcol_to_use="ID",
apiversion='v0',
forcefetch=False,
cachedir='~/.astrobase/mast-cache',
verbose=True,
timeout=90.0,
refresh=5.0,
maxtimeout=180.0,
maxtries=3,
jitter=5.0,
raiseonfail=False
):
'''
This runs a TIC search for a specified TIC ID.
Parameters
----------
objectid : str
The object ID to look up information for.
idcol_to_use : str
This is the name of the object ID column to use when looking up the
provided `objectid`. This is one of {'ID', 'HIP', 'TYC', 'UCAC',
'TWOMASS', 'ALLWISE', 'SDSS', 'GAIA', 'APASS', 'KIC'}.
apiversion : str
The API version of the MAST service to use. This sets the URL that this
function will call, using `apiversion` as key into the `MAST_URLS` dict
above.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
maxtries : int
The maximum number of tries (across all mirrors tried) to make to either
submit the request or download the results, before giving up.
jitter : float
This is used to control the scale of the random wait in seconds before
starting the query. Useful in parallelized situations.
raiseonfail : bool
If this is True, the function will raise an Exception if something goes
wrong, instead of returning None.
Returns
-------
dict
This returns a dict of the following form::
{'params':dict of the input params used for the query,
'provenance':'cache' or 'new download',
         'cachefname':path to the file on disk with the downloaded data table}
'''
params = {
'columns':'*',
'filters':[
{"paramName": idcol_to_use,
"values":[str(objectid)]}
]
}
service = 'Mast.Catalogs.Filtered.Tic'
return mast_query(service,
params,
jitter=jitter,
apiversion=apiversion,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout,
maxtries=maxtries,
raiseonfail=raiseonfail)
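
# Example lookups; the object IDs below are hypothetical placeholders. The
# first looks up a TIC ID directly, the second matches on the TWOMASS column.
tic_id_result = tic_objectsearch('12345678')
twomass_result = tic_objectsearch('12345678+1234567', idcol_to_use='TWOMASS')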
|
def send_email(sender,
subject,
content,
email_recipient_list,
email_address_list,
email_user=None,
email_pass=None,
email_server=None):
'''This sends an email to addresses, informing them about events.
The email account settings are retrieved from the settings file as described
above.
Parameters
----------
sender : str
The name of the sender to use in the email header.
subject : str
Subject of the email.
content : str
Content of the email.
    email_recipient_list : list of str
        This is a list of email recipient names of the form:
        `['Example Person 1', 'Example Person 2', ...]`
    email_address_list : list of str
        This is a list of email recipient addresses of the form:
        `['[email protected]', '[email protected]', ...]`
email_user : str
The username of the email server account that will send the emails. If
this is None, the value of EMAIL_USER from the
~/.astrobase/.emailsettings file will be used. If that is None as well,
this function won't work.
email_pass : str
The password of the email server account that will send the emails. If
this is None, the value of EMAIL_PASS from the
~/.astrobase/.emailsettings file will be used. If that is None as well,
this function won't work.
    email_server : str
        The address of the email server that will send the emails. If this is
        None, the value of EMAIL_SERVER from the ~/.astrobase/.emailsettings
        file will be used. If that is None as well, this function won't work.
Returns
-------
bool
True if email sending succeeded. False if email sending failed.
'''
if not email_user:
email_user = EMAIL_USER
if not email_pass:
email_pass = EMAIL_PASSWORD
if not email_server:
email_server = EMAIL_SERVER
    if not (email_server and email_user and email_pass):
raise ValueError("no email server address and "
"credentials available, can't continue")
msg_text = EMAIL_TEMPLATE.format(
sender=sender,
hostname=socket.gethostname(),
activity_time='%sZ' % datetime.utcnow().isoformat(),
activity_report=content
)
    email_sender = '%s <%s>' % (sender, email_user)
# put together the recipient and email lists
email_recipients = [('%s <%s>' % (x,y))
for (x,y) in zip(email_recipient_list,
email_address_list)]
# put together the rest of the message
email_msg = MIMEText(msg_text)
email_msg['From'] = email_sender
email_msg['To'] = ', '.join(email_recipients)
email_msg['Message-Id'] = make_msgid()
email_msg['Subject'] = '[%s on %s] %s' % (
sender,
socket.gethostname(),
subject
)
email_msg['Date'] = formatdate(time.time())
# start the email process
try:
        server = smtplib.SMTP(email_server, 587)
server_ehlo_response = server.ehlo()
if server.has_extn('STARTTLS'):
try:
tls_start_response = server.starttls()
tls_ehlo_response = server.ehlo()
                login_response = server.login(email_user, email_pass)
send_response = (
server.sendmail(email_sender,
email_address_list,
email_msg.as_string())
)
except Exception as e:
print('script email sending failed with error: %s'
% e)
send_response = None
if send_response is not None:
print('script email sent successfully')
quit_response = server.quit()
return True
else:
quit_response = server.quit()
return False
else:
print('email server does not support STARTTLS,'
' bailing out...')
quit_response = server.quit()
return False
    except Exception as e:
        print('sending email failed with error: %s' % e)
        # the server object may not exist if the connection itself failed
        try:
            server.quit()
        except Exception:
            pass
        return False
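
# An illustrative sketch; every name, address, and credential below is a
# placeholder that must be replaced with real SMTP account values.
email_ok = send_email(
    'sim-runner',
    'periodic variable recovery run finished',
    'The recovery run on this machine is done.',
    ['Example Person 1'],
    ['[email protected]'],
    email_user='smtp-username',        # placeholder
    email_pass='smtp-password',        # placeholder
    email_server='smtp.example.com'    # placeholder
)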
|
def fourier_sinusoidal_func(fourierparams, times, mags, errs):
'''This generates a sinusoidal light curve using a Fourier cosine series.
Parameters
----------
fourierparams : list
This MUST be a list of the following form like so::
[period,
epoch,
[amplitude_1, amplitude_2, amplitude_3, ..., amplitude_X],
[phase_1, phase_2, phase_3, ..., phase_X]]
where X is the Fourier order.
times,mags,errs : np.array
The input time-series of measurements and associated errors for which
the model will be generated. The times will be used to generate model
mags, and the input `times`, `mags`, and `errs` will be resorted by
model phase and returned.
Returns
-------
(modelmags, phase, ptimes, pmags, perrs) : tuple
Returns the model mags and phase values. Also returns the input `times`,
`mags`, and `errs` sorted by the model's phase.
'''
period, epoch, famps, fphases = fourierparams
# figure out the order from the length of the Fourier param list
forder = len(famps)
# phase the times with this period
iphase = (times - epoch)/period
iphase = iphase - np.floor(iphase)
phasesortind = np.argsort(iphase)
phase = iphase[phasesortind]
ptimes = times[phasesortind]
pmags = mags[phasesortind]
perrs = errs[phasesortind]
# calculate all the individual terms of the series
fseries = [famps[x]*np.cos(2.0*np.pi*x*phase + fphases[x])
for x in range(forder)]
# this is the zeroth order coefficient - a constant equal to median mag
modelmags = np.median(mags)
# sum the series
for fo in fseries:
modelmags += fo
return modelmags, phase, ptimes, pmags, perrs
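
# A minimal sketch with synthetic data. Note that in the series above, the
# x = 0 term is a constant offset, so the fundamental belongs in the x = 1
# slots of the amplitude and phase lists.
import numpy as np
times = np.sort(np.random.uniform(0.0, 30.0, size=1000))
mags = 12.0 + 0.15*np.cos(2.0*np.pi*times/3.7)
errs = np.full_like(times, 0.01)
fparams = [3.7, 0.0, [0.0, 0.15], [0.0, 0.0]]
modelmags, phase, ptimes, pmags, perrs = fourier_sinusoidal_func(
    fparams, times, mags, errs
)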
|
def fourier_sinusoidal_residual(fourierparams, times, mags, errs):
'''
This returns the residual between the model mags and the actual mags.
Parameters
----------
fourierparams : list
This MUST be a list of the following form like so::
[period,
epoch,
[amplitude_1, amplitude_2, amplitude_3, ..., amplitude_X],
[phase_1, phase_2, phase_3, ..., phase_X]]
where X is the Fourier order.
times,mags,errs : np.array
The input time-series of measurements and associated errors for which
the model will be generated. The times will be used to generate model
mags, and the input `times`, `mags`, and `errs` will be resorted by
model phase and returned.
Returns
-------
np.array
The residuals between the input `mags` and generated `modelmags`,
weighted by the measurement errors in `errs`.
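    Examples
    --------
    A sketch of evaluating the weighted residual and a chi-squared value
    for a hypothetical parameter set; assumes `times`, `mags`, and `errs`
    are numpy arrays::
        fparams = [1.2345, 2455000.0, [0.1, 0.05], [0.0, 1.57]]
        resid = fourier_sinusoidal_residual(fparams, times, mags, errs)
        chisq = (resid*resid).sum()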
'''
modelmags, phase, ptimes, pmags, perrs = (
fourier_sinusoidal_func(fourierparams, times, mags, errs)
)
# this is now a weighted residual taking into account the measurement err
return (pmags - modelmags)/perrs
|
def _make_periodogram(axes,
lspinfo,
objectinfo,
findercmap,
finderconvolve,
verbose=True,
findercachedir='~/.astrobase/stamp-cache'):
'''Makes periodogram, objectinfo, and finder tile for `checkplot_png` and
`twolsp_checkplot_png`.
Parameters
----------
axes : matplotlib.axes.Axes object
The Axes object which will contain the plot being made.
lspinfo : dict
Dict containing results from a period-finder in `astrobase.periodbase`
or a dict that corresponds to that format.
objectinfo : dict
Dict containing basic info about the object being processed.
findercmap : matplotlib Colormap object or str
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
verbose : bool
If True, indicates progress.
findercachedir : str
The directory where the FITS finder images are downloaded and cached.
Returns
-------
Does not return anything, works on the input Axes object directly.
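    Examples
    --------
    A minimal sketch; assumes `lspinfo` is a period-finder result dict of
    the kind produced by the `astrobase.periodbase` functions::
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(figsize=(10, 8))
        _make_periodogram(ax, lspinfo, None, 'gray_r', None)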
'''
# get the appropriate plot ylabel
pgramylabel = PLOTYLABELS[lspinfo['method']]
# get the periods and lspvals from lspinfo
periods = lspinfo['periods']
lspvals = lspinfo['lspvals']
bestperiod = lspinfo['bestperiod']
nbestperiods = lspinfo['nbestperiods']
nbestlspvals = lspinfo['nbestlspvals']
# make the LSP plot on the first subplot
axes.plot(periods,lspvals)
axes.set_xscale('log',basex=10)
axes.set_xlabel('Period [days]')
axes.set_ylabel(pgramylabel)
plottitle = '%s - %.6f d' % (METHODLABELS[lspinfo['method']],
bestperiod)
axes.set_title(plottitle)
# show the best five peaks on the plot
for bestperiod, bestpeak in zip(nbestperiods,
nbestlspvals):
axes.annotate('%.6f' % bestperiod,
xy=(bestperiod, bestpeak), xycoords='data',
xytext=(0.0,25.0), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),fontsize='14.0')
# make a grid
axes.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# if objectinfo is present, get things from it
if (objectinfo and isinstance(objectinfo, dict) and
('objectid' in objectinfo or 'hatid' in objectinfo) and
'ra' in objectinfo and 'decl' in objectinfo and
objectinfo['ra'] and objectinfo['decl']):
if 'objectid' not in objectinfo:
objectid = objectinfo['hatid']
else:
objectid = objectinfo['objectid']
if verbose:
LOGINFO('adding in object information and '
'finder chart for %s at RA: %.3f, DEC: %.3f' %
(objectid, objectinfo['ra'], objectinfo['decl']))
# calculate colors
if ('bmag' in objectinfo and 'vmag' in objectinfo and
'jmag' in objectinfo and 'kmag' in objectinfo and
'sdssi' in objectinfo and
objectinfo['bmag'] and objectinfo['vmag'] and
objectinfo['jmag'] and objectinfo['kmag'] and
objectinfo['sdssi']):
bvcolor = objectinfo['bmag'] - objectinfo['vmag']
jkcolor = objectinfo['jmag'] - objectinfo['kmag']
ijcolor = objectinfo['sdssi'] - objectinfo['jmag']
else:
bvcolor = None
jkcolor = None
ijcolor = None
if ('teff' in objectinfo and 'gmag' in objectinfo and
objectinfo['teff'] and objectinfo['gmag']):
# Gaia data input
teff_val = objectinfo['teff']
gmag = objectinfo['gmag']
# bump the ylim of the LSP plot so that the overplotted finder and
# objectinfo can fit in this axes plot
lspylim = axes.get_ylim()
axes.set_ylim(lspylim[0], lspylim[1]+0.75*(lspylim[1]-lspylim[0]))
# get the stamp
try:
dss, dssheader = skyview_stamp(objectinfo['ra'],
objectinfo['decl'],
convolvewith=finderconvolve,
flip=False,
cachedir=findercachedir,
verbose=verbose)
stamp = dss
# inset plot it on the current axes
inset = inset_axes(axes, width="40%", height="40%", loc=1)
inset.imshow(stamp, cmap=findercmap, origin='lower')
inset.set_xticks([])
inset.set_yticks([])
inset.set_frame_on(False)
# grid lines pointing to the center of the frame
inset.axvline(x=150,ymin=0.375,ymax=0.45,linewidth=2.0,color='b')
inset.axhline(y=150,xmin=0.375,xmax=0.45,linewidth=2.0,color='b')
except OSError as e:
LOGERROR('downloaded FITS appears to be corrupt, retrying...')
dss, dssheader = skyview_stamp(objectinfo['ra'],
objectinfo['decl'],
convolvewith=finderconvolve,
flip=False,
forcefetch=True,
cachedir=findercachedir,
verbose=verbose)
stamp = dss
# inset plot it on the current axes
inset = inset_axes(axes, width="40%", height="40%", loc=1)
inset.imshow(stamp, cmap=findercmap, origin='lower')
inset.set_xticks([])
inset.set_yticks([])
inset.set_frame_on(False)
# grid lines pointing to the center of the frame
inset.axvline(x=150,ymin=0.375,ymax=0.45,linewidth=2.0,color='b')
inset.axhline(y=150,xmin=0.375,xmax=0.45,linewidth=2.0,color='b')
except Exception as e:
LOGEXCEPTION('could not fetch a DSS stamp for this '
'object %s using coords (%.3f,%.3f)' %
(objectid, objectinfo['ra'], objectinfo['decl']))
# annotate with objectinfo
axes.text(
0.05,0.95,
'%s' % objectid,
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0
)
axes.text(
0.05,0.91,
'RA = %.3f, DEC = %.3f' % (objectinfo['ra'], objectinfo['decl']),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0
)
if bvcolor:
axes.text(0.05,0.87,
'$B - V$ = %.3f, $V$ = %.3f' % (bvcolor,
objectinfo['vmag']),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
elif 'vmag' in objectinfo and objectinfo['vmag']:
axes.text(0.05,0.87,
'$V$ = %.3f' % (objectinfo['vmag'],),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
if ijcolor:
axes.text(0.05,0.83,
'$i - J$ = %.3f, $J$ = %.3f' % (ijcolor,
objectinfo['jmag']),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
elif 'jmag' in objectinfo and objectinfo['jmag']:
axes.text(0.05,0.83,
'$J$ = %.3f' % (objectinfo['jmag'],),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
if jkcolor:
axes.text(0.05,0.79,
'$J - K$ = %.3f, $K$ = %.3f' % (jkcolor,
objectinfo['kmag']),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
elif 'kmag' in objectinfo and objectinfo['kmag']:
axes.text(0.05,0.79,
'$K$ = %.3f' % (objectinfo['kmag'],),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
if 'sdssr' in objectinfo and objectinfo['sdssr']:
axes.text(0.05,0.75,'SDSS $r$ = %.3f' % objectinfo['sdssr'],
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
if ('teff' in objectinfo and 'gmag' in objectinfo and
objectinfo['teff'] and objectinfo['gmag']):
# gaia data available
axes.text(0.05,0.87,
r'$G$ = %.1f, $T_\mathrm{eff}$ = %d' % (
gmag, int(teff_val)),
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
# add in proper motion stuff if available in objectinfo
if ('pmra' in objectinfo and objectinfo['pmra'] and
'pmdecl' in objectinfo and objectinfo['pmdecl']):
pm = total_proper_motion(objectinfo['pmra'],
objectinfo['pmdecl'],
objectinfo['decl'])
axes.text(0.05,0.67,r'$\mu$ = %.2f mas yr$^{-1}$' % pm,
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
if 'jmag' in objectinfo and objectinfo['jmag']:
rpm = reduced_proper_motion(objectinfo['jmag'],pm)
axes.text(0.05,0.63,'$H_J$ = %.2f' % rpm,
ha='left',va='center',transform=axes.transAxes,
fontsize=18.0)
|
def _make_magseries_plot(axes,
stimes,
smags,
serrs,
magsarefluxes=False,
ms=2.0):
'''Makes the mag-series plot tile for `checkplot_png` and
`twolsp_checkplot_png`.
    Parameters
    ----------
    axes : matplotlib.axes.Axes object
The Axes object where the generated plot will go.
stimes,smags,serrs : np.array
The mag/flux time-series arrays along with associated errors. These
should all have been run through nan-stripping and sigma-clipping
beforehand.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags so the
plot y-axis direction and range can be set appropriately.
ms : float
The `markersize` kwarg to use when making the mag-series plot.
Returns
-------
Does not return anything, works on the input Axes object directly.
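    Examples
    --------
    A minimal sketch; assumes `stimes`, `smags`, `serrs` are nan-stripped
    and sigma-clipped numpy arrays::
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots()
        _make_magseries_plot(ax, stimes, smags, serrs)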
'''
scaledplottime = stimes - npmin(stimes)
axes.plot(scaledplottime,
smags,
marker='o',
ms=ms, ls='None',mew=0,
color='green',
rasterized=True)
# flip y axis for mags
if not magsarefluxes:
plot_ylim = axes.get_ylim()
axes.set_ylim((plot_ylim[1], plot_ylim[0]))
# set the x axis limit
axes.set_xlim((npmin(scaledplottime)-1.0,
npmax(scaledplottime)+1.0))
# make a grid
axes.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the x and y axis labels
plot_xlabel = 'JD - %.3f' % npmin(stimes)
if magsarefluxes:
plot_ylabel = 'flux'
else:
plot_ylabel = 'magnitude'
axes.set_xlabel(plot_xlabel)
axes.set_ylabel(plot_ylabel)
# fix the yaxis ticks (turns off offset and uses the full
# value of the yaxis tick)
axes.get_yaxis().get_major_formatter().set_useOffset(False)
axes.get_xaxis().get_major_formatter().set_useOffset(False)
|
def _make_phased_magseries_plot(axes,
periodind,
stimes, smags, serrs,
varperiod, varepoch,
phasewrap, phasesort,
phasebin, minbinelems,
plotxlim,
lspmethod,
lspmethodind=0,
xliminsetmode=False,
twolspmode=False,
magsarefluxes=False,
verbose=True,
phasems=2.0,
phasebinms=4.0):
'''Makes the phased magseries plot tile for the `checkplot_png` and
`twolsp_checkplot_png` functions.
Parameters
----------
axes : matplotlib.axes.Axes object
The Axes object where the generated plot will be written.
periodind : int
The index of the current best period being processed in the lspinfo
dict.
stimes,smags,serrs : np.array
The mag/flux time-series arrays along with associated errors. These
should all have been run through nan-stripping and sigma-clipping
beforehand.
varperiod : float or None
The period to use for this phased light curve plot tile.
varepoch : 'min' or float or list of lists or None
The epoch to use for this phased light curve plot tile. If this is a
float, will use the provided value directly. If this is 'min', will
automatically figure out the time-of-minimum of the phased light
        curve. If this is None, will use the minimum value of `stimes` as the
epoch of the phased light curve plot. If this is a list of lists, will
use the provided value of `lspmethodind` to look up the current
period-finder method and the provided value of `periodind` to look up
the epoch associated with that method and the current period. This is
mostly only useful when `twolspmode` is True.
phasewrap : bool
If this is True, the phased time-series will be wrapped around
phase 0.0.
phasesort : bool
If True, will sort the phased light curve in order of increasing phase.
    phasebin : float
The bin size to use to group together measurements closer than this
amount in phase. This is in units of phase. If this is a float, a
phase-binned version of the phased light curve will be overplotted on
top of the regular phased light curve.
minbinelems : int
The minimum number of elements required per phase bin to include it in
the phased LC plot.
plotxlim : sequence of two floats or None
The x-range (min, max) of the phased light curve plot. If None, will be
determined automatically.
lspmethod : str
One of the three-letter keys corresponding to period-finder method names
in the `astrobase.plotbase.METHODSHORTLABELS` dict. Used to set the plot
title correctly.
lspmethodind : int
If `twolspmode` is set, this will be used to look up the correct epoch
associated with the current period-finder method and period.
xliminsetmode : bool
If this is True, the generated phased light curve plot will use the
values of `plotxlim` as the main plot x-axis limits (i.e. zoomed-in if
`plotxlim` is a range smaller than the full phase range), and will show
        the full phased light curve plot as a smaller inset. Useful for
planetary transit light curves.
twolspmode : bool
If this is True, will use the `lspmethodind` and `periodind` to look up
the correct values of epoch, etc. in the provided `varepoch` list of
lists for plotting purposes.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags so the
plot y-axis direction and range can be set appropriately.
verbose : bool
If True, indicates progress.
phasems : float
The marker size to use for the main phased light curve plot symbols.
phasebinms : float
The marker size to use for the binned phased light curve plot symbols.
Returns
-------
Does not return anything, works on the input Axes object directly.
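    Examples
    --------
    A minimal sketch for the best-period tile; the period and epoch values
    here are hypothetical, and `stimes`, `smags`, `serrs` are assumed to be
    sigma-clipped numpy arrays::
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots()
        _make_phased_magseries_plot(ax, 0, stimes, smags, serrs,
                                    1.2345, 2455000.0,
                                    True, True, 0.002, 7,
                                    (-0.8, 0.8), 'gls')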
'''
plotvarepoch = None
# figure out the epoch, if it's None, use the min of the time
if varepoch is None:
plotvarepoch = npmin(stimes)
# if the varepoch is 'min', then fit a spline to the light curve
# phased using the min of the time, find the fit mag minimum and use
# the time for that as the varepoch
elif isinstance(varepoch, str) and varepoch == 'min':
try:
spfit = spline_fit_magseries(stimes,
smags,
serrs,
varperiod,
magsarefluxes=magsarefluxes,
sigclip=None,
verbose=verbose)
plotvarepoch = spfit['fitinfo']['fitepoch']
if len(plotvarepoch) != 1:
                plotvarepoch = plotvarepoch[0]
except Exception as e:
LOGERROR('spline fit failed, trying SavGol fit')
sgfit = savgol_fit_magseries(stimes,
smags,
serrs,
varperiod,
sigclip=None,
magsarefluxes=magsarefluxes,
verbose=verbose)
plotvarepoch = sgfit['fitinfo']['fitepoch']
if len(plotvarepoch) != 1:
plotvarepoch = plotvarepoch[0]
finally:
if plotvarepoch is None:
LOGERROR('could not find a min epoch time, '
'using min(times) as the epoch for '
'the phase-folded LC')
plotvarepoch = npmin(stimes)
elif isinstance(varepoch, list):
try:
if twolspmode:
thisvarepochlist = varepoch[lspmethodind]
plotvarepoch = thisvarepochlist[periodind]
else:
plotvarepoch = varepoch[periodind]
except Exception as e:
LOGEXCEPTION(
"varepoch provided in list form either doesn't match "
"the length of nbestperiods from the period-finder "
"result, or something else went wrong. using min(times) "
"as the epoch instead"
)
plotvarepoch = npmin(stimes)
# the final case is to use the provided varepoch directly
else:
plotvarepoch = varepoch
if verbose:
LOGINFO('plotting phased LC with period %.6f, epoch %.5f' %
(varperiod, plotvarepoch))
# phase the magseries
phasedlc = phase_magseries(stimes,
smags,
varperiod,
plotvarepoch,
wrap=phasewrap,
sort=phasesort)
plotphase = phasedlc['phase']
plotmags = phasedlc['mags']
# if we're supposed to bin the phases, do so
if phasebin:
binphasedlc = phase_bin_magseries(plotphase,
plotmags,
binsize=phasebin,
minbinelems=minbinelems)
binplotphase = binphasedlc['binnedphases']
binplotmags = binphasedlc['binnedmags']
# finally, make the phased LC plot
axes.plot(plotphase,
plotmags,
marker='o',
ms=phasems, ls='None',mew=0,
color='gray',
rasterized=True)
# overlay the binned phased LC plot if we're making one
if phasebin:
axes.plot(binplotphase,
binplotmags,
marker='o',
ms=phasebinms, ls='None',mew=0,
color='#1c1e57',
rasterized=True)
# flip y axis for mags
if not magsarefluxes:
plot_ylim = axes.get_ylim()
axes.set_ylim((plot_ylim[1], plot_ylim[0]))
# set the x axis limit
if not plotxlim:
axes.set_xlim((npmin(plotphase)-0.1,
npmax(plotphase)+0.1))
else:
axes.set_xlim((plotxlim[0],plotxlim[1]))
# make a grid
axes.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the x and y axis labels
plot_xlabel = 'phase'
if magsarefluxes:
plot_ylabel = 'flux'
else:
plot_ylabel = 'magnitude'
axes.set_xlabel(plot_xlabel)
axes.set_ylabel(plot_ylabel)
# fix the yaxis ticks (turns off offset and uses the full
# value of the yaxis tick)
axes.get_yaxis().get_major_formatter().set_useOffset(False)
axes.get_xaxis().get_major_formatter().set_useOffset(False)
# make the plot title
if periodind == 0:
plottitle = '%s best period: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
varperiod,
plotvarepoch
)
elif periodind == 1 and not twolspmode:
plottitle = '%s best period x 0.5: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
varperiod,
plotvarepoch
)
elif periodind == 2 and not twolspmode:
plottitle = '%s best period x 2: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
varperiod,
plotvarepoch
)
elif periodind > 2 and not twolspmode:
plottitle = '%s peak %s: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
periodind-1,
varperiod,
plotvarepoch
)
elif periodind > 0:
plottitle = '%s peak %s: %.6f d - epoch: %.5f' % (
METHODSHORTLABELS[lspmethod],
periodind+1,
varperiod,
plotvarepoch
)
axes.set_title(plottitle)
# if we're making an inset plot showing the full range
if (plotxlim and isinstance(plotxlim, (list,tuple)) and
len(plotxlim) == 2 and xliminsetmode is True):
# bump the ylim of the plot so that the inset can fit in this axes plot
axesylim = axes.get_ylim()
if magsarefluxes:
axes.set_ylim(axesylim[0],
axesylim[1] + 0.5*npabs(axesylim[1]-axesylim[0]))
else:
axes.set_ylim(axesylim[0],
axesylim[1] - 0.5*npabs(axesylim[1]-axesylim[0]))
# put the inset axes in
inset = inset_axes(axes, width="40%", height="40%", loc=1)
# make the scatter plot for the phased LC plot
inset.plot(plotphase,
plotmags,
marker='o',
ms=2.0, ls='None',mew=0,
color='gray',
rasterized=True)
# overlay the binned phased LC plot if we're making one
if phasebin:
inset.plot(binplotphase,
binplotmags,
marker='o',
ms=4.0, ls='None',mew=0,
color='#1c1e57',
rasterized=True)
        # show the full phase coverage
if phasewrap:
inset.set_xlim(-0.2,0.8)
else:
inset.set_xlim(-0.1,1.1)
# flip y axis for mags
if not magsarefluxes:
inset_ylim = inset.get_ylim()
inset.set_ylim((inset_ylim[1], inset_ylim[0]))
# set the plot title
inset.text(0.5,0.1,'full phased light curve',
ha='center',va='center',transform=inset.transAxes)
# don't show axes labels or ticks
inset.set_xticks([])
inset.set_yticks([])
|
def checkplot_png(lspinfo,
times,
mags,
errs,
varepoch='min',
magsarefluxes=False,
objectinfo=None,
findercmap='gray_r',
finderconvolve=None,
findercachedir='~/.astrobase/stamp-cache',
normto='globalmedian',
normmingap=4.0,
sigclip=4.0,
phasewrap=True,
phasesort=True,
phasebin=0.002,
minbinelems=7,
plotxlim=(-0.8,0.8),
xliminsetmode=False,
bestperiodhighlight=None,
plotdpi=100,
outfile=None,
verbose=True):
'''This makes a checkplot PNG using the output from a period-finder routine.
A checkplot is a 3 x 3 grid of plots like so::
[periodogram + objectinfo] [ unphased LC ] [period 1 phased LC]
[ period 1 phased LC /2 ] [period 1 phased LC x2] [period 2 phased LC]
[ period 3 phased LC ] [period 4 phased LC ] [period 5 phased LC]
This is used to sanity check the five best periods obtained from a
period-finder function in `astrobase.periodbase` or from your own
period-finder routines if their results can be turned into a dict with the
format shown below.
Parameters
----------
lspinfo : dict or str
If this is a dict, it must be a dict produced by an
`astrobase.periodbase` period-finder function or a dict from your own
period-finder function or routine that is of the form below with at
least these keys::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above}
`nbestperiods` and `nbestlspvals` must have at least 5 elements each,
e.g. describing the five 'best' (highest power) peaks in the
periodogram.
If lspinfo is a str, then it must be a path to a pickle file (ending
with the extension '.pkl' or '.pkl.gz') that contains a dict of the form
described above.
times,mags,errs : np.array
The mag/flux time-series arrays to process along with associated errors.
varepoch : 'min' or float or None or list of lists
This sets the time of minimum light finding strategy for the checkplot::
            If `varepoch` is None -> the epoch used for all phased light
              curve plots will be `min(times)`.
            If `varepoch` = 'min' -> automatic epoch finding will be done
              for all periods using light curve fits.
            If `varepoch` is a single float -> this epoch will be used
              for all phased light curve plots.
            If `varepoch` is a list of floats with length
              `len(nbestperiods)+2` -> each epoch will be applied to the
              phased light curve of the corresponding period from the
              period-finder results.
If you use a list for varepoch, it must be of length
`len(lspinfo['nbestperiods']) + 2`, because we insert half and twice the
period into the best periods list to make those phased LC plots.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags so the
plot y-axis direction and range can be set appropriately.
objectinfo : dict or None
If provided, this is a dict containing information on the object whose
light curve is being processed. This function will then be able to look
up and download a finder chart for this object and write that to the
        output checkplot PNG image. The `objectinfo` dict must be of the form
        and
contain at least the keys described below::
{'objectid': the name of the object,
'ra': the right ascension of the object in decimal degrees,
'decl': the declination of the object in decimal degrees,
'ndet': the number of observations of this object}
You can also provide magnitudes and proper motions of the object using
the following keys and the appropriate values in the `objectinfo`
dict. These will be used to calculate colors, total and reduced proper
motion, etc. and display these in the output checkplot PNG.
- SDSS mag keys: 'sdssu', 'sdssg', 'sdssr', 'sdssi', 'sdssz'
- 2MASS mag keys: 'jmag', 'hmag', 'kmag'
- Cousins mag keys: 'bmag', 'vmag'
- GAIA specific keys: 'gmag', 'teff'
- proper motion keys: 'pmra', 'pmdecl'
findercmap : str or matplotlib.cm.ColorMap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
findercachedir : str
The directory where the FITS finder images are downloaded and cached.
normto : {'globalmedian', 'zero'} or a float
This sets the normalization target::
'globalmedian' -> norms each mag to global median of the LC column
'zero' -> norms each mag to zero
a float -> norms each mag to this specified float value.
normmingap : float
This defines how much the difference between consecutive measurements is
allowed to be to consider them as parts of different timegroups. By
default it is set to 4.0 days.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
phasewrap : bool
If this is True, the phased time-series will be wrapped around phase
0.0.
phasesort : bool
If this is True, the phased time-series will be sorted in phase.
phasebin : float or None
If this is provided, indicates the bin size to use to group together
measurements closer than this amount in phase. This is in units of
phase. The binned phased light curve will be overplotted on top of the
phased light curve. Useful for when one has many measurement points and
needs to pick out a small trend in an otherwise noisy phased light
curve.
minbinelems : int
The minimum number of elements in each phase bin.
plotxlim : sequence of two floats or None
The x-axis limits to use when making the phased light curve plot. By
default, this is (-0.8, 0.8), which places phase 0.0 at the center of
the plot and covers approximately two cycles in phase to make any trends
clear.
xliminsetmode : bool
If this is True, the generated phased light curve plot will use the
values of `plotxlim` as the main plot x-axis limits (i.e. zoomed-in if
`plotxlim` is a range smaller than the full phase range), and will show
        the full phased light curve plot as a smaller inset. Useful for
planetary transit light curves.
bestperiodhighlight : str or None
If not None, this is a str with a matplotlib color specification to use
as the background color to highlight the phased light curve plot of the
'best' period and epoch combination. If None, no highlight will be
applied.
outfile : str or None
The file name of the file to save the checkplot to. If this is None,
will write to a file called 'checkplot.png' in the current working
directory.
plotdpi : int
Sets the resolution in DPI for PNG plots (default = 100).
verbose : bool
If False, turns off many of the informational messages. Useful for
when an external function is driving lots of `checkplot_png` calls.
Returns
-------
str
The file path to the generated checkplot PNG file.
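    Examples
    --------
    A typical invocation; assumes `gls` is a period-finder result dict
    from `astrobase.periodbase` and `times`, `mags`, `errs` are numpy
    arrays. The object ID, coordinates, and file name are hypothetical::
        cpf = checkplot_png(gls, times, mags, errs,
                            objectinfo={'objectid':'OBJ-0001',
                                        'ra':123.456, 'decl':-45.678,
                                        'ndet':times.size},
                            outfile='checkplot-OBJ-0001.png')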
'''
if not outfile and isinstance(lspinfo,str):
# generate the plot filename
plotfpath = os.path.join(
os.path.dirname(lspinfo),
'checkplot-%s.png' % (
os.path.basename(lspinfo),
)
)
elif outfile:
plotfpath = outfile
else:
plotfpath = 'checkplot.png'
# get the lspinfo from a pickle file transparently
if isinstance(lspinfo,str) and os.path.exists(lspinfo):
if verbose:
LOGINFO('loading LSP info from pickle %s' % lspinfo)
if '.gz' in lspinfo:
with gzip.open(lspinfo,'rb') as infd:
lspinfo = pickle.load(infd)
else:
with open(lspinfo,'rb') as infd:
lspinfo = pickle.load(infd)
# get the things to plot out of the data
if ('periods' in lspinfo and
'lspvals' in lspinfo and
'bestperiod' in lspinfo):
bestperiod = lspinfo['bestperiod']
nbestperiods = lspinfo['nbestperiods']
lspmethod = lspinfo['method']
else:
LOGERROR('could not understand lspinfo for this object, skipping...')
return None
if not npisfinite(bestperiod):
LOGWARNING('no best period found for this object, skipping...')
return None
# initialize the plot
fig, axes = plt.subplots(3,3)
axes = npravel(axes)
# this is a full page plot
fig.set_size_inches(30,24)
#######################
## PLOT 1 is the LSP ##
#######################
_make_periodogram(axes[0],lspinfo,objectinfo,
findercmap, finderconvolve,
verbose=verbose,
findercachedir=findercachedir)
######################################
## NOW MAKE THE PHASED LIGHT CURVES ##
######################################
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# take care of the normalization
if normto is not False:
stimes, smags = normalize_magseries(stimes, smags,
normto=normto,
magsarefluxes=magsarefluxes,
mingap=normmingap)
# make sure we have some lightcurve points to plot after sigclip
if len(stimes) >= 50:
##############################
## PLOT 2 is an unphased LC ##
##############################
_make_magseries_plot(axes[1], stimes, smags, serrs,
magsarefluxes=magsarefluxes)
###########################
### NOW PLOT PHASED LCS ###
###########################
# make the plot for each best period
lspbestperiods = nbestperiods[::]
lspperiodone = lspbestperiods[0]
lspbestperiods.insert(1,lspperiodone*2.0)
lspbestperiods.insert(1,lspperiodone*0.5)
for periodind, varperiod in enumerate(lspbestperiods):
# make sure the best period phased LC plot stands out
if periodind == 0 and bestperiodhighlight:
if MPLVERSION >= (2,0,0):
axes[periodind+2].set_facecolor(bestperiodhighlight)
else:
axes[periodind+2].set_axis_bgcolor(bestperiodhighlight)
_make_phased_magseries_plot(axes[periodind+2],
periodind,
stimes, smags, serrs,
varperiod, varepoch,
phasewrap, phasesort,
phasebin, minbinelems,
plotxlim, lspmethod,
xliminsetmode=xliminsetmode,
magsarefluxes=magsarefluxes,
verbose=verbose)
# end of plotting for each ax
# save the plot to disk
fig.set_tight_layout(True)
if plotfpath.endswith('.png'):
fig.savefig(plotfpath,dpi=plotdpi)
else:
fig.savefig(plotfpath)
plt.close('all')
if verbose:
LOGINFO('checkplot done -> %s' % plotfpath)
return plotfpath
# otherwise, there's no valid data for this plot
else:
LOGWARNING('no good data')
for periodind in range(5):
axes[periodind+2].text(
0.5,0.5,
('no best aperture light curve available'),
horizontalalignment='center',
verticalalignment='center',
transform=axes[periodind+2].transAxes
)
fig.set_tight_layout(True)
if plotfpath.endswith('.png'):
fig.savefig(plotfpath, dpi=plotdpi)
else:
fig.savefig(plotfpath)
plt.close('all')
if verbose:
LOGINFO('checkplot done -> %s' % plotfpath)
return plotfpath
|
def twolsp_checkplot_png(lspinfo1,
lspinfo2,
times,
mags,
errs,
varepoch='min',
magsarefluxes=False,
objectinfo=None,
findercmap='gray_r',
finderconvolve=None,
findercachedir='~/.astrobase/stamp-cache',
normto='globalmedian',
normmingap=4.0,
sigclip=4.0,
phasewrap=True,
phasesort=True,
phasebin=0.002,
minbinelems=7,
plotxlim=(-0.8,0.8),
unphasedms=2.0,
phasems=2.0,
phasebinms=4.0,
xliminsetmode=False,
bestperiodhighlight=None,
plotdpi=100,
outfile=None,
verbose=True):
'''This makes a checkplot using results from two independent period-finders.
Adapted from Luke Bouma's implementation of a similar function in his
work. This makes a special checkplot that uses two lspinfo dictionaries,
from two independent period-finding methods. For EBs, it's probably best to
use Stellingwerf PDM or Schwarzenberg-Czerny AoV as one of these, and the
    Box Least Squares (BLS) search method as the other one.
The checkplot layout in this case is::
[ pgram1 + objectinfo ] [ pgram2 ] [ unphased LC ]
[ pgram1 P1 phased LC ] [ pgram1 P2 phased LC ] [ pgram1 P3 phased LC ]
[ pgram2 P1 phased LC ] [ pgram2 P2 phased LC ] [ pgram2 P3 phased LC ]
where:
- pgram1 is the plot for the periodogram in the lspinfo1 dict
- pgram1 P1, P2, and P3 are the best three periods from lspinfo1
- pgram2 is the plot for the periodogram in the lspinfo2 dict
- pgram2 P1, P2, and P3 are the best three periods from lspinfo2
Note that we take the output file name from lspinfo1 if lspinfo1 is a string
filename pointing to a (gzipped) pickle containing the results dict from a
period-finding routine similar to those in periodbase.
Parameters
----------
lspinfo1,lspinfo2 : dict or str
If this is a dict, it must be a dict produced by an
`astrobase.periodbase` period-finder function or a dict from your own
period-finder function or routine that is of the form below with at
least these keys::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above}
`nbestperiods` and `nbestlspvals` must have at least 3 elements each,
e.g. describing the three 'best' (highest power) peaks in the
periodogram.
If lspinfo is a str, then it must be a path to a pickle file (ending
with the extension '.pkl' or '.pkl.gz') that contains a dict of the form
described above.
times,mags,errs : np.array
The mag/flux time-series arrays to process along with associated errors.
varepoch : 'min' or float or None or list of lists
This sets the time of minimum light finding strategy for the checkplot::
            If `varepoch` is None -> the epoch used for all phased light
              curve plots will be `min(times)`.
            If `varepoch` = 'min' -> automatic epoch finding will be done
              for all periods using light curve fits.
            If `varepoch` is a single float -> this epoch will be used
              for all phased light curve plots.
            If `varepoch` is a list of floats with length
              `len(nbestperiods)` -> each epoch will be applied to the
              phased light curve of the corresponding period from the
              period-finder results.
If you use a list for varepoch, it must be of length
`len(lspinfo['nbestperiods'])`.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags so the
        plot y-axis direction and range can be set appropriately.
objectinfo : dict or None
If provided, this is a dict containing information on the object whose
light curve is being processed. This function will then be able to look
up and download a finder chart for this object and write that to the
        output checkplot PNG image. The `objectinfo` dict must be of the form
        and
contain at least the keys described below::
{'objectid': the name of the object,
'ra': the right ascension of the object in decimal degrees,
'decl': the declination of the object in decimal degrees,
'ndet': the number of observations of this object}
You can also provide magnitudes and proper motions of the object using
the following keys and the appropriate values in the `objectinfo`
dict. These will be used to calculate colors, total and reduced proper
motion, etc. and display these in the output checkplot PNG.
- SDSS mag keys: 'sdssu', 'sdssg', 'sdssr', 'sdssi', 'sdssz'
- 2MASS mag keys: 'jmag', 'hmag', 'kmag'
- Cousins mag keys: 'bmag', 'vmag'
- GAIA specific keys: 'gmag', 'teff'
- proper motion keys: 'pmra', 'pmdecl'
findercmap : str or matplotlib.cm.ColorMap object
The Colormap object to use for the finder chart image.
finderconvolve : astropy.convolution.Kernel object or None
If not None, the Kernel object to use for convolving the finder image.
findercachedir : str
The directory where the FITS finder images are downloaded and cached.
normto : {'globalmedian', 'zero'} or a float
This sets the LC normalization target::
'globalmedian' -> norms each mag to global median of the LC column
'zero' -> norms each mag to zero
a float -> norms each mag to this specified float value.
normmingap : float
This defines how much the difference between consecutive measurements is
allowed to be to consider them as parts of different timegroups. By
default it is set to 4.0 days.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
phasewrap : bool
If this is True, the phased time-series will be wrapped around phase
0.0.
phasesort : bool
If this is True, the phased time-series will be sorted in phase.
phasebin : float or None
If this is provided, indicates the bin size to use to group together
measurements closer than this amount in phase. This is in units of
phase. The binned phased light curve will be overplotted on top of the
phased light curve. Useful for when one has many measurement points and
needs to pick out a small trend in an otherwise noisy phased light
curve.
minbinelems : int
The minimum number of elements in each phase bin.
plotxlim : sequence of two floats or None
The x-axis limits to use when making the phased light curve plot. By
default, this is (-0.8, 0.8), which places phase 0.0 at the center of
the plot and covers approximately two cycles in phase to make any trends
clear.
unphasedms : float
The marker size to use for the main unphased light curve plot symbols.
phasems : float
The marker size to use for the main phased light curve plot symbols.
phasebinms : float
The marker size to use for the binned phased light curve plot symbols.
xliminsetmode : bool
If this is True, the generated phased light curve plot will use the
values of `plotxlim` as the main plot x-axis limits (i.e. zoomed-in if
`plotxlim` is a range smaller than the full phase range), and will show
        the full phased light curve plot as a smaller inset. Useful for
planetary transit light curves.
bestperiodhighlight : str or None
If not None, this is a str with a matplotlib color specification to use
as the background color to highlight the phased light curve plot of the
'best' period and epoch combination. If None, no highlight will be
applied.
outfile : str or None
The file name of the file to save the checkplot to. If this is None,
will write to a file called 'checkplot.png' in the current working
directory.
plotdpi : int
Sets the resolution in DPI for PNG plots (default = 100).
verbose : bool
If False, turns off many of the informational messages. Useful for
when an external function is driving lots of `checkplot_png` calls.
Returns
-------
str
The file path to the generated checkplot PNG file.
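    Examples
    --------
    A sketch using results from two period-finders; assumes `pdm` and
    `bls` are result dicts from, e.g., `periodbase.stellingwerf_pdm` and
    `periodbase.bls_parallel_pfind`, and the output file name is
    arbitrary::
        cpf = twolsp_checkplot_png(pdm, bls, times, mags, errs,
                                   outfile='twolsp-checkplot-OBJ-0001.png')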
'''
# generate the plot filename
if not outfile and isinstance(lspinfo1,str):
plotfpath = os.path.join(
os.path.dirname(lspinfo1),
'twolsp-checkplot-%s.png' % (
os.path.basename(lspinfo1),
)
)
elif outfile:
plotfpath = outfile
else:
plotfpath = 'twolsp-checkplot.png'
# get the first LSP from a pickle file transparently
if isinstance(lspinfo1,str) and os.path.exists(lspinfo1):
if verbose:
LOGINFO('loading LSP info from pickle %s' % lspinfo1)
if '.gz' in lspinfo1:
with gzip.open(lspinfo1,'rb') as infd:
lspinfo1 = pickle.load(infd)
else:
with open(lspinfo1,'rb') as infd:
lspinfo1 = pickle.load(infd)
# get the second LSP from a pickle file transparently
if isinstance(lspinfo2,str) and os.path.exists(lspinfo2):
if verbose:
LOGINFO('loading LSP info from pickle %s' % lspinfo2)
if '.gz' in lspinfo2:
with gzip.open(lspinfo2,'rb') as infd:
lspinfo2 = pickle.load(infd)
else:
with open(lspinfo2,'rb') as infd:
lspinfo2 = pickle.load(infd)
# get the things to plot out of the data
if ('periods' in lspinfo1 and 'periods' in lspinfo2 and
'lspvals' in lspinfo1 and 'lspvals' in lspinfo2 and
'bestperiod' in lspinfo1 and 'bestperiod' in lspinfo2):
bestperiod1 = lspinfo1['bestperiod']
nbestperiods1 = lspinfo1['nbestperiods']
lspmethod1 = lspinfo1['method']
bestperiod2 = lspinfo2['bestperiod']
nbestperiods2 = lspinfo2['nbestperiods']
lspmethod2 = lspinfo2['method']
else:
LOGERROR('could not understand lspinfo1 or lspinfo2 '
'for this object, skipping...')
return None
if (not npisfinite(bestperiod1)) or (not npisfinite(bestperiod2)):
LOGWARNING('no best period found for this object, skipping...')
return None
# initialize the plot
fig, axes = plt.subplots(3,3)
axes = npravel(axes)
# this is a full page plot
fig.set_size_inches(30,24)
######################################################################
## PLOT 1 is the LSP from lspinfo1, including objectinfo and finder ##
######################################################################
_make_periodogram(axes[0], lspinfo1, objectinfo,
findercmap, finderconvolve,
verbose=verbose,
findercachedir=findercachedir)
#####################################
## PLOT 2 is the LSP from lspinfo2 ##
#####################################
_make_periodogram(axes[1], lspinfo2, None,
findercmap, finderconvolve)
##########################################
## FIX UP THE MAGS AND REMOVE BAD STUFF ##
##########################################
# sigclip first
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# take care of the normalization
if normto is not False:
stimes, smags = normalize_magseries(stimes, smags,
normto=normto,
magsarefluxes=magsarefluxes,
mingap=normmingap)
# make sure we have some lightcurve points to plot after sigclip
if len(stimes) >= 50:
##############################
## PLOT 3 is an unphased LC ##
##############################
_make_magseries_plot(axes[2], stimes, smags, serrs,
magsarefluxes=magsarefluxes,
ms=unphasedms)
# make the plot for each best period
lspbestperiods1 = nbestperiods1[::]
lspbestperiods2 = nbestperiods2[::]
##########################################################
### NOW PLOT PHASED LCS FOR 3 BEST PERIODS IN LSPINFO1 ###
##########################################################
for periodind, varperiod, plotaxes in zip([0,1,2],
lspbestperiods1[:3],
[axes[3], axes[4], axes[5]]):
# make sure the best period phased LC plot stands out
if periodind == 0 and bestperiodhighlight:
if MPLVERSION >= (2,0,0):
plotaxes.set_facecolor(bestperiodhighlight)
else:
plotaxes.set_axis_bgcolor(bestperiodhighlight)
_make_phased_magseries_plot(plotaxes,
periodind,
stimes, smags, serrs,
varperiod, varepoch,
phasewrap, phasesort,
phasebin, minbinelems,
plotxlim, lspmethod1,
lspmethodind=0,
twolspmode=True,
magsarefluxes=magsarefluxes,
xliminsetmode=xliminsetmode,
verbose=verbose,
phasems=phasems,
phasebinms=phasebinms)
##########################################################
### NOW PLOT PHASED LCS FOR 3 BEST PERIODS IN LSPINFO2 ###
##########################################################
for periodind, varperiod, plotaxes in zip([0,1,2],
lspbestperiods2[:3],
[axes[6], axes[7], axes[8]]):
# make sure the best period phased LC plot stands out
if periodind == 0 and bestperiodhighlight:
if MPLVERSION >= (2,0,0):
plotaxes.set_facecolor(bestperiodhighlight)
else:
plotaxes.set_axis_bgcolor(bestperiodhighlight)
_make_phased_magseries_plot(plotaxes,
periodind,
stimes, smags, serrs,
varperiod, varepoch,
phasewrap, phasesort,
phasebin, minbinelems,
plotxlim, lspmethod2,
lspmethodind=1,
twolspmode=True,
magsarefluxes=magsarefluxes,
xliminsetmode=xliminsetmode,
verbose=verbose,
phasems=phasems,
phasebinms=phasebinms)
# end of plotting for each ax
# save the plot to disk
fig.set_tight_layout(True)
if plotfpath.endswith('.png'):
fig.savefig(plotfpath,dpi=plotdpi)
else:
fig.savefig(plotfpath)
plt.close()
if verbose:
LOGINFO('checkplot done -> %s' % plotfpath)
return plotfpath
# otherwise, there's no valid data for this plot
else:
LOGWARNING('no good data')
for periodind in range(5):
axes[periodind+2].text(
0.5,0.5,
('no best aperture light curve available'),
horizontalalignment='center',
verticalalignment='center',
transform=axes[periodind+2].transAxes
)
fig.set_tight_layout(True)
if plotfpath.endswith('.png'):
fig.savefig(plotfpath, dpi=plotdpi)
else:
fig.savefig(plotfpath)
plt.close()
if verbose:
LOGINFO('checkplot done -> %s' % plotfpath)
return plotfpath
|
def precess_coordinates(ra, dec,
epoch_one, epoch_two,
jd=None,
mu_ra=0.0,
mu_dec=0.0,
outscalar=False):
'''Precesses target coordinates `ra`, `dec` from `epoch_one` to `epoch_two`.
This takes into account the jd of the observations, as well as the proper
motion of the target mu_ra, mu_dec. Adapted from J. D. Hartman's
VARTOOLS/converttime.c [coordprecess].
Parameters
----------
ra,dec : float
The equatorial coordinates of the object at `epoch_one` to precess in
decimal degrees.
epoch_one : float
        The origin epoch to precess from. This is a float, like:
1985.0, 2000.0, etc.
epoch_two : float
        The target epoch to precess to. This is a float, like:
2000.0, 2018.0, etc.
jd : float
        The full Julian date to use along with the proper motions in `mu_ra`
        and
`mu_dec` to handle proper motion along with the coordinate frame
precession. If one of `jd`, `mu_ra`, or `mu_dec` is missing, the proper
motion will not be used to calculate the final precessed coordinates.
mu_ra,mu_dec : float
The proper motion in mas/yr in right ascension and declination. If these
are provided along with `jd`, the total proper motion of the object will
be taken into account to calculate the final precessed coordinates.
outscalar : bool
If True, converts the output coordinates from one-element np.arrays to
scalars.
Returns
-------
precessed_ra, precessed_dec : float
A tuple of precessed equatorial coordinates in decimal degrees at
`epoch_two` taking into account proper motion if `jd`, `mu_ra`, and
`mu_dec` are provided.
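    Examples
    --------
    Precessing hypothetical J2000.0 coordinates to epoch 2018.0, ignoring
    proper motion::
        ra2018, dec2018 = precess_coordinates(123.456, -45.678,
                                              2000.0, 2018.0,
                                              outscalar=True)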
'''
raproc, decproc = np.radians(ra), np.radians(dec)
if ((mu_ra != 0.0) and (mu_dec != 0.0) and jd):
jd_epoch_one = JD2000 + (epoch_one - epoch_two)*365.25
raproc = (
raproc +
(jd - jd_epoch_one)*mu_ra*MAS_P_YR_TO_RAD_P_DAY/np.cos(decproc)
)
decproc = decproc + (jd - jd_epoch_one)*mu_dec*MAS_P_YR_TO_RAD_P_DAY
ca = np.cos(raproc)
cd = np.cos(decproc)
sa = np.sin(raproc)
sd = np.sin(decproc)
if epoch_one != epoch_two:
t1 = 1.0e-3 * (epoch_two - epoch_one)
t2 = 1.0e-3 * (epoch_one - 2000.0)
a = ( t1*ARCSEC_TO_RADIANS * (23062.181 + t2*(139.656 + 0.0139*t2) +
t1*(30.188 - 0.344*t2+17.998*t1)) )
b = t1*t1*ARCSEC_TO_RADIANS*(79.280 + 0.410*t2 + 0.205*t1) + a
c = (
ARCSEC_TO_RADIANS*t1*(20043.109 - t2*(85.33 + 0.217*t2) +
                                  t1*(-42.665 - 0.217*t2 - 41.833*t1))
)
sina, sinb, sinc = np.sin(a), np.sin(b), np.sin(c)
cosa, cosb, cosc = np.cos(a), np.cos(b), np.cos(c)
precmatrix = np.matrix([[cosa*cosb*cosc - sina*sinb,
sina*cosb + cosa*sinb*cosc,
cosa*sinc],
[-cosa*sinb - sina*cosb*cosc,
cosa*cosb - sina*sinb*cosc,
-sina*sinc],
[-cosb*sinc,
-sinb*sinc,
cosc]])
precmatrix = precmatrix.transpose()
x = (np.matrix([cd*ca, cd*sa, sd])).transpose()
x2 = precmatrix * x
outra = np.arctan2(x2[1],x2[0])
outdec = np.arcsin(x2[2])
outradeg = np.rad2deg(outra)
outdecdeg = np.rad2deg(outdec)
if outradeg < 0.0:
outradeg = outradeg + 360.0
if outscalar:
return float(outradeg), float(outdecdeg)
else:
return outradeg, outdecdeg
else:
# if the epochs are the same and no proper motion, this will be the same
# as the input values. if the epochs are the same, but there IS proper
# motion (and a given JD), then these will be perturbed from the input
# values of ra, dec by the appropriate amount of motion
return np.degrees(raproc), np.degrees(decproc)
|
def _single_true(iterable):
'''This returns True if only one True-ish element exists in `iterable`.
Parameters
----------
    iterable : iterable
        An iterable of elements to test for a single True-ish value.
Returns
-------
bool
True if only one True-ish element exists in `iterable`. False otherwise.
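    Examples
    --------
    Exactly one true-ish element returns True::
        >>> _single_true([0, 1, 0])
        True
        >>> _single_true([1, 1, 0])
        False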
'''
# return True if exactly one true found
iterator = iter(iterable)
    # consume the iterator until the first true-ish element or exhaustion
has_true = any(iterator)
# carry on consuming until another true value / exhausted
has_another_true = any(iterator)
return has_true and not has_another_true
|
def get_epochs_given_midtimes_and_period(
t_mid,
period,
err_t_mid=None,
t0_fixed=None,
t0_percentile=None,
verbose=False
):
'''This calculates the future epochs for a transit, given a period and a
    starting epoch.
The equation used is::
t_mid = period*epoch + t0
Default behavior if no kwargs are used is to define `t0` as the median
finite time of the passed `t_mid` array.
Only one of `err_t_mid` or `t0_fixed` should be passed.
Parameters
----------
t_mid : np.array
A np.array of transit mid-time measurements
period : float
The period used to calculate epochs, per the equation above. For typical
use cases, a period precise to ~1e-5 days is sufficient to get correct
epochs.
err_t_mid : None or np.array
If provided, contains the errors of the transit mid-time
measurements. The zero-point epoch is then set equal to the average of
        the transit times, weighted as `1/err_t_mid^2`. This minimizes the
covariance between the transit epoch and the period (e.g., Gibson et
al. 2013). For standard O-C analysis this is the best method.
    t0_fixed : None or float
If provided, use this t0 as the starting epoch. (Overrides all others).
t0_percentile : None or float
If provided, use this percentile of `t_mid` to define `t0`.
Returns
-------
tuple
        This is a tuple of the form `(integer_epoch_array, t0)`.
`integer_epoch_array` is an array of integer epochs (float-type),
of length equal to the number of *finite* mid-times passed.
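    Examples
    --------
    A sketch with made-up mid-times and a 3.0 day period; by default,
    `t0` is the median of the finite mid-times::
        t_mid = np.array([100.0, 103.0, 106.0, 112.0, 115.0])
        epochs, t0 = get_epochs_given_midtimes_and_period(t_mid, 3.0)
        # t0 = 106.0, epochs = [-2., -1., 0., 2., 3.]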
'''
kwargarr = np.array([isinstance(err_t_mid,np.ndarray),
t0_fixed,
t0_percentile])
if not _single_true(kwargarr) and not np.all(~kwargarr.astype(bool)):
raise AssertionError(
'can have at most one of err_t_mid, t0_fixed, t0_percentile')
t_mid = t_mid[np.isfinite(t_mid)]
N_midtimes = len(t_mid)
if t0_fixed:
t0 = t0_fixed
elif isinstance(err_t_mid,np.ndarray):
# get the weighted average. then round it to the nearest transit epoch.
t0_avg = np.average(t_mid, weights=1/err_t_mid**2)
t0_options = np.arange(min(t_mid), max(t_mid)+period, period)
t0 = t0_options[np.argmin(np.abs(t0_options - t0_avg))]
else:
if not t0_percentile:
# if there are an odd number of times, take the median time as
# epoch=0. elif there are an even number of times, take the lower
# of the two middle times as epoch=0.
if N_midtimes % 2 == 1:
t0 = np.median(t_mid)
else:
                # sort first so this really is the lower of the two
                # middle times
                t0 = np.sort(t_mid)[int(N_midtimes/2) - 1]
else:
t0 = np.sort(t_mid)[int(N_midtimes*t0_percentile/100)]
epoch = (t_mid - t0)/period
# do not convert numpy entries to actual ints, because np.nan is float type
int_epoch = np.round(epoch, 0)
if verbose:
LOGINFO('epochs before rounding')
LOGINFO('\n{:s}'.format(repr(epoch)))
LOGINFO('epochs after rounding')
LOGINFO('\n{:s}'.format(repr(int_epoch)))
return int_epoch, t0
|
def unixtime_to_jd(unix_time):
'''This converts UNIX time in seconds to a Julian date in UTC (JD_UTC).
Parameters
----------
unix_time : float
A UNIX time in decimal seconds since the 1970 UNIX epoch.
Returns
-------
jd : float
The Julian date corresponding to the provided UNIX time.
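    Examples
    --------
    The UNIX epoch itself corresponds to JD 2440587.5::
        >>> unixtime_to_jd(0.0)
        2440587.5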
'''
# use astropy's time module
jdutc = astime.Time(unix_time, format='unix', scale='utc')
return jdutc.jd
|
def datetime_to_jd(dt):
'''This converts a Python datetime object (naive, time in UT) to JD_UTC.
Parameters
----------
dt : datetime
        A naive Python `datetime` object (i.e. with no tzinfo attached),
        assumed to be in UTC.
Returns
-------
jd : float
The Julian date corresponding to the `datetime` object.
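    Examples
    --------
    2000-01-01 12:00:00 UTC corresponds to JD_UTC 2451545.0::
        >>> from datetime import datetime
        >>> datetime_to_jd(datetime(2000, 1, 1, 12, 0, 0))
        2451545.0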
'''
jdutc = astime.Time(dt, format='datetime',scale='utc')
return jdutc.jd
|
def jd_to_datetime(jd, returniso=False):
'''This converts a UTC JD to a Python `datetime` object or ISO date string.
Parameters
----------
jd : float
The Julian date measured at UTC.
returniso : bool
If False, returns a naive Python `datetime` object corresponding to
`jd`. If True, returns the ISO format string corresponding to the date
and time at UTC from `jd`.
Returns
-------
datetime or str
Depending on the value of `returniso`.
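    Examples
    --------
    Converting JD_UTC 2451545.0 back to an ISO date string::
        >>> jd_to_datetime(2451545.0, returniso=True)
        '2000-01-01 12:00:00.000'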
'''
tt = astime.Time(jd, format='jd', scale='utc')
if returniso:
return tt.iso
else:
return tt.datetime
|
def jd_corr(jd,
ra, dec,
obslon=None,
obslat=None,
obsalt=None,
jd_type='bjd'):
'''Returns BJD_TDB or HJD_TDB for input JD_UTC.
The equation used is::
BJD_TDB = JD_UTC + JD_to_TDB_corr + romer_delay
where:
- JD_to_TDB_corr is the difference between UTC and TDB JDs
    - romer_delay is the light travel-time (Rømer) delay due to the Earth's
      displacement from the solar-system barycenter (or the Sun, for HJD)
This is based on the code at:
https://mail.scipy.org/pipermail/astropy/2014-April/003148.html
Note that this does not correct for:
1. precession of coordinates if the epoch is not 2000.0
2. precession of coordinates if the target has a proper motion
3. Shapiro delay
4. Einstein delay
Parameters
----------
jd : float or array-like
The Julian date(s) measured at UTC.
ra,dec : float
The equatorial coordinates of the object in decimal degrees.
obslon,obslat,obsalt : float or None
The longitude, latitude of the observatory in decimal degrees and
altitude of the observatory in meters. If these are not provided, the
corrected JD will be calculated with respect to the center of the Earth.
jd_type : {'bjd','hjd'}
        Conversion type to perform, either to Barycentric Julian Date
        ('bjd') or to Heliocentric Julian Date ('hjd').
Returns
-------
float or np.array
The converted BJD or HJD.
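    Examples
    --------
    A sketch of a geocentric BJD_TDB correction for a target at
    hypothetical coordinates::
        bjd_tdb = jd_corr(2458000.5, 123.456, -45.678, jd_type='bjd')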
'''
if not HAVEKERNEL:
LOGERROR('no JPL kernel available, can\'t continue!')
return
# Source unit-vector
## Assume coordinates in ICRS
## Set distance to unit (kilometers)
    # convert the angles from degrees to radians
rarad = np.radians(ra)
decrad = np.radians(dec)
cosra = np.cos(rarad)
sinra = np.sin(rarad)
cosdec = np.cos(decrad)
sindec = np.sin(decrad)
# this assumes that the target is very far away
src_unitvector = np.array([cosdec*cosra,cosdec*sinra,sindec])
# Convert epochs to astropy.time.Time
## Assume JD(UTC)
if (obslon is None) or (obslat is None) or (obsalt is None):
t = astime.Time(jd, scale='utc', format='jd')
else:
t = astime.Time(jd, scale='utc', format='jd',
location=('%.5fd' % obslon,
'%.5fd' % obslat,
obsalt))
# Get Earth-Moon barycenter position
## NB: jplephem uses Barycentric Dynamical Time, e.g. JD(TDB)
## and gives positions relative to solar system barycenter
barycenter_earthmoon = jplkernel[0,3].compute(t.tdb.jd)
# Get Moon position vectors from the center of Earth to the Moon
# this means we get the following vectors from the ephemerides
# Earth Barycenter (3) -> Moon (301)
# Earth Barycenter (3) -> Earth (399)
# so the final vector is [3,301] - [3,399]
# units are in km
moonvector = (jplkernel[3,301].compute(t.tdb.jd) -
jplkernel[3,399].compute(t.tdb.jd))
# Compute Earth position vectors (this is for the center of the earth with
# respect to the solar system barycenter)
# all these units are in km
pos_earth = (barycenter_earthmoon - moonvector * 1.0/(1.0+EMRAT))
if jd_type == 'bjd':
# Compute BJD correction
## Assume source vectors parallel at Earth and Solar System
## Barycenter
## i.e. source is at infinity
# the romer_delay correction is (r.dot.n)/c where:
# r is the vector from SSB to earth center
        # n is the unit vector from the observer to the source
correction_seconds = np.dot(pos_earth.T, src_unitvector)/CLIGHT_KPS
correction_days = correction_seconds/SEC_P_DAY
elif jd_type == 'hjd':
# Compute HJD correction via Sun ephemeris
# this is the position vector of the center of the sun in km
# Solar System Barycenter (0) -> Sun (10)
pos_sun = jplkernel[0,10].compute(t.tdb.jd)
# this is the vector from the center of the sun to the center of the
# earth
sun_earth_vec = pos_earth - pos_sun
# calculate the heliocentric correction
correction_seconds = np.dot(sun_earth_vec.T, src_unitvector)/CLIGHT_KPS
correction_days = correction_seconds/SEC_P_DAY
# TDB is the appropriate time scale for these ephemerides
new_jd = t.tdb.jd + correction_days
return new_jd
|
def _lclist_parallel_worker(task):
'''This is a parallel worker for makelclist.
Parameters
----------
task : tuple
This is a tuple containing the following items:
task[0] = lcf
task[1] = columns
task[2] = lcformat
task[3] = lcformatdir
task[4] = lcndetkey
Returns
-------
dict or None
This contains all of the info for the object processed in this LC read
        operation. If this fails, returns None.
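    Examples
    --------
    A sketch of a single task tuple; the path, format key, and ndet
    column below are hypothetical::
        task = ('/data/lcs/object1-lc.pkl',
                ['objectid', 'objectinfo.ra', 'objectinfo.decl'],
                'hat-sql', None, ['aep_000'])
        lcobjdict = _lclist_parallel_worker(task)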
'''
lcf, columns, lcformat, lcformatdir, lcndetkey = task
# get the bits needed for lcformat handling
# NOTE: we re-import things in this worker function because sometimes
# functions can't be pickled correctly for passing them to worker functions
# in a processing pool
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# we store the full path of the light curve
lcobjdict = {'lcfname':os.path.abspath(lcf)}
try:
# read the light curve in
lcdict = readerfunc(lcf)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
# insert all of the columns
for colkey in columns:
if '.' in colkey:
getkey = colkey.split('.')
else:
getkey = [colkey]
try:
thiscolval = _dict_get(lcdict, getkey)
except Exception as e:
LOGWARNING('column %s does not exist for %s' %
(colkey, lcf))
thiscolval = np.nan
# update the lcobjdict with this value
lcobjdict[getkey[-1]] = thiscolval
except Exception as e:
LOGEXCEPTION('could not figure out columns for %s' % lcf)
# insert all of the columns as nans
for colkey in columns:
if '.' in colkey:
getkey = colkey.split('.')
else:
getkey = [colkey]
thiscolval = np.nan
            # update the lcobjdict with this value
lcobjdict[getkey[-1]] = thiscolval
# now get the actual ndets; this excludes nans and infs
for dk in lcndetkey:
try:
if '.' in dk:
getdk = dk.split('.')
else:
getdk = [dk]
ndetcol = _dict_get(lcdict, getdk)
actualndets = ndetcol[np.isfinite(ndetcol)].size
lcobjdict['%s.ndet' % getdk[-1]] = actualndets
except Exception as e:
lcobjdict['%s.ndet' % getdk[-1]] = np.nan
return lcobjdict
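# The dotted-key addressing above relies on a `_dict_get` helper defined
# elsewhere in this module. A minimal equivalent (a sketch, not the actual
# implementation) walks the nested lcdict one key at a time:
from functools import reduce

def _dict_get_sketch(datadict, keylist):
    '''Walks a nested dict using a key list, e.g. ['objectinfo', 'ra'].'''
    return reduce(lambda d, k: d[k], keylist, datadict)

# example: _dict_get_sketch(lcdict, 'objectinfo.ra'.split('.')) returns
# lcdict['objectinfo']['ra']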
def make_lclist(basedir,
outfile,
use_list_of_filenames=None,
lcformat='hat-sql',
lcformatdir=None,
fileglob=None,
recursive=True,
columns=['objectid',
'objectinfo.ra',
'objectinfo.decl',
'objectinfo.ndet'],
makecoordindex=('objectinfo.ra','objectinfo.decl'),
field_fitsfile=None,
field_wcsfrom=None,
field_scale=ZScaleInterval(),
field_stretch=LinearStretch(),
field_colormap=plt.cm.gray_r,
field_findersize=None,
field_pltopts={'marker':'o',
'markersize':10.0,
'markerfacecolor':'none',
'markeredgewidth':2.0,
'markeredgecolor':'red'},
field_grid=False,
field_gridcolor='k',
field_zoomcontain=True,
maxlcs=None,
nworkers=NCPUS):
'''This generates a light curve catalog for all light curves in a directory.
Given a base directory where all the files are, and a light curve format,
this will find all light curves, pull out the keys in each lcdict requested
in the `columns` kwarg for each object, and write them to the requested
output pickle file. These keys should be pointers to scalar values
(i.e. something like `objectinfo.ra` is OK, but something like 'times' won't
work because it's a vector).
Generally, this works with light curve reading functions that produce
lcdicts as detailed in the docstring for `lcproc.register_lcformat`. Once
you've registered your light curve reader functions using the
`lcproc.register_lcformat` function, pass in the `formatkey` associated with
your light curve format, and this function will be able to read all light
curves in that format as well as the object information stored in their
`objectinfo` dict.
Parameters
----------
basedir : str or list of str
If this is a str, points to a single directory to search for light
curves. If this is a list of str, it must be a list of directories to
search for light curves. All of these will be searched to find light
curve files matching either your light curve format's default fileglob
(when you registered your LC format), or a specific fileglob that you
can pass in using the `fileglob` kwarg here. If the `recursive` kwarg
is set, the provided directories will be searched recursively.
If `use_list_of_filenames` is not None, it will override this argument
and the function will take those light curves as the list of files it
must process instead of whatever is specified in `basedir`.
outfile : str
This is the name of the output file to write. This will be a pickle
file, so a good convention to use for this name is something like
'my-lightcurve-catalog.pkl'.
use_list_of_filenames : list of str or None
Use this kwarg to override whatever is provided in `basedir` and
directly pass in a list of light curve files to process. This can speed
up this function by a lot because no searches on disk will be performed
to find light curve files matching `basedir` and `fileglob`.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
fileglob : str or None
If provided, is a string that is a valid UNIX filename glob. Used to
override the default fileglob for this LC format when searching for
light curve files in `basedir`.
recursive : bool
If True, the directories specified in `basedir` will be searched
recursively for all light curve files that match the default fileglob
for this LC format or a specific one provided in `fileglob`.
columns : list of str
This is a list of keys in the lcdict produced by your light curve reader
function that contain object information, which will be extracted and
put into the output light curve catalog. It's highly recommended that
your LC reader function produce an lcdict that contains at least the
default keys shown here.
The lcdict keys to extract are specified by using an address scheme:
- First level dict keys can be specified directly:
e.g., 'objectid' will extract lcdict['objectid']
- Keys at other levels can be specified by using a period to indicate
the level:
- e.g., 'objectinfo.ra' will extract lcdict['objectinfo']['ra']
- e.g., 'objectinfo.varinfo.features.stetsonj' will extract
lcdict['objectinfo']['varinfo']['features']['stetsonj']
makecoordindex : list of two str or None
This is used to specify which lcdict keys contain the right ascension
and declination coordinates for this object. If these are provided, the
output light curve catalog will have a kdtree built on all object
coordinates, which enables fast spatial searches and cross-matching to
external catalogs by `checkplot` and `lcproc` functions.
field_fitsfile : str or None
If this is not None, it should be the path to a FITS image containing
the objects these light curves are for. If this is provided,
`make_lclist` will use the WCS information in the FITS itself if
`field_wcsfrom` is None (or from a WCS header file pointed to by
`field_wcsfrom`) to obtain x and y pixel coordinates for all of the
objects in the field. A finder chart will also be made using
`astrobase.plotbase.fits_finder_chart` using the corresponding
`field_scale`, `_stretch`, `_colormap`, `_findersize`, `_pltopts`,
`_grid`, and `_gridcolor` kwargs for that function, reproduced here to
enable customization of the finder chart plot.
field_wcsfrom : str or None
If `wcsfrom` is None, the WCS to transform the RA/Dec to pixel x/y will
be taken from the FITS header of `fitsfile`. If this is not None, it
must be a FITS or similar file that contains a WCS header in its first
extension.
field_scale : astropy.visualization.Interval object
`scale` sets the normalization for the FITS pixel values. This is an
astropy.visualization Interval object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
field_stretch : astropy.visualization.Stretch object
`stretch` sets the stretch function for mapping FITS pixel values to
output pixel values. This is an astropy.visualization Stretch object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
field_colormap : matplotlib Colormap object
`colormap` is a matplotlib color map object to use for the output image.
field_findersize : None or tuple of two ints
If `findersize` is None, the output image size will be set by the NAXIS1
and NAXIS2 keywords in the input `fitsfile` FITS header. Otherwise,
`findersize` must be a tuple with the intended x and y size of the image
in inches (all output images will use a DPI = 100).
field_pltopts : dict
`field_pltopts` controls how the overlay points will be plotted. This is
a dict with standard matplotlib marker, etc. kwargs as key-val pairs,
e.g. 'markersize', 'markerfacecolor', etc. The default options make red
outline circles at the location of each object in the overlay.
field_grid : bool
`grid` sets if a grid will be made on the output image.
field_gridcolor : str
`gridcolor` sets the color of the grid lines. This is the usual matplotlib
color spec string.
field_zoomcontain : bool
`field_zoomcontain` controls if the finder chart will be zoomed to
just contain the overlayed points. Everything outside the footprint of
these points will be discarded.
maxlcs : int or None
This sets how many light curves to process in the input LC list
generated by searching for LCs in `basedir` or in the list provided as
`use_list_of_filenames`.
nworkers : int
This sets the number of parallel workers to launch to collect
information from the light curves.
Returns
-------
str
Returns the path to the generated light curve catalog pickle file.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
if not fileglob:
fileglob = dfileglob
# this is to get the actual ndet
# set to the magnitudes column
lcndetkey = dmagcols
if isinstance(use_list_of_filenames, list):
matching = use_list_of_filenames
else:
# handle the case where basedir is a list of directories
if isinstance(basedir, list):
matching = []
for bdir in basedir:
# now find the files
LOGINFO('searching for %s light curves in %s ...' % (lcformat,
bdir))
if recursive is False:
matching.extend(glob.glob(os.path.join(bdir, fileglob)))
else:
# use recursive glob for Python 3.5+
if sys.version_info[:2] > (3,4):
matching.extend(glob.glob(os.path.join(bdir,
'**',
fileglob),
recursive=True))
# otherwise, use os.walk and glob
else:
# use os.walk to go through the directories
walker = os.walk(bdir)
for root, dirs, _files in walker:
for sdir in dirs:
searchpath = os.path.join(root,
sdir,
fileglob)
foundfiles = glob.glob(searchpath)
if foundfiles:
matching.extend(foundfiles)
# otherwise, handle the usual case of one basedir to search in
else:
# now find the files
LOGINFO('searching for %s light curves in %s ...' %
(lcformat, basedir))
if recursive is False:
matching = glob.glob(os.path.join(basedir, fileglob))
else:
# use recursive glob for Python 3.5+
if sys.version_info[:2] > (3,4):
matching = glob.glob(os.path.join(basedir,
'**',
fileglob),recursive=True)
# otherwise, use os.walk and glob
else:
# use os.walk to go through the directories
walker = os.walk(basedir)
matching = []
for root, dirs, _files in walker:
for sdir in dirs:
searchpath = os.path.join(root,
sdir,
fileglob)
foundfiles = glob.glob(searchpath)
if foundfiles:
matching.extend(foundfiles)
#
# now that we have all the files, process them
#
if matching and len(matching) > 0:
LOGINFO('found %s light curves' % len(matching))
# cut down matching to maxlcs
if maxlcs:
matching = matching[:maxlcs]
# prepare the output dict
lclistdict = {
'basedir':basedir,
'lcformat':lcformat,
'fileglob':fileglob,
'recursive':recursive,
'columns':columns,
'makecoordindex':makecoordindex,
'nfiles':len(matching),
'objects': {
}
}
# columns that will always be present in the output lclistdict
derefcols = ['lcfname']
derefcols.extend(['%s.ndet' % x.split('.')[-1] for x in lcndetkey])
for dc in derefcols:
lclistdict['objects'][dc] = []
# fill in the rest of the lclist columns from the columns kwarg
for col in columns:
# dereference the column
thiscol = col.split('.')
thiscol = thiscol[-1]
lclistdict['objects'][thiscol] = []
derefcols.append(thiscol)
# start collecting info
LOGINFO('collecting light curve info...')
tasks = [(x, columns, lcformat, lcformatdir, lcndetkey)
for x in matching]
with ProcessPoolExecutor(max_workers=nworkers) as executor:
results = executor.map(_lclist_parallel_worker, tasks)
results = [x for x in results]
# update the columns in the overall dict from the results of the
# parallel map
for result in results:
for xcol in derefcols:
lclistdict['objects'][xcol].append(result[xcol])
executor.shutdown()
# done with collecting info
# turn all of the lists in the lclistdict into arrays
for col in lclistdict['objects']:
lclistdict['objects'][col] = np.array(lclistdict['objects'][col])
# handle duplicate objectids with different light curves
uniques, counts = np.unique(lclistdict['objects']['objectid'],
return_counts=True)
duplicated_objectids = uniques[counts > 1]
if duplicated_objectids.size > 0:
# redo the objectid array so it has a bit larger dtype so the extra
# tag can fit into the field
dt = lclistdict['objects']['objectid'].dtype.str
dt = '<U%s' % (
int(dt.replace('<','').replace('U','').replace('S','')) + 3
)
lclistdict['objects']['objectid'] = np.array(
lclistdict['objects']['objectid'],
dtype=dt
)
for objid in duplicated_objectids:
objid_inds = np.where(
lclistdict['objects']['objectid'] == objid
)
# mark the duplicates, assume the first instance is the actual
# one
for ncounter, nind in enumerate(objid_inds[0][1:]):
lclistdict['objects']['objectid'][nind] = '%s-%s' % (
lclistdict['objects']['objectid'][nind],
ncounter+2
)
LOGWARNING(
'tagging duplicated instance %s of objectid: '
'%s as %s-%s, lightcurve: %s' %
(ncounter+2, objid, objid, ncounter+2,
lclistdict['objects']['lcfname'][nind])
)
# if we're supposed to make a spatial index, do so
if (makecoordindex and
isinstance(makecoordindex, (list, tuple)) and
len(makecoordindex) == 2):
try:
# deref the column names
racol, declcol = makecoordindex
racol = racol.split('.')[-1]
declcol = declcol.split('.')[-1]
# get the ras and decls
objra, objdecl = (lclistdict['objects'][racol],
lclistdict['objects'][declcol])
# get the xyz unit vectors from ra,decl
# since i had to remind myself:
# https://en.wikipedia.org/wiki/Equatorial_coordinate_system
cosdecl = np.cos(np.radians(objdecl))
sindecl = np.sin(np.radians(objdecl))
cosra = np.cos(np.radians(objra))
sinra = np.sin(np.radians(objra))
xyz = np.column_stack((cosra*cosdecl,sinra*cosdecl, sindecl))
# generate the kdtree
kdt = sps.cKDTree(xyz,copy_data=True)
# put the tree into the dict
lclistdict['kdtree'] = kdt
LOGINFO('kdtree generated for (ra, decl): (%s, %s)' %
(makecoordindex[0], makecoordindex[1]))
except Exception as e:
LOGEXCEPTION('could not make kdtree for (ra, decl): (%s, %s)' %
(makecoordindex[0], makecoordindex[1]))
raise
# generate the xy pairs if fieldfits is not None
if field_fitsfile and os.path.exists(field_fitsfile):
# read in the FITS file
if field_wcsfrom is None:
hdulist = pyfits.open(field_fitsfile)
hdr = hdulist[0].header
hdulist.close()
w = WCS(hdr)
wcsok = True
elif os.path.exists(field_wcsfrom):
w = WCS(field_wcsfrom)
wcsok = True
else:
LOGERROR('could not determine WCS info for input FITS: %s' %
field_fitsfile)
wcsok = False
if wcsok:
# first, transform the ra/decl to x/y and put these in the
# lclist output dict
# NOTE: objra and objdecl are set in the makecoordindex block above,
# so a coordinate index must have been requested for this to work
radecl = np.column_stack((objra, objdecl))
lclistdict['objects']['framexy'] = w.all_world2pix(
radecl,
1
)
# next, we'll make a PNG plot for the finder
finder_outfile = os.path.join(
os.path.dirname(outfile),
os.path.splitext(os.path.basename(outfile))[0] + '.png'
)
finder_png = fits_finder_chart(
field_fitsfile,
finder_outfile,
wcsfrom=field_wcsfrom,
scale=field_scale,
stretch=field_stretch,
colormap=field_colormap,
findersize=field_findersize,
overlay_ra=objra,
overlay_decl=objdecl,
overlay_pltopts=field_pltopts,
overlay_zoomcontain=field_zoomcontain,
grid=field_grid,
gridcolor=field_gridcolor
)
if finder_png is not None:
LOGINFO('generated a finder PNG '
'with an object position overlay '
'for this LC list: %s' % finder_png)
# write the pickle
with open(outfile,'wb') as outfd:
pickle.dump(lclistdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
LOGINFO('done. LC info -> %s' % outfile)
return outfile
else:
LOGERROR('no files found in %s matching %s' % (basedir, fileglob))
return None
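# A typical invocation of make_lclist looks like the following. The paths
# here are hypothetical; the lcformat key must already be registered with
# lcproc.register_lcformat.
lc_catalog = make_lclist(
    '/data/lightcurves',
    'my-lightcurve-catalog.pkl',
    lcformat='hat-sql',
    columns=['objectid',
             'objectinfo.ra',
             'objectinfo.decl',
             'objectinfo.ndet'],
    makecoordindex=('objectinfo.ra', 'objectinfo.decl'),
)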
def filter_lclist(lc_catalog,
objectidcol='objectid',
racol='ra',
declcol='decl',
xmatchexternal=None,
xmatchdistarcsec=3.0,
externalcolnums=(0,1,2),
externalcolnames=['objectid','ra','decl'],
externalcoldtypes='U20,f8,f8',
externalcolsep=None,
externalcommentchar='#',
conesearch=None,
conesearchworkers=1,
columnfilters=None,
field_fitsfile=None,
field_wcsfrom=None,
field_scale=ZScaleInterval(),
field_stretch=LinearStretch(),
field_colormap=plt.cm.gray_r,
field_findersize=None,
field_pltopts={'marker':'o',
'markersize':10.0,
'markerfacecolor':'none',
'markeredgewidth':2.0,
'markeredgecolor':'red'},
field_grid=False,
field_gridcolor='k',
field_zoomcontain=True,
copylcsto=None):
'''This is used to perform cone-search, cross-match, and column-filter
operations on a light curve catalog generated by `make_lclist`.
Uses the output of `make_lclist` above. This function returns a list of
light curves matching various criteria specified by the `xmatchexternal`,
`conesearch`, and `columnfilters` kwargs. Use this function to generate
input lists for other lcproc functions,
e.g. `lcproc.lcvfeatures.parallel_varfeatures`,
`lcproc.periodfinding.parallel_pf`, and `lcproc.lcbin.parallel_timebin`,
among others.
The operations are applied in this order if more than one is specified:
`xmatchexternal` -> `conesearch` -> `columnfilters`. All results from these
operations are joined using a logical AND operation.
Parameters
----------
lc_catalog : str
This is the path to the light curve catalog pickle file produced by
`make_lclist`.
objectidcol : str
This is the name of the object ID column in the light curve catalog.
racol : str
This is the name of the RA column in the light curve catalog.
declcol : str
This is the name of the Dec column in the light curve catalog.
xmatchexternal : str or None
If provided, this is the filename of a text file containing objectids,
ras and decs to match the objects in the light curve catalog to by their
positions.
xmatchdistarcsec : float
This is the distance in arcseconds to use when cross-matching to the
external catalog in `xmatchexternal`.
externalcolnums : sequence of int
This is a list of the zero-indexed column numbers of columns to extract
from the external catalog file.
externalcolnames : sequence of str
This is a list of names of columns that will be extracted from the
external catalog file. This is the same length as
`externalcolnums`. These must contain the names provided as the
`objectid`, `ra`, and `decl` column names so this function knows which
column numbers correspond to those columns and can use them to set up
the cross-match.
externalcoldtypes : str
This is a CSV string containing numpy dtype definitions for all columns
listed to extract from the external catalog file. The number of dtype
definitions should be equal to the number of columns to extract.
externalcolsep : str or None
The column separator to use when extracting columns from the external
catalog file. If None, any whitespace between columns is used as the
separator.
externalcommentchar : str
The character indicating that a line in the external catalog file is to
be ignored.
conesearch : list of float
This is used to specify cone-search parameters. It should be a three
element list:
[center_ra_deg, center_decl_deg, search_radius_deg]
conesearchworkers : int
The number of parallel workers to launch for the cone-search operation.
columnfilters : list of str
This is a list of strings indicating any filters to apply on each column
in the light curve catalog. All column filters are applied in the
specified sequence and are combined with a logical AND operator. The
format of each filter string should be:
'<lc_catalog column>|<operator>|<operand>'
where:
- <lc_catalog column> is a column in the lc_catalog pickle file
- <operator> is one of: 'lt', 'gt', 'le', 'ge', 'eq', 'ne', which
correspond to the usual operators: <, >, <=, >=, ==, != respectively.
- <operand> is a float, int, or string.
field_fitsfile : str or None
If this is not None, it should be the path to a FITS image containing
the objects these light curves are for. If this is provided,
`make_lclist` will use the WCS information in the FITS itself if
`field_wcsfrom` is None (or from a WCS header file pointed to by
`field_wcsfrom`) to obtain x and y pixel coordinates for all of the
objects in the field. A finder chart will also be made using
`astrobase.plotbase.fits_finder_chart` using the corresponding
`field_scale`, `_stretch`, `_colormap`, `_findersize`, `_pltopts`,
`_grid`, and `_gridcolor` kwargs for that function, reproduced here to
enable customization of the finder chart plot.
field_wcsfrom : str or None
If `wcsfrom` is None, the WCS to transform the RA/Dec to pixel x/y will
be taken from the FITS header of `fitsfile`. If this is not None, it
must be a FITS or similar file that contains a WCS header in its first
extension.
field_scale : astropy.visualization.Interval object
`scale` sets the normalization for the FITS pixel values. This is an
astropy.visualization Interval object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
field_stretch : astropy.visualization.Stretch object
`stretch` sets the stretch function for mapping FITS pixel values to
output pixel values. This is an astropy.visualization Stretch object.
See http://docs.astropy.org/en/stable/visualization/normalization.html
for details on `scale` and `stretch` objects.
field_colormap : matplotlib Colormap object
`colormap` is a matplotlib color map object to use for the output image.
field_findersize : None or tuple of two ints
If `findersize` is None, the output image size will be set by the NAXIS1
and NAXIS2 keywords in the input `fitsfile` FITS header. Otherwise,
`findersize` must be a tuple with the intended x and y size of the image
in inches (all output images will use a DPI = 100).
field_pltopts : dict
`field_pltopts` controls how the overlay points will be plotted. This is
a dict with standard matplotlib marker, etc. kwargs as key-val pairs,
e.g. 'markersize', 'markerfacecolor', etc. The default options make red
outline circles at the location of each object in the overlay.
field_grid : bool
`grid` sets if a grid will be made on the output image.
field_gridcolor : str
`gridcolor` sets the color of the grid lines. This is the usual matplotlib
color spec string.
field_zoomcontain : bool
`field_zoomcontain` controls if the finder chart will be zoomed to
just contain the overlayed points. Everything outside the footprint of
these points will be discarded.
copylcsto : str
If this is provided, it is interpreted as a target directory to which
all the light curves matching the specified conditions will be copied.
Returns
-------
tuple
Returns a two-element tuple: (matching_object_lcfiles,
matching_objectids) if conesearch and/or column filters are used. If
`xmatchexternal` is also used, a three-element tuple is returned:
(matching_object_lcfiles, matching_objectids,
extcat_matched_objectids).
'''
with open(lc_catalog,'rb') as infd:
lclist = pickle.load(infd)
# generate numpy arrays of the matching object indexes. we do it this way so
# we can AND everything at the end, instead of having to look up the objects
# at these indices and running the columnfilter on them
# NOTE: use the builtin bool dtype here; the np.bool alias is deprecated
# and was removed in newer numpy versions
xmatch_matching_index = np.full_like(lclist['objects'][objectidcol],
False,
dtype=bool)
conesearch_matching_index = np.full_like(lclist['objects'][objectidcol],
False,
dtype=bool)
# do the xmatch first
ext_matches = []
ext_matching_objects = []
if (xmatchexternal and
isinstance(xmatchexternal, str) and
os.path.exists(xmatchexternal)):
try:
# read in the external file
extcat = np.genfromtxt(xmatchexternal,
usecols=externalcolnums,
delimiter=externalcolsep,
names=externalcolnames,
dtype=externalcoldtypes,
comments=externalcommentchar)
ext_cosdecl = np.cos(np.radians(extcat['decl']))
ext_sindecl = np.sin(np.radians(extcat['decl']))
ext_cosra = np.cos(np.radians(extcat['ra']))
ext_sinra = np.sin(np.radians(extcat['ra']))
ext_xyz = np.column_stack((ext_cosra*ext_cosdecl,
ext_sinra*ext_cosdecl,
ext_sindecl))
ext_xyzdist = 2.0 * np.sin(np.radians(xmatchdistarcsec/3600.0)/2.0)
# get our kdtree
our_kdt = lclist['kdtree']
# get the external kdtree
ext_kdt = sps.cKDTree(ext_xyz)
# do a query_ball_tree
extkd_matchinds = ext_kdt.query_ball_tree(our_kdt, ext_xyzdist)
for extind, mind in enumerate(extkd_matchinds):
if len(mind) > 0:
ext_matches.append(mind[0])
# get the whole matching row for the ext objects recarray
ext_matching_objects.append(extcat[extind])
ext_matches = np.array(ext_matches)
if ext_matches.size > 0:
# update the xmatch_matching_index
xmatch_matching_index[ext_matches] = True
LOGINFO('xmatch: objects matched to %s within %.1f arcsec: %s' %
(xmatchexternal, xmatchdistarcsec, ext_matches.size))
else:
LOGERROR("xmatch: no objects were cross-matched to external "
"catalog spec: %s, can't continue" % xmatchexternal)
return None, None, None
except Exception as e:
LOGEXCEPTION('could not match to external catalog spec: %s' %
repr(xmatchexternal))
raise
# do the cone search next
if (conesearch and
isinstance(conesearch, (list, tuple)) and
len(conesearch) == 3):
try:
racenter, declcenter, searchradius = conesearch
cosdecl = np.cos(np.radians(declcenter))
sindecl = np.sin(np.radians(declcenter))
cosra = np.cos(np.radians(racenter))
sinra = np.sin(np.radians(racenter))
# this is the search distance in xyz unit vectors
xyzdist = 2.0 * np.sin(np.radians(searchradius)/2.0)
# get the kdtree
our_kdt = lclist['kdtree']
# look up the coordinates
kdtindices = our_kdt.query_ball_point([cosra*cosdecl,
sinra*cosdecl,
sindecl],
xyzdist,
n_jobs=conesearchworkers)
if kdtindices and len(kdtindices) > 0:
LOGINFO('cone search: objects within %.4f deg '
'of (%.3f, %.3f): %s' %
(searchradius, racenter, declcenter, len(kdtindices)))
# update the conesearch_matching_index
matchingind = kdtindices
conesearch_matching_index[np.array(matchingind)] = True
# we fail immediately if we found nothing. this assumes the user
# cares more about the cone-search than the regular column filters
else:
LOGERROR("cone-search: no objects were found within "
"%.4f deg of (%.3f, %.3f): %s, can't continue" %
(searchradius, racenter, declcenter, len(kdtindices)))
return None, None
except Exception as e:
LOGEXCEPTION('cone-search: could not run a cone-search, '
'is there a kdtree present in %s?' % lc_catalog)
raise
# now that we're done with cone-search, do the column filtering
allfilterinds = []
if columnfilters and isinstance(columnfilters, list):
# go through each filter
for cfilt in columnfilters:
try:
fcol, foperator, foperand = cfilt.split('|')
foperator = FILTEROPS[foperator]
# generate the evalstring
filterstr = (
"np.isfinite(lclist['objects']['%s']) & "
"(lclist['objects']['%s'] %s %s)"
) % (fcol, fcol, foperator, foperand)
filterind = eval(filterstr)
ngood = lclist['objects'][objectidcol][filterind].size
LOGINFO('filter: %s -> objects matching: %s ' % (cfilt, ngood))
allfilterinds.append(filterind)
except Exception as e:
LOGEXCEPTION('filter: could not understand filter spec: %s'
% cfilt)
LOGWARNING('filter: not applying this broken filter')
# now that we have all the filter indices good to go
# logical-AND all the things
# make sure we only do filtering if we were told to do so
if (xmatchexternal or conesearch or columnfilters):
filterstack = []
if xmatchexternal:
filterstack.append(xmatch_matching_index)
if conesearch:
filterstack.append(conesearch_matching_index)
if columnfilters:
filterstack.extend(allfilterinds)
finalfilterind = np.column_stack(filterstack)
finalfilterind = np.all(finalfilterind, axis=1)
# get the filtered object light curves and object names
filteredobjectids = lclist['objects'][objectidcol][finalfilterind]
filteredlcfnames = lclist['objects']['lcfname'][finalfilterind]
else:
filteredobjectids = lclist['objects'][objectidcol]
filteredlcfnames = lclist['objects']['lcfname']
# nothing was filtered in this case, so all objects match; this also
# keeps finalfilterind defined for the finder chart step below
finalfilterind = np.full(filteredobjectids.size, True, dtype=bool)
# if we're told to make a finder chart with the selected objects
if field_fitsfile is not None and os.path.exists(field_fitsfile):
# get the RA and DEC of the matching objects
matching_ra = lclist['objects'][racol][finalfilterind]
matching_decl = lclist['objects'][declcol][finalfilterind]
matching_postfix = []
if xmatchexternal is not None:
matching_postfix.append(
'xmatch_%s' %
os.path.splitext(os.path.basename(xmatchexternal))[0]
)
if conesearch is not None:
matching_postfix.append('conesearch_RA%.3f_DEC%.3f_RAD%.5f' %
tuple(conesearch))
if columnfilters is not None:
for cfi, cf in enumerate(columnfilters):
if cfi == 0:
matching_postfix.append('filter_%s_%s_%s' %
tuple(cf.split('|')))
else:
matching_postfix.append('_and_%s_%s_%s' %
tuple(cf.split('|')))
if len(matching_postfix) > 0:
matching_postfix = '-%s' % '_'.join(matching_postfix)
else:
matching_postfix = ''
# next, we'll make a PNG plot for the finder
finder_outfile = os.path.join(
os.path.dirname(lc_catalog),
'%s%s.png' %
(os.path.splitext(os.path.basename(lc_catalog))[0],
matching_postfix)
)
finder_png = fits_finder_chart(
field_fitsfile,
finder_outfile,
wcsfrom=field_wcsfrom,
scale=field_scale,
stretch=field_stretch,
colormap=field_colormap,
findersize=field_findersize,
overlay_ra=matching_ra,
overlay_decl=matching_decl,
overlay_pltopts=field_pltopts,
overlay_zoomcontain=field_zoomcontain,
grid=field_grid,
gridcolor=field_gridcolor
)
if finder_png is not None:
LOGINFO('generated a finder PNG '
'with an object position overlay '
'for this filtered LC list: %s' % finder_png)
# if copylcsto is not None, copy LCs over to it
if copylcsto is not None:
if not os.path.exists(copylcsto):
os.mkdir(copylcsto)
if TQDM:
lciter = tqdm(filteredlcfnames)
else:
lciter = filteredlcfnames
LOGINFO('copying matching light curves to %s' % copylcsto)
for lc in lciter:
shutil.copy(lc, copylcsto)
LOGINFO('done. objects matching all filters: %s' % filteredobjectids.size)
if xmatchexternal and len(ext_matching_objects) > 0:
return filteredlcfnames, filteredobjectids, ext_matching_objects
else:
return filteredlcfnames, filteredobjectids
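# A usage sketch for filter_lclist. Angular search radii are converted to
# kdtree chord distances via 2*sin(theta/2) internally, as above. The
# column names in the filter strings below are hypothetical and must
# exist as columns in your catalog.
lcfnames, objectids = filter_lclist(
    'my-lightcurve-catalog.pkl',
    conesearch=[270.0, -30.0, 5.0],
    columnfilters=['sdssr|lt|13.0', 'ndet|gt|1000'],
)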
def _cpinfo_key_worker(task):
'''This wraps `checkplotlist.checkplot_infokey_worker`.
This is used to get the correct dtype for each element in retrieved results.
Parameters
----------
task : tuple
task[0] = cpfile
task[1] = keyspeclist (infokeys kwarg from `add_cpinfo_to_lclist`)
Returns
-------
dict
All of the requested keys from the checkplot are returned along with
their values in a dict.
'''
cpfile, keyspeclist = task
keystoget = [x[0] for x in keyspeclist]
nonesubs = [x[-2] for x in keyspeclist]
nansubs = [x[-1] for x in keyspeclist]
# reform the keystoget into a list of lists
for i, k in enumerate(keystoget):
thisk = k.split('.')
if sys.version_info[:2] < (3,4):
thisk = [(int(x) if x.isdigit() else x) for x in thisk]
else:
thisk = [(int(x) if x.isdecimal() else x) for x in thisk]
keystoget[i] = thisk
# add in the objectid as well to match to the object catalog later
keystoget.insert(0,['objectid'])
nonesubs.insert(0, '')
nansubs.insert(0,'')
# get all the keys we need
vals = checkplot_infokey_worker((cpfile, keystoget))
# if they have some Nones, nans, etc., reform them as expected
for val, nonesub, nansub, valind in zip(vals, nonesubs,
nansubs, range(len(vals))):
if val is None:
outval = nonesub
elif isinstance(val, float) and not np.isfinite(val):
outval = nansub
elif isinstance(val, (list, tuple)):
outval = ', '.join(val)
else:
outval = val
vals[valind] = outval
return vals
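# checkplot_infokey_worker is imported from the checkplotlist module. A
# minimal stand-in with the same call convention (a sketch; the real
# function may also handle gzipped checkplot pickles) might look like:
import pickle

def checkplot_infokey_worker_sketch(task):
    '''Loads a checkplot pickle and dereferences each key path in keystoget.'''
    cpfile, keystoget = task
    with open(cpfile, 'rb') as infd:
        cpd = pickle.load(infd)
    vals = []
    for keypath in keystoget:
        obj = cpd
        try:
            for k in keypath:
                # k is either a dict key (str) or a list index (int)
                obj = obj[k]
        except Exception:
            obj = None
        vals.append(obj)
    return vals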
def add_cpinfo_to_lclist(
checkplots, # list or a directory path
initial_lc_catalog,
magcol, # to indicate checkplot magcol
outfile,
checkplotglob='checkplot*.pkl*',
infokeys=CPINFO_DEFAULTKEYS,
nworkers=NCPUS
):
'''This adds checkplot info to the initial light curve catalogs generated by
`make_lclist`.
This is used to incorporate all the extra info checkplots can have for
objects back into columns in the light curve catalog produced by
`make_lclist`. Objects are matched between the checkplots and the light
curve catalog using their `objectid`. This then allows one to search this
'augmented' light curve catalog by these extra columns. The 'augmented'
light curve catalog also forms the basis for the search interface provided by
the LCC-Server.
The default list of keys that will be extracted from a checkplot and added
as columns in the initial light curve catalog is listed above in the
`CPINFO_DEFAULTKEYS` list.
Parameters
----------
checkplots : str or list
If this is a str, it is interpreted as a directory that will be searched
for checkplot pickle files using `checkplotglob`. If this is a list, it
will be interpreted as a list of checkplot pickle files to process.
initial_lc_catalog : str
This is the path to the light curve catalog pickle made by
`make_lclist`.
magcol : str
This is used to indicate the light curve magnitude column to extract
magnitude column specific information. For example, Stetson variability
indices can be generated using magnitude measurements in separate
photometric apertures, which appear in separate `magcols` in the
checkplot. To associate each such feature of the object with its
specific `magcol`, pass that `magcol` in here. This `magcol` will then
be added as a prefix to the resulting column in the 'augmented' LC
catalog, e.g. Stetson J will appear as `magcol1_stetsonj` and
`magcol2_stetsonj` for two separate magcols.
outfile : str
This is the file name of the output 'augmented' light curve catalog
pickle file that will be written.
infokeys : list of tuples
This is a list of keys to extract from the checkplot and some info on
how this extraction is to be done. Each key entry is a six-element
tuple of the following form:
- key name in the checkplot
- numpy dtype of the value of this key
- False if the key is associated with a magcol, True if it is a
first-level (object-level) key
- False if subsequent updates to the same column name will append to
existing key values in the output augmented light curve catalog or
True if these will overwrite the existing key value
- character to use to substitute a None value of the key in the
checkplot in the output light curve catalog column
- character to use to substitute a nan value of the key in the
checkplot in the output light curve catalog column
See the `CPINFO_DEFAULTKEYS` list above for examples.
nworkers : int
The number of parallel workers to launch to extract checkplot
information.
Returns
-------
str
Returns the path to the generated 'augmented' light curve catalog pickle
file.
'''
# get the checkplots from the directory if one is provided
if not isinstance(checkplots, list) and os.path.exists(checkplots):
checkplots = sorted(glob.glob(os.path.join(checkplots, checkplotglob)))
tasklist = [(cpf, infokeys) for cpf in checkplots]
with ProcessPoolExecutor(max_workers=nworkers) as executor:
resultfutures = executor.map(_cpinfo_key_worker, tasklist)
results = [x for x in resultfutures]
executor.shutdown()
# now that we have all the checkplot info, we need to match to the
# objectlist in the lclist
# open the lclist
with open(initial_lc_catalog,'rb') as infd:
lc_catalog = pickle.load(infd)
catalog_objectids = np.array(lc_catalog['objects']['objectid'])
checkplot_objectids = np.array([x[0] for x in results])
# add the extra key arrays in the lclist dict
extrainfokeys = []
actualkeys = []
# set up the extrainfokeys list
for keyspec in infokeys:
key, dtype, firstlevel, overwrite_append, nonesub, nansub = keyspec
if firstlevel:
eik = key
else:
eik = '%s.%s' % (magcol, key)
extrainfokeys.append(eik)
# now handle the output dicts and column list
eactual = eik.split('.')
# this handles dereferenced list indices
if not eactual[-1].isdigit():
if not firstlevel:
eactual = '.'.join([eactual[0], eactual[-1]])
else:
eactual = eactual[-1]
else:
elastkey = eactual[-2]
# for list columns, this converts stuff like errs -> err,
# and parallaxes -> parallax
if elastkey.endswith('es'):
elastkey = elastkey[:-2]
elif elastkey.endswith('s'):
elastkey = elastkey[:-1]
if not firstlevel:
eactual = '.'.join([eactual[0], elastkey])
else:
eactual = elastkey
actualkeys.append(eactual)
# add a new column only if required
if eactual not in lc_catalog['columns']:
lc_catalog['columns'].append(eactual)
# we'll overwrite earlier existing columns in any case
lc_catalog['objects'][eactual] = []
# now go through each objectid in the catalog and add the extra keys to
# their respective arrays
for catobj in tqdm(catalog_objectids):
cp_objind = np.where(checkplot_objectids == catobj)
if len(cp_objind[0]) > 0:
# get the info line for this checkplot
thiscpinfo = results[cp_objind[0][0]]
# the first element is the objectid which we remove
thiscpinfo = thiscpinfo[1:]
# update the object catalog entries for this object
for ekind, ek in enumerate(actualkeys):
# add the actual thing to the output list
lc_catalog['objects'][ek].append(
thiscpinfo[ekind]
)
else:
# update the object catalog entries for this object
for ekind, ek in enumerate(actualkeys):
thiskeyspec = infokeys[ekind]
nonesub = thiskeyspec[-2]
lc_catalog['objects'][ek].append(
nonesub
)
# now we should have all the new keys in the object catalog
# turn them into arrays
for ek in actualkeys:
lc_catalog['objects'][ek] = np.array(
lc_catalog['objects'][ek]
)
# add the magcol to the lc_catalog
if 'magcols' in lc_catalog:
if magcol not in lc_catalog['magcols']:
lc_catalog['magcols'].append(magcol)
else:
lc_catalog['magcols'] = [magcol]
# write back the new object catalog
with open(outfile, 'wb') as outfd:
pickle.dump(lc_catalog, outfd, protocol=pickle.HIGHEST_PROTOCOL)
return outfile
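# For reference, infokeys entries following the six-element tuple format
# documented above might look like this. These specific keys and
# substitution values are illustrative sketches, not entries taken from
# CPINFO_DEFAULTKEYS.
import numpy as np

example_infokeys = [
    # (checkplot key, dtype, first-level?, overwrite?, None-sub, nan-sub)
    ('objectinfo.ndet', np.int64, True, True, -1, -1),
    ('varinfo.features.stetsonj', np.float64, False, True, '', ''),
]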
def variability_threshold(featuresdir,
outfile,
magbins=DEFAULT_MAGBINS,
maxobjects=None,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
min_lcmad_stdev=5.0,
min_stetj_stdev=2.0,
min_iqr_stdev=2.0,
min_inveta_stdev=2.0,
verbose=True):
'''This generates a list of objects with Stetson J, IQR, and 1.0/eta
above some threshold value to select them as potential variable stars.
Use this to pare down the objects to review and put through
period-finding. This does the thresholding per magnitude bin; this should be
better than one single cut through the entire magnitude range. Set the
magnitude bins using the magbins kwarg.
FIXME: implement a voting classifier here. this will choose variables based
on the thresholds in IQR, stetson, and inveta based on weighting carried
over from the variability recovery sims.
Parameters
----------
featuresdir : str
This is the directory containing variability feature pickles created by
:py:func:`astrobase.lcproc.lcpfeatures.parallel_varfeatures` or similar.
outfile : str
This is the output pickle file that will contain all the threshold
information.
magbins : np.array of floats
This sets the magnitude bins to use for calculating thresholds.
maxobjects : int or None
This is the number of objects to process. If None, all objects with
feature pickles in `featuresdir` will be processed.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the thresholds.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the thresholds.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the thresholds.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
min_lcmad_stdev,min_stetj_stdev,min_iqr_stdev,min_inveta_stdev : float or np.array
These are the standard deviation multipliers for the distributions of
light curve standard deviation, Stetson J variability index, the light
curve interquartile range, and 1/eta variability index
respectively. These multipliers set the minimum values of these measures
to use for selecting variable stars. If provided as floats, the same
value will be used for all magbins. If provided as np.arrays of `size =
magbins.size - 1`, will be used to apply possibly different sigma cuts
for each magbin.
verbose : bool
If True, will report progress and warn about any problems.
Returns
-------
dict
Contains all of the variability threshold information along with indices
into the array of the object IDs chosen as variables.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
# list of input pickles generated by varfeatures functions above
pklist = glob.glob(os.path.join(featuresdir, 'varfeatures-*.pkl'))
if maxobjects:
pklist = pklist[:maxobjects]
allobjects = {}
for magcol in magcols:
# keep local copies of these so we can fix them independently in case of
# nans
if (isinstance(min_stetj_stdev, list) or
isinstance(min_stetj_stdev, np.ndarray)):
magcol_min_stetj_stdev = min_stetj_stdev[::]
else:
magcol_min_stetj_stdev = min_stetj_stdev
if (isinstance(min_iqr_stdev, list) or
isinstance(min_iqr_stdev, np.ndarray)):
magcol_min_iqr_stdev = min_iqr_stdev[::]
else:
magcol_min_iqr_stdev = min_iqr_stdev
if (isinstance(min_inveta_stdev, list) or
isinstance(min_inveta_stdev, np.ndarray)):
magcol_min_inveta_stdev = min_inveta_stdev[::]
else:
magcol_min_inveta_stdev = min_inveta_stdev
LOGINFO('getting all object sdssr, LC MAD, stet J, IQR, eta...')
# we'll calculate the sigma per magnitude bin, so get the mags as well
allobjects[magcol] = {
'objectid':[],
'sdssr':[],
'lcmad':[],
'stetsonj':[],
'iqr':[],
'eta':[]
}
# fancy progress bar with tqdm if present
if TQDM and verbose:
listiterator = tqdm(pklist)
else:
listiterator = pklist
for pkl in listiterator:
with open(pkl,'rb') as infd:
thisfeatures = pickle.load(infd)
objectid = thisfeatures['objectid']
# the object magnitude
if ('info' in thisfeatures and
thisfeatures['info'] and
'sdssr' in thisfeatures['info']):
if (thisfeatures['info']['sdssr'] and
thisfeatures['info']['sdssr'] > 3.0):
sdssr = thisfeatures['info']['sdssr']
elif (magcol in thisfeatures and
thisfeatures[magcol] and
'median' in thisfeatures[magcol] and
thisfeatures[magcol]['median'] > 3.0):
sdssr = thisfeatures[magcol]['median']
elif (thisfeatures['info']['jmag'] and
thisfeatures['info']['hmag'] and
thisfeatures['info']['kmag']):
sdssr = jhk_to_sdssr(thisfeatures['info']['jmag'],
thisfeatures['info']['hmag'],
thisfeatures['info']['kmag'])
else:
sdssr = np.nan
else:
sdssr = np.nan
# the MAD of the light curve
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['mad']):
lcmad = thisfeatures[magcol]['mad']
else:
lcmad = np.nan
# stetson index
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['stetsonj']):
stetsonj = thisfeatures[magcol]['stetsonj']
else:
stetsonj = np.nan
# IQR
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['mag_iqr']):
iqr = thisfeatures[magcol]['mag_iqr']
else:
iqr = np.nan
# eta
if (magcol in thisfeatures and
thisfeatures[magcol] and
thisfeatures[magcol]['eta_normal']):
eta = thisfeatures[magcol]['eta_normal']
else:
eta = np.nan
allobjects[magcol]['objectid'].append(objectid)
allobjects[magcol]['sdssr'].append(sdssr)
allobjects[magcol]['lcmad'].append(lcmad)
allobjects[magcol]['stetsonj'].append(stetsonj)
allobjects[magcol]['iqr'].append(iqr)
allobjects[magcol]['eta'].append(eta)
#
# done with collection of info
#
LOGINFO('finding objects above thresholds per magbin...')
# turn the info into arrays
allobjects[magcol]['objectid'] = np.ravel(np.array(
allobjects[magcol]['objectid']
))
allobjects[magcol]['sdssr'] = np.ravel(np.array(
allobjects[magcol]['sdssr']
))
allobjects[magcol]['lcmad'] = np.ravel(np.array(
allobjects[magcol]['lcmad']
))
allobjects[magcol]['stetsonj'] = np.ravel(np.array(
allobjects[magcol]['stetsonj']
))
allobjects[magcol]['iqr'] = np.ravel(np.array(
allobjects[magcol]['iqr']
))
allobjects[magcol]['eta'] = np.ravel(np.array(
allobjects[magcol]['eta']
))
# only get finite elements everywhere
thisfinind = (
np.isfinite(allobjects[magcol]['sdssr']) &
np.isfinite(allobjects[magcol]['lcmad']) &
np.isfinite(allobjects[magcol]['stetsonj']) &
np.isfinite(allobjects[magcol]['iqr']) &
np.isfinite(allobjects[magcol]['eta'])
)
allobjects[magcol]['objectid'] = allobjects[magcol]['objectid'][
thisfinind
]
allobjects[magcol]['sdssr'] = allobjects[magcol]['sdssr'][thisfinind]
allobjects[magcol]['lcmad'] = allobjects[magcol]['lcmad'][thisfinind]
allobjects[magcol]['stetsonj'] = allobjects[magcol]['stetsonj'][
thisfinind
]
allobjects[magcol]['iqr'] = allobjects[magcol]['iqr'][thisfinind]
allobjects[magcol]['eta'] = allobjects[magcol]['eta'][thisfinind]
# invert eta so we can threshold the same way as the others
allobjects[magcol]['inveta'] = 1.0/allobjects[magcol]['eta']
# do the thresholding by magnitude bin
magbininds = np.digitize(allobjects[magcol]['sdssr'],
magbins)
binned_objectids = []
binned_sdssr = []
binned_sdssr_median = []
binned_lcmad = []
binned_stetsonj = []
binned_iqr = []
binned_inveta = []
binned_count = []
binned_objectids_thresh_stetsonj = []
binned_objectids_thresh_iqr = []
binned_objectids_thresh_inveta = []
binned_objectids_thresh_all = []
binned_lcmad_median = []
binned_lcmad_stdev = []
binned_stetsonj_median = []
binned_stetsonj_stdev = []
binned_inveta_median = []
binned_inveta_stdev = []
binned_iqr_median = []
binned_iqr_stdev = []
# go through all the mag bins and get the thresholds for J, inveta, IQR
for mbinind, magi in zip(np.unique(magbininds),
range(len(magbins)-1)):
thisbinind = np.where(magbininds == mbinind)
thisbin_sdssr_median = (magbins[magi] + magbins[magi+1])/2.0
binned_sdssr_median.append(thisbin_sdssr_median)
thisbin_objectids = allobjects[magcol]['objectid'][thisbinind]
thisbin_sdssr = allobjects[magcol]['sdssr'][thisbinind]
thisbin_lcmad = allobjects[magcol]['lcmad'][thisbinind]
thisbin_stetsonj = allobjects[magcol]['stetsonj'][thisbinind]
thisbin_iqr = allobjects[magcol]['iqr'][thisbinind]
thisbin_inveta = allobjects[magcol]['inveta'][thisbinind]
thisbin_count = thisbin_objectids.size
if thisbin_count > 4:
thisbin_lcmad_median = np.median(thisbin_lcmad)
thisbin_lcmad_stdev = np.median(
np.abs(thisbin_lcmad - thisbin_lcmad_median)
) * 1.483
binned_lcmad_median.append(thisbin_lcmad_median)
binned_lcmad_stdev.append(thisbin_lcmad_stdev)
thisbin_stetsonj_median = np.median(thisbin_stetsonj)
thisbin_stetsonj_stdev = np.median(
np.abs(thisbin_stetsonj - thisbin_stetsonj_median)
) * 1.483
binned_stetsonj_median.append(thisbin_stetsonj_median)
binned_stetsonj_stdev.append(thisbin_stetsonj_stdev)
# now get the objects above the required stdev threshold
if isinstance(magcol_min_stetj_stdev, float):
thisbin_objectids_thresh_stetsonj = thisbin_objectids[
thisbin_stetsonj > (
thisbin_stetsonj_median +
magcol_min_stetj_stdev*thisbin_stetsonj_stdev
)
]
elif (isinstance(magcol_min_stetj_stdev, np.ndarray) or
isinstance(magcol_min_stetj_stdev, list)):
thisbin_min_stetj_stdev = magcol_min_stetj_stdev[magi]
if not np.isfinite(thisbin_min_stetj_stdev):
LOGWARNING('provided threshold stetson J stdev '
'for magbin: %.3f is nan, using 2.0' %
thisbin_sdssr_median)
thisbin_min_stetj_stdev = 2.0
# update the input list/array as well, since we'll be
# saving it to the output dict and using it to plot the
# variability thresholds
magcol_min_stetj_stdev[magi] = 2.0
thisbin_objectids_thresh_stetsonj = thisbin_objectids[
thisbin_stetsonj > (
thisbin_stetsonj_median +
thisbin_min_stetj_stdev*thisbin_stetsonj_stdev
)
]
thisbin_iqr_median = np.median(thisbin_iqr)
thisbin_iqr_stdev = np.median(
np.abs(thisbin_iqr - thisbin_iqr_median)
) * 1.483
binned_iqr_median.append(thisbin_iqr_median)
binned_iqr_stdev.append(thisbin_iqr_stdev)
# get the objects above the required stdev threshold
if isinstance(magcol_min_iqr_stdev, float):
thisbin_objectids_thresh_iqr = thisbin_objectids[
thisbin_iqr > (thisbin_iqr_median +
magcol_min_iqr_stdev*thisbin_iqr_stdev)
]
elif (isinstance(magcol_min_iqr_stdev, np.ndarray) or
isinstance(magcol_min_iqr_stdev, list)):
thisbin_min_iqr_stdev = magcol_min_iqr_stdev[magi]
if not np.isfinite(thisbin_min_iqr_stdev):
LOGWARNING('provided threshold IQR stdev '
'for magbin: %.3f is nan, using 2.0' %
thisbin_sdssr_median)
thisbin_min_iqr_stdev = 2.0
# update the input list/array as well, since we'll be
# saving it to the output dict and using it to plot the
# variability thresholds
magcol_min_iqr_stdev[magi] = 2.0
thisbin_objectids_thresh_iqr = thisbin_objectids[
thisbin_iqr > (thisbin_iqr_median +
thisbin_min_iqr_stdev*thisbin_iqr_stdev)
]
thisbin_inveta_median = np.median(thisbin_inveta)
thisbin_inveta_stdev = np.median(
np.abs(thisbin_inveta - thisbin_inveta_median)
) * 1.483
binned_inveta_median.append(thisbin_inveta_median)
binned_inveta_stdev.append(thisbin_inveta_stdev)
if isinstance(magcol_min_inveta_stdev, float):
thisbin_objectids_thresh_inveta = thisbin_objectids[
thisbin_inveta > (
thisbin_inveta_median +
magcol_min_inveta_stdev*thisbin_inveta_stdev
)
]
elif (isinstance(magcol_min_inveta_stdev, np.ndarray) or
isinstance(magcol_min_inveta_stdev, list)):
thisbin_min_inveta_stdev = magcol_min_inveta_stdev[magi]
if not np.isfinite(thisbin_min_inveta_stdev):
LOGWARNING('provided threshold inveta stdev '
'for magbin: %.3f is nan, using 2.0' %
thisbin_sdssr_median)
thisbin_min_inveta_stdev = 2.0
# update the input list/array as well, since we'll be
# saving it to the output dict and using it to plot the
# variability thresholds
magcol_min_inveta_stdev[magi] = 2.0
thisbin_objectids_thresh_inveta = thisbin_objectids[
thisbin_inveta > (
thisbin_inveta_median +
thisbin_min_inveta_stdev*thisbin_inveta_stdev
)
]
else:
# np.unicode_ was removed in NumPy 2.0; np.str_ is the equivalent
thisbin_objectids_thresh_stetsonj = (
np.array([], dtype=np.str_)
)
thisbin_objectids_thresh_iqr = (
np.array([], dtype=np.str_)
)
thisbin_objectids_thresh_inveta = (
np.array([], dtype=np.str_)
)
#
# done with check for enough objects in the bin
#
# get the intersection of all threshold objects to get objects that
# lie above the threshold for all variable indices
thisbin_objectids_thresh_all = reduce(
np.intersect1d,
(thisbin_objectids_thresh_stetsonj,
thisbin_objectids_thresh_iqr,
thisbin_objectids_thresh_inveta)
)
binned_objectids.append(thisbin_objectids)
binned_sdssr.append(thisbin_sdssr)
binned_lcmad.append(thisbin_lcmad)
binned_stetsonj.append(thisbin_stetsonj)
binned_iqr.append(thisbin_iqr)
binned_inveta.append(thisbin_inveta)
binned_count.append(thisbin_objectids.size)
binned_objectids_thresh_stetsonj.append(
thisbin_objectids_thresh_stetsonj
)
binned_objectids_thresh_iqr.append(
thisbin_objectids_thresh_iqr
)
binned_objectids_thresh_inveta.append(
thisbin_objectids_thresh_inveta
)
binned_objectids_thresh_all.append(
thisbin_objectids_thresh_all
)
#
# done with magbins
#
# update the output dict for this magcol
allobjects[magcol]['magbins'] = magbins
allobjects[magcol]['binned_objectids'] = binned_objectids
allobjects[magcol]['binned_sdssr_median'] = binned_sdssr_median
allobjects[magcol]['binned_sdssr'] = binned_sdssr
allobjects[magcol]['binned_count'] = binned_count
allobjects[magcol]['binned_lcmad'] = binned_lcmad
allobjects[magcol]['binned_lcmad_median'] = binned_lcmad_median
allobjects[magcol]['binned_lcmad_stdev'] = binned_lcmad_stdev
allobjects[magcol]['binned_stetsonj'] = binned_stetsonj
allobjects[magcol]['binned_stetsonj_median'] = binned_stetsonj_median
allobjects[magcol]['binned_stetsonj_stdev'] = binned_stetsonj_stdev
allobjects[magcol]['binned_iqr'] = binned_iqr
allobjects[magcol]['binned_iqr_median'] = binned_iqr_median
allobjects[magcol]['binned_iqr_stdev'] = binned_iqr_stdev
allobjects[magcol]['binned_inveta'] = binned_inveta
allobjects[magcol]['binned_inveta_median'] = binned_inveta_median
allobjects[magcol]['binned_inveta_stdev'] = binned_inveta_stdev
allobjects[magcol]['binned_objectids_thresh_stetsonj'] = (
binned_objectids_thresh_stetsonj
)
allobjects[magcol]['binned_objectids_thresh_iqr'] = (
binned_objectids_thresh_iqr
)
allobjects[magcol]['binned_objectids_thresh_inveta'] = (
binned_objectids_thresh_inveta
)
allobjects[magcol]['binned_objectids_thresh_all'] = (
binned_objectids_thresh_all
)
# get the common selected objects thru all measures
try:
allobjects[magcol]['objectids_all_thresh_all_magbins'] = np.unique(
np.concatenate(
allobjects[magcol]['binned_objectids_thresh_all']
)
)
except ValueError:
LOGWARNING('not enough variable objects matching all thresholds')
allobjects[magcol]['objectids_all_thresh_all_magbins'] = (
np.array([])
)
allobjects[magcol]['objectids_stetsonj_thresh_all_magbins'] = np.unique(
np.concatenate(
allobjects[magcol]['binned_objectids_thresh_stetsonj']
)
)
allobjects[magcol]['objectids_inveta_thresh_all_magbins'] = np.unique(
np.concatenate(allobjects[magcol]['binned_objectids_thresh_inveta'])
)
allobjects[magcol]['objectids_iqr_thresh_all_magbins'] = np.unique(
np.concatenate(allobjects[magcol]['binned_objectids_thresh_iqr'])
)
# turn these into np.arrays for easier plotting if they're lists
if isinstance(min_stetj_stdev, list):
allobjects[magcol]['min_stetj_stdev'] = np.array(
magcol_min_stetj_stdev
)
else:
allobjects[magcol]['min_stetj_stdev'] = magcol_min_stetj_stdev
if isinstance(min_iqr_stdev, list):
allobjects[magcol]['min_iqr_stdev'] = np.array(
magcol_min_iqr_stdev
)
else:
allobjects[magcol]['min_iqr_stdev'] = magcol_min_iqr_stdev
if isinstance(min_inveta_stdev, list):
allobjects[magcol]['min_inveta_stdev'] = np.array(
magcol_min_inveta_stdev
)
else:
allobjects[magcol]['min_inveta_stdev'] = magcol_min_inveta_stdev
# this one doesn't get touched (for now)
allobjects[magcol]['min_lcmad_stdev'] = min_lcmad_stdev
#
# done with all magcols
#
allobjects['magbins'] = magbins
with open(outfile,'wb') as outfd:
pickle.dump(allobjects, outfd, protocol=pickle.HIGHEST_PROTOCOL)
return allobjects
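# The per-bin thresholding above repeatedly uses a robust sigma estimate,
# 1.483 times the median absolute deviation (MAD). A compact sketch of
# that core selection step:
import numpy as np

def robust_sigma(vals):
    '''MAD-based standard deviation estimate: sigma ~ 1.483 * MAD.'''
    med = np.median(vals)
    return 1.483*np.median(np.abs(vals - med))

def above_threshold(objectids, vals, min_stdev=2.0):
    '''Returns the object IDs with vals > median + min_stdev*sigma.'''
    med = np.median(vals)
    return objectids[vals > (med + min_stdev*robust_sigma(vals))]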
def plot_variability_thresholds(varthreshpkl,
xmin_lcmad_stdev=5.0,
xmin_stetj_stdev=2.0,
xmin_iqr_stdev=2.0,
xmin_inveta_stdev=2.0,
lcformat='hat-sql',
lcformatdir=None,
magcols=None):
'''This makes plots for the variability threshold distributions.
Parameters
----------
varthreshpkl : str
The pickle produced by the function above.
xmin_lcmad_stdev,xmin_stetj_stdev,xmin_iqr_stdev,xmin_inveta_stdev : float or np.array
Threshold values that override the ones stored in `varthreshpkl`. If
provided, the thresholds will be plotted accordingly instead of using
the ones in the input pickle directly.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
magcols : list of str or None
The magcol keys to use from the lcdict.
Returns
-------
list of str
The file names of the threshold plots generated, one per `magcol`.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
if magcols is None:
magcols = dmagcols
with open(varthreshpkl,'rb') as infd:
allobjects = pickle.load(infd)
magbins = allobjects['magbins']
# collect the plot file names so they can be returned at the end
plotfiles = []
for magcol in magcols:
min_lcmad_stdev = (
xmin_lcmad_stdev or allobjects[magcol]['min_lcmad_stdev']
)
min_stetj_stdev = (
xmin_stetj_stdev or allobjects[magcol]['min_stetj_stdev']
)
min_iqr_stdev = (
xmin_iqr_stdev or allobjects[magcol]['min_iqr_stdev']
)
min_inveta_stdev = (
xmin_inveta_stdev or allobjects[magcol]['min_inveta_stdev']
)
fig = plt.figure(figsize=(20,16))
# the mag vs lcmad
plt.subplot(221)
plt.plot(allobjects[magcol]['sdssr'],
allobjects[magcol]['lcmad']*1.483,
marker='.',ms=1.0, linestyle='none',
rasterized=True)
plt.plot(allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_lcmad_median'])*1.483,
linewidth=3.0)
plt.plot(
allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_lcmad_median'])*1.483 +
min_lcmad_stdev*np.array(
allobjects[magcol]['binned_lcmad_stdev']
),
linewidth=3.0, linestyle='dashed'
)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.xlabel('SDSS r')
plt.ylabel(r'lightcurve RMS (MAD $\times$ 1.483)')
plt.title('%s - SDSS r vs. light curve RMS' % magcol)
plt.yscale('log')
plt.tight_layout()
# the mag vs stetsonj
plt.subplot(222)
plt.plot(allobjects[magcol]['sdssr'],
allobjects[magcol]['stetsonj'],
marker='.',ms=1.0, linestyle='none',
rasterized=True)
plt.plot(allobjects[magcol]['binned_sdssr_median'],
allobjects[magcol]['binned_stetsonj_median'],
linewidth=3.0)
plt.plot(
allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_stetsonj_median']) +
min_stetj_stdev*np.array(
allobjects[magcol]['binned_stetsonj_stdev']
),
linewidth=3.0, linestyle='dashed'
)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.xlabel('SDSS r')
plt.ylabel('Stetson J index')
plt.title('%s - SDSS r vs. Stetson J index' % magcol)
plt.yscale('log')
plt.tight_layout()
# the mag vs IQR
plt.subplot(223)
plt.plot(allobjects[magcol]['sdssr'],
allobjects[magcol]['iqr'],
marker='.',ms=1.0, linestyle='none',
rasterized=True)
plt.plot(allobjects[magcol]['binned_sdssr_median'],
allobjects[magcol]['binned_iqr_median'],
linewidth=3.0)
plt.plot(
allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_iqr_median']) +
min_iqr_stdev*np.array(
allobjects[magcol]['binned_iqr_stdev']
),
linewidth=3.0, linestyle='dashed'
)
plt.xlabel('SDSS r')
plt.ylabel('IQR')
plt.title('%s - SDSS r vs. IQR' % magcol)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.yscale('log')
plt.tight_layout()
        # the mag vs inveta
plt.subplot(224)
plt.plot(allobjects[magcol]['sdssr'],
allobjects[magcol]['inveta'],
marker='.',ms=1.0, linestyle='none',
rasterized=True)
plt.plot(allobjects[magcol]['binned_sdssr_median'],
allobjects[magcol]['binned_inveta_median'],
linewidth=3.0)
plt.plot(
allobjects[magcol]['binned_sdssr_median'],
np.array(allobjects[magcol]['binned_inveta_median']) +
min_inveta_stdev*np.array(
allobjects[magcol]['binned_inveta_stdev']
),
linewidth=3.0, linestyle='dashed'
)
plt.xlabel('SDSS r')
plt.ylabel(r'$1/\eta$')
plt.title(r'%s - SDSS r vs. $1/\eta$' % magcol)
plt.xlim((magbins.min()-0.25, magbins.max()))
plt.yscale('log')
plt.tight_layout()
        plotfile = 'varfeatures-%s-%s-distributions.png' % (varthreshpkl,
                                                            magcol)
        plt.savefig(plotfile, bbox_inches='tight')
        plt.close('all')
        plotfiles.append(plotfile)
    return plotfiles
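# Usage sketch for the plotter above (a minimal, hypothetical example: the
# pickle name, lcformat, and magcol are illustrative assumptions, not values
# from this module):
#
# plotfiles = plot_variability_thresholds('variability-thresholds.pkl',
#                                         xmin_stetj_stdev=2.5,
#                                         lcformat='hat-sql',
#                                         magcols=['aep_000'])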
|
def get_stamp(ra, decl,
survey='DSS2 Red',
scaling='Linear',
sizepix=300,
forcefetch=False,
cachedir='~/.astrobase/stamp-cache',
timeout=10.0,
retry_failed=True,
verbose=True,
jitter=5.0):
'''This gets a FITS cutout from the NASA GSFC SkyView service.
This downloads stamps in FITS format from the NASA SkyView service:
https://skyview.gsfc.nasa.gov/current/cgi/query.pl
Parameters
----------
ra,decl : float
These are decimal equatorial coordinates for the cutout center.
survey : str
The survey name to get the stamp from. This is one of the
values in the 'SkyView Surveys' option boxes on the SkyView
webpage. Currently, we've only tested using 'DSS2 Red' as the value for
this kwarg, but the other ones should work in principle.
scaling : str
This is the pixel value scaling function to use.
sizepix : int
The width and height of the cutout are specified by this value.
forcefetch : bool
If True, will disregard any existing cached copies of the stamp already
downloaded corresponding to the requested center coordinates and
redownload the FITS from the SkyView service.
cachedir : str
This is the path to the astrobase cache directory. All downloaded FITS
stamps are stored here as .fits.gz files so we can immediately respond
with the cached copy when a request is made for a coordinate center
that's already been downloaded.
timeout : float
Sets the timeout in seconds to wait for a response from the NASA SkyView
service.
retry_failed : bool
If the initial request to SkyView fails, and this is True, will retry
until it succeeds.
verbose : bool
If True, indicates progress.
jitter : float
This is used to control the scale of the random wait in seconds before
starting the query. Useful in parallelized situations.
Returns
-------
dict
A dict of the following form is returned::
{
'params':{input ra, decl and kwargs used},
'provenance':'cached' or 'new download',
'fitsfile':FITS file to which the cutout was saved on disk
}
'''
# parse the given params into the correct format for the form
formposition = ['%.4f, %.4f' % (ra, decl)]
formscaling = [scaling]
formparams = copy.deepcopy(SKYVIEW_PARAMS)
formparams['Position'] = formposition
formparams['survey'][0] = survey
formparams['scaling'] = formscaling
formparams['pixels'] = ['%s' % sizepix]
# see if the cachedir exists
if '~' in cachedir:
cachedir = os.path.expanduser(cachedir)
if not os.path.exists(cachedir):
os.makedirs(cachedir)
# figure out if we can get this image from the cache
cachekey = '%s-%s-%s-%s' % (formposition[0], survey, scaling, sizepix)
cachekey = hashlib.sha256(cachekey.encode()).hexdigest()
cachefname = os.path.join(cachedir, '%s.fits.gz' % cachekey)
provenance = 'cache'
# this is to handle older cached stamps that didn't include the sizepix
# parameter
if sizepix == 300:
oldcachekey = '%s-%s-%s' % (formposition[0], survey, scaling)
oldcachekey = hashlib.sha256(oldcachekey.encode()).hexdigest()
oldcachefname = os.path.join(cachedir, '%s.fits.gz' % oldcachekey)
if os.path.exists(oldcachefname):
cachefname = oldcachefname
# if this exists in the cache and we're not refetching, get the frame
if forcefetch or (not os.path.exists(cachefname)):
provenance = 'new download'
        time.sleep(random.uniform(1.0, jitter))
# fire the request
try:
if verbose:
LOGINFO('submitting stamp request for %s, %s, %s, %s' % (
formposition[0],
survey,
scaling,
sizepix)
)
req = requests.get(SKYVIEW_URL, params=formparams, timeout=timeout)
req.raise_for_status()
# get the text of the response, this includes the locations of the
# generated FITS on the server
resp = req.text
# find the URLS of the FITS
fitsurls = FITS_REGEX.findall(resp)
# download the URLs
if fitsurls:
for fitsurl in fitsurls:
fullfitsurl = urljoin(FITS_BASEURL, fitsurl)
if verbose:
LOGINFO('getting %s' % fullfitsurl)
fitsreq = requests.get(fullfitsurl, timeout=timeout)
with gzip.open(cachefname,'wb') as outfd:
outfd.write(fitsreq.content)
else:
LOGERROR('no FITS URLs found in query results for %s' %
formposition)
return None
except requests.exceptions.HTTPError as e:
LOGEXCEPTION('SkyView stamp request for '
'coordinates %s failed' % repr(formposition))
return None
except requests.exceptions.Timeout as e:
LOGERROR('SkyView stamp request for '
'coordinates %s did not complete within %s seconds' %
(repr(formposition), timeout))
return None
except Exception as e:
LOGEXCEPTION('SkyView stamp request for '
'coordinates %s failed' % repr(formposition))
return None
#
# DONE WITH FETCHING STUFF
#
# make sure the returned file is OK
try:
stampfits = pyfits.open(cachefname)
stampfits.close()
retdict = {
'params':{'ra':ra,
'decl':decl,
'survey':survey,
'scaling':scaling,
'sizepix':sizepix},
'provenance':provenance,
'fitsfile':cachefname
}
return retdict
except Exception as e:
LOGERROR('could not open cached FITS from Skyview download: %r' %
{'ra':ra,
'decl':decl,
'survey':survey,
'scaling': scaling,
'sizepix': sizepix})
if retry_failed:
            return get_stamp(ra, decl,
                             survey=survey,
                             scaling=scaling,
                             sizepix=sizepix,
                             forcefetch=True,
                             cachedir=cachedir,
                             timeout=timeout,
                             verbose=verbose,
                             jitter=jitter)
else:
return None
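# Usage sketch (hypothetical coordinates; requires network access to the
# NASA SkyView service, so it is shown commented out):
#
# stamp = get_stamp(290.0, 45.0, survey='DSS2 Red', sizepix=300)
# if stamp is not None:
#     print(stamp['provenance'], stamp['fitsfile'])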
|
def _update_proxy(self, change):
""" An observer which sends the state change to the proxy.
"""
if change['type'] == 'container':
#: Only update what's needed
self.proxy.update_points(change)
else:
super(MapPolyline, self)._update_proxy(change)
|
def _update_proxy(self, change):
""" An observer which sends the state change to the proxy.
"""
if change['type'] == 'container':
#: Only update what's needed
self.proxy.update_points(change)
else:
super(MapPolygon, self)._update_proxy(change)
|
def handle_change(self, change):
""" Handle changes from atom ContainerLists """
op = change['operation']
    if op == 'append':
self.add(len(change['value']), LatLng(*change['item']))
elif op == 'insert':
self.add(change['index'], LatLng(*change['item']))
elif op == 'extend':
points = [LatLng(*p) for p in change['items']]
self.addAll([bridge.encode(c) for c in points])
elif op == '__setitem__':
self.set(change['index'], LatLng(*change['newitem']))
elif op == 'pop':
self.remove(change['index'])
else:
raise NotImplementedError(
"Unsupported change operation {}".format(op))
|
def create_widget(self):
""" Create the underlying widget.
"""
self.init_options()
#: Retrieve the actual map
MapFragment.newInstance(self.options).then(
self.on_map_fragment_created)
# Holder for the fragment
self.widget = FrameLayout(self.get_context())
    # We can't simply get a map reference from the return value of
    # getMapAsync like we would with a normal function call. The bridge
    # design was modified to store an object that cannot be decoded
    # normally (via a standard Bridge.Packer) by saving the new object in
    # the cache and returning the id of the handler or proxy that invoked
    # it. This way we can manually create a new id and pass that
    # "future reference-able" object as our listener. The bridge then
    # creates a reference entry in the cache for us with the id of the
    # object we gave it. Once in the cache we can use it like any other
    # bridge object we created.
self.map = GoogleMap(__id__=bridge.generate_id())
|
def init_options(self):
""" Initialize the underlying map options.
"""
self.options = GoogleMapOptions()
d = self.declaration
self.set_map_type(d.map_type)
if d.ambient_mode:
self.set_ambient_mode(d.ambient_mode)
if (d.camera_position or d.camera_zoom or
d.camera_tilt or d.camera_bearing):
self.update_camera()
if d.map_bounds:
self.set_map_bounds(d.map_bounds)
if not d.show_compass:
self.set_show_compass(d.show_compass)
if not d.show_zoom_controls:
self.set_show_zoom_controls(d.show_zoom_controls)
if not d.show_toolbar:
self.set_show_toolbar(d.show_toolbar)
if d.lite_mode:
self.set_lite_mode(d.lite_mode)
if not d.rotate_gestures:
self.set_rotate_gestures(d.rotate_gestures)
if not d.scroll_gestures:
self.set_scroll_gestures(d.scroll_gestures)
if not d.tilt_gestures:
self.set_tilt_gestures(d.tilt_gestures)
if not d.zoom_gestures:
self.set_zoom_gestures(d.zoom_gestures)
if d.min_zoom:
self.set_min_zoom(d.min_zoom)
if d.max_zoom:
self.set_max_zoom(d.max_zoom)
|
def init_map(self):
""" Add markers, polys, callouts, etc.."""
d = self.declaration
if d.show_location:
self.set_show_location(d.show_location)
if d.show_traffic:
self.set_show_traffic(d.show_traffic)
if d.show_indoors:
self.set_show_indoors(d.show_indoors)
if d.show_buildings:
self.set_show_buildings(d.show_buildings)
#: Local ref access is faster
mapview = self.map
mid = mapview.getId()
#: Connect signals
#: Camera
mapview.onCameraChange.connect(self.on_camera_changed)
mapview.onCameraMoveStarted.connect(self.on_camera_move_started)
mapview.onCameraMoveCanceled.connect(self.on_camera_move_stopped)
mapview.onCameraIdle.connect(self.on_camera_move_stopped)
mapview.setOnCameraChangeListener(mid)
mapview.setOnCameraMoveStartedListener(mid)
mapview.setOnCameraMoveCanceledListener(mid)
mapview.setOnCameraIdleListener(mid)
#: Clicks
mapview.onMapClick.connect(self.on_map_clicked)
mapview.setOnMapClickListener(mid)
mapview.onMapLongClick.connect(self.on_map_long_clicked)
mapview.setOnMapLongClickListener(mid)
#: Markers
mapview.onMarkerClick.connect(self.on_marker_clicked)
    mapview.setOnMarkerClickListener(mid)
mapview.onMarkerDragStart.connect(self.on_marker_drag_start)
mapview.onMarkerDrag.connect(self.on_marker_drag)
mapview.onMarkerDragEnd.connect(self.on_marker_drag_end)
mapview.setOnMarkerDragListener(mid)
#: Info window
mapview.onInfoWindowClick.connect(self.on_info_window_clicked)
mapview.onInfoWindowLongClick.connect(self.on_info_window_long_clicked)
mapview.onInfoWindowClose.connect(self.on_info_window_closed)
mapview.setOnInfoWindowClickListener(mid)
mapview.setOnInfoWindowCloseListener(mid)
mapview.setOnInfoWindowLongClickListener(mid)
#: Polys
mapview.onPolygonClick.connect(self.on_poly_clicked)
mapview.onPolylineClick.connect(self.on_poly_clicked)
mapview.setOnPolygonClickListener(mid)
mapview.setOnPolylineClickListener(mid)
#: Circle
mapview.onCircleClick.connect(self.on_circle_clicked)
mapview.setOnCircleClickListener(mid)
|
def init_info_window_adapter(self):
""" Initialize the info window adapter. Should only be done if one of
the markers defines a custom view.
"""
adapter = self.adapter
if adapter:
return #: Already initialized
    adapter = self.adapter = GoogleMap.InfoWindowAdapter()
adapter.getInfoContents.connect(self.on_info_window_contents_requested)
adapter.getInfoWindow.connect(self.on_info_window_requested)
self.map.setInfoWindowAdapter(adapter)
|
def on_map_fragment_created(self, obj_id):
""" Create the fragment and pull the map reference when it's loaded.
"""
self.fragment = MapFragment(__id__=obj_id)
#: Setup callback so we know when the map is ready
self.map.onMapReady.connect(self.on_map_ready)
self.fragment.getMapAsync(self.map.getId())
context = self.get_context()
def on_transaction(id):
trans = FragmentTransaction(__id__=id)
trans.add(self.widget.getId(), self.fragment)
trans.commit()
def on_fragment_manager(id):
fm = FragmentManager(__id__=id)
fm.beginTransaction().then(on_transaction)
context.widget.getSupportFragmentManager().then(on_fragment_manager)
|
def on_map_clicked(self, pos):
""" Called when the map is clicked """
d = self.declaration
d.clicked({
'click': 'short',
'position': tuple(pos)
})
|
def on_map_long_clicked(self, pos):
""" Called when the map is clicked """
d = self.declaration
d.clicked({
'click': 'long',
'position': tuple(pos)
})
|
def destroy(self):
""" Remove the marker if it was added to the map when destroying"""
marker = self.marker
parent = self.parent()
if marker:
if parent:
del parent.markers[marker.__id__]
marker.remove()
super(AndroidMapItemBase, self).destroy()
|
def child_added(self, child):
""" If a child is added we have to make sure the map adapter exists """
if child.widget:
# TODO: Should we keep count and remove the adapter if not all
# markers request it?
self.parent().init_info_window_adapter()
super(AndroidMapMarker, self).child_added(child)
|
def on_marker(self, marker):
""" Convert our options into the actual marker object"""
mid, pos = marker
self.marker = Marker(__id__=mid)
mapview = self.parent()
# Save ref
mapview.markers[mid] = self
# Required so the packer can pass the id
self.marker.setTag(mid)
# If we have a child widget we must configure the map to use the
# custom adapter
for w in self.child_widgets():
mapview.init_info_window_adapter()
break
d = self.declaration
if d.show_info:
self.set_show_info(d.show_info)
#: Can free the options now
del self.options
|
def on_marker(self, mid):
""" Convert our options into the actual circle object"""
self.marker = Circle(__id__=mid)
self.parent().markers[mid] = self
#: Required so the packer can pass the id
self.marker.setTag(mid)
d = self.declaration
if d.clickable:
self.set_clickable(d.clickable)
#: Can free the options now
del self.options
|
def fit_transform(self, raw_documents, y=None):
""" Learn the vocabulary dictionary and return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
An iterable which yields either str, unicode or file objects.
Returns
-------
X : array, [n_samples, n_features]
Document-term matrix.
"""
documents = super(CountVectorizer, self).fit_transform(
raw_documents=raw_documents, y=y)
self.n = len(raw_documents)
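    # per-term document frequency: each term counts once per document in
    # which it appears (period_ is the raw count, df_ the fraction)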
m = (self.transform(raw_documents) > 0).astype(int)
m = m.sum(axis=0).A1
self.period_ = m
self.df_ = m / self.n
return documents
|
def data(self, X=None, y=None, sentences=None):
"""
Add data to flow
"""
self.X = X
self.y = y
self.sentences = sentences
|
def transform(self, transformer):
"""
Add transformer to flow and apply transformer to data in flow
Parameters
----------
transformer : Transformer
a transformer to transform data
"""
self.transformers.append(transformer)
from languageflow.transformer.tagged import TaggedTransformer
if isinstance(transformer, TaggedTransformer):
self.X, self.y = transformer.transform(self.sentences)
if isinstance(transformer, TfidfVectorizer):
self.X = transformer.fit_transform(self.X)
if isinstance(transformer, CountVectorizer):
self.X = transformer.fit_transform(self.X)
if isinstance(transformer, NumberRemover):
self.X = transformer.transform(self.X)
if isinstance(transformer, MultiLabelBinarizer):
self.y = transformer.fit_transform(self.y)
|
def train(self):
"""
Train model with transformed data
"""
    for model in self.models:
        N = [int(frac * len(self.y)) for frac in self.lc_range]
for n in N:
X = self.X[:n]
y = self.y[:n]
e = Experiment(X, y, model.estimator, self.scores,
self.validation_method)
e.log_folder = self.log_folder
e.train()
|
def export(self, model_name, export_folder):
"""
Export model and transformers to export_folder
Parameters
----------
model_name: string
name of model to export
export_folder: string
folder to store exported model and transformers
"""
for transformer in self.transformers:
if isinstance(transformer, MultiLabelBinarizer):
joblib.dump(transformer,
join(export_folder, "label.transformer.bin"),
protocol=2)
if isinstance(transformer, TfidfVectorizer):
joblib.dump(transformer,
join(export_folder, "tfidf.transformer.bin"),
protocol=2)
if isinstance(transformer, CountVectorizer):
joblib.dump(transformer,
join(export_folder, "count.transformer.bin"),
protocol=2)
if isinstance(transformer, NumberRemover):
joblib.dump(transformer,
join(export_folder, "number.transformer.bin"),
protocol=2)
model = [model for model in self.models if model.name == model_name][0]
e = Experiment(self.X, self.y, model.estimator, None)
model_filename = join(export_folder, "model.bin")
e.export(model_filename)
|
def fit_transform(self, raw_documents, y=None):
"""Learn vocabulary and idf, return term-document matrix.
This is equivalent to fit followed by transform, but more efficiently
implemented.
Parameters
----------
raw_documents : iterable
an iterable which yields either str, unicode or file objects
Returns
-------
X : sparse matrix, [n_samples, n_features]
Tf-idf-weighted document-term matrix.
"""
documents = super(TfidfVectorizer, self).fit_transform(
raw_documents=raw_documents, y=y)
count = CountVectorizer(encoding=self.encoding,
decode_error=self.decode_error,
strip_accents=self.strip_accents,
lowercase=self.lowercase,
preprocessor=self.preprocessor,
tokenizer=self.tokenizer,
stop_words=self.stop_words,
token_pattern=self.token_pattern,
ngram_range=self.ngram_range,
analyzer=self.analyzer,
max_df=self.max_df,
min_df=self.min_df,
max_features=self.max_features,
vocabulary=self.vocabulary_,
binary=self.binary,
dtype=self.dtype)
count.fit_transform(raw_documents=raw_documents, y=y)
self.period_ = count.period_
self.df_ = count.df_
self.n = count.n
return documents
|
def fit(self, X, y, coef_init=None, intercept_init=None,
sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data
y : numpy array, shape (n_samples,)
Target values
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
intercept_init : array, shape (n_classes,)
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified
Returns
-------
self : returns an instance of self.
"""
    super(SGDClassifier, self).fit(X, y, coef_init, intercept_init,
                                   sample_weight)
    return self
|
def print_cm(cm, labels, hide_zeroes=False, hide_diagonal=False, hide_threshold=None):
"""pretty print for confusion matrixes"""
columnwidth = max([len(x) for x in labels] + [5]) # 5 is value length
empty_cell = " " * columnwidth
# Print header
print(" " + empty_cell, end=" ")
for label in labels:
print("%{0}s".format(columnwidth) % label, end=" ")
print()
# Print rows
for i, label1 in enumerate(labels):
print(" %{0}s".format(columnwidth) % label1, end=" ")
for j in range(len(labels)):
cell = "%{0}.1f".format(columnwidth) % cm[i, j]
if hide_zeroes:
cell = cell if float(cm[i, j]) != 0 else empty_cell
if hide_diagonal:
cell = cell if i != j else empty_cell
if hide_threshold:
cell = cell if cm[i, j] > hide_threshold else empty_cell
print(cell, end=" ")
print()
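# Example run for the pretty-printer above (illustrative numbers):
import numpy as np
cm = np.array([[13.0, 2.0], [4.0, 11.0]])
print_cm(cm, labels=['cat', 'dog'])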
|
def load_big_file(f):
"""
Workaround for loading a big pickle file. Files over 2GB cause pickle errors on certin Mac and Windows distributions.
:param f:
:return:
"""
logger.info(f'loading file {f}')
with open(f, 'r+b') as f_in:
# mmap seems to be much more memory efficient
bf = mmap.mmap(f_in.fileno(), 0)
f_in.close()
return bf
|
def url_to_filename(url: str, etag: str = None) -> str:
"""
Converts a url into a filename in a reversible way.
If `etag` is specified, add it on the end, separated by a period
(which necessarily won't appear in the base64-encoded filename).
Get rid of the quotes in the etag, since Windows doesn't like them.
"""
url_bytes = url.encode('utf-8')
b64_bytes = base64.b64encode(url_bytes)
decoded = b64_bytes.decode('utf-8')
if etag:
# Remove quotes from etag
etag = etag.replace('"', '')
return f"{decoded}.{etag}"
else:
return decoded
|
def filename_to_url(filename: str) -> Tuple[str, str]:
"""
    Recovers the url from the encoded filename. Returns it and the ETag
(which may be ``None``)
"""
try:
# If there is an etag, it's everything after the first period
decoded, etag = filename.split(".", 1)
except ValueError:
# Otherwise, use None
decoded, etag = filename, None
filename_bytes = decoded.encode('utf-8')
url_bytes = base64.b64decode(filename_bytes)
return url_bytes.decode('utf-8'), etag
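# Round-trip example: the quotes are stripped from the ETag, and the
# base64-encoded URL never contains a period, so the split is unambiguous.
fn = url_to_filename('https://example.com/model.bin', etag='"abc123"')
assert filename_to_url(fn) == ('https://example.com/model.bin', 'abc123')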
|
def cached_path(url_or_filename: str, cache_dir: Path) -> Path:
"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""
dataset_cache = Path(CACHE_ROOT) / cache_dir
parsed = urlparse(url_or_filename)
if parsed.scheme in ('http', 'https'):
# URL, so get it from the cache (downloading if necessary)
return get_from_cache(url_or_filename, dataset_cache)
elif parsed.scheme == '' and Path(url_or_filename).exists():
# File, and it exists.
return Path(url_or_filename)
elif parsed.scheme == '':
# File, but it doesn't exist.
raise FileNotFoundError("file {} not found".format(url_or_filename))
else:
# Something unknown
raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))
|
def get_from_cache(url: str, cache_dir: Path = None) -> Path:
"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""
cache_dir.mkdir(parents=True, exist_ok=True)
filename = re.sub(r'.+/', '', url)
# get cache path to put the file
cache_path = cache_dir / filename
if cache_path.exists():
return cache_path
# make HEAD request to check ETag
response = requests.head(url)
if response.status_code != 200:
if "www.dropbox.com" in url:
            # dropbox returns code 301, so we ignore this error
pass
else:
raise IOError("HEAD request failed for url {}".format(url))
# add ETag to filename if it exists
# etag = response.headers.get("ETag")
if not cache_path.exists():
# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
fd, temp_filename = tempfile.mkstemp()
logger.info("%s not found in cache, downloading to %s", url, temp_filename)
# GET file object
req = requests.get(url, stream=True)
content_length = req.headers.get('Content-Length')
total = int(content_length) if content_length is not None else None
progress = Tqdm.tqdm(unit="B", total=total)
with open(temp_filename, 'wb') as temp_file:
for chunk in req.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
progress.update(len(chunk))
temp_file.write(chunk)
progress.close()
logger.info("copying %s to cache at %s", temp_filename, cache_path)
shutil.copyfile(temp_filename, str(cache_path))
logger.info("removing temp file %s", temp_filename)
os.close(fd)
os.remove(temp_filename)
return cache_path
|
def fit(self, X, y):
"""Fit CRF according to X, y
Parameters
----------
X : list of text
each item is a text
y: list
each item is either a label (in multi class problem) or list of
labels (in multi label problem)
"""
trainer = pycrfsuite.Trainer(verbose=True)
for xseq, yseq in zip(X, y):
trainer.append(xseq, yseq)
trainer.set_params(self.params)
if self.filename:
filename = self.filename
else:
filename = 'model.tmp'
trainer.train(filename)
tagger = pycrfsuite.Tagger()
tagger.open(filename)
self.estimator = tagger
|
def predict(self, X):
"""Predict class labels for samples in X.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
"""
if isinstance(X[0], list):
return [self.estimator.tag(x) for x in X]
return self.estimator.tag(X)
|
def serve(self, port=62000):
""" Start LanguageBoard web application
Parameters
----------
port: int
port to serve web application
"""
from http.server import HTTPServer, CGIHTTPRequestHandler
os.chdir(self.log_folder)
httpd = HTTPServer(('', port), CGIHTTPRequestHandler)
print("Starting LanguageBoard on port: " + str(httpd.server_port))
webbrowser.open('http://0.0.0.0:{}'.format(port))
httpd.serve_forever()
|
def analyze(self, output_folder=".", auto_remove=False):
"""
        :param str output_folder: folder to write analysis results into
        :param bool auto_remove: auto remove previous files in analyze folder
"""
if auto_remove:
try:
shutil.rmtree(output_folder)
        except OSError:
            pass
try:
mkdir(output_folder)
    except OSError:
        pass
tokens = [token for sublist in self.sentences for token in sublist]
df = pd.DataFrame(tokens)
log = u""
log += u"Sentences : {}\n".format(len(self.sentences))
n = df.shape[1]
log += self._analyze_first_token(df, 0, output_folder)
for i in range(1, n):
log += self._analyze_field(df, i, output_folder)
print(log)
stat_file = join(output_folder, "stats.txt")
write(stat_file, log)
|
def predict(self, X):
""" In order to obtain the most likely label for a list of text
Parameters
----------
X : list of string
Raw texts
Returns
-------
    C : list of string
        Predicted labels
"""
x = X
if not isinstance(X, list):
x = [X]
y = self.estimator.predict(x)
y = [item[0] for item in y]
y = [self._remove_prefix(label) for label in y]
if not isinstance(X, list):
y = y[0]
return y
|
def log(model_folder, binary_file="count.transformer.bin",
log_folder="analyze"):
"""
Parameters
----------
    model_folder : string
        folder containing the model's binary files
binary_file : string
file path to count transformer binary file
log_folder : string
log folder
"""
file = join(model_folder, binary_file)
vectorizer = joblib.load(file)
output = []
for token in vectorizer.vocabulary_:
index = vectorizer.vocabulary_[token]
ngram = len(token.split(" "))
output.append({
"token": token,
"ngram": ngram,
"period": vectorizer.period_[index].item(),
"df": vectorizer.df_[index],
})
output = sorted(output, key=lambda item: item["df"])
content = json.dumps(output, ensure_ascii=False)
write(join(log_folder, "count.json"), content)
|
def fit(self, X, y):
"""Fit KimCNNClassifier according to X, y
Parameters
----------
X : list of string
each item is a raw text
y : list of string
each item is a label
"""
####################
# Data Loader
####################
word_vector_transformer = WordVectorTransformer(padding='max')
X = word_vector_transformer.fit_transform(X)
X = LongTensor(X)
self.word_vector_transformer = word_vector_transformer
y_transformer = LabelEncoder()
y = y_transformer.fit_transform(y)
y = torch.from_numpy(y)
self.y_transformer = y_transformer
dataset = CategorizedDataset(X, y)
dataloader = DataLoader(dataset,
batch_size=self.batch_size,
shuffle=True,
num_workers=4)
####################
# Model
####################
KERNEL_SIZES = self.kernel_sizes
NUM_KERNEL = self.num_kernel
EMBEDDING_DIM = self.embedding_dim
model = TextCNN(
vocab_size=word_vector_transformer.get_vocab_size(),
embedding_dim=EMBEDDING_DIM,
output_size=len(self.y_transformer.classes_),
kernel_sizes=KERNEL_SIZES,
num_kernel=NUM_KERNEL)
if USE_CUDA:
model = model.cuda()
####################
# Train
####################
EPOCH = self.epoch
LR = self.lr
loss_function = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=LR)
for epoch in range(EPOCH):
losses = []
for i, data in enumerate(dataloader):
X, y = data
X, y = Variable(X), Variable(y)
optimizer.zero_grad()
model.train()
output = model(X)
loss = loss_function(output, y)
losses.append(loss.data.tolist()[0])
loss.backward()
optimizer.step()
if i % 100 == 0:
print("[%d/%d] mean_loss : %0.2f" % (
epoch, EPOCH, np.mean(losses)))
losses = []
self.model = model
|
def config_sources(app, environment, cluster, configs_dirs, app_dir,
local=False, build=False):
"""Return the config files for an environment & cluster specific app."""
sources = [
# Machine-specific
(configs_dirs, 'hostname'),
(configs_dirs, 'hostname-local'),
(configs_dirs, 'hostname-build'),
# Global
(configs_dirs, 'common'),
# Environment + Cluster
(configs_dirs, 'common-%s' % environment),
(configs_dirs, 'common-%s-%s' % (environment, cluster)),
(configs_dirs, 'common-local'),
(configs_dirs, 'common-build'),
# Machine-specific overrides
(configs_dirs, 'common-overrides'),
# Application-specific
([app_dir], '%s-default' % app),
([app_dir], '%s-%s' % (app, environment)),
([app_dir], '%s-%s-%s' % (app, environment, cluster)),
(configs_dirs, app),
(configs_dirs, '%s-%s' % (app, environment)),
(configs_dirs, '%s-%s-%s' % (app, environment, cluster)),
([app_dir], '%s-local' % app),
([app_dir], '%s-build' % app),
(configs_dirs, '%s-local' % app),
(configs_dirs, '%s-build' % app),
# Machine-specific application override
(configs_dirs, '%s-overrides' % app),
]
# Filter out build sources if not requested
if not build:
sources = [source for source in sources
if not source[1].endswith('-build')]
# Filter out local sources if not build and not local
if not local:
sources = [source for source in sources
if not source[1].endswith('-local')]
return available_sources(sources)
|
def available_sources(sources):
"""Yield the sources that are present."""
for dirs, name in sources:
for directory in dirs:
fn = os.path.join(directory, name) + '.py'
if os.path.isfile(fn):
yield fn
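# Usage sketch for the two helpers above (hypothetical directories; only
# config files that actually exist are yielded, in the override order that
# config_sources lists):
#
# for fn in config_sources('myapp', 'production', 'cluster1',
#                          ['/etc/configs'], '/srv/myapp/deploy'):
#     print(fn)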
|
def smush_config(sources, initial=None):
"""Merge the configuration sources and return the resulting DotDict."""
if initial is None:
initial = {}
config = DotDict(initial)
for fn in sources:
log.debug('Merging %s', fn)
mod = get_config_module(fn)
config = mod.update(config)
log.debug('Current config:\n%s', json.dumps(config, indent=4,
cls=LenientJSONEncoder))
return config
|
def merge_dicts(d1, d2, _path=None):
"""
Merge dictionary d2 into d1, overriding entries in d1 with values from d2.
d1 is mutated.
_path is for internal, recursive use.
"""
if _path is None:
_path = ()
if isinstance(d1, dict) and isinstance(d2, dict):
for k, v in d2.items():
if isinstance(v, MissingValue) and v.name is None:
v.name = '.'.join(_path + (k,))
if isinstance(v, DeletedValue):
d1.pop(k, None)
elif k not in d1:
if isinstance(v, dict):
d1[k] = merge_dicts({}, v, _path + (k,))
else:
d1[k] = v
else:
if isinstance(d1[k], dict) and isinstance(v, dict):
d1[k] = merge_dicts(d1[k], v, _path + (k,))
elif isinstance(d1[k], list) and isinstance(v, list):
# Lists are only supported as leaves
d1[k] += v
elif isinstance(d1[k], MissingValue):
d1[k] = v
elif d1[k] is None:
d1[k] = v
elif type(d1[k]) == type(v):
d1[k] = v
else:
raise TypeError('Refusing to replace a %s with a %s'
% (type(d1[k]), type(v)))
else:
raise TypeError('Cannot merge a %s with a %s' % (type(d1), type(d2)))
return d1
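# Worked example: scalar leaves are overridden, nested dicts are merged,
# and list leaves are concatenated (note that d1 is mutated in place).
d1 = {'a': 1, 'b': {'c': [1], 'd': 2}}
d2 = {'b': {'c': [2], 'd': 3}}
assert merge_dicts(d1, d2) == {'a': 1, 'b': {'c': [1, 2], 'd': 3}}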
|
def filter_dict(unfiltered, filter_keys):
"""Return a subset of a dictionary using the specified keys."""
filtered = DotDict()
for k in filter_keys:
filtered[k] = unfiltered[k]
return filtered
|
def _convert_item(self, obj):
"""
Convert obj into a DotDict, or list of DotDict.
Directly nested lists aren't supported.
Returns the result
"""
if isinstance(obj, dict) and not isinstance(obj, DotDict):
obj = DotDict(obj)
elif isinstance(obj, list):
        # mutate the list in place instead of reassigning, otherwise the
        # original object would be left unchanged
for i, item in enumerate(obj):
if isinstance(item, dict) and not isinstance(item, DotDict):
obj[i] = DotDict(item)
return obj
|
def filter_config(config, deploy_config):
"""Return a config subset using the filter defined in the deploy config."""
if not os.path.isfile(deploy_config):
return DotDict()
config_module = get_config_module(deploy_config)
return config_module.filter(config)
|
def seeded_auth_token(client, service, seed):
"""Return an auth token based on the client+service+seed tuple."""
hash_func = hashlib.md5()
token = ','.join((client, service, seed)).encode('utf-8')
hash_func.update(token)
return hash_func.hexdigest()
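# Example: the token is just the md5 hex digest of "client,service,seed"
# (the argument values here are illustrative).
token = seeded_auth_token('client-a', 'svc-b', 's33d')
assert token == hashlib.md5(b'client-a,svc-b,s33d').hexdigest()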
|
def write_config(config, app_dir, filename='configuration.json'):
"""Write configuration to the applicaiton directory."""
path = os.path.join(app_dir, filename)
with open(path, 'w') as f:
json.dump(
config, f, indent=4, cls=DetectMissingEncoder,
separators=(',', ': '))
|
def get_config_module(config_pathname):
"""Imports the config file to yoconfigurator.configs.<config_basename>."""
configs_mod = 'yoconfigurator.configs'
if configs_mod not in sys.modules:
sys.modules[configs_mod] = types.ModuleType(configs_mod)
module_name = os.path.basename(config_pathname).rsplit('.', 1)[0]
module_name = configs_mod + '.' + module_name
return _load_module(module_name, config_pathname)
|
def validate_date(date_text):
"""Return True if valid, raise ValueError if not"""
try:
if int(date_text) < 0:
return True
except ValueError:
pass
try:
datetime.strptime(date_text, '%Y-%m-%d')
return True
except ValueError:
pass
raise ValueError('Dates must be negative integers or YYYY-MM-DD in the past.')
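# Examples: negative day offsets and past YYYY-MM-DD dates validate;
# anything else raises ValueError.
assert validate_date('-30') is True
assert validate_date('2017-01-01') is True
# validate_date('yesterday')  # -> ValueError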
|
def get_download_total(rows):
"""Return the total downloads, and the downloads column"""
headers = rows.pop(0)
index = headers.index('download_count')
total_downloads = sum(int(row[index]) for row in rows)
rows.insert(0, headers)
return total_downloads, index
|
def add_download_total(rows):
"""Add a final row to rows showing the total downloads"""
total_row = [""] * len(rows[0])
total_row[0] = "Total"
total_downloads, downloads_column = get_download_total(rows)
total_row[downloads_column] = str(total_downloads)
rows.append(total_row)
return rows
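# Example: a "Total" row is appended, summing the download_count column
# (the rows below are illustrative).
rows = [['project', 'download_count'],
        ['pip', '100'],
        ['wheel', '50']]
assert add_download_total(rows)[-1] == ['Total', '150']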
|
def pypinfo(
ctx,
project,
fields,
auth,
run,
json,
indent,
timeout,
limit,
days,
start_date,
end_date,
where,
order,
all_installers,
percent,
markdown,
):
"""Valid fields are:\n
project | version | file | pyversion | percent3 | percent2 | impl | impl-version |\n
openssl | date | month | year | country | installer | installer-version |\n
setuptools-version | system | system-release | distro | distro-version | cpu
"""
if auth:
set_credentials(auth)
click.echo('Credentials location set to "{}".'.format(get_credentials()))
return
if project is None and not fields:
click.echo(ctx.get_help())
return
parsed_fields = []
for field in fields:
parsed = FIELD_MAP.get(field)
if parsed is None:
raise ValueError('"{}" is an unsupported field.'.format(field))
parsed_fields.append(parsed)
order_name = order
order = FIELD_MAP.get(order)
if order:
order_name = order.name
parsed_fields.insert(0, order)
built_query = build_query(
project,
parsed_fields,
limit=limit,
days=days,
start_date=start_date,
end_date=end_date,
where=where,
order=order_name,
pip=not all_installers,
)
if run:
client = create_client(get_credentials())
query_job = client.query(built_query, job_config=create_config())
query_rows = query_job.result(timeout=timeout // 1000)
# Cached
        from_cache = bool(query_job.cache_hit)
# Processed
bytes_processed = query_job.total_bytes_processed or 0
processed_amount, processed_unit = convert_units(bytes_processed)
# Billed
bytes_billed = query_job.total_bytes_billed or 0
billed_amount, billed_unit = convert_units(bytes_billed)
# Cost
billing_tier = query_job.billing_tier or 1
estimated_cost = Decimal(TIER_COST * billing_tier) / TB * Decimal(bytes_billed)
estimated_cost = str(estimated_cost.quantize(TO_CENTS, rounding=ROUND_UP))
rows = parse_query_result(query_job, query_rows)
if percent:
rows = add_percentages(rows, include_sign=not json)
# Only for tables, and if more than the header row + a single data row
if len(rows) > 2 and not json:
rows = add_download_total(rows)
if not json:
click.echo('Served from cache: {}'.format(from_cache))
click.echo('Data processed: {:.2f} {}'.format(processed_amount, processed_unit))
click.echo('Data billed: {:.2f} {}'.format(billed_amount, billed_unit))
click.echo('Estimated cost: ${}'.format(estimated_cost))
click.echo()
click.echo(tabulate(rows, markdown))
else:
query_info = {
'cached': from_cache,
'bytes_processed': bytes_processed,
'bytes_billed': bytes_billed,
'estimated_cost': estimated_cost,
}
click.echo(format_json(rows, query_info, indent))
else:
click.echo(built_query)
|
def find_and_patch_entry(soup, entry):
"""
Modify soup so Dash.app can generate TOCs on the fly.
"""
link = soup.find("a", {"class": "headerlink"}, href="#" + entry.anchor)
tag = soup.new_tag("a")
tag["name"] = APPLE_REF_TEMPLATE.format(entry.type, entry.name)
if link:
link.parent.insert(0, tag)
return True
elif entry.anchor.startswith("module-"):
soup.h1.parent.insert(0, tag)
return True
else:
return False
|
def inv_entry_to_path(data):
"""
    Determine the path from the intersphinx inventory entry.
    Discard the anchors between head and tail to make it
compatible with situations where extra meta information is encoded.
"""
path_tuple = data[2].split("#")
if len(path_tuple) > 1:
path_str = "#".join((path_tuple[0], path_tuple[-1]))
else:
path_str = data[2]
return path_str
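# Example: intermediate anchors are discarded, keeping only head and tail
# (the inventory entry tuple here is illustrative).
assert inv_entry_to_path((None, None, 'api.html#extra-meta#module-foo')) == \
    'api.html#module-foo'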
|
def main(
source,
force,
name,
quiet,
verbose,
destination,
add_to_dash,
add_to_global,
icon,
index_page,
enable_js,
online_redirect_url,
parser,
):
"""
Convert docs from SOURCE to Dash.app's docset format.
"""
try:
logging.config.dictConfig(
create_log_config(verbose=verbose, quiet=quiet)
)
except ValueError as e:
click.secho(e.args[0], fg="red")
raise SystemExit(1)
if icon:
icon_data = icon.read()
if not icon_data.startswith(PNG_HEADER):
log.error(
'"{}" is not a valid PNG image.'.format(
click.format_filename(icon.name)
)
)
raise SystemExit(1)
else:
icon_data = None
source, dest, name = setup_paths(
source,
destination,
name=name,
add_to_global=add_to_global,
force=force,
)
if parser is None:
parser = parsers.get_doctype(source)
if parser is None:
log.error(
'"{}" does not contain a known documentation format.'.format(
click.format_filename(source)
)
)
raise SystemExit(errno.EINVAL)
docset = prepare_docset(
source, dest, name, index_page, enable_js, online_redirect_url
)
doc_parser = parser(doc_path=docset.docs)
log.info(
(
"Converting "
+ click.style("{parser_name}", bold=True)
+ ' docs from "{src}" to "{dst}".'
).format(
parser_name=parser.name,
src=click.format_filename(source, shorten=True),
dst=click.format_filename(dest),
)
)
with docset.db_conn:
log.info("Parsing documentation...")
toc = patch_anchors(doc_parser, show_progressbar=not quiet)
for entry in doc_parser.parse():
docset.db_conn.execute(
"INSERT INTO searchIndex VALUES (NULL, ?, ?, ?)",
entry.as_tuple(),
)
toc.send(entry)
count = docset.db_conn.execute(
"SELECT COUNT(1) FROM searchIndex"
).fetchone()[0]
log.info(
(
"Added "
+ click.style("{count:,}", fg="green" if count > 0 else "red")
+ " index entries."
).format(count=count)
)
toc.close()
if icon_data:
add_icon(icon_data, dest)
if add_to_dash or add_to_global:
log.info("Adding to Dash.app...")
os.system('open -a dash "{}"'.format(dest))
|
def create_log_config(verbose, quiet):
"""
We use logging's levels as an easy-to-use verbosity controller.
"""
if verbose and quiet:
raise ValueError(
"Supplying both --quiet and --verbose makes no sense."
)
elif verbose:
level = logging.DEBUG
elif quiet:
level = logging.ERROR
else:
level = logging.INFO
logger_cfg = {"handlers": ["click_handler"], "level": level}
return {
"version": 1,
"formatters": {"click_formatter": {"format": "%(message)s"}},
"handlers": {
"click_handler": {
"level": level,
"class": "doc2dash.__main__.ClickEchoHandler",
"formatter": "click_formatter",
}
},
"loggers": {"doc2dash": logger_cfg, "__main__": logger_cfg},
}
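# Usage sketch (shown commented out: dictConfig resolves the handler class
# string, so doc2dash itself must be importable for this to run):
#
# import logging.config
# logging.config.dictConfig(create_log_config(verbose=True, quiet=False))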
|
def setup_paths(source, destination, name, add_to_global, force):
"""
Determine source and destination using the options.
"""
if source[-1] == "/":
source = source[:-1]
if not name:
name = os.path.split(source)[-1]
elif name.endswith(".docset"):
name = name.replace(".docset", "")
if add_to_global:
destination = DEFAULT_DOCSET_PATH
dest = os.path.join(destination or "", name + ".docset")
dst_exists = os.path.lexists(dest)
if dst_exists and force:
shutil.rmtree(dest)
elif dst_exists:
log.error(
'Destination path "{}" already exists.'.format(
click.format_filename(dest)
)
)
raise SystemExit(errno.EEXIST)
return source, dest, name
|
def prepare_docset(
source, dest, name, index_page, enable_js, online_redirect_url
):
"""
Create boilerplate files & directories and copy vanilla docs inside.
Return a tuple of path to resources and connection to sqlite db.
"""
resources = os.path.join(dest, "Contents", "Resources")
docs = os.path.join(resources, "Documents")
os.makedirs(resources)
db_conn = sqlite3.connect(os.path.join(resources, "docSet.dsidx"))
db_conn.row_factory = sqlite3.Row
db_conn.execute(
"CREATE TABLE searchIndex(id INTEGER PRIMARY KEY, name TEXT, "
"type TEXT, path TEXT)"
)
db_conn.commit()
plist_path = os.path.join(dest, "Contents", "Info.plist")
plist_cfg = {
"CFBundleIdentifier": name,
"CFBundleName": name,
"DocSetPlatformFamily": name.lower(),
"DashDocSetFamily": "python",
"isDashDocset": True,
"isJavaScriptEnabled": enable_js,
}
if index_page is not None:
plist_cfg["dashIndexFilePath"] = index_page
if online_redirect_url is not None:
plist_cfg["DashDocSetFallbackURL"] = online_redirect_url
write_plist(plist_cfg, plist_path)
shutil.copytree(source, docs)
return DocSet(path=dest, docs=docs, plist=plist_path, db_conn=db_conn)
|
def add_icon(icon_data, dest):
"""
Add icon to docset
"""
with open(os.path.join(dest, "icon.png"), "wb") as f:
f.write(icon_data)
|
def patch_anchors(parser, show_progressbar):
"""
Consume ``ParseEntry``s then patch docs for TOCs by calling
*parser*'s ``find_and_patch_entry``.
"""
files = defaultdict(list)
try:
while True:
pentry = (yield)
try:
fname, anchor = pentry.path.split("#")
files[fname].append(
TOCEntry(name=pentry.name, type=pentry.type, anchor=anchor)
)
except ValueError:
# pydoctor has no anchors for e.g. classes
pass
except GeneratorExit:
pass
def patch_files(files):
for fname, entries in files:
full_path = os.path.join(parser.doc_path, fname)
with codecs.open(full_path, mode="r", encoding="utf-8") as fp:
soup = BeautifulSoup(fp, "html.parser")
for entry in entries:
if not parser.find_and_patch_entry(soup, entry):
log.debug(
"Can't find anchor {} in {}.".format(
entry.anchor, click.format_filename(fname)
)
)
with open(full_path, mode="wb") as fp:
fp.write(soup.encode("utf-8"))
if show_progressbar is True:
with click.progressbar(
files.items(),
width=0,
length=len(files),
label="Adding table of contents meta data...",
) as pbar:
patch_files(pbar)
else:
patch_files(files.items())
|
def has_file_with(path, filename, content):
"""
Check whether *filename* in *path* contains the string *content*.
"""
try:
with open(os.path.join(path, filename), "rb") as f:
return content in f.read()
except IOError as e:
if e.errno == errno.ENOENT:
return False
else:
raise
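# Example: a missing file yields False instead of raising (the temp dir and
# file contents here are illustrative).
import os
import tempfile
_tmpdir = tempfile.mkdtemp()
with open(os.path.join(_tmpdir, 'conf.py'), 'wb') as f:
    f.write(b"extensions = ['sphinx.ext.autodoc']")
assert has_file_with(_tmpdir, 'conf.py', b'autodoc')
assert not has_file_with(_tmpdir, 'missing.py', b'autodoc')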
|
def run_cell(self, cell):
"""Run the Cell code using the IPython globals and locals
Args:
cell (str): Python code to be executed
"""
globals = self.ipy_shell.user_global_ns
locals = self.ipy_shell.user_ns
globals.update({
"__ipy_scope__": None,
})
try:
with redirect_stdout(self.stdout):
self.run(cell, globals, locals)
    except Exception:
self.code_error = True
if self.options.debug:
raise BdbQuit
finally:
self.finalize()
|