def jhk_to_sdssg(jmag,hmag,kmag):
'''Converts given J, H, Ks mags to an SDSS g magnitude value.
Parameters
----------
jmag,hmag,kmag : float
2MASS J, H, Ks mags of the object.
Returns
-------
float
The converted SDSS g band magnitude.
'''
return convert_constants(jmag,hmag,kmag,
SDSSG_JHK,
SDSSG_JH, SDSSG_JK, SDSSG_HK,
SDSSG_J, SDSSG_H, SDSSG_K)
def jhk_to_sdssr(jmag,hmag,kmag):
'''Converts given J, H, Ks mags to an SDSS r magnitude value.
Parameters
----------
jmag,hmag,kmag : float
2MASS J, H, Ks mags of the object.
Returns
-------
float
The converted SDSS r band magnitude.
'''
return convert_constants(jmag,hmag,kmag,
SDSSR_JHK,
SDSSR_JH, SDSSR_JK, SDSSR_HK,
SDSSR_J, SDSSR_H, SDSSR_K)
def jhk_to_sdssi(jmag,hmag,kmag):
'''Converts given J, H, Ks mags to an SDSS i magnitude value.
Parameters
----------
jmag,hmag,kmag : float
2MASS J, H, Ks mags of the object.
Returns
-------
float
The converted SDSS i band magnitude.
'''
return convert_constants(jmag,hmag,kmag,
SDSSI_JHK,
SDSSI_JH, SDSSI_JK, SDSSI_HK,
SDSSI_J, SDSSI_H, SDSSI_K)
def jhk_to_sdssz(jmag,hmag,kmag):
'''Converts given J, H, Ks mags to an SDSS z magnitude value.
Parameters
----------
jmag,hmag,kmag : float
2MASS J, H, Ks mags of the object.
Returns
-------
float
The converted SDSS z band magnitude.
'''
return convert_constants(jmag,hmag,kmag,
SDSSZ_JHK,
SDSSZ_JH, SDSSZ_JK, SDSSZ_HK,
SDSSZ_J, SDSSZ_H, SDSSZ_K)
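# A minimal usage sketch for the four JHK -> SDSS converters above. The input
# 2MASS magnitudes below are hypothetical, chosen only to show the call
# signature; real use would pull them from a catalog.
def _example_jhk_to_sdss():
    jmag, hmag, kmag = 10.423, 10.028, 9.957
    return {'sdssg': jhk_to_sdssg(jmag, hmag, kmag),
            'sdssr': jhk_to_sdssr(jmag, hmag, kmag),
            'sdssi': jhk_to_sdssi(jmag, hmag, kmag),
            'sdssz': jhk_to_sdssz(jmag, hmag, kmag)}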
def absolute_gaia_magnitude(gaia_mag,
gaia_parallax_mas,
gaia_mag_err=None,
gaia_parallax_err_mas=None):
'''Calculates the GAIA absolute magnitude for object (or array of objects).
Given a G mag and the parallax measured by GAIA, gets the absolute mag using
the usual equation::
        G - M_G = 5*log10(d_pc) - 5
        M_G = 5 - 5*log10(d_pc) + G
Parameters
----------
gaia_mag : float or array-like
The measured GAIA G magnitude.
    gaia_parallax_mas : float or array-like
The measured parallax of the object in mas.
gaia_mag_err : float or array-like or None
The measurement error in GAIA G magnitude.
gaia_parallax_err_mas : float or array-like or None
The measurement error in GAIA parallax in mas.
Returns
-------
float or array-like
The absolute magnitude M_G of the object(s).
If both `_err` input kwargs are provided, will return a tuple of the form::
(M_G float or array-like, M_G_err float or array-like)
'''
# get the distance
# we're using the naive calculation of d. this is inaccurate as stated in
# Bailer-Jones 2015 (http://arxiv.org/abs/1507.02105) if the error in
# parallax is a significant fraction of parallax
d_pc = np.abs(1000.0/gaia_parallax_mas)
# get the distance error
if gaia_parallax_err_mas is not None:
d_pc_err = (
(1000.0/(gaia_parallax_mas*gaia_parallax_mas)) *
gaia_parallax_err_mas
)
else:
d_pc_err = None
# calculate the absolute mag from the relation
# FIXME: this is NOT corrected for extinction in G mag. see Jordi+ 2010
# (http://adsabs.harvard.edu/abs/2010A%26A...523A..48J) to figure out
# A_G/A_V as a function of (V-I)_0, then apply it here
M_G = 5 - 5.0*np.log10(d_pc) + gaia_mag
# calculate the err in M_G
if d_pc_err is not None and gaia_mag_err is not None:
M_G_err = np.sqrt(
((5.0/(d_pc * np.log(10.0)))**2 * (d_pc_err)**2) +
gaia_mag_err*gaia_mag_err
)
else:
M_G_err = None
if M_G_err is not None:
return M_G, M_G_err
else:
return M_G
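# A short worked example for `absolute_gaia_magnitude` with round numbers: a
# G = 12.0 mag star with a 10.0 mas parallax is at d = 1000/10 = 100 pc, so
# M_G = 5 - 5*log10(100) + 12.0 = 7.0. The error values are hypothetical.
def _example_absolute_gaia_magnitude():
    m_g, m_g_err = absolute_gaia_magnitude(12.0, 10.0,
                                           gaia_mag_err=0.01,
                                           gaia_parallax_err_mas=0.1)
    # m_g == 7.0; m_g_err propagates the parallax error through 5*log10(d_pc)
    # and adds the G mag error in quadrature
    return m_g, m_g_err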
def aov_theta(times, mags, errs, frequency,
binsize=0.05, minbin=9):
'''Calculates the Schwarzenberg-Czerny AoV statistic at a test frequency.
Parameters
----------
times,mags,errs : np.array
The input time-series and associated errors.
frequency : float
The test frequency to calculate the theta statistic at.
binsize : float
The phase bin size to use.
minbin : int
The minimum number of items in a phase bin to consider in the
calculation of the statistic.
Returns
-------
theta_aov : float
The value of the AoV statistic at the specified `frequency`.
'''
period = 1.0/frequency
fold_time = times[0]
phased = phase_magseries(times,
mags,
period,
fold_time,
wrap=False,
sort=True)
phases = phased['phase']
pmags = phased['mags']
bins = nparange(0.0, 1.0, binsize)
ndets = phases.size
binnedphaseinds = npdigitize(phases, bins)
bin_s1_tops = []
bin_s2_tops = []
binndets = []
goodbins = 0
all_xbar = npmedian(pmags)
for x in npunique(binnedphaseinds):
thisbin_inds = binnedphaseinds == x
thisbin_mags = pmags[thisbin_inds]
if thisbin_mags.size > minbin:
thisbin_ndet = thisbin_mags.size
thisbin_xbar = npmedian(thisbin_mags)
# get s1
thisbin_s1_top = (
thisbin_ndet *
(thisbin_xbar - all_xbar) *
(thisbin_xbar - all_xbar)
)
# get s2
thisbin_s2_top = npsum((thisbin_mags - all_xbar) *
(thisbin_mags - all_xbar))
bin_s1_tops.append(thisbin_s1_top)
bin_s2_tops.append(thisbin_s2_top)
binndets.append(thisbin_ndet)
goodbins = goodbins + 1
# turn the quantities into arrays
bin_s1_tops = nparray(bin_s1_tops)
bin_s2_tops = nparray(bin_s2_tops)
binndets = nparray(binndets)
# calculate s1 first
s1 = npsum(bin_s1_tops)/(goodbins - 1.0)
# then calculate s2
s2 = npsum(bin_s2_tops)/(ndets - goodbins)
theta_aov = s1/s2
return theta_aov
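# A sketch of evaluating `aov_theta` on a synthetic sinusoidal light curve.
# The period, amplitude, and noise level below are made up; theta should be
# much larger at the true frequency than at an unrelated one.
def _example_aov_theta():
    import numpy as np
    rng = np.random.RandomState(42)
    times = np.sort(rng.uniform(0.0, 100.0, size=2000))
    mags = 12.0 + 0.1*np.sin(2.0*np.pi*times/3.2153) + 0.01*rng.randn(2000)
    errs = np.full_like(mags, 0.01)
    theta_true = aov_theta(times, mags, errs, 1.0/3.2153)
    theta_off = aov_theta(times, mags, errs, 1.0/5.0)
    return theta_true, theta_off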
def _aov_worker(task):
'''This is a parallel worker for the function below.
Parameters
----------
task : tuple
This is of the form below::
task[0] = times
task[1] = mags
task[2] = errs
task[3] = frequency
task[4] = binsize
task[5] = minbin
Returns
-------
theta_aov : float
The theta value at the specified frequency. nan if the calculation
fails.
'''
times, mags, errs, frequency, binsize, minbin = task
try:
theta = aov_theta(times, mags, errs, frequency,
binsize=binsize, minbin=minbin)
return theta
except Exception as e:
return npnan
def get_frequency_grid(times,
samplesperpeak=5,
nyquistfactor=5,
minfreq=None,
maxfreq=None,
returnf0dfnf=False):
'''This calculates a frequency grid for the period finding functions in this
module.
Based on the autofrequency function in astropy.stats.lombscargle.
http://docs.astropy.org/en/stable/_modules/astropy/stats/lombscargle/core.html#LombScargle.autofrequency
Parameters
----------
times : np.array
The times to use to generate the frequency grid over.
samplesperpeak : int
The minimum sample coverage each frequency point in the grid will get.
nyquistfactor : int
The multiplier over the Nyquist rate to use.
minfreq,maxfreq : float or None
If not None, these will be the limits of the frequency grid generated.
returnf0dfnf : bool
If this is True, will return the values of `f0`, `df`, and `Nf`
generated for this grid.
Returns
-------
np.array
A grid of frequencies.
'''
baseline = times.max() - times.min()
nsamples = times.size
df = 1. / baseline / samplesperpeak
if minfreq is not None:
f0 = minfreq
else:
f0 = 0.5 * df
if maxfreq is not None:
Nf = int(np.ceil((maxfreq - f0) / df))
else:
Nf = int(0.5 * samplesperpeak * nyquistfactor * nsamples)
if returnf0dfnf:
return f0, df, Nf, f0 + df * np.arange(Nf)
else:
return f0 + df * np.arange(Nf)
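# Worked example for `get_frequency_grid`: with a 100-day baseline and
# samplesperpeak=5, the frequency step is df = 1/(100*5) = 0.002 cycles/day
# and the grid starts at f0 = df/2 = 0.001 cycles/day.
def _example_get_frequency_grid():
    import numpy as np
    times = np.linspace(0.0, 100.0, 500)
    freqs = get_frequency_grid(times, samplesperpeak=5, nyquistfactor=5)
    # here freqs[0] == 0.001 and np.diff(freqs)[0] == 0.002
    return freqs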
def independent_freq_count(frequencies, times, conservative=True):
'''This estimates M: the number of independent frequencies in the periodogram.
This follows the terminology on page 3 of Zechmeister & Kurster (2009)::
M = DELTA_f / delta_f
where::
DELTA_f = freq.max() - freq.min()
delta_f = 1.0/(times.max() - times.min())
Parameters
----------
frequencies : np.array
The frequencies array used for the calculation of the GLS periodogram.
times : np.array
The array of input times used for the calculation of the GLS
periodogram.
conservative : bool
If True, will follow the prescription given in Schwarzenberg-Czerny
(2003):
http://adsabs.harvard.edu/abs/2003ASPC..292..383S
        and estimate the number of independent frequencies as::
min(N_obs, N_freq, DELTA_f/delta_f)
Returns
-------
    M : float
        The estimated number of independent frequencies. Note this is not
        rounded to an integer.
'''
M = frequencies.ptp()*times.ptp()
if conservative:
M_eff = min([times.size, frequencies.size, M])
else:
M_eff = M
return M_eff
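# Worked example for the M = DELTA_f * DELTA_t estimate above: frequencies
# spanning ~10 cycles/day over a 100-day baseline give M ~ 1000, which the
# conservative Schwarzenberg-Czerny (2003) prescription then caps at
# min(N_obs, N_freq, M) -- N_obs = 500 here.
def _example_independent_freq_count():
    import numpy as np
    times = np.linspace(0.0, 100.0, 500)
    frequencies = np.linspace(0.001, 10.0, 20000)
    return independent_freq_count(frequencies, times, conservative=True)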
def bootstrap_falsealarmprob(lspinfo,
times,
mags,
errs,
nbootstrap=250,
magsarefluxes=False,
sigclip=10.0,
npeaks=None):
'''Calculates the false alarm probabilities of periodogram peaks using
bootstrap resampling of the magnitude time series.
The false alarm probability here is defined as::
(1.0 + sum(trialbestpeaks[i] > peak[j]))/(ntrialbestpeaks + 1)
for each best periodogram peak j. The index i is for each bootstrap
trial. This effectively gives us a significance for the peak. Smaller FAP
means a better chance that the peak is real.
The basic idea is to get the number of trial best peaks that are larger than
the current best peak and divide this by the total number of trials. The
distribution of these trial best peaks is obtained after scrambling the mag
values and rerunning the specified periodogram method for a bunch of trials.
`lspinfo` is the output dict from a periodbase periodogram function and MUST
contain a 'method' key that corresponds to one of the keys in the LSPMETHODS
dict above. This will let this function know which periodogram function to
run to generate the bootstrap samples. The lspinfo SHOULD also have a
'kwargs' key that corresponds to the input keyword arguments for the
periodogram function as it was run originally, to keep everything the same
during the bootstrap runs. If this is missing, default values will be used.
FIXME: this may not be strictly correct; must look more into bootstrap
significance testing. Also look into if we're doing resampling correctly for
time series because the samples are not iid. Look into moving block
bootstrap.
Parameters
----------
lspinfo : dict
A dict of period-finder results from one of the period-finders in
periodbase, or your own functions, provided it's of the form and
contains at least the keys listed below::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above,
'kwargs': dict of kwargs passed to your own period-finder function}
If you provide your own function's period-finder results, you should add
a corresponding key for it to the LSPMETHODS dict above so the bootstrap
function can use it correctly. Your period-finder function should take
`times`, `mags`, errs and any extra parameters as kwargs and return a
dict of the form described above. A small worked example::
from your_module import your_periodfinder_func
from astrobase import periodbase
periodbase.LSPMETHODS['your-finder'] = your_periodfinder_func
# run a period-finder session
your_pfresults = your_periodfinder_func(times, mags, errs,
**extra_kwargs)
# run bootstrap to find FAP
falsealarm_info = periodbase.bootstrap_falsealarmprob(
your_pfresults,
times, mags, errs,
nbootstrap=250,
magsarefluxes=False,
)
times,mags,errs : np.arrays
The magnitude/flux time-series to process along with their associated
measurement errors.
nbootstrap : int
The total number of bootstrap trials to run. This is set to 250 by
default, but should probably be around 1000 for realistic results.
magsarefluxes : bool
If True, indicates the input time-series is fluxes and not mags.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
npeaks : int or None
The number of peaks from the list of 'nbestlspvals' in the period-finder
result dict to run the bootstrap for. If None, all of the peaks in this
list will have their FAP calculated.
Returns
-------
dict
Returns a dict of the form::
{'peaks':allpeaks,
'periods':allperiods,
'probabilities':allfaps,
'alltrialbestpeaks':alltrialbestpeaks}
'''
# figure out how many periods to work on
if (npeaks and (0 < npeaks < len(lspinfo['nbestperiods']))):
nperiods = npeaks
else:
LOGWARNING('npeaks not specified or invalid, '
'getting FAP for all %s periodogram peaks' %
len(lspinfo['nbestperiods']))
nperiods = len(lspinfo['nbestperiods'])
nbestperiods = lspinfo['nbestperiods'][:nperiods]
nbestpeaks = lspinfo['nbestlspvals'][:nperiods]
# get rid of nans first and sigclip
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
allpeaks = []
allperiods = []
allfaps = []
alltrialbestpeaks = []
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
for ind, period, peak in zip(range(len(nbestperiods)),
nbestperiods,
nbestpeaks):
LOGINFO('peak %s: running %s trials...' % (ind+1, nbootstrap))
trialbestpeaks = []
for _trial in range(nbootstrap):
# get a scrambled index
tindex = np.random.randint(0,
high=mags.size,
size=mags.size)
# get the kwargs dict out of the lspinfo
if 'kwargs' in lspinfo:
kwargs = lspinfo['kwargs']
# update the kwargs with some local stuff
kwargs.update({'magsarefluxes':magsarefluxes,
'sigclip':sigclip,
'verbose':False})
else:
kwargs = {'magsarefluxes':magsarefluxes,
'sigclip':sigclip,
'verbose':False}
# run the periodogram with scrambled mags and errs
# and the appropriate keyword arguments
lspres = LSPMETHODS[lspinfo['method']](
times, mags[tindex], errs[tindex],
**kwargs
)
trialbestpeaks.append(lspres['bestlspval'])
trialbestpeaks = np.array(trialbestpeaks)
alltrialbestpeaks.append(trialbestpeaks)
# calculate the FAP for a trial peak j = FAP[j] =
# (1.0 + sum(trialbestpeaks[i] > peak[j]))/(ntrialbestpeaks + 1)
if lspinfo['method'] != 'pdm':
falsealarmprob = (
(1.0 + trialbestpeaks[trialbestpeaks > peak].size) /
(trialbestpeaks.size + 1.0)
)
# for PDM, we're looking for a peak smaller than the best peak
# because values closer to 0.0 are more significant
else:
falsealarmprob = (
(1.0 + trialbestpeaks[trialbestpeaks < peak].size) /
(trialbestpeaks.size + 1.0)
)
LOGINFO('FAP for peak %s, period: %.6f = %.3g' % (ind+1,
period,
falsealarmprob))
allpeaks.append(peak)
allperiods.append(period)
allfaps.append(falsealarmprob)
return {'peaks':allpeaks,
'periods':allperiods,
'probabilities':allfaps,
'alltrialbestpeaks':alltrialbestpeaks}
else:
LOGERROR('not enough mag series points to calculate periodogram')
return None
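# The FAP formula used above, in isolation: given the distribution of trial
# best peaks from bootstrap resampling and one observed peak,
# FAP = (1 + n_trials_exceeding_peak) / (n_trials + 1). The numbers in this
# standalone sketch are made up.
def _example_bootstrap_fap_formula():
    import numpy as np
    trialbestpeaks = np.array([8.1, 9.3, 7.7, 10.2, 8.9])
    peak = 9.0
    # two trials (9.3 and 10.2) exceed the peak -> FAP = (1 + 2)/(5 + 1) = 0.5
    return ((1.0 + trialbestpeaks[trialbestpeaks > peak].size) /
            (trialbestpeaks.size + 1.0))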
def make_combined_periodogram(pflist, outfile, addmethods=False):
    '''This overplots the periodograms from a list of period-finder results on
    a single figure.
This will renormalize all of the periodograms so their values lie between 0
and 1, with values lying closer to 1 being more significant. Periodograms
that give the same best periods will have their peaks line up together.
Parameters
----------
pflist : list of dict
This is a list of result dicts from any of the period-finders in
periodbase. To use your own period-finders' results here, make sure the
result dict is of the form and has at least the keys below::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the
`astrobase.periodbase.METHODLABELS` dict,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above,
'kwargs': dict of kwargs passed to your own period-finder function}
outfile : str
This is the output file to write the output to. NOTE: EPS/PS won't work
because we use alpha transparency to better distinguish between the
various periodograms.
addmethods : bool
If this is True, will add all of the normalized periodograms together,
then renormalize them to between 0 and 1. In this way, if all of the
period-finders agree on something, it'll stand out easily. FIXME:
implement this kwarg.
Returns
-------
str
The name of the generated plot file.
'''
import matplotlib.pyplot as plt
for pf in pflist:
if pf['method'] == 'pdm':
plt.plot(pf['periods'],
np.max(pf['lspvals'])/pf['lspvals'] - 1.0,
label='%s P=%.5f' % (pf['method'], pf['bestperiod']),
alpha=0.5)
else:
plt.plot(pf['periods'],
pf['lspvals']/np.max(pf['lspvals']),
label='%s P=%.5f' % (pf['method'], pf['bestperiod']),
alpha=0.5)
plt.xlabel('period [days]')
plt.ylabel('normalized periodogram power')
plt.xscale('log')
plt.legend()
plt.tight_layout()
plt.savefig(outfile)
plt.close('all')
return outfile
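# Usage sketch for `make_combined_periodogram`, assuming the astrobase
# periodbase module layout (`pgen_lsp` and `stellingwerf_pdm` are the GLS and
# PDM period-finders there); any period-finder returning the dict form
# described in the docstring would work the same way. The output filename is
# hypothetical.
def _example_make_combined_periodogram(times, mags, errs):
    from astrobase import periodbase
    gls = periodbase.pgen_lsp(times, mags, errs)
    pdm = periodbase.stellingwerf_pdm(times, mags, errs)
    return make_combined_periodogram([gls, pdm], 'combined-periodogram.png')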
def read_original_textlc(lcpath):
    '''Reads .epdlc and .tfalc light curves.
    Returns a labelled dict (if the LC is from before 2012) or an astropy
    table (if from 2012 onwards). Each has different keys that can be accessed
    via `.keys()`.
    Parameters
    ----------
    lcpath : str
        Path to the light curve file: a text file containing HAT LC data.
    Returns
    -------
    dict or astropy.table.Table
        The parsed light curve columns. None if the column format can't be
        handled.
    Example
    -------
    dat = read_original_textlc('HAT-115-0003266.epdlc')
    '''
LOGINFO('reading original HAT text LC: {:s}'.format(lcpath))
N_lines_to_parse_comments = 50
with open(lcpath, 'rb') as file:
head = [next(file) for ind in range(N_lines_to_parse_comments)]
N_comment_lines = len([l for l in head if l.decode('UTF-8')[0] == '#'])
    # if the entire parsed header is comments, there are too many comment
    # lines to find the first data line: fail out
    if N_comment_lines >= N_lines_to_parse_comments:
LOGERROR(
'LC file {fpath} has too many comment lines'.format(fpath=lcpath)
)
return None
first_data_line = list(
filter(None, head[N_comment_lines].decode('UTF-8').split())
)
N_cols = len(first_data_line)
# There are different column formats depending on when HAT pipeline was run
# also different formats for different types of LCs:
# pre-2012: .epdlc -> 17 columns
# pre-2012: .tfalc -> 20 columns
# post-2012: .epdlc or .tfalc -> 32 columns
if N_cols == 17:
colformat = 'pre2012-epdlc'
elif N_cols == 20:
colformat = 'pre2012-tfalc'
elif N_cols == 32:
colformat = 'post2012-hatlc'
else:
LOGERROR("can't handle this column format yet, "
"file: {fpath}, ncols: {ncols}".format(fpath=lcpath,
ncols=N_cols))
return None
# deal with pre-2012 column format
if colformat == 'pre2012-epdlc':
col_names = ['framekey','rjd',
'aim_000','aie_000','aiq_000',
'aim_001','aie_001','aiq_001',
'aim_002','aie_002','aiq_002',
'arm_000','arm_001','arm_002',
'aep_000','aep_001','aep_002']
col_dtypes = ['U8',float,
float,float,'U1',
float,float,'U1',
float,float,'U1',
float,float,float,
float,float,float]
dtype_pairs = [el for el in zip(col_names, col_dtypes)]
data = np.genfromtxt(lcpath, names=col_names, dtype=col_dtypes,
skip_header=N_comment_lines, delimiter=None)
out = {}
for ix in range(len(data.dtype.names)):
out[data.dtype.names[ix]] = data[data.dtype.names[ix]]
elif colformat == 'pre2012-tfalc':
col_names = ['framekey','rjd',
'aim_000','aie_000','aiq_000',
'aim_001','aie_001','aiq_001',
'aim_002','aie_002','aiq_002',
'arm_000','arm_001','arm_002',
'aep_000','aep_001','aep_002',
'atf_000','atf_001','atf_002']
col_dtypes = ['U8',float,
float,float,'U1',
float,float,'U1',
float,float,'U1',
float,float,float,
float,float,float,
float,float,float]
dtype_pairs = [el for el in zip(col_names, col_dtypes)]
data = np.genfromtxt(lcpath, names=col_names, dtype=col_dtypes,
skip_header=N_comment_lines, delimiter=None)
out = {}
for ix in range(len(data.dtype.names)):
out[data.dtype.names[ix]] = data[data.dtype.names[ix]]
elif colformat == 'post2012-hatlc':
col_names = ['hatid', 'framekey', 'fld', 'bjd',
'aim_000', 'aie_000', 'aiq_000',
'aim_001', 'aie_001', 'aiq_001',
'aim_002', 'aie_002', 'aiq_002',
'arm_000', 'arm_001', 'arm_002',
'aep_000', 'aep_001', 'aep_002',
'atf_000', 'atf_001', 'atf_002',
'xcc', 'ycc', 'bgv', 'bge',
'fsv', 'fdv', 'fkv',
'iha', 'izd', 'rjd']
out = astascii.read(lcpath, names=col_names, comment='#')
return out
def traptransit_fit_magseries(times, mags, errs,
transitparams,
sigclip=10.0,
plotfit=False,
magsarefluxes=False,
verbose=True):
'''This fits a trapezoid transit model to a magnitude time series.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to fit a trapezoid planet-transit model
to.
    transitparams : list of floats
These are initial parameters for the transit model fit. A list of the
following form is required::
transitparams = [transitperiod (time),
transitepoch (time),
transitdepth (flux or mags),
transitduration (phase),
ingressduration (phase)]
- for magnitudes -> `transitdepth` should be < 0
- for fluxes -> `transitdepth` should be > 0
If `transitepoch` is None, this function will do an initial spline fit
to find an approximate minimum of the phased light curve using the given
period.
The `transitdepth` provided is checked against the value of
`magsarefluxes`. if `magsarefluxes = True`, the `transitdepth` is forced
to be > 0; if `magsarefluxes` = False, the `transitdepth` is forced to
be < 0.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
    plotfit : str or False
        If this is a string, this function will make a plot of the fit to the
        mag/flux time-series and write the plot to the path specified here.
verbose : bool
If True, will indicate progress and warn of any problems.
Returns
-------
dict
This function returns a dict containing the model fit parameters, the
minimized chi-sq value and the reduced chi-sq value. The form of this
dict is mostly standardized across all functions in this module::
{
'fittype':'traptransit',
'fitinfo':{
'initialparams':the initial transit params provided,
'finalparams':the final model fit transit params ,
'finalparamerrs':formal errors in the params,
'leastsqfit':the full tuple returned by scipy.leastsq,
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
'ntransitpoints': the number of LC points in transit phase
},
'fitchisq': the minimized value of the fit's chi-sq,
'fitredchisq':the reduced chi-sq value,
'fitplotfile': the output fit plot if fitplot is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
# get rid of zero errs
nzind = np.nonzero(serrs)
stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
# check the transitparams
transitperiod, transitepoch, transitdepth = transitparams[0:3]
# check if we have a transitepoch to use
if transitepoch is None:
if verbose:
LOGWARNING('no transitepoch given in transitparams, '
'trying to figure it out automatically...')
# do a spline fit to figure out the approximate min of the LC
try:
spfit = spline_fit_magseries(times, mags, errs, transitperiod,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
verbose=verbose)
transitepoch = spfit['fitinfo']['fitepoch']
# if the spline-fit fails, try a savgol fit instead
except Exception as e:
sgfit = savgol_fit_magseries(times, mags, errs, transitperiod,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
verbose=verbose)
transitepoch = sgfit['fitinfo']['fitepoch']
# if everything failed, then bail out and ask for the transitepoch
finally:
if transitepoch is None:
LOGERROR("couldn't automatically figure out the transit epoch, "
"can't continue. please provide it in transitparams.")
# assemble the returndict
returndict = {
'fittype':'traptransit',
'fitinfo':{
'initialparams':transitparams,
'finalparams':None,
'leastsqfit':None,
'fitmags':None,
'fitepoch':None,
},
'fitchisq':np.nan,
'fitredchisq':np.nan,
'fitplotfile':None,
'magseries':{
'phase':None,
'times':None,
'mags':None,
'errs':None,
'magsarefluxes':magsarefluxes,
},
}
return returndict
else:
        # check the case where more than one transitepoch is returned
if transitepoch.size > 1:
if verbose:
LOGWARNING(
"could not auto-find a single minimum in LC for "
"transitepoch, using the first one returned"
)
transitparams[1] = transitepoch[0]
else:
if verbose:
LOGWARNING(
'using automatically determined transitepoch = %.5f'
% transitepoch
)
transitparams[1] = transitepoch.item()
# next, check the transitdepth and fix it to the form required
if magsarefluxes:
if transitdepth < 0.0:
transitparams[2] = -transitdepth
else:
if transitdepth > 0.0:
transitparams[2] = -transitdepth
# finally, do the fit
try:
leastsqfit = spleastsq(transits.trapezoid_transit_residual,
transitparams,
args=(stimes, smags, serrs),
full_output=True)
except Exception as e:
leastsqfit = None
# if the fit succeeded, then we can return the final parameters
if leastsqfit and leastsqfit[-1] in (1,2,3,4):
finalparams = leastsqfit[0]
covxmatrix = leastsqfit[1]
# calculate the chisq and reduced chisq
fitmags, phase, ptimes, pmags, perrs, n_transitpoints = (
transits.trapezoid_transit_func(
finalparams,
stimes, smags, serrs,
get_ntransitpoints=True
)
)
fitchisq = np.sum(
((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)
)
fitredchisq = fitchisq/(len(pmags) - len(finalparams) - 1)
# get the residual variance and calculate the formal 1-sigma errs on the
# final parameters
residuals = leastsqfit[2]['fvec']
residualvariance = (
np.sum(residuals*residuals)/(pmags.size - finalparams.size)
)
if covxmatrix is not None:
covmatrix = residualvariance*covxmatrix
stderrs = np.sqrt(np.diag(covmatrix))
else:
LOGERROR('covxmatrix not available, fit probably failed!')
stderrs = None
if verbose:
LOGINFO(
'final fit done. chisq = %.5f, reduced chisq = %.5f' %
(fitchisq, fitredchisq)
)
# get the fit epoch
fperiod, fepoch = finalparams[:2]
# assemble the returndict
returndict = {
'fittype':'traptransit',
'fitinfo':{
'initialparams':transitparams,
'finalparams':finalparams,
'finalparamerrs':stderrs,
'leastsqfit':leastsqfit,
'fitmags':fitmags,
'fitepoch':fepoch,
'ntransitpoints':n_transitpoints
},
'fitchisq':fitchisq,
'fitredchisq':fitredchisq,
'fitplotfile':None,
'magseries':{
'phase':phase,
'times':ptimes,
'mags':pmags,
'errs':perrs,
'magsarefluxes':magsarefluxes,
},
}
# make the fit plot if required
if plotfit and isinstance(plotfit, str):
make_fit_plot(phase, pmags, perrs, fitmags,
fperiod, ptimes.min(), fepoch,
plotfit,
magsarefluxes=magsarefluxes)
returndict['fitplotfile'] = plotfit
return returndict
# if the leastsq fit failed, return nothing
else:
LOGERROR('trapezoid-fit: least-squared fit to the light curve failed!')
# assemble the returndict
returndict = {
'fittype':'traptransit',
'fitinfo':{
'initialparams':transitparams,
'finalparams':None,
'finalparamerrs':None,
'leastsqfit':leastsqfit,
'fitmags':None,
'fitepoch':None,
'ntransitpoints':0
},
'fitchisq':np.nan,
'fitredchisq':np.nan,
'fitplotfile':None,
'magseries':{
'phase':None,
'times':None,
'mags':None,
'errs':None,
'magsarefluxes':magsarefluxes,
},
}
return returndict
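# A sketch of calling `traptransit_fit_magseries` on a flux time-series. All
# numbers in `transitparams` are hypothetical initial guesses; the depth is
# positive because magsarefluxes=True, and the None epoch triggers the
# spline/Savitzky-Golay auto-detection above.
def _example_traptransit_fit(times, fluxes, errs):
    transitparams = [3.5,    # transitperiod (days)
                     None,   # transitepoch (auto-detected)
                     0.01,   # transitdepth (relative flux, > 0 for fluxes)
                     0.1,    # transitduration (phase)
                     0.02]   # ingressduration (phase)
    return traptransit_fit_magseries(times, fluxes, errs, transitparams,
                                     sigclip=10.0, magsarefluxes=True)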
def _get_value(quantitystr, fitparams, fixedparams):
"""This decides if a value is to be fit for or is fixed in a model fit.
    Use this when you want the value of some parameter but you're not sure
    whether it's being fit or is fixed. Then, e.g. for `period`::
        period_value = _get_value('period', fitparams, fixedparams)
"""
# for Mandel-Agol fitting, sometimes we want to fix some parameters,
# and fit others. this function allows that flexibility.
fitparamskeys, fixedparamskeys = fitparams.keys(), fixedparams.keys()
if quantitystr in fitparamskeys:
quantity = fitparams[quantitystr]
elif quantitystr in fixedparamskeys:
quantity = fixedparams[quantitystr]
return quantity
def _transit_model(times, t0, per, rp, a, inc, ecc, w, u, limb_dark,
exp_time_minutes=2, supersample_factor=7):
'''This returns a BATMAN planetary transit model.
Parameters
----------
times : np.array
The times at which the model will be evaluated.
    t0 : float
        The mid-transit time (time of inferior conjunction).
per : float
The orbital period of the planet.
    rp : float
        The radius of the planet, in units of the stellar radius.
    a : float
        The semi-major axis of the planet's orbit, in units of the stellar
        radius.
inc : float
The orbital inclination (in degrees).
ecc : float
The eccentricity of the orbit.
w : float
The longitude of periastron (in degrees).
u : list of floats
The limb darkening coefficients specific to the limb darkening model
used.
limb_dark : {"uniform", "linear", "quadratic", "square-root", "logarithmic", "exponential", "power2", "custom"}
The type of limb darkening model to use. See the full list here:
https://www.cfa.harvard.edu/~lkreidberg/batman/tutorial.html#limb-darkening-options
exp_time_minutes : float
The amount of time to 'smear' the transit LC points over to simulate a
long exposure time.
    supersample_factor : int
The number of supersampled time data points to average the lightcurve
model over.
Returns
-------
(params, batman_model) : tuple
The returned tuple contains the params list and the generated
`batman.TransitModel` object.
'''
params = batman.TransitParams() # object to store transit parameters
    params.t0 = t0                       # mid-transit time (inferior conjunction)
params.per = per # orbital period
params.rp = rp # planet radius (in stellar radii)
params.a = a # semi-major axis (in stellar radii)
params.inc = inc # orbital inclination (in degrees)
params.ecc = ecc # the eccentricity of the orbit
params.w = w # longitude of periastron (in degrees)
params.u = u # limb darkening coefficient list
params.limb_dark = limb_dark # limb darkening model to use
t = times
m = batman.TransitModel(params, t, exp_time=exp_time_minutes/60./24.,
supersample_factor=supersample_factor)
return params, m
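# A sketch of evaluating the BATMAN model returned by `_transit_model` on a
# time grid around mid-transit; all parameter values are hypothetical.
def _example_transit_model():
    import numpy as np
    times = np.linspace(-0.1, 0.1, 500)  # days around t0
    params, m = _transit_model(times, t0=0.0, per=3.5, rp=0.1, a=8.5,
                               inc=87.0, ecc=0.0, w=90.0, u=[0.3, 0.2],
                               limb_dark='quadratic')
    # relative flux; dips to roughly 1 - rp**2 in transit
    return m.light_curve(params)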
def _log_prior_transit(theta, priorbounds):
'''
Assume priors on all parameters have uniform probability.
'''
# priorbounds contains the input priors, and because of how we previously
# sorted theta, its sorted keys tell us which parts of theta correspond to
# which physical quantities.
allowed = True
for ix, key in enumerate(np.sort(list(priorbounds.keys()))):
if priorbounds[key][0] < theta[ix] < priorbounds[key][1]:
allowed = True and allowed
else:
allowed = False
if allowed:
return 0.
return -np.inf
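# A sketch of the priorbounds dict parsed by `_log_prior_transit`: keys are
# sorted alphabetically to map onto theta, so theta here must be ordered
# (incl, period, rp, t0). All values are hypothetical.
def _example_log_prior_transit():
    import numpy as np
    priorbounds = {'rp':(0.10, 0.18), 'incl':(80.0, 90.0),
                   'period':(8.2, 8.4), 't0':(1325.5, 1326.3)}
    theta = np.array([85.0, 8.32, 0.14, 1325.9])  # sorted-key order
    return _log_prior_transit(theta, priorbounds)  # 0.0: inside the prior box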
def _log_likelihood_transit(theta, params, model, t, flux, err_flux,
priorbounds):
'''
Given a batman TransitModel and its proposed parameters (theta), update the
batman params object with the proposed parameters and evaluate the gaussian
likelihood.
Note: the priorbounds are only needed to parse theta.
'''
u = []
for ix, key in enumerate(sorted(priorbounds.keys())):
if key == 'rp':
params.rp = theta[ix]
elif key == 't0':
params.t0 = theta[ix]
elif key == 'sma':
params.a = theta[ix]
elif key == 'incl':
params.inc = theta[ix]
elif key == 'period':
params.per = theta[ix]
        elif key == 'ecc':
            params.ecc = theta[ix]
elif key == 'omega':
params.w = theta[ix]
elif key == 'u_linear':
u.append(theta[ix])
        # accept both spellings of the quadratic LD key used in this module
        elif key in ('u_quad', 'u_quadratic'):
u.append(theta[ix])
params.u = u
lc = model.light_curve(params)
residuals = flux - lc
log_likelihood = -0.5*(
np.sum((residuals/err_flux)**2 + np.log(2*np.pi*(err_flux)**2))
)
return log_likelihood
def _log_likelihood_transit_plus_line(theta, params, model, t, data_flux,
err_flux, priorbounds):
'''
Given a batman TransitModel and its proposed parameters (theta), update the
batman params object with the proposed parameters and evaluate the gaussian
likelihood.
Note: the priorbounds are only needed to parse theta.
'''
u = []
for ix, key in enumerate(sorted(priorbounds.keys())):
if key == 'rp':
params.rp = theta[ix]
elif key == 't0':
params.t0 = theta[ix]
elif key == 'sma':
params.a = theta[ix]
elif key == 'incl':
params.inc = theta[ix]
elif key == 'period':
params.per = theta[ix]
        elif key == 'ecc':
            params.ecc = theta[ix]
elif key == 'omega':
params.w = theta[ix]
elif key == 'u_linear':
u.append(theta[ix])
        # accept both spellings of the quadratic LD key used in this module
        elif key in ('u_quad', 'u_quadratic'):
u.append(theta[ix])
params.u = u
elif key == 'poly_order0':
poly_order0 = theta[ix]
elif key == 'poly_order1':
poly_order1 = theta[ix]
    # if 'poly_order0' was not in priorbounds, it was never assigned in the
    # loop above: default the line's intercept to zero in that case
    try:
        poly_order0
    except NameError:
        poly_order0 = 0
    transit = model.light_curve(params)
    line = poly_order0 + t*poly_order1
    full_model = transit + line
    residuals = data_flux - full_model
log_likelihood = -0.5*(
np.sum((residuals/err_flux)**2 + np.log(2*np.pi*(err_flux)**2))
)
return log_likelihood
def log_posterior_transit(theta, params, model, t, flux, err_flux, priorbounds):
'''
Evaluate posterior probability given proposed model parameters and
the observed flux timeseries.
'''
lp = _log_prior_transit(theta, priorbounds)
if not np.isfinite(lp):
return -np.inf
else:
return lp + _log_likelihood_transit(theta, params, model, t, flux,
err_flux, priorbounds)
def log_posterior_transit_plus_line(theta, params, model, t, flux, err_flux,
priorbounds):
'''
Evaluate posterior probability given proposed model parameters and
the observed flux timeseries.
'''
lp = _log_prior_transit_plus_line(theta, priorbounds)
if not np.isfinite(lp):
return -np.inf
else:
return (
lp + _log_likelihood_transit_plus_line(
theta, params, model, t, flux, err_flux, priorbounds)
)
def mandelagol_fit_magseries(
times, mags, errs,
fitparams,
priorbounds,
fixedparams,
trueparams=None,
burninpercent=0.3,
plotcorner=False,
samplesavpath=False,
n_walkers=50,
n_mcmc_steps=400,
eps=1e-4,
skipsampling=False,
overwriteexistingsamples=False,
mcmcprogressbar=False,
plotfit=False,
magsarefluxes=False,
sigclip=10.0,
verbose=True,
nworkers=4
):
'''This fits a Mandel & Agol (2002) planetary transit model to a flux time
series. You can fit and fix whatever parameters you want.
It relies on Kreidberg (2015)'s BATMAN implementation for the transit model,
emcee to sample the posterior (Foreman-Mackey et al 2013), `corner` to plot
it, and `h5py` to save the samples. See e.g., Claret's work for good guesses
of star-appropriate limb-darkening parameters.
NOTE: this only works for flux time-series at the moment.
NOTE: Between the `fitparams`, `priorbounds`, and `fixedparams` dicts, you
must specify all of the planetary transit parameters required by BATMAN:
    `['t0', 'rp', 'sma', 'incl', 'u', 'ecc', 'omega', 'period']`, or the
BATMAN model will fail to initialize.
Parameters
----------
times,mags,errs : np.array
        The input flux time-series to fit the transit model to.
fitparams : dict
This is the initial parameter guesses for MCMC, found e.g., by
BLS. The key string format must not be changed, but any parameter can be
either "fit" or "fixed". If it is "fit", it must have a corresponding
prior. For example::
fitparams = {'t0':1325.9, 'rp':np.sqrt(fitd['transitdepth']),
'sma':6.17, 'incl':85, 'u':[0.3, 0.2]}
        where 'u' is a list of the limb darkening parameters, linear first,
        then quadratic. Quadratic limb darkening is the only form implemented.
priorbounds : dict
This sets the lower & upper bounds on uniform prior, e.g.::
priorbounds = {'rp':(0.135, 0.145), 'u_linear':(0.3-1, 0.3+1),
'u_quad':(0.2-1, 0.2+1), 't0':(np.min(time),
np.max(time)), 'sma':(6,6.4), 'incl':(80,90)}
fixedparams : dict
This sets which parameters are fixed, and their values. For example::
fixedparams = {'ecc':0.,
'omega':90.,
'limb_dark':'quadratic',
'period':fitd['period'] }
`limb_dark` must be "quadratic". It's "fixed", because once you
choose your limb-darkening model, it's fixed.
    trueparams : dict or None
        The true parameter values you're fitting for, if they're known (e.g.,
        for a known planet, or fake data), keyed like `priorbounds`. Only used
        for plotting purposes.
burninpercent : float
The percent of MCMC samples to discard as burn-in.
plotcorner : str or False
If this is a str, points to the path of output corner plot that will be
generated for this MCMC run.
samplesavpath : str
This must be provided so `emcee` can save its MCMC samples to disk as
HDF5 files. This will set the path of the output HDF5file written.
n_walkers : int
The number of MCMC walkers to use.
n_mcmc_steps : int
The number of MCMC steps to take.
    eps : float
        The radius of the Gaussian ball in the `n_dim`-dimensional parameter
        space used to initialize the MCMC walkers.
skipsampling : bool
If you've already collected MCMC samples, and you do not want any more
sampling (e.g., just make the plots), set this to be True.
overwriteexistingsamples : bool
If you've collected samples, but you want to overwrite them, set this to
True. Usually, it should be False, which appends samples to
`samplesavpath` HDF5 file.
mcmcprogressbar : bool
If True, will show a progress bar for the MCMC process.
plotfit: str or bool
If a str, indicates the path of the output fit plot file. If False, no
fit plot will be made.
magsarefluxes : bool
This indicates if the input measurements in `mags` are actually fluxes.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
verbose : bool
If True, will indicate MCMC progress.
nworkers : int
The number of parallel workers to launch for MCMC.
Returns
-------
dict
This function returns a dict containing the model fit parameters and
other fit information. The form of this dict is mostly standardized
across all functions in this module::
{
'fittype':'mandelagol',
'fitinfo':{
'initialparams':the initial transit params provided,
'fixedparams':the fixed transit params provided,
'finalparams':the final model fit transit params,
'finalparamerrs':formal errors in the params,
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
},
'fitplotfile': the output fit plot if fitplot is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
'''
from multiprocessing import Pool
fittype = 'mandelagol'
if not magsarefluxes:
        raise NotImplementedError(
            'only flux time-series are supported at the moment '
            '(magsarefluxes=False is not implemented yet)'
        )
if not samplesavpath:
raise ValueError(
'This function requires that you save the samples somewhere'
)
if not mandel_agol_dependencies:
raise ImportError(
'This function depends on BATMAN, emcee>3.0, corner, and h5py.'
)
# sigma clip and get rid of zero errs
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
nzind = np.nonzero(serrs)
stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
init_period = _get_value('period', fitparams, fixedparams)
init_epoch = _get_value('t0', fitparams, fixedparams)
init_rp = _get_value('rp', fitparams, fixedparams)
init_sma = _get_value('sma', fitparams, fixedparams)
init_incl = _get_value('incl', fitparams, fixedparams)
init_ecc = _get_value('ecc', fitparams, fixedparams)
init_omega = _get_value('omega', fitparams, fixedparams)
limb_dark = _get_value('limb_dark', fitparams, fixedparams)
init_u = _get_value('u', fitparams, fixedparams)
if not limb_dark == 'quadratic':
raise ValueError(
'only quadratic limb-darkening is supported at the moment'
)
# initialize the model and calculate the initial model light-curve
init_params, init_m = _transit_model(stimes, init_epoch, init_period,
init_rp, init_sma, init_incl, init_ecc,
init_omega, init_u, limb_dark)
init_flux = init_m.light_curve(init_params)
# guessed initial params. give nice guesses, or else emcee struggles.
theta, fitparamnames = [], []
for k in np.sort(list(fitparams.keys())):
        if isinstance(fitparams[k], float) or isinstance(fitparams[k], int):
            theta.append(fitparams[k])
            # record the parameter *name* (used as a corner plot label below)
            fitparamnames.append(k)
        elif isinstance(fitparams[k], list):
            if not len(fitparams[k]) == 2:
                raise ValueError('should only be quadratic LD coeffs')
            theta.append(fitparams[k][0])
            theta.append(fitparams[k][1])
            fitparamnames.append(k + '_linear')
            fitparamnames.append(k + '_quad')
# initialize sampler
n_dim = len(theta)
initial_position_vec = [theta + eps*np.random.randn(n_dim)
for i in range(n_walkers)]
# run the MCMC, unless you just want to load the available samples
if not skipsampling:
backend = emcee.backends.HDFBackend(samplesavpath)
if overwriteexistingsamples:
LOGWARNING(
'erased samples previously at {:s}'.format(samplesavpath)
)
backend.reset(n_walkers, n_dim)
# if this is the first run, then start from a gaussian ball.
# otherwise, resume from the previous samples.
starting_positions = initial_position_vec
isfirstrun = True
if os.path.exists(backend.filename):
if backend.iteration > 1:
starting_positions = None
isfirstrun = False
if verbose and isfirstrun:
LOGINFO(
'start {:s} MCMC with {:d} dims, {:d} steps, {:d} walkers,'.
format(fittype, n_dim, n_mcmc_steps, n_walkers) +
' {:d} threads'.format(nworkers)
)
elif verbose and not isfirstrun:
LOGINFO(
'continue {:s} with {:d} dims, {:d} steps, {:d} walkers, '.
format(fittype, n_dim, n_mcmc_steps, n_walkers) +
'{:d} threads'.format(nworkers)
)
import sys
if sys.version_info >= (3, 3):
with Pool(nworkers) as pool:
sampler = emcee.EnsembleSampler(
n_walkers, n_dim, log_posterior_transit,
args=(init_params, init_m, stimes,
smags, serrs, priorbounds),
pool=pool,
backend=backend
)
sampler.run_mcmc(starting_positions, n_mcmc_steps,
progress=mcmcprogressbar)
elif sys.version_info < (3, 3):
sampler = emcee.EnsembleSampler(
n_walkers, n_dim, log_posterior_transit,
args=(init_params, init_m, stimes, smags, serrs, priorbounds),
threads=nworkers,
backend=backend
)
sampler.run_mcmc(starting_positions, n_mcmc_steps,
progress=mcmcprogressbar)
if verbose:
LOGINFO(
'ended {:s} MCMC run with {:d} steps, {:d} walkers, '.format(
fittype, n_mcmc_steps, n_walkers
) + '{:d} threads'.format(nworkers)
)
reader = emcee.backends.HDFBackend(samplesavpath)
n_to_discard = int(burninpercent*n_mcmc_steps)
samples = reader.get_chain(discard=n_to_discard, flat=True)
log_prob_samples = reader.get_log_prob(discard=n_to_discard, flat=True)
log_prior_samples = reader.get_blobs(discard=n_to_discard, flat=True)
# Get best-fit parameters and their 1-sigma error bars
fit_statistics = list(
map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
list(zip( *np.percentile(samples, [15.85, 50, 84.15], axis=0))))
)
medianparams, std_perrs, std_merrs = {}, {}, {}
for ix, k in enumerate(np.sort(list(priorbounds.keys()))):
medianparams[k] = fit_statistics[ix][0]
std_perrs[k] = fit_statistics[ix][1]
std_merrs[k] = fit_statistics[ix][2]
stderrs = {'std_perrs':std_perrs, 'std_merrs':std_merrs}
per = _get_value('period', medianparams, fixedparams)
t0 = _get_value('t0', medianparams, fixedparams)
rp = _get_value('rp', medianparams, fixedparams)
sma = _get_value('sma', medianparams, fixedparams)
incl = _get_value('incl', medianparams, fixedparams)
ecc = _get_value('ecc', medianparams, fixedparams)
omega = _get_value('omega', medianparams, fixedparams)
limb_dark = _get_value('limb_dark', medianparams, fixedparams)
try:
u = fixedparams['u']
except Exception as e:
u = [medianparams['u_linear'], medianparams['u_quad']]
fit_params, fit_m = _transit_model(stimes, t0, per, rp, sma, incl, ecc,
omega, u, limb_dark)
fitmags = fit_m.light_curve(fit_params)
fepoch = t0
# assemble the return dictionary
returndict = {
'fittype':fittype,
'fitinfo':{
'initialparams':fitparams,
'initialmags':init_flux,
'fixedparams':fixedparams,
'finalparams':medianparams,
'finalparamerrs':stderrs,
'fitmags':fitmags,
'fitepoch':fepoch,
},
'fitplotfile':None,
'magseries':{
'times':stimes,
'mags':smags,
'errs':serrs,
'magsarefluxes':magsarefluxes,
},
}
# make the output corner plot, and lightcurve plot if desired
if plotcorner:
if isinstance(trueparams,dict):
trueparamkeys = np.sort(list(trueparams.keys()))
truelist = [trueparams[k] for k in trueparamkeys]
fig = corner.corner(
samples,
labels=trueparamkeys,
truths=truelist,
quantiles=[0.1585, 0.5, .8415], show_titles=True
)
else:
fig = corner.corner(samples,
labels=fitparamnames,
quantiles=[0.1585, 0.5, .8415],
show_titles=True)
plt.savefig(plotcorner, dpi=300)
if verbose:
LOGINFO('saved {:s}'.format(plotcorner))
if plotfit and isinstance(plotfit, str):
f, ax = plt.subplots(figsize=(8,6))
ax.scatter(stimes, smags, c='k', alpha=0.5, label='observed',
zorder=1, s=1.5, rasterized=True, linewidths=0)
ax.scatter(stimes, init_flux, c='r', alpha=1,
s=3.5, zorder=2, rasterized=True, linewidths=0,
label='initial guess')
ax.scatter(
stimes, fitmags, c='b', alpha=1,
s=1.5, zorder=3, rasterized=True, linewidths=0,
label='fit {:d} dims'.format(
len(fitparamnames))
)
ax.legend(loc='best')
ax.set(xlabel='time [days]', ylabel='relative flux')
f.savefig(plotfit, dpi=300, bbox_inches='tight')
if verbose:
LOGINFO('saved {:s}'.format(plotfit))
returndict['fitplotfile'] = plotfit
return returndict
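# An end-to-end call sketch for `mandelagol_fit_magseries`, mirroring the
# fitparams/priorbounds/fixedparams layout described in the docstring. All
# numeric values and the HDF5 path are hypothetical.
def _example_mandelagol_fit(times, fluxes, errs):
    fitparams = {'t0':1325.9, 'rp':0.14, 'sma':6.17, 'incl':85.0,
                 'u':[0.3, 0.2]}
    priorbounds = {'t0':(1325.5, 1326.3), 'rp':(0.10, 0.18),
                   'sma':(5.0, 7.5), 'incl':(80.0, 90.0),
                   'u_linear':(-0.7, 1.3), 'u_quad':(-0.8, 1.2)}
    fixedparams = {'ecc':0.0, 'omega':90.0, 'limb_dark':'quadratic',
                   'period':8.32}
    return mandelagol_fit_magseries(times, fluxes, errs,
                                    fitparams, priorbounds, fixedparams,
                                    samplesavpath='mandelagol-samples.h5',
                                    n_walkers=50, n_mcmc_steps=400,
                                    magsarefluxes=True, sigclip=10.0,
                                    nworkers=4)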
def mandelagol_and_line_fit_magseries(
times, mags, errs,
fitparams,
priorbounds,
fixedparams,
trueparams=None,
burninpercent=0.3,
plotcorner=False,
timeoffset=0,
samplesavpath=False,
n_walkers=50,
n_mcmc_steps=400,
eps=1e-4,
skipsampling=False,
overwriteexistingsamples=False,
mcmcprogressbar=False,
plotfit=False,
scatterxdata=None,
scatteryaxes=None,
magsarefluxes=True,
sigclip=10.0,
verbose=True,
nworkers=4
):
'''The model fit by this function is: a Mandel & Agol (2002) transit, PLUS a
line. You can fit and fix whatever parameters you want.
A typical use case: you want to measure transit times of individual SNR >~
50 transits. You fix all the transit parameters except for the mid-time,
and also fit for a line locally.
NOTE: this only works for flux time-series at the moment.
NOTE: Between the `fitparams`, `priorbounds`, and `fixedparams` dicts, you
must specify all of the planetary transit parameters required by BATMAN and
    the parameters for the line fit: `['t0', 'rp', 'sma', 'incl', 'u', 'ecc',
    'omega', 'period', 'poly_order0', 'poly_order1']`, or the BATMAN model
will fail to initialize.
Parameters
----------
times,mags,errs : np.array
        The input flux time-series to fit the transit-plus-line model to.
fitparams : dict
This is the initial parameter guesses for MCMC, found e.g., by
BLS. The key string format must not be changed, but any parameter can be
either "fit" or "fixed". If it is "fit", it must have a corresponding
prior. For example::
fitparams = {'t0':1325.9,
'poly_order0':1,
'poly_order1':0.}
where `t0` is the time of transit-center for a reference transit.
`poly_order0` corresponds to the intercept of the line, `poly_order1` is
the slope.
priorbounds : dict
This sets the lower & upper bounds on uniform prior, e.g.::
priorbounds = {'t0':(np.min(time), np.max(time)),
'poly_order0':(0.5,1.5),
'poly_order1':(-0.5,0.5) }
fixedparams : dict
This sets which parameters are fixed, and their values. For example::
fixedparams = {'ecc':0.,
'omega':90.,
'limb_dark':'quadratic',
'period':fitd['period'],
'rp':np.sqrt(fitd['transitdepth']),
'sma':6.17, 'incl':85, 'u':[0.3, 0.2]}
`limb_dark` must be "quadratic". It's "fixed", because once you
choose your limb-darkening model, it's fixed.
trueparams : list of floats
The true parameter values you're fitting for, if they're known (e.g., a
known planet, or fake data). Only for plotting purposes.
burninpercent : float
The percent of MCMC samples to discard as burn-in.
plotcorner : str or False
If this is a str, points to the path of output corner plot that will be
generated for this MCMC run.
    timeoffset : float
        If the input times are offset by some constant (e.g., a large BJD base
        value subtracted for numerical convenience), give that constant here.
        It is added back to the times, `t0`, and `fitepoch` in the output.
samplesavpath : str
This must be provided so `emcee` can save its MCMC samples to disk as
HDF5 files. This will set the path of the output HDF5file written.
n_walkers : int
The number of MCMC walkers to use.
n_mcmc_steps : int
The number of MCMC steps to take.
    eps : float
        The radius of the Gaussian ball in the `n_dim`-dimensional parameter
        space used to initialize the MCMC walkers.
skipsampling : bool
If you've already collected MCMC samples, and you do not want any more
sampling (e.g., just make the plots), set this to be True.
overwriteexistingsamples : bool
If you've collected samples, but you want to overwrite them, set this to
True. Usually, it should be False, which appends samples to
`samplesavpath` HDF5 file.
mcmcprogressbar : bool
If True, will show a progress bar for the MCMC process.
plotfit: str or bool
If a str, indicates the path of the output fit plot file. If False, no
fit plot will be made.
scatterxdata : np.array or None
Use this to overplot x,y scatter points on the output model/data
lightcurve (e.g., to highlight bad data, or to indicate an ephemeris),
this can take a `np.ndarray` with the same units as `times`.
scatteryaxes : np.array or None
Use this to provide the y-values for scatterxdata, in units of fraction
of an axis.
magsarefluxes : bool
This indicates if the input measurements in `mags` are actually fluxes.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
verbose : bool
If True, will indicate MCMC progress.
nworkers : int
The number of parallel workers to launch for MCMC.
Returns
-------
dict
This function returns a dict containing the model fit parameters and
other fit information. The form of this dict is mostly standardized
across all functions in this module::
{
'fittype':'mandelagol_and_line',
'fitinfo':{
'initialparams':the initial transit params provided,
'fixedparams':the fixed transit params provided,
'finalparams':the final model fit transit params,
'finalparamerrs':formal errors in the params,
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
},
'fitplotfile': the output fit plot if fitplot is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
'''
from multiprocessing import Pool
fittype = 'mandelagol_and_line'
if not magsarefluxes:
        raise NotImplementedError(
            'only flux time-series are supported at the moment '
            '(magsarefluxes=False is not implemented yet)'
        )
if not samplesavpath:
raise ValueError(
'This function requires that you save the samples somewhere'
)
if not mandel_agol_dependencies:
raise ImportError(
'This function depends on BATMAN, emcee>3.0, corner, and h5py.'
)
# sigma clip and get rid of zero errs
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
nzind = np.nonzero(serrs)
stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
init_period = _get_value('period', fitparams, fixedparams)
init_epoch = _get_value('t0', fitparams, fixedparams)
init_rp = _get_value('rp', fitparams, fixedparams)
init_sma = _get_value('sma', fitparams, fixedparams)
init_incl = _get_value('incl', fitparams, fixedparams)
init_ecc = _get_value('ecc', fitparams, fixedparams)
init_omega = _get_value('omega', fitparams, fixedparams)
limb_dark = _get_value('limb_dark', fitparams, fixedparams)
init_u = _get_value('u', fitparams, fixedparams)
init_poly_order0 = _get_value('poly_order0', fitparams, fixedparams)
init_poly_order1 = _get_value('poly_order1', fitparams, fixedparams)
if not limb_dark == 'quadratic':
raise ValueError(
'only quadratic limb-darkening is supported at the moment'
)
# initialize the model and calculate the initial model light-curve
init_params, init_m = _transit_model(
stimes, init_epoch, init_period, init_rp, init_sma, init_incl,
init_ecc, init_omega, init_u, limb_dark)
init_flux = (
init_m.light_curve(init_params) +
init_poly_order0 + init_poly_order1*stimes
)
# guessed initial params. give nice guesses, or else emcee struggles.
theta, fitparamnames = [], []
for k in np.sort(list(fitparams.keys())):
        if isinstance(fitparams[k], float) or isinstance(fitparams[k], int):
            theta.append(fitparams[k])
            # record the parameter *name*, not its value
            fitparamnames.append(k)
        elif isinstance(fitparams[k], list):
            if not len(fitparams[k]) == 2:
                raise ValueError('should only be quadratic LD coeffs')
            theta.append(fitparams[k][0])
            theta.append(fitparams[k][1])
            fitparamnames.append(k + '_linear')
            fitparamnames.append(k + '_quad')
# initialize sampler
n_dim = len(theta)
# run the MCMC, unless you just want to load the available samples
if not skipsampling:
backend = emcee.backends.HDFBackend(samplesavpath)
if overwriteexistingsamples:
LOGWARNING(
'erased samples previously at {:s}'.format(samplesavpath)
)
backend.reset(n_walkers, n_dim)
# if this is the first run, then start from a gaussian ball, centered
# on the maximum likelihood solution. otherwise, resume from the
# previous samples.
def nll(*args):
return -_log_likelihood_transit_plus_line(*args)
soln = spminimize(
nll, theta, method='BFGS',
args=(init_params, init_m, stimes, smags, serrs, priorbounds)
)
        theta_ml = soln.x
        # theta is ordered by the alphabetically sorted fitparams keys, i.e.
        # poly_order0, poly_order1, rp, t0 for the typical fit here
        ml_poly_order0 = theta_ml[0]
        ml_poly_order1 = theta_ml[1]
        ml_rp = theta_ml[2]
        ml_t0 = theta_ml[3]
ml_params, ml_m = _transit_model(stimes, ml_t0, init_period,
ml_rp, init_sma, init_incl,
init_ecc, init_omega, init_u,
limb_dark)
ml_mags = (
ml_m.light_curve(ml_params) +
ml_poly_order0 + ml_poly_order1*stimes
)
initial_position_vec = [theta_ml + eps*np.random.randn(n_dim)
for i in range(n_walkers)]
starting_positions = initial_position_vec
isfirstrun = True
if os.path.exists(backend.filename):
if backend.iteration > 1:
starting_positions = None
isfirstrun = False
if verbose and isfirstrun:
LOGINFO(
'start {:s} MCMC with {:d} dims, {:d} steps, {:d} walkers,'.
format(fittype, n_dim, n_mcmc_steps, n_walkers) +
' {:d} threads'.format(nworkers)
)
elif verbose and not isfirstrun:
LOGINFO(
'continue {:s} with {:d} dims, {:d} steps, {:d} walkers, '.
format(fittype, n_dim, n_mcmc_steps, n_walkers) +
'{:d} threads'.format(nworkers)
)
with Pool(nworkers) as pool:
sampler = emcee.EnsembleSampler(
n_walkers, n_dim, log_posterior_transit_plus_line,
args=(init_params, init_m, stimes, smags, serrs, priorbounds),
pool=pool,
backend=backend
)
sampler.run_mcmc(starting_positions, n_mcmc_steps,
progress=mcmcprogressbar)
if verbose:
LOGINFO(
'ended {:s} MCMC run with {:d} steps, {:d} walkers, '.format(
fittype, n_mcmc_steps, n_walkers
) + '{:d} threads'.format(nworkers)
)
reader = emcee.backends.HDFBackend(samplesavpath)
n_to_discard = int(burninpercent*n_mcmc_steps)
samples = reader.get_chain(discard=n_to_discard, flat=True)
log_prob_samples = reader.get_log_prob(discard=n_to_discard, flat=True)
log_prior_samples = reader.get_blobs(discard=n_to_discard, flat=True)
# Get best-fit parameters and their 1-sigma error bars
fit_statistics = list(
    map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
        list(zip(*np.percentile(samples, [15.85, 50, 84.15], axis=0))))
)
medianparams, std_perrs, std_merrs = {}, {}, {}
for ix, k in enumerate(np.sort(list(priorbounds.keys()))):
medianparams[k] = fit_statistics[ix][0]
std_perrs[k] = fit_statistics[ix][1]
std_merrs[k] = fit_statistics[ix][2]
stderrs = {'std_perrs':std_perrs, 'std_merrs':std_merrs}
per = _get_value('period', medianparams, fixedparams)
t0 = _get_value('t0', medianparams, fixedparams)
rp = _get_value('rp', medianparams, fixedparams)
sma = _get_value('sma', medianparams, fixedparams)
incl = _get_value('incl', medianparams, fixedparams)
ecc = _get_value('ecc', medianparams, fixedparams)
omega = _get_value('omega', medianparams, fixedparams)
limb_dark = _get_value('limb_dark', medianparams, fixedparams)
try:
u = fixedparams['u']
except Exception as e:
u = [medianparams['u_linear'], medianparams['u_quad']]
poly_order0 = _get_value('poly_order0', medianparams, fixedparams)
poly_order1 = _get_value('poly_order1', medianparams, fixedparams)
# initialize the model and calculate the final model light-curve
fit_params, fit_m = _transit_model(stimes, t0, per, rp, sma, incl, ecc,
omega, u, limb_dark)
fitmags = (
fit_m.light_curve(fit_params) +
poly_order0 + poly_order1*stimes
)
fepoch = t0
# assemble the return dictionary
medianparams['t0'] += timeoffset
returndict = {
'fittype':fittype,
'fitinfo':{
'initialparams':fitparams,
'initialmags':init_flux,
'fixedparams':fixedparams,
'finalparams':medianparams,
'finalparamerrs':stderrs,
'fitmags':fitmags,
'fitepoch':fepoch+timeoffset,
},
'fitplotfile':None,
'magseries':{
'times':stimes+timeoffset,
'mags':smags,
'errs':serrs,
'magsarefluxes':magsarefluxes,
},
}
# make the output corner plot, and lightcurve plot if desired
if plotcorner:
fig = corner.corner(
samples,
labels=['line intercept-1', 'line slope',
'rp','t0-{:.4f}'.format(timeoffset)],
truths=[ml_poly_order0, ml_poly_order1, ml_rp, ml_t0],
        quantiles=[0.1585, 0.5, 0.8415], show_titles=True
)
plt.savefig(plotcorner, dpi=300)
if verbose:
LOGINFO('saved {:s}'.format(plotcorner))
if plotfit and isinstance(plotfit, str):
plt.close('all')
f, (a0, a1) = plt.subplots(nrows=2, ncols=1, sharex=True,
figsize=(8,5),
gridspec_kw={'height_ratios':[3, 1]})
a0.scatter(stimes, smags, c='k', alpha=0.9, label='data', zorder=1,
s=10, rasterized=True, linewidths=0)
DEBUGGING = False
if DEBUGGING:
a0.scatter(stimes, init_flux, c='r', alpha=1, s=3.5, zorder=2,
rasterized=True, linewidths=0,
label='initial guess for ml')
a0.scatter(stimes, ml_mags, c='g', alpha=1, s=3.5, zorder=2,
rasterized=True, linewidths=0, label='max likelihood')
a0.plot(
stimes, fitmags, c='b',
zorder=0, rasterized=True, lw=2, alpha=0.4,
label='{:s} fit, {:d} dims'.format(fittype, len(fitparamnames))
)
a1.scatter(
stimes, smags-fitmags, c='k', alpha=0.9,
rasterized=True, s=10, linewidths=0
)
if scatterxdata and scatteryaxes:
import matplotlib.transforms as transforms
for a in [a0, a1]:
transform = transforms.blended_transform_factory(
a.transData, a.transAxes
)
a.scatter(scatterxdata, scatteryaxes, c='r', alpha=0.9,
zorder=2, s=10, rasterized=True, linewidths=0,
marker="^", transform=transform)
a1.set_xlabel('time-t0 [days]')
a0.set_ylabel('relative flux')
a1.set_ylabel('residual')
a0.legend(loc='best', fontsize='x-small')
for a in [a0, a1]:
a.get_yaxis().set_tick_params(which='both', direction='in')
a.get_xaxis().set_tick_params(which='both', direction='in')
f.tight_layout(h_pad=0, w_pad=0)
f.savefig(plotfit, dpi=300, bbox_inches='tight')
if verbose:
LOGINFO('saved {:s}'.format(plotfit))
returndict['fitplotfile'] = plotfit
return returndict
|
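# A minimal standalone sketch (not part of the function above) of the
# percentile-based parameter summary used there: given flattened MCMC
# samples of shape (n_samples, n_dim), the median of each column is the
# parameter estimate and the 15.85/84.15 percentiles give the -1/+1
# sigma bounds.
import numpy as np

samples = np.random.randn(10000, 2)*np.array([0.1, 2.0]) + np.array([1.0, 5.0])
lo, med, hi = np.percentile(samples, [15.85, 50.0, 84.15], axis=0)
for i in range(samples.shape[1]):
    print('param %d: %.4f +%.4f -%.4f' %
          (i, med[i], hi[i] - med[i], med[i] - lo[i]))
|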
def list_trilegal_filtersystems():
'''
This just lists all the filter systems available for TRILEGAL.
'''
print('%-40s %s' % ('FILTER SYSTEM NAME','DESCRIPTION'))
print('%-40s %s' % ('------------------','-----------'))
for key in sorted(TRILEGAL_FILTER_SYSTEMS.keys()):
print('%-40s %s' % (key, TRILEGAL_FILTER_SYSTEMS[key]['desc']))
|
def query_galcoords(gal_lon,
gal_lat,
filtersystem='sloan_2mass',
field_deg2=1.0,
usebinaries=True,
extinction_sigma=0.1,
magnitude_limit=26.0,
maglim_filtercol=4,
trilegal_version=1.6,
extraparams=None,
forcefetch=False,
cachedir='~/.astrobase/trilegal-cache',
verbose=True,
timeout=60.0,
refresh=150.0,
maxtimeout=700.0):
'''This queries the TRILEGAL model form and downloads the model table.
Parameters
----------
gal_lon,gal_lat : float
These are the center galactic longitude and latitude in degrees.
filtersystem : str
This is a key in the TRILEGAL_FILTER_SYSTEMS dict. Use the function
:py:func:`astrobase.services.trilegal.list_trilegal_filtersystems` to
see a nicely formatted table with the key and description for each of
these.
field_deg2 : float
The area of the simulated field in square degrees.
usebinaries : bool
If this is True, binaries will be present in the model results.
extinction_sigma : float
This is the applied std dev around the `Av_extinction` value for the
galactic coordinates requested.
magnitude_limit : float
This is the limiting magnitude of the simulation in the
`maglim_filtercol` band index of the filter system chosen.
maglim_filtercol : int
The index in the filter system list of the magnitude limiting band.
trilegal_version : float
    This is the version of the TRILEGAL form to use. This can usually be
    left as-is.
extraparams : dict or None
This is a dict that can be used to override parameters of the model
other than the basic ones used for input to this function. All
parameters are listed in `TRILEGAL_DEFAULT_PARAMS` above. See:
http://stev.oapd.inaf.it/cgi-bin/trilegal
for explanations of these parameters.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
Returns
-------
dict
This returns a dict of the form::
{'params':the input param dict used,
'extraparams':any extra params used,
'provenance':'cached' or 'new download',
'tablefile':the path on disk to the downloaded model text file}
'''
# these are the default parameters
inputparams = copy.deepcopy(TRILEGAL_INPUT_PARAMS)
# update them with the input params
inputparams['binary_kind'] = '1' if usebinaries else '0'
inputparams['extinction_sigma'] = '%.2f' % extinction_sigma
inputparams['field'] = '%.2f' % field_deg2
inputparams['icm_lim'] = str(maglim_filtercol)
inputparams['mag_lim'] = '%.2f' % magnitude_limit
inputparams['trilegal_version'] = str(trilegal_version)
# get the coordinates
inputparams['gc_l'] = '%.3f' % gal_lon
inputparams['gc_b'] = '%.3f' % gal_lat
# the TRILEGAL form only accepts field areas of up to 10 deg^2
if field_deg2 > 10.0:
LOGERROR("can't have an area > 10 square degrees")
return None
# get the extinction parameter. this is by default A[inf] in V. we'll use
# the value from SF11 generated by the 2MASS DUST service
extinction_info = dust.extinction_query(gal_lon,
gal_lat,
coordtype='galactic',
forcefetch=forcefetch,
verbose=verbose,
timeout=timeout)
try:
Av_infinity = extinction_info['Amag']['CTIO V']['sf11']
inputparams['extinction_infty'] = '%.5f' % Av_infinity
except Exception as e:
LOGEXCEPTION(
'could not get A_V_SF11 from 2MASS DUST '
'for Galactic coords: (%.3f, %.3f), '
'using default value of %s' % (gal_lon, gal_lat,
inputparams['extinction_infty'])
)
# get the filter system table
if filtersystem in TRILEGAL_FILTER_SYSTEMS:
inputparams['photsys_file'] = (
TRILEGAL_FILTER_SYSTEMS[filtersystem]['table']
)
else:
LOGERROR('filtersystem name: %s is not in the table of known '
'filter systems.\n'
'Try the trilegal.list_trilegal_filtersystems() function '
'to see all available filter systems.' % filtersystem)
return None
# override the complete form param dict now with our params
trilegal_params = copy.deepcopy(TRILEGAL_DEFAULT_PARAMS)
trilegal_params.update(inputparams)
# override the final params with any extraparams
if extraparams and isinstance(extraparams, dict):
trilegal_params.update(extraparams)
# see if the cachedir exists
if '~' in cachedir:
cachedir = os.path.expanduser(cachedir)
if not os.path.exists(cachedir):
os.makedirs(cachedir)
# generate the cachefname and look for it
cachekey = repr(inputparams)
cachekey = hashlib.sha256(cachekey.encode()).hexdigest()
cachefname = os.path.join(cachedir, '%s.txt.gz' % cachekey)
provenance = 'cache'
lockfile = os.path.join(cachedir, 'LOCK-%s' % cachekey)
# run the query if results not found in the cache
if forcefetch or (not os.path.exists(cachefname)):
# first, check if a query like this is running already
if os.path.exists(lockfile):
with open(lockfile,'r') as infd:
lock_contents = infd.read()
lock_contents = lock_contents.replace('\n','')
        LOGERROR('this query appears to have been running since %s '
                 'in another instance, not running it again' %
                 lock_contents)
return None
else:
with open(lockfile,'w') as outfd:
outfd.write(datetime.utcnow().isoformat())
provenance = 'new download'
try:
if verbose:
LOGINFO('submitting TRILEGAL request for input params: %s'
% repr(inputparams))
posturl = TRILEGAL_POSTURL.format(formversion=trilegal_version)
req = requests.post(posturl,
data=trilegal_params,
timeout=timeout)
resp = req.text
# get the URL of the result file
resultfile = TRILEGAL_REGEX.search(resp)
if resultfile:
resultfile = resultfile[0]
waitdone = False
timeelapsed = 0.0
resultfileurl = '%s/%s' % (
TRILEGAL_BASEURL,
resultfile.replace('a href=..','')
)
if verbose:
LOGINFO(
                    'request submitted successfully, waiting for results...'
)
            # wait `refresh` seconds at a time, then try to download the
            # result file
while not waitdone:
if timeelapsed > maxtimeout:
LOGERROR('TRILEGAL timed out after waiting for results,'
' request was: '
'%s' % repr(inputparams))
# remove the lock file
if os.path.exists(lockfile):
os.remove(lockfile)
return None
time.sleep(refresh)
timeelapsed = timeelapsed + refresh
try:
resreq = requests.get(resultfileurl)
resreq.raise_for_status()
if verbose:
LOGINFO('TRILEGAL completed, retrieving results...')
# stream the response to the output cache file
with gzip.open(cachefname,'wb') as outfd:
for chunk in resreq.iter_content(chunk_size=65536):
outfd.write(chunk)
tablefname = cachefname
waitdone = True
if verbose:
LOGINFO('done.')
except Exception as e:
if verbose:
LOGINFO('elapsed time: %.1f, result file: %s '
'not ready yet...'
% (timeelapsed, resultfileurl))
continue
else:
LOGERROR('no result file URL found in TRILEGAL output, '
'this is probably an error with the input. '
'HTML of error page follows:\n')
LOGINFO(resp)
# remove the lock file
if os.path.exists(lockfile):
os.remove(lockfile)
return None
except requests.exceptions.Timeout as e:
LOGERROR('TRILEGAL submission timed out, '
'site is probably down. Request was: '
'%s' % repr(inputparams))
return None
except Exception as e:
LOGEXCEPTION('TRILEGAL request failed for '
'%s' % repr(inputparams))
return None
finally:
# remove the lock file
if os.path.exists(lockfile):
os.remove(lockfile)
# otherwise, get the file from the cache
else:
if verbose:
LOGINFO('getting cached TRILEGAL model result for '
'request: %s' %
(repr(inputparams)))
tablefname = cachefname
# return a dict pointing to the result file
# we'll parse this later
resdict = {'params':inputparams,
'extraparams':extraparams,
'provenance':provenance,
'tablefile':tablefname}
return resdict
|
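# Usage sketch (assumes network access to the TRILEGAL web service; the
# coordinates and settings below are illustrative):
res = query_galcoords(45.0, 10.0,
                      filtersystem='sloan_2mass',
                      field_deg2=1.0,
                      magnitude_limit=22.0)
if res is not None:
    print(res['provenance'])  # 'cache' or 'new download'
    print(res['tablefile'])   # path to the gzipped model table on disk
|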
def query_radecl(ra,
decl,
filtersystem='sloan_2mass',
field_deg2=1.0,
usebinaries=True,
extinction_sigma=0.1,
magnitude_limit=26.0,
maglim_filtercol=4,
trilegal_version=1.6,
extraparams=None,
forcefetch=False,
cachedir='~/.astrobase/trilegal-cache',
verbose=True,
timeout=60.0,
refresh=150.0,
maxtimeout=700.0):
'''This runs the TRILEGAL query for decimal equatorial coordinates.
Parameters
----------
ra,decl : float
These are the center equatorial coordinates in decimal degrees
filtersystem : str
This is a key in the TRILEGAL_FILTER_SYSTEMS dict. Use the function
:py:func:`astrobase.services.trilegal.list_trilegal_filtersystems` to
see a nicely formatted table with the key and description for each of
these.
field_deg2 : float
    The area of the simulated field in square degrees. Note that the
    simulation itself is run in Galactic coordinates (the given
    equatorial coordinates are converted first).
usebinaries : bool
If this is True, binaries will be present in the model results.
extinction_sigma : float
This is the applied std dev around the `Av_extinction` value for the
galactic coordinates requested.
magnitude_limit : float
This is the limiting magnitude of the simulation in the
`maglim_filtercol` band index of the filter system chosen.
maglim_filtercol : int
The index in the filter system list of the magnitude limiting band.
trilegal_version : float
    This is the version of the TRILEGAL form to use. This can usually be
    left as-is.
extraparams : dict or None
This is a dict that can be used to override parameters of the model
other than the basic ones used for input to this function. All
parameters are listed in `TRILEGAL_DEFAULT_PARAMS` above. See:
http://stev.oapd.inaf.it/cgi-bin/trilegal
for explanations of these parameters.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our initial request.
refresh : float
This sets the amount of time in seconds to wait before checking if the
result file is available. If the results file isn't available after
`refresh` seconds have elapsed, the function will wait for `refresh`
seconds continuously, until `maxtimeout` is reached or the results file
becomes available.
maxtimeout : float
The maximum amount of time in seconds to wait for a result to become
available after submitting our query request.
Returns
-------
dict
This returns a dict of the form::
{'params':the input param dict used,
'extraparams':any extra params used,
'provenance':'cached' or 'new download',
'tablefile':the path on disk to the downloaded model text file}
'''
# convert the ra/decl to gl, gb
radecl = SkyCoord(ra=ra*u.degree, dec=decl*u.degree)
gl = radecl.galactic.l.degree
gb = radecl.galactic.b.degree
return query_galcoords(gl,
gb,
filtersystem=filtersystem,
field_deg2=field_deg2,
usebinaries=usebinaries,
extinction_sigma=extinction_sigma,
magnitude_limit=magnitude_limit,
maglim_filtercol=maglim_filtercol,
trilegal_version=trilegal_version,
extraparams=extraparams,
forcefetch=forcefetch,
cachedir=cachedir,
verbose=verbose,
timeout=timeout,
refresh=refresh,
maxtimeout=maxtimeout)
|
def read_model_table(modelfile):
'''
This reads a downloaded TRILEGAL model file.
Parameters
----------
modelfile : str
Path to the downloaded model file to read.
Returns
-------
np.recarray
Returns the model table as a Numpy record array.
'''
with gzip.open(modelfile) as infd:
    model = np.genfromtxt(infd, names=True)
return model
|
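# End-to-end sketch (illustrative coordinates; needs network access on
# the first run, uses the cache afterwards): query TRILEGAL at an
# RA/Dec, then load the downloaded model table as a record array.
res = query_radecl(280.0, -45.0, filtersystem='sloan_2mass',
                   field_deg2=1.0)
if res is not None:
    model = read_model_table(res['tablefile'])
    # the column names come from the TRILEGAL table header; inspect
    # them with model.dtype.names before using any column
    print(model.dtype.names)
|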
def _time_independent_equals(a, b):
'''
This compares two values in constant time.
Taken from tornado:
https://github.com/tornadoweb/tornado/blob/
d4eb8eb4eb5cc9a6677e9116ef84ded8efba8859/tornado/web.py#L3060
'''
if len(a) != len(b):
return False
result = 0
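    # accumulate the XOR of every byte/char pair instead of returning
    # at the first mismatch, so the comparison takes the same time no
    # matter where the inputs differ; this prevents timing attacks on
    # secret values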
if isinstance(a[0], int): # python3 byte strings
for x, y in zip(a, b):
result |= x ^ y
else: # python2
for x, y in zip(a, b):
result |= ord(x) ^ ord(y)
return result == 0
|
def default(self, obj):
'''Overrides the default serializer for `JSONEncoder`.
This can serialize the following objects in addition to what
`JSONEncoder` can already do.
- `np.array`
- `bytes`
- `complex`
- `np.float64` and other `np.dtype` objects
Parameters
----------
obj : object
A Python object to serialize to JSON.
Returns
-------
str
A JSON encoded representation of the input object.
'''
if isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, bytes):
return obj.decode()
elif isinstance(obj, complex):
return (obj.real, obj.imag)
elif (isinstance(obj, (float, np.float64, np.float_)) and
not np.isfinite(obj)):
return None
elif isinstance(obj, (np.int8, np.int16, np.int32, np.int64)):
return int(obj)
else:
return json.JSONEncoder.default(self, obj)
|
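# Usage sketch: the `default` method above belongs to a JSONEncoder
# subclass. Assuming a class like the hypothetical one below wraps it,
# numpy arrays and bytes values serialize cleanly:
import json
import numpy as np

class FrontendJSONEncoder(json.JSONEncoder):  # hypothetical name
    def default(self, obj):
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, bytes):
            return obj.decode()
        elif isinstance(obj, complex):
            return (obj.real, obj.imag)
        elif isinstance(obj, float) and not np.isfinite(obj):
            return None
        elif isinstance(obj, (np.int8, np.int16, np.int32, np.int64)):
            return int(obj)
        return json.JSONEncoder.default(self, obj)

print(json.dumps({'mags': np.array([1.0, 2.0]), 'note': b'ok'},
                 cls=FrontendJSONEncoder))
|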
def initialize(self, currentdir, assetpath, cplist,
cplistfile, executor, readonly, baseurl):
'''
handles initial setup.
'''
self.currentdir = currentdir
self.assetpath = assetpath
self.currentproject = cplist
self.cplistfile = cplistfile
self.executor = executor
self.readonly = readonly
self.baseurl = baseurl
|
def get(self):
'''This handles GET requests to the index page.
TODO: provide the correct baseurl from the checkplotserver options dict,
so the frontend JS can just read that off immediately.
'''
# generate the project's list of checkplots
project_checkplots = self.currentproject['checkplots']
project_checkplotbasenames = [os.path.basename(x)
for x in project_checkplots]
project_checkplotindices = range(len(project_checkplots))
# get the sortkey and order
project_cpsortkey = self.currentproject['sortkey']
    if self.currentproject['sortorder'] == 'asc':
        project_cpsortorder = 'ascending'
    elif self.currentproject['sortorder'] == 'desc':
        project_cpsortorder = 'descending'
    else:
        # fall back to ascending if the sort order is unrecognized
        project_cpsortorder = 'ascending'
# get the filterkey and condition
project_cpfilterstatements = self.currentproject['filterstatements']
self.render('cpindex.html',
project_checkplots=project_checkplots,
project_cpsortorder=project_cpsortorder,
project_cpsortkey=project_cpsortkey,
project_cpfilterstatements=project_cpfilterstatements,
project_checkplotbasenames=project_checkplotbasenames,
project_checkplotindices=project_checkplotindices,
project_checkplotfile=self.cplistfile,
readonly=self.readonly,
baseurl=self.baseurl)
|
def get(self, checkplotfname):
'''This handles GET requests to serve a specific checkplot pickle.
This is an AJAX endpoint; returns JSON that gets converted by the
frontend into things to render.
'''
if checkplotfname:
# do the usual safing
self.checkplotfname = xhtml_escape(
base64.b64decode(url_unescape(checkplotfname))
)
# see if this plot is in the current project
if self.checkplotfname in self.currentproject['checkplots']:
# make sure this file exists
cpfpath = os.path.join(
os.path.abspath(os.path.dirname(self.cplistfile)),
self.checkplotfname
)
LOGGER.info('loading %s...' % cpfpath)
if not os.path.exists(cpfpath):
msg = "couldn't find checkplot %s" % cpfpath
LOGGER.error(msg)
resultdict = {'status':'error',
'message':msg,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
# this is the async call to the executor
cpdict = yield self.executor.submit(
_read_checkplot_picklefile, cpfpath
)
#####################################
## continue after we're good to go ##
#####################################
LOGGER.info('loaded %s' % cpfpath)
# break out the initial info
objectid = cpdict['objectid']
objectinfo = cpdict['objectinfo']
varinfo = cpdict['varinfo']
if 'pfmethods' in cpdict:
pfmethods = cpdict['pfmethods']
else:
pfmethods = []
for pfm in PFMETHODS:
if pfm in cpdict:
pfmethods.append(pfm)
# handle neighbors for this object
neighbors = []
            if ('neighbors' in cpdict and
                cpdict['neighbors'] is not None and
                len(cpdict['neighbors']) > 0):
nbrlist = cpdict['neighbors']
# get each neighbor, its info, and its phased LCs
for nbr in nbrlist:
if 'magdiffs' in nbr:
nbrmagdiffs = nbr['magdiffs']
else:
nbrmagdiffs = None
if 'colordiffs' in nbr:
nbrcolordiffs = nbr['colordiffs']
else:
nbrcolordiffs = None
thisnbrdict = {
'objectid':nbr['objectid'],
'objectinfo':{
'ra':nbr['ra'],
'decl':nbr['decl'],
'xpix':nbr['xpix'],
'ypix':nbr['ypix'],
'distarcsec':nbr['dist'],
'magdiffs':nbrmagdiffs,
'colordiffs':nbrcolordiffs
}
}
try:
nbr_magseries = nbr['magseries']['plot']
thisnbrdict['magseries'] = nbr_magseries
except Exception as e:
LOGGER.error(
"could not load magseries plot for "
"neighbor %s for object %s"
% (nbr['objectid'],
cpdict['objectid'])
)
try:
for pfm in pfmethods:
if pfm in nbr:
thisnbrdict[pfm] = {
'plot':nbr[pfm][0]['plot'],
'period':nbr[pfm][0]['period'],
'epoch':nbr[pfm][0]['epoch']
}
except Exception as e:
LOGGER.error(
"could not load phased LC plots for "
"neighbor %s for object %s"
% (nbr['objectid'],
cpdict['objectid'])
)
neighbors.append(thisnbrdict)
# load object comments
if 'comments' in cpdict:
objectcomments = cpdict['comments']
else:
objectcomments = None
# load the xmatch results, if any
if 'xmatch' in cpdict:
objectxmatch = cpdict['xmatch']
# get rid of those pesky nans
for xmcat in objectxmatch:
if isinstance(objectxmatch[xmcat]['info'], dict):
xminfo = objectxmatch[xmcat]['info']
for xmek in xminfo:
if (isinstance(xminfo[xmek], float) and
(not np.isfinite(xminfo[xmek]))):
xminfo[xmek] = None
else:
objectxmatch = None
# load the colormagdiagram object
if 'colormagdiagram' in cpdict:
colormagdiagram = cpdict['colormagdiagram']
else:
colormagdiagram = None
# these are base64 which can be provided directly to JS to
# generate images (neat!)
if 'finderchart' in cpdict:
finderchart = cpdict['finderchart']
else:
finderchart = None
if ('magseries' in cpdict and
isinstance(cpdict['magseries'], dict) and
'plot' in cpdict['magseries']):
magseries = cpdict['magseries']['plot']
time0 = cpdict['magseries']['times'].min()
magseries_ndet = cpdict['magseries']['times'].size
else:
magseries = None
time0 = 0.0
magseries_ndet = 0
LOGGER.warning(
"no 'magseries' key present in this "
"checkplot, some plots may be broken..."
)
if 'status' in cpdict:
cpstatus = cpdict['status']
else:
cpstatus = 'unknown, possibly incomplete checkplot'
# load the uifilters if present
if 'uifilters' in cpdict:
uifilters = cpdict['uifilters']
else:
uifilters = {'psearch_magfilters':None,
'psearch_sigclip':None,
'psearch_timefilters':None}
# FIXME: add in other stuff required by the frontend
# - signals
# FIXME: the frontend should load these other things as well
# into the various elems on the period-search-tools and
# variability-tools tabs
# this is the initial dict
resultdict = {
'status':'ok',
'message':'found checkplot %s' % self.checkplotfname,
'readonly':self.readonly,
'result':{
'time0':'%.3f' % time0,
'objectid':objectid,
'objectinfo':objectinfo,
'colormagdiagram':colormagdiagram,
'objectcomments':objectcomments,
'varinfo':varinfo,
'uifilters':uifilters,
'neighbors':neighbors,
'xmatch':objectxmatch,
'finderchart':finderchart,
'magseries':magseries,
# fallback in case objectinfo doesn't have ndet
'magseries_ndet':magseries_ndet,
'cpstatus':cpstatus,
'pfmethods':pfmethods
}
}
# make sure to replace nans with Nones. frontend JS absolutely
# hates NaNs and for some reason, the JSON encoder defined at
# the top of this file doesn't deal with them even though it
# should
for key in resultdict['result']['objectinfo']:
if (isinstance(resultdict['result']['objectinfo'][key],
(float, np.float64, np.float_)) and
(not np.isfinite(resultdict['result'][
'objectinfo'
][key]))):
resultdict['result']['objectinfo'][key] = None
elif (isinstance(resultdict['result']['objectinfo'][key],
ndarray)):
thisval = resultdict['result']['objectinfo'][key]
thisval = thisval.tolist()
for i, v in enumerate(thisval):
if (isinstance(v,(float, np.float64, np.float_)) and
(not(np.isfinite(v)))):
thisval[i] = None
resultdict['result']['objectinfo'][key] = thisval
# remove nans from varinfo itself
for key in resultdict['result']['varinfo']:
if (isinstance(
resultdict['result']['varinfo'][key],
(float, np.float64, np.float_)) and
(not np.isfinite(
resultdict['result']['varinfo'][key]
))):
resultdict['result']['varinfo'][key] = None
elif (isinstance(
resultdict['result']['varinfo'][key],
ndarray)):
thisval = (
resultdict['result']['varinfo'][key]
)
thisval = thisval.tolist()
for i, v in enumerate(thisval):
if (isinstance(v,(float, np.float64, np.float_)) and
(not(np.isfinite(v)))):
thisval[i] = None
resultdict['result']['varinfo'][key] = (
thisval
)
# remove nans from varinfo['features']
if ('features' in resultdict['result']['varinfo'] and
isinstance(resultdict['result']['varinfo']['features'],
dict)):
for key in resultdict['result']['varinfo']['features']:
if (isinstance(
resultdict[
'result'
]['varinfo']['features'][key],
(float, np.float64, np.float_)) and
(not np.isfinite(
resultdict[
'result'
]['varinfo']['features'][key]))):
resultdict[
'result'
]['varinfo']['features'][key] = None
elif (isinstance(
resultdict[
'result'
]['varinfo']['features'][key],
ndarray)):
thisval = (
resultdict['result']['varinfo']['features'][key]
)
thisval = thisval.tolist()
for i, v in enumerate(thisval):
if (isinstance(v,(float,
np.float64,
np.float_)) and
(not(np.isfinite(v)))):
thisval[i] = None
resultdict['result']['varinfo']['features'][key] = (
thisval
)
# now get the periodograms and phased LCs
for key in pfmethods:
# get the periodogram for this method
periodogram = cpdict[key]['periodogram']
# get the phased LC with best period
if 0 in cpdict[key] and isinstance(cpdict[key][0], dict):
phasedlc0plot = cpdict[key][0]['plot']
phasedlc0period = float(cpdict[key][0]['period'])
phasedlc0epoch = float(cpdict[key][0]['epoch'])
else:
phasedlc0plot = None
phasedlc0period = None
phasedlc0epoch = None
# get the associated fitinfo for this period if it
# exists
if (0 in cpdict[key] and
isinstance(cpdict[key][0], dict) and
'lcfit' in cpdict[key][0] and
isinstance(cpdict[key][0]['lcfit'], dict)):
phasedlc0fit = {
'method':(
cpdict[key][0]['lcfit']['fittype']
),
'redchisq':(
cpdict[key][0]['lcfit']['fitredchisq']
),
'chisq':(
cpdict[key][0]['lcfit']['fitchisq']
),
'params':(
cpdict[key][0][
'lcfit'
]['fitinfo']['finalparams'] if
'finalparams' in
cpdict[key][0]['lcfit']['fitinfo'] else None
)
}
else:
phasedlc0fit = None
# get the phased LC with 2nd best period
if 1 in cpdict[key] and isinstance(cpdict[key][1], dict):
phasedlc1plot = cpdict[key][1]['plot']
phasedlc1period = float(cpdict[key][1]['period'])
phasedlc1epoch = float(cpdict[key][1]['epoch'])
else:
phasedlc1plot = None
phasedlc1period = None
phasedlc1epoch = None
# get the associated fitinfo for this period if it
# exists
if (1 in cpdict[key] and
isinstance(cpdict[key][1], dict) and
'lcfit' in cpdict[key][1] and
isinstance(cpdict[key][1]['lcfit'], dict)):
phasedlc1fit = {
'method':(
cpdict[key][1]['lcfit']['fittype']
),
'redchisq':(
cpdict[key][1]['lcfit']['fitredchisq']
),
'chisq':(
cpdict[key][1]['lcfit']['fitchisq']
),
'params':(
cpdict[key][1][
'lcfit'
]['fitinfo']['finalparams'] if
'finalparams' in
cpdict[key][1]['lcfit']['fitinfo'] else None
)
}
else:
phasedlc1fit = None
# get the phased LC with 3rd best period
if 2 in cpdict[key] and isinstance(cpdict[key][2], dict):
phasedlc2plot = cpdict[key][2]['plot']
phasedlc2period = float(cpdict[key][2]['period'])
phasedlc2epoch = float(cpdict[key][2]['epoch'])
else:
phasedlc2plot = None
phasedlc2period = None
phasedlc2epoch = None
# get the associated fitinfo for this period if it
# exists
if (2 in cpdict[key] and
isinstance(cpdict[key][2], dict) and
'lcfit' in cpdict[key][2] and
isinstance(cpdict[key][2]['lcfit'], dict)):
phasedlc2fit = {
'method':(
cpdict[key][2]['lcfit']['fittype']
),
'redchisq':(
cpdict[key][2]['lcfit']['fitredchisq']
),
'chisq':(
cpdict[key][2]['lcfit']['fitchisq']
),
'params':(
cpdict[key][2][
'lcfit'
]['fitinfo']['finalparams'] if
'finalparams' in
cpdict[key][2]['lcfit']['fitinfo'] else None
)
}
else:
phasedlc2fit = None
resultdict['result'][key] = {
'nbestperiods':cpdict[key]['nbestperiods'],
'periodogram':periodogram,
'bestperiod':cpdict[key]['bestperiod'],
'phasedlc0':{
'plot':phasedlc0plot,
'period':phasedlc0period,
'epoch':phasedlc0epoch,
'lcfit':phasedlc0fit,
},
'phasedlc1':{
'plot':phasedlc1plot,
'period':phasedlc1period,
'epoch':phasedlc1epoch,
'lcfit':phasedlc1fit,
},
'phasedlc2':{
'plot':phasedlc2plot,
'period':phasedlc2period,
'epoch':phasedlc2epoch,
'lcfit':phasedlc2fit,
},
}
#
# end of processing per pfmethod
#
# return the checkplot via JSON
self.write(resultdict)
self.finish()
else:
LOGGER.error('could not find %s' % self.checkplotfname)
resultdict = {'status':'error',
'message':"This checkplot doesn't exist.",
'readonly':self.readonly,
'result':None}
self.write(resultdict)
self.finish()
else:
resultdict = {'status':'error',
'message':'No checkplot provided to load.',
'readonly':self.readonly,
'result':None}
self.write(resultdict)
|
def post(self, cpfile):
'''This handles POST requests.
Also an AJAX endpoint. Updates the persistent checkplot dict using the
changes from the UI, and then saves it back to disk. This could
definitely be faster by just loading the checkplot into a server-wide
shared dict or something.
'''
# if self.readonly is set, then don't accept any changes
# return immediately with a 400
if self.readonly:
msg = "checkplotserver is in readonly mode. no updates allowed."
resultdict = {'status':'error',
'message':msg,
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
# now try to update the contents
try:
self.cpfile = base64.b64decode(url_unescape(cpfile)).decode()
cpcontents = self.get_argument('cpcontents', default=None)
savetopng = self.get_argument('savetopng', default=None)
if not self.cpfile or not cpcontents:
msg = "did not receive a checkplot update payload"
resultdict = {'status':'error',
'message':msg,
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
cpcontents = json.loads(cpcontents)
# the only keys in cpdict that can updated from the UI are from
# varinfo, objectinfo (objecttags), uifilters, and comments
updated = {'varinfo': cpcontents['varinfo'],
'objectinfo':cpcontents['objectinfo'],
'comments':cpcontents['comments'],
'uifilters':cpcontents['uifilters']}
# we need to reform the self.cpfile so it points to the full path
cpfpath = os.path.join(
os.path.abspath(os.path.dirname(self.cplistfile)),
self.cpfile
)
LOGGER.info('loading %s...' % cpfpath)
if not os.path.exists(cpfpath):
msg = "couldn't find checkplot %s" % cpfpath
LOGGER.error(msg)
resultdict = {'status':'error',
'message':msg,
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
# dispatch the task
updated = yield self.executor.submit(checkplot_pickle_update,
cpfpath, updated)
# continue processing after this is done
if updated:
LOGGER.info('updated checkplot %s successfully' % updated)
resultdict = {'status':'success',
'message':'checkplot update successful',
'readonly':self.readonly,
'result':{'checkplot':updated,
'unixtime':utime.time(),
'changes':cpcontents,
'cpfpng': None}}
# handle a savetopng trigger
if savetopng:
                    # render the PNG into an in-memory buffer;
                    # checkplot_pickle_to_png writes into the file-like
                    # object we pass in
                    cpfpng = StrIO()
pngdone = yield self.executor.submit(
checkplot_pickle_to_png,
cpfpath, cpfpng
)
if pngdone is not None:
# we'll send back the PNG, which can then be loaded by
# the frontend and reformed into a download
pngdone.seek(0)
pngbin = pngdone.read()
pngb64 = base64.b64encode(pngbin)
pngdone.close()
del pngbin
resultdict['result']['cpfpng'] = pngb64
else:
resultdict['result']['cpfpng'] = ''
self.write(resultdict)
self.finish()
else:
LOGGER.error('could not handle checkplot update for %s: %s' %
(self.cpfile, cpcontents))
msg = "checkplot update failed because of a backend error"
resultdict = {'status':'error',
'message':msg,
'readonly':self.readonly,
'result':None}
self.write(resultdict)
self.finish()
# if something goes wrong, inform the user
except Exception as e:
LOGGER.exception('could not handle checkplot update for %s: %s' %
(self.cpfile, cpcontents))
msg = "checkplot update failed because of an exception"
resultdict = {'status':'error',
'message':msg,
'readonly':self.readonly,
'result':None}
self.write(resultdict)
self.finish()
|
def get(self):
'''
This handles GET requests for the current checkplot-list.json file.
Used with AJAX from frontend.
'''
# add the reviewed key to the current dict if it doesn't exist
# this will hold all the reviewed objects for the frontend
if 'reviewed' not in self.currentproject:
self.currentproject['reviewed'] = {}
# just returns the current project as JSON
self.write(self.currentproject)
|
def post(self):
'''This handles POST requests.
Saves the changes made by the user on the frontend back to the current
checkplot-list.json file.
'''
# if self.readonly is set, then don't accept any changes
# return immediately with a 400
if self.readonly:
msg = "checkplotserver is in readonly mode. no updates allowed."
resultdict = {'status':'error',
'message':msg,
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
objectid = self.get_argument('objectid', None)
changes = self.get_argument('changes',None)
# if either of the above is invalid, return nothing
if not objectid or not changes:
msg = ("could not parse changes to the checkplot filelist "
"from the frontend")
LOGGER.error(msg)
resultdict = {'status':'error',
'message':msg,
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
# otherwise, update the checkplot list JSON
objectid = xhtml_escape(objectid)
changes = json.loads(changes)
# update the dictionary
if 'reviewed' not in self.currentproject:
self.currentproject['reviewed'] = {}
self.currentproject['reviewed'][objectid] = changes
# update the JSON file
with open(self.cplistfile,'w') as outfd:
json.dump(self.currentproject, outfd)
# return status
msg = ("wrote all changes to the checkplot filelist "
"from the frontend for object: %s" % objectid)
LOGGER.info(msg)
resultdict = {'status':'success',
'message':msg,
'readonly':self.readonly,
'result':{'objectid':objectid,
'changes':changes}}
self.write(resultdict)
self.finish()
|
def get(self, cpfile):
'''This handles a GET request to run a specified LC tool.
Parameters
----------
cpfile : str
This is the checkplot file to run the tool on.
Returns
-------
str
Returns a JSON response.
Notes
-----
The URI structure is::
/tools/<cpfile>?[args]
where args are::
?lctool=<lctool>&argkey1=argval1&argkey2=argval2&...
&forcereload=true <- if this is present, then reload values from
original checkplot.
&objectid=<objectid>
`lctool` is one of the strings below
Period search functions::
psearch-gls: run Lomb-Scargle with given params
psearch-bls: run BLS with given params
psearch-pdm: run phase dispersion minimization with given params
psearch-aov: run analysis-of-variance with given params
psearch-mav: run analysis-of-variance (multi-harm) with given params
psearch-acf: run ACF period search with given params
psearch-win: run spectral window function search with given params
Arguments recognized by all period-search functions are::
startp=XX
endp=XX
magsarefluxes=True|False
autofreq=True|False
stepsize=XX
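    For example, a GLS period search (with hypothetical parameter
    values) would be requested as::
        /tools/<cpfile>?lctool=psearch-gls&startp=0.5&endp=100.0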
Variability characterization functions::
var-varfeatures: gets the variability features from the checkplot or
recalculates if they're not present
var-prewhiten: pre-whitens the light curve with a sinusoidal signal
var-masksig: masks a given phase location with given width from the
light curve
    Light curve manipulation functions::
        phasedlc-newplot: make phased LC with new provided period/epoch
        lcfit-fourier: fit a Fourier function to the phased LC
        lcfit-spline: fit a spline function to the phased LC
        lcfit-legendre: fit a Legendre polynomial to the phased LC
        lcfit-savgol: fit a Savitzky-Golay polynomial to the phased LC
FIXME: figure out how to cache the results of these functions
temporarily and save them back to the checkplot after we click on save
in the frontend.
TODO: look for a checkplot-blah-blah.pkl-cps-processing file in the same
place as the usual pickle file. if this exists and is newer than the pkl
file, load it instead. Or have a checkplotdict['cpservertemp'] item.
'''
if cpfile:
self.cpfile = (
xhtml_escape(base64.b64decode(url_unescape(cpfile)))
)
# see if this plot is in the current project
if self.cpfile in self.currentproject['checkplots']:
# make sure this file exists
cpfpath = os.path.join(
os.path.abspath(os.path.dirname(self.cplistfile)),
self.cpfile
)
# if we can't find the pickle, quit immediately
if not os.path.exists(cpfpath):
msg = "couldn't find checkplot %s" % cpfpath
LOGGER.error(msg)
resultdict = {'status':'error',
'message':msg,
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
###########################
# now parse the arguments #
###########################
# check if we have to force-reload
forcereload = self.get_argument('forcereload',False)
if forcereload and xhtml_escape(forcereload):
forcereload = True if forcereload == 'true' else False
# get the objectid
cpobjectid = self.get_argument('objectid',None)
# get the light curve tool to use
lctool = self.get_argument('lctool', None)
# preemptive dict to fill out
resultdict = {'status':None,
'message':None,
'readonly':self.readonly,
'result':None}
# check if the lctool arg is provided
if lctool:
lctool = xhtml_escape(lctool)
lctoolargs = []
lctoolkwargs = {}
# check if this lctool is OK and has all the required args
if lctool in CPTOOLMAP:
try:
                        # parse the kwargs for this lctool now; the
                        # positional args are collected later, once the
                        # checkplot is loaded
for xkwarg, xkwargtype, xkwargdef in zip(
CPTOOLMAP[lctool]['kwargs'],
CPTOOLMAP[lctool]['kwargtypes'],
CPTOOLMAP[lctool]['kwargdefs']
):
# get the kwarg
if xkwargtype is list:
wbkwarg = self.get_arguments(xkwarg)
if len(wbkwarg) > 0:
wbkwarg = [url_unescape(xhtml_escape(x))
for x in wbkwarg]
else:
wbkwarg = None
else:
wbkwarg = self.get_argument(xkwarg, None)
if wbkwarg is not None:
wbkwarg = url_unescape(
xhtml_escape(wbkwarg)
)
LOGGER.info('xkwarg = %s, wbkwarg = %s' %
(xkwarg, repr(wbkwarg)))
# if it's None, sub with the default
if wbkwarg is None:
wbkwarg = xkwargdef
# otherwise, cast it to the required type
else:
# special handling for lists of floats
if xkwargtype is list:
wbkwarg = [float(x) for x in wbkwarg]
# special handling for booleans
elif xkwargtype is bool:
if wbkwarg == 'false':
wbkwarg = False
elif wbkwarg == 'true':
wbkwarg = True
else:
wbkwarg = xkwargdef
# usual casting for other types
else:
wbkwarg = xkwargtype(wbkwarg)
# update the lctools kwarg dict
# make sure to remove any [] from the kwargs
# this was needed to parse the input query
# string correctly
if xkwarg.endswith('[]'):
xkwarg = xkwarg.rstrip('[]')
lctoolkwargs.update({xkwarg:wbkwarg})
except Exception as e:
LOGGER.exception('lctool %s, kwarg %s '
'will not work' %
(lctool, xkwarg))
resultdict['status'] = 'error'
resultdict['message'] = (
'lctool %s, kwarg %s '
'will not work' %
(lctool, xkwarg)
)
resultdict['result'] = {'objectid':cpobjectid}
self.write(resultdict)
raise tornado.web.Finish()
# if the tool is not in the CPTOOLSMAP
else:
LOGGER.error('lctool %s, does not exist' % lctool)
resultdict['status'] = 'error'
resultdict['message'] = (
'lctool %s does not exist' % lctool
)
resultdict['result'] = {'objectid':cpobjectid}
self.write(resultdict)
raise tornado.web.Finish()
# if no lctool arg is provided
else:
LOGGER.error('lctool argument not provided')
resultdict['status'] = 'error'
resultdict['message'] = (
'lctool argument not provided'
)
resultdict['result'] = {'objectid':cpobjectid}
self.write(resultdict)
raise tornado.web.Finish()
##############################################
## NOW WE'RE READY TO ACTUALLY DO SOMETHING ##
##############################################
LOGGER.info('loading %s...' % cpfpath)
# this loads the actual checkplot pickle
cpdict = yield self.executor.submit(
_read_checkplot_picklefile, cpfpath
)
# we check for the existence of a cpfpath + '-cpserver-temp'
# file first. this is where we store stuff before we write it
# back to the actual checkplot.
tempfpath = cpfpath + '-cpserver-temp'
# load the temp checkplot if it exists
if os.path.exists(tempfpath):
tempcpdict = yield self.executor.submit(
_read_checkplot_picklefile, tempfpath
)
# if it doesn't exist, read the times, mags, errs from the
# actual checkplot in prep for working on it
else:
tempcpdict = {
'objectid':cpdict['objectid'],
'magseries':{
'times':cpdict['magseries']['times'],
'mags':cpdict['magseries']['mags'],
'errs':cpdict['magseries']['errs'],
}
}
# if we're not forcing a rerun from the original checkplot dict
if not forcereload:
cptimes, cpmags, cperrs = (
tempcpdict['magseries']['times'],
tempcpdict['magseries']['mags'],
tempcpdict['magseries']['errs'],
)
LOGGER.info('forcereload = False')
# otherwise, reload the original times, mags, errs
else:
cptimes, cpmags, cperrs = (cpdict['magseries']['times'],
cpdict['magseries']['mags'],
cpdict['magseries']['errs'])
LOGGER.info('forcereload = True')
# collect the args
for xarg, xargtype in zip(CPTOOLMAP[lctool]['args'],
CPTOOLMAP[lctool]['argtypes']):
# handle special args
if xarg is None:
lctoolargs.append(None)
elif xarg == 'times':
lctoolargs.append(cptimes)
elif xarg == 'mags':
lctoolargs.append(cpmags)
elif xarg == 'errs':
lctoolargs.append(cperrs)
# handle other args
else:
try:
if xargtype is list:
wbarg = self.get_arguments(xarg)
else:
wbarg = url_unescape(
xhtml_escape(
self.get_argument(xarg, None)
)
)
# cast the arg to the required type
# special handling for lists
if xargtype is list:
wbarg = [float(x) for x in wbarg]
# special handling for epochs that can be optional
elif xargtype is float and xarg == 'varepoch':
try:
wbarg = xargtype(wbarg)
except Exception as e:
wbarg = None
# usual casting for other types
else:
wbarg = xargtype(wbarg)
lctoolargs.append(wbarg)
except Exception as e:
LOGGER.exception('lctool %s, arg %s '
'will not work' %
(lctool, xarg))
resultdict['status'] = 'error'
resultdict['message'] = (
'lctool %s, arg %s '
'will not work' %
(lctool, xarg)
)
resultdict['result'] = {'objectid':cpobjectid}
self.write(resultdict)
raise tornado.web.Finish()
LOGGER.info(lctool)
LOGGER.info(lctoolargs)
LOGGER.info(lctoolkwargs)
############################
## handle the lctools now ##
############################
            # if results for this tool are already present and
            # force-reload is not True, just return them instead of
            # recomputing.
resloc = CPTOOLMAP[lctool]['resloc']
# TODO: figure out a way to make the dispatched tasks
# cancellable. This can probably be done by having a global
# TOOLQUEUE object that gets imported on initialize(). In this
# object, we could put in key:vals like so:
#
# TOOLQUEUE['lctool-<toolname>-cpfpath'] = (
# yield self.executor.submit(blah, *blah_args, **blah_kwargs)
# )
#
            # then we probably need some sort of frontend AJAX call that
            # enqueues things and can then cancel stuff from the queue.
            # things we still need to figure out:
# - if the above scheme actually yields so we remain async
# - if the Future object supports cancellation
# - if the Future object that isn't resolved actually works
# get the objectid. we'll send this along with every
# result. this should handle the case of the current objectid
# not being the same as the objectid being looked at by the
# user. in effect, this will allow the user to launch a
# long-running process and come back to it later since the
# frontend will load the older results when they are complete.
objectid = cpdict['objectid']
# if lctool is a periodogram method
if lctool in ('psearch-gls',
'psearch-bls',
'psearch-pdm',
'psearch-aov',
'psearch-mav',
'psearch-acf',
'psearch-win'):
lspmethod = resloc[0]
# if we can return the results from a previous run
if (lspmethod in tempcpdict and
isinstance(tempcpdict[lspmethod], dict) and
(not forcereload)):
# for a periodogram method, we need the
# following items
bestperiod = (
tempcpdict[lspmethod]['bestperiod']
)
nbestperiods = (
tempcpdict[lspmethod]['nbestperiods']
)
nbestlspvals = (
tempcpdict[lspmethod]['nbestlspvals']
)
periodogram = (
tempcpdict[lspmethod]['periodogram']
)
# get the first phased LC plot and its period
# and epoch
phasedlc0plot = (
tempcpdict[lspmethod][0]['plot']
)
phasedlc0period = float(
tempcpdict[lspmethod][0]['period']
)
phasedlc0epoch = float(
tempcpdict[lspmethod][0]['epoch']
)
LOGGER.warning(
'returning previously unsaved '
'results for lctool %s from %s' %
(lctool, tempfpath)
)
#
# assemble the returndict
#
resultdict['status'] = 'warning'
resultdict['message'] = (
'previous '
'unsaved results from %s' %
lctool
)
resultdict['result'] = {
'objectid':objectid,
lspmethod:{
'nbestperiods':nbestperiods,
'periodogram':periodogram,
'bestperiod':bestperiod,
'nbestpeaks':nbestlspvals,
'phasedlc0':{
'plot':phasedlc0plot,
'period':phasedlc0period,
'epoch':phasedlc0epoch,
}
}
}
self.write(resultdict)
self.finish()
# otherwise, we have to rerun the periodogram method
else:
# see if sigclip is set. if so, then do the sigclip on
# the times, mags, errs
if lctoolkwargs['sigclip'] is not None:
wtimes, wmags, werrs = lcmath.sigclip_magseries(
lctoolargs[0],
lctoolargs[1],
lctoolargs[2],
sigclip=lctoolkwargs['sigclip'],
magsarefluxes=lctoolkwargs['magsarefluxes']
)
lctoolargs[0] = wtimes
lctoolargs[1] = wmags
lctoolargs[2] = werrs
#
# process the LC filters now
#
# see if the lctimefilters are set
if lctoolkwargs['lctimefilters']:
wtimes, wmags, werrs = (lctoolargs[0],
lctoolargs[1],
lctoolargs[2])
filtermasks = [
np.full_like(wtimes, False, dtype=np.bool_)
]
# parse the time filter strings
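                        # assumed format: a comma-separated list of
                        # '(lo:hi)' or '(not lo:hi)' ranges, measured
                        # from the first time value. '(1.0:3.0)' keeps
                        # only points in that window; '(not 1.0:3.0)'
                        # keeps everything outside it. multiple ranges
                        # are OR-ed together below.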
filterstr = lctoolkwargs['lctimefilters']
filters = filterstr.split(',')
filters = [
x.strip().lstrip('(').rstrip(')').strip()
for x in filters
]
for filt in filters:
try:
thisfilt = filt.split(':')
if len(thisfilt) == 2:
filt_lo = float(thisfilt[0])
filt_hi = float(thisfilt[1])
filtermasks.append(
((wtimes -
cptimes.min()) < filt_hi) &
((wtimes -
cptimes.min()) > filt_lo)
)
elif (len(thisfilt) == 3 and
thisfilt[0].strip() == 'not'):
filt_lo = float(thisfilt[1])
filt_hi = float(thisfilt[2])
filtermasks.append(np.logical_not(
(((wtimes -
cptimes.min()) < filt_hi) &
((wtimes -
cptimes.min()) > filt_lo))
))
else:
continue
except Exception as e:
continue
# finally, apply the filters if applicable
if len(filtermasks) > 0:
# apply the filters using an OR
filterind = np.column_stack(filtermasks)
filterind = np.any(filterind, axis=1)
lctoolargs[0] = wtimes[filterind]
lctoolargs[1] = wmags[filterind]
lctoolargs[2] = werrs[filterind]
# see if the lcmagfilters are set
if lctoolkwargs['lcmagfilters']:
wtimes, wmags, werrs = (lctoolargs[0],
lctoolargs[1],
lctoolargs[2])
filtermasks = [
np.full_like(wtimes, False, dtype=np.bool_)
]
                        # parse the mag filter strings
filterstr = lctoolkwargs['lcmagfilters']
filters = filterstr.split(',')
                        filters = [
                            x.strip()
                            for x in filters
                        ]
for filt in filters:
try:
thisfilt = filt.split(':')
if len(thisfilt) == 2:
filt_lo = float(thisfilt[0])
filt_hi = float(thisfilt[1])
filtermasks.append(
(wmags < filt_hi) &
(wmags > filt_lo)
)
elif (len(thisfilt) == 3 and
thisfilt[0].strip() == 'not'):
filt_lo = float(thisfilt[1])
filt_hi = float(thisfilt[2])
filtermasks.append(np.logical_not(
((wmags < filt_hi) &
(wmags > filt_lo))
))
else:
continue
except Exception as e:
continue
# finally, apply the filters if applicable
if len(filtermasks) > 0:
# apply the filters using an OR
filterind = np.column_stack(filtermasks)
filterind = np.any(filterind, axis=1)
lctoolargs[0] = wtimes[filterind]
lctoolargs[1] = wmags[filterind]
lctoolargs[2] = werrs[filterind]
                    # at the end of processing, remove these from
                    # lctoolkwargs, since the period-finder function
                    # doesn't know about them
del lctoolkwargs['lctimefilters']
del lctoolkwargs['lcmagfilters']
#
# now run the period finder and get results
#
lctoolfunction = CPTOOLMAP[lctool]['func']
# run the period finder
funcresults = yield self.executor.submit(
lctoolfunction,
*lctoolargs,
**lctoolkwargs
)
# get what we need out of funcresults when it
# returns.
nbestperiods = funcresults['nbestperiods']
nbestlspvals = funcresults['nbestlspvals']
bestperiod = funcresults['bestperiod']
# generate the periodogram png
pgramres = yield self.executor.submit(
_pkl_periodogram,
funcresults,
)
# generate the phased LCs. we show these in the frontend
# along with the periodogram.
phasedlcargs0 = (None,
lspmethod,
-1,
lctoolargs[0],
lctoolargs[1],
lctoolargs[2],
nbestperiods[0],
'min')
if len(nbestperiods) > 1:
phasedlcargs1 = (None,
lspmethod,
-1,
lctoolargs[0],
lctoolargs[1],
lctoolargs[2],
nbestperiods[1],
'min')
else:
phasedlcargs1 = None
if len(nbestperiods) > 2:
phasedlcargs2 = (None,
lspmethod,
-1,
lctoolargs[0],
lctoolargs[1],
lctoolargs[2],
nbestperiods[2],
'min')
else:
phasedlcargs2 = None
# here, we set a bestperiodhighlight to distinguish this
# plot from the ones existing in the checkplot already
phasedlckwargs = {
'xliminsetmode':False,
'magsarefluxes':lctoolkwargs['magsarefluxes'],
'bestperiodhighlight':'#defa75',
}
# dispatch the plot functions
phasedlc0 = yield self.executor.submit(
_pkl_phased_magseries_plot,
*phasedlcargs0,
**phasedlckwargs
)
if phasedlcargs1 is not None:
phasedlc1 = yield self.executor.submit(
_pkl_phased_magseries_plot,
*phasedlcargs1,
**phasedlckwargs
)
else:
phasedlc1 = None
if phasedlcargs2 is not None:
phasedlc2 = yield self.executor.submit(
_pkl_phased_magseries_plot,
*phasedlcargs2,
**phasedlckwargs
)
else:
phasedlc2 = None
# save these to the tempcpdict
# save the pickle only if readonly is not true
if not self.readonly:
tempcpdict[lspmethod] = {
'periods':funcresults['periods'],
'lspvals':funcresults['lspvals'],
'bestperiod':funcresults['bestperiod'],
'nbestperiods':funcresults['nbestperiods'],
'nbestlspvals':funcresults['nbestlspvals'],
'periodogram':(
pgramres[lspmethod]['periodogram']
),
0:phasedlc0,
}
if phasedlc1 is not None:
tempcpdict[lspmethod][1] = phasedlc1
if phasedlc2 is not None:
tempcpdict[lspmethod][2] = phasedlc2
savekwargs = {
'outfile':tempfpath,
'protocol':pickle.HIGHEST_PROTOCOL
}
savedcpf = yield self.executor.submit(
_write_checkplot_picklefile,
tempcpdict,
**savekwargs
)
LOGGER.info(
'saved temp results from '
'%s to checkplot: %s' %
(lctool, savedcpf)
)
else:
LOGGER.warning(
'not saving temp results to checkplot '
' because readonly = True'
)
#
# assemble the return dict
#
# the periodogram
periodogram = pgramres[lspmethod]['periodogram']
# phasedlc plot, period, and epoch for best 3 peaks
phasedlc0plot = phasedlc0['plot']
phasedlc0period = float(phasedlc0['period'])
phasedlc0epoch = float(phasedlc0['epoch'])
if phasedlc1 is not None:
phasedlc1plot = phasedlc1['plot']
phasedlc1period = float(phasedlc1['period'])
phasedlc1epoch = float(phasedlc1['epoch'])
if phasedlc2 is not None:
phasedlc2plot = phasedlc2['plot']
phasedlc2period = float(phasedlc2['period'])
phasedlc2epoch = float(phasedlc2['epoch'])
resultdict['status'] = 'success'
resultdict['message'] = (
'new results for %s' %
lctool
)
resultdict['result'] = {
'objectid':objectid,
lspmethod:{
'nbestperiods':nbestperiods,
'nbestpeaks':nbestlspvals,
'periodogram':periodogram,
'bestperiod':bestperiod,
'phasedlc0':{
'plot':phasedlc0plot,
'period':phasedlc0period,
'epoch':phasedlc0epoch,
},
}
}
if phasedlc1 is not None:
resultdict['result'][lspmethod]['phasedlc1'] = {
'plot':phasedlc1plot,
'period':phasedlc1period,
'epoch':phasedlc1epoch,
}
if phasedlc2 is not None:
resultdict['result'][lspmethod]['phasedlc2'] = {
'plot':phasedlc2plot,
'period':phasedlc2period,
'epoch':phasedlc2epoch,
}
# return to frontend
self.write(resultdict)
self.finish()
# if the lctool is a call to the phased LC plot itself
# this requires lots of parameters
# these should all be present in the frontend
elif lctool == 'phasedlc-newplot':
lspmethod = lctoolargs[1]
periodind = lctoolargs[2]
# if we can return the results from a previous run
if (not forcereload and lspmethod in tempcpdict and
isinstance(tempcpdict[lspmethod], dict) and
periodind in tempcpdict[lspmethod] and
isinstance(tempcpdict[lspmethod][periodind], dict)):
# we get phased LC at periodind from a previous run
phasedlc = tempcpdict[lspmethod][periodind]
LOGGER.warning(
'returning previously unsaved '
'results for lctool %s from %s' %
(lctool, tempfpath)
)
#
# assemble the returndict
#
resultdict['status'] = 'warning'
resultdict['message'] = (
'previous '
'unsaved results from %s' %
lctool
)
retkey = 'phasedlc%s' % periodind
resultdict['result'] = {
'objectid':objectid,
lspmethod:{
retkey:phasedlc
}
}
self.write(resultdict)
self.finish()
# otherwise, we need to dispatch the function
else:
# add the highlight to distinguish this plot from usual
# checkplot plots
# full disclosure: http://c0ffee.surge.sh/
lctoolkwargs['bestperiodhighlight'] = '#defa75'
# set the input periodind to -1 to make sure we still
# have the highlight on the plot. we use the correct
# periodind when returning
lctoolargs[2] = -1
# see if sigclip is set. if so, then do the sigclip on
# the times, mags, errs
if lctoolkwargs['sigclip'] is not None:
stimes, smags, serrs = lcmath.sigclip_magseries(
lctoolargs[3],
lctoolargs[4],
lctoolargs[5],
sigclip=lctoolkwargs['sigclip'],
magsarefluxes=lctoolkwargs['magsarefluxes']
)
else:
stimes, smags, serrs = (lctoolargs[3],
lctoolargs[4],
lctoolargs[5])
#
# process the LC filters now
#
# see if the lctimefilters are set
if lctoolkwargs['lctimefilters']:
wtimes, wmags, werrs = stimes, smags, serrs
filtermasks = [
np.full_like(wtimes, False, dtype=np.bool_)
]
# parse the time filter strings
filterstr = lctoolkwargs['lctimefilters']
filters = filterstr.split(',')
filters = [
x.strip().lstrip('(').rstrip(')').strip()
for x in filters
]
for filt in filters:
try:
thisfilt = filt.split(':')
if len(thisfilt) == 2:
filt_lo = float(thisfilt[0])
filt_hi = float(thisfilt[1])
filtermasks.append(
((wtimes -
cptimes.min()) < filt_hi) &
((wtimes -
cptimes.min()) > filt_lo)
)
elif (len(thisfilt) == 3 and
thisfilt[0].strip() == 'not'):
filt_lo = float(thisfilt[1])
filt_hi = float(thisfilt[2])
filtermasks.append(np.logical_not(
(((wtimes -
cptimes.min()) < filt_hi) &
((wtimes -
cptimes.min()) > filt_lo))
))
else:
continue
except Exception as e:
continue
# finally, apply the filters if applicable
if len(filtermasks) > 0:
# apply the filters using an OR
filterind = np.column_stack(filtermasks)
filterind = np.any(filterind, axis=1)
stimes = wtimes[filterind]
smags = wmags[filterind]
serrs = werrs[filterind]
# see if the lcmagfilters are set
if lctoolkwargs['lcmagfilters']:
wtimes, wmags, werrs = stimes, smags, serrs
filtermasks = [
np.full_like(wtimes, False, dtype=np.bool_)
]
                        # parse the mag filter strings
filterstr = lctoolkwargs['lcmagfilters']
filters = filterstr.split(',')
                        filters = [
                            x.strip()
                            for x in filters
                        ]
for filt in filters:
try:
thisfilt = filt.split(':')
if len(thisfilt) == 2:
filt_lo = float(thisfilt[0])
filt_hi = float(thisfilt[1])
filtermasks.append(
(wmags < filt_hi) &
(wmags > filt_lo)
)
elif (len(thisfilt) == 3 and
thisfilt[0].strip() == 'not'):
filt_lo = float(thisfilt[1])
filt_hi = float(thisfilt[2])
filtermasks.append(np.logical_not(
((wmags < filt_hi) &
(wmags > filt_lo))
))
else:
continue
except Exception as e:
continue
# finally, apply the filters if applicable
if len(filtermasks) > 0:
# apply the filters using an OR
filterind = np.column_stack(filtermasks)
filterind = np.any(filterind, axis=1)
stimes = wtimes[filterind]
smags = wmags[filterind]
serrs = werrs[filterind]
                    # at the end of processing, remove these from
                    # lctoolkwargs, since the phased LC plot function
                    # doesn't know about them
del lctoolkwargs['lctimefilters']
del lctoolkwargs['lcmagfilters']
# if the varepoch is set to None, try to get the
# minimum-light epoch using a spline fit
if lctoolargs[-1] is None:
LOGGER.warning(
'automatically getting min epoch '
'for phased LC plot'
)
try:
spfit = lcfit.spline_fit_magseries(
stimes, # times
smags, # mags
serrs, # errs
lctoolargs[6], # period
magsarefluxes=lctoolkwargs['magsarefluxes'],
sigclip=None,
verbose=True
)
# set the epoch correctly now for the plot
lctoolargs[-1] = spfit['fitinfo']['fitepoch']
if len(spfit['fitinfo']['fitepoch']) != 1:
lctoolargs[-1] = (
spfit['fitinfo']['fitepoch'][0]
)
# if the spline fit fails, use the minimum of times as
# epoch as usual
except Exception as e:
LOGGER.exception(
'spline fit failed, '
'using min(times) as epoch'
)
lctoolargs[-1] = np.min(stimes)
# now run the phased LC function with provided args,
# kwargs
# final times, mags, errs
lctoolargs[3] = stimes
lctoolargs[4] = smags
lctoolargs[5] = serrs
# the sigclip kwarg isn't used here since we did this
# already earlier
del lctoolkwargs['sigclip']
lctoolfunction = CPTOOLMAP[lctool]['func']
funcresults = yield self.executor.submit(
lctoolfunction,
*lctoolargs,
**lctoolkwargs
)
# save these to the tempcpdict
# save the pickle only if readonly is not true
if not self.readonly:
if (lspmethod in tempcpdict and
isinstance(tempcpdict[lspmethod], dict)):
if periodind in tempcpdict[lspmethod]:
tempcpdict[lspmethod][periodind] = (
funcresults
)
else:
tempcpdict[lspmethod].update(
{periodind: funcresults}
)
else:
tempcpdict[lspmethod] = {periodind: funcresults}
savekwargs = {
'outfile':tempfpath,
'protocol':pickle.HIGHEST_PROTOCOL
}
savedcpf = yield self.executor.submit(
_write_checkplot_picklefile,
tempcpdict,
**savekwargs
)
LOGGER.info(
'saved temp results from '
'%s to checkplot: %s' %
(lctool, savedcpf)
)
else:
LOGGER.warning(
'not saving temp results to checkplot '
                        'because readonly = True'
)
#
# assemble the return dict
#
resultdict['status'] = 'success'
resultdict['message'] = (
'new results for %s' %
lctool
)
retkey = 'phasedlc%s' % periodind
resultdict['result'] = {
'objectid':objectid,
lspmethod:{
retkey:funcresults
}
}
self.write(resultdict)
self.finish()
# if the lctool is var-varfeatures
elif lctool == 'var-varfeatures':
# see if we can return results from a previous iteration of
# this tool
if (not forcereload and
'varinfo' in tempcpdict and
isinstance(tempcpdict['varinfo'], dict) and
'varfeatures' in tempcpdict['varinfo'] and
isinstance(tempcpdict['varinfo']['varfeatures'], dict)):
LOGGER.warning(
'returning previously unsaved '
'results for lctool %s from %s' %
(lctool, tempfpath)
)
#
# assemble the returndict
#
resultdict['status'] = 'warning'
resultdict['message'] = (
'previous '
'unsaved results from %s' %
lctool
)
resultdict['result'] = {
'objectid':objectid,
'varinfo': {
'varfeatures': (
tempcpdict['varinfo']['varfeatures']
)
}
}
self.write(resultdict)
self.finish()
# otherwise, we need to dispatch the function
else:
lctoolfunction = CPTOOLMAP[lctool]['func']
funcresults = yield self.executor.submit(
lctoolfunction,
*lctoolargs,
**lctoolkwargs
)
# save these to the tempcpdict
# save the pickle only if readonly is not true
if not self.readonly:
if ('varinfo' in tempcpdict and
isinstance(tempcpdict['varinfo'], dict)):
if 'varfeatures' in tempcpdict['varinfo']:
tempcpdict['varinfo']['varfeatures'] = (
funcresults
)
else:
tempcpdict['varinfo'].update(
{'varfeatures': funcresults}
)
else:
tempcpdict['varinfo'] = {'varfeatures':
funcresults}
savekwargs = {
'outfile':tempfpath,
'protocol':pickle.HIGHEST_PROTOCOL
}
savedcpf = yield self.executor.submit(
_write_checkplot_picklefile,
tempcpdict,
**savekwargs
)
LOGGER.info(
'saved temp results from '
'%s to checkplot: %s' %
(lctool, savedcpf)
)
else:
LOGGER.warning(
'not saving temp results to checkplot '
                        'because readonly = True'
)
#
# assemble the return dict
#
resultdict['status'] = 'success'
resultdict['message'] = (
'new results for %s' %
lctool
)
resultdict['result'] = {
'objectid':objectid,
'varinfo':{
'varfeatures':funcresults
}
}
self.write(resultdict)
self.finish()
# if the lctool is var-prewhiten or var-masksig
elif lctool in ('var-prewhiten','var-masksig'):
key1, key2 = resloc
# see if we can return results from a previous iteration of
# this tool
if (not forcereload and
key1 in tempcpdict and
isinstance(tempcpdict[key1], dict) and
key2 in tempcpdict[key1] and
isinstance(tempcpdict[key1][key2], dict)):
LOGGER.warning(
'returning previously unsaved '
'results for lctool %s from %s' %
(lctool, tempfpath)
)
#
# assemble the returndict
#
resultdict['status'] = 'warning'
resultdict['message'] = (
'previous '
'unsaved results from %s' %
lctool
)
resultdict['result'] = {
'objectid':objectid,
key1: {
key2: (
tempcpdict[key1][key2]
)
}
}
self.write(resultdict)
self.finish()
# otherwise, we need to dispatch the function
else:
lctoolfunction = CPTOOLMAP[lctool]['func']
# send in a stringio object for the fitplot kwarg
lctoolkwargs['plotfit'] = StrIO()
funcresults = yield self.executor.submit(
lctoolfunction,
*lctoolargs,
**lctoolkwargs
)
# we turn the returned fitplotfile fd into a base64
# encoded string after reading it
fitfd = funcresults['fitplotfile']
fitfd.seek(0)
fitbin = fitfd.read()
fitb64 = base64.b64encode(fitbin)
fitfd.close()
funcresults['fitplotfile'] = fitb64
# save these to the tempcpdict
# save the pickle only if readonly is not true
if not self.readonly:
if (key1 in tempcpdict and
isinstance(tempcpdict[key1], dict)):
if key2 in tempcpdict[key1]:
tempcpdict[key1][key2] = (
funcresults
)
else:
tempcpdict[key1].update(
{key2: funcresults}
)
else:
tempcpdict[key1] = {key2: funcresults}
savekwargs = {
'outfile':tempfpath,
'protocol':pickle.HIGHEST_PROTOCOL
}
savedcpf = yield self.executor.submit(
_write_checkplot_picklefile,
tempcpdict,
**savekwargs
)
LOGGER.info(
'saved temp results from '
'%s to checkplot: %s' %
(lctool, savedcpf)
)
else:
LOGGER.warning(
'not saving temp results to checkplot '
                        'because readonly = True'
)
#
# assemble the return dict
#
# for this operation, we'll return:
# - fitplotfile
fitreturndict = {'fitplotfile':fitb64}
resultdict['status'] = 'success'
resultdict['message'] = (
'new results for %s' %
lctool
)
resultdict['result'] = {
'objectid':objectid,
key1:{
key2:fitreturndict
}
}
self.write(resultdict)
self.finish()
# if the lctool is a lcfit method
elif lctool in ('lcfit-fourier',
'lcfit-spline',
'lcfit-legendre',
'lcfit-savgol'):
key1, key2 = resloc
# see if we can return results from a previous iteration of
# this tool
if (not forcereload and
key1 in tempcpdict and
isinstance(tempcpdict[key1], dict) and
key2 in tempcpdict[key1] and
isinstance(tempcpdict[key1][key2], dict)):
LOGGER.warning(
'returning previously unsaved '
'results for lctool %s from %s' %
(lctool, tempfpath)
)
#
# assemble the returndict
#
resultdict['status'] = 'warning'
resultdict['message'] = (
'previous '
'unsaved results from %s' %
lctool
)
# these are the full results
phasedfitlc = tempcpdict[key1][key2]
# we only want a few things from them
fitresults = {
'method':phasedfitlc['lcfit']['fittype'],
'chisq':phasedfitlc['lcfit']['fitchisq'],
'redchisq':phasedfitlc['lcfit']['fitredchisq'],
'period':phasedfitlc['period'],
'epoch':phasedfitlc['epoch'],
'plot':phasedfitlc['plot'],
}
# add fitparams if there are any
if ('finalparams' in phasedfitlc['lcfit']['fitinfo'] and
phasedfitlc['lcfit']['fitinfo']['finalparams']
is not None):
fitresults['fitparams'] = (
phasedfitlc['lcfit']['fitinfo']['finalparams']
)
# this is the final result object
resultdict['result'] = {
'objectid':objectid,
key1: {
key2: (
fitresults
)
}
}
self.write(resultdict)
self.finish()
# otherwise, we need to dispatch the function
else:
lctoolfunction = CPTOOLMAP[lctool]['func']
funcresults = yield self.executor.submit(
lctoolfunction,
*lctoolargs,
**lctoolkwargs
)
# now that we have the fit results, generate a fitplot.
# these args are for the special fitplot mode of
# _pkl_phased_magseries_plot
phasedlcargs = (None,
'lcfit',
-1,
cptimes,
cpmags,
cperrs,
lctoolargs[3], # this is the fit period
'min')
# here, we set a bestperiodhighlight to distinguish this
# plot from the ones existing in the checkplot already
# also add the overplotfit information
phasedlckwargs = {
'xliminsetmode':False,
'magsarefluxes':lctoolkwargs['magsarefluxes'],
'bestperiodhighlight':'#defa75',
'overplotfit':funcresults
}
# dispatch the plot function
phasedlc = yield self.executor.submit(
_pkl_phased_magseries_plot,
*phasedlcargs,
**phasedlckwargs
)
# save these to the tempcpdict
# save the pickle only if readonly is not true
if not self.readonly:
if (key1 in tempcpdict and
isinstance(tempcpdict[key1], dict)):
if key2 in tempcpdict[key1]:
tempcpdict[key1][key2] = (
phasedlc
)
else:
tempcpdict[key1].update(
{key2: phasedlc}
)
else:
tempcpdict[key1] = {key2: phasedlc}
savekwargs = {
'outfile':tempfpath,
'protocol':pickle.HIGHEST_PROTOCOL
}
savedcpf = yield self.executor.submit(
_write_checkplot_picklefile,
tempcpdict,
**savekwargs
)
LOGGER.info(
'saved temp results from '
'%s to checkplot: %s' %
(lctool, savedcpf)
)
else:
LOGGER.warning(
'not saving temp results to checkplot '
                        'because readonly = True'
)
#
# assemble the return dict
#
fitresults = {
'method':phasedlc['lcfit']['fittype'],
'chisq':phasedlc['lcfit']['fitchisq'],
'redchisq':phasedlc['lcfit']['fitredchisq'],
'period':phasedlc['period'],
'epoch':phasedlc['epoch'],
'plot':phasedlc['plot'],
}
# add fitparams if there are any
if ('finalparams' in funcresults['fitinfo'] and
funcresults['fitinfo']['finalparams'] is not None):
fitresults['fitparams'] = (
funcresults['fitinfo']['finalparams']
)
resultdict['status'] = 'success'
resultdict['message'] = (
'new results for %s' %
lctool
)
resultdict['result'] = {
'objectid':objectid,
key1:{
key2:fitresults
}
}
self.write(resultdict)
self.finish()
# if this is the special lcfit subtract tool
elif lctool == 'lcfit-subtract':
fitmethod, periodind = lctoolargs
# find the fit requested
# subtract it from the cptimes, cpmags, cperrs
# if not readonly, write back to cptimes, cpmags, cperrs
# make a new phasedlc plot for the current periodind using
# these new cptimes, cpmags, cperrs
# return this plot
# if this is the special full reset tool
elif lctool == 'lctool-reset':
if os.path.exists(tempfpath):
os.remove(tempfpath)
LOGGER.warning('reset all LC tool results '
'for %s by removing %s' %
                               (cpfpath, tempfpath))
resultdict['status'] = 'success'
else:
resultdict['status'] = 'error'
LOGGER.warning('tried to reset LC tool results for %s, '
'but temp checkplot result pickle %s '
'does not exist' %
                               (cpfpath, tempfpath))
resultdict['message'] = (
'all unsynced results for this object have been purged'
)
resultdict['result'] = {'objectid':cpobjectid}
self.write(resultdict)
self.finish()
# if this is the special load results tool
elif lctool == 'lctool-results':
target = self.get_argument('resultsfor',None)
if target is not None:
target = xhtml_escape(target)
# get rid of invalid targets
if (target not in CPTOOLMAP or
target == 'lctool-reset' or
target == 'lctool-results' or
target == 'phasedlc-newplot' or
target == 'lcfit-subtract'):
LOGGER.error("can't get results for %s" % target)
resultdict['status'] = 'error'
resultdict['message'] = (
"can't get results for %s" % target
)
resultdict['result'] = {'objectid':cpobjectid}
self.write(resultdict)
raise tornado.web.Finish()
# if we're good to go, get the target location
targetloc = CPTOOLMAP[target]['resloc']
# first, search the cptempdict for this target
# if found, return it
# second, search the actual cpdict for this target
# if found, return it
# otherwise, we're being asked for everything
# return the whole
else:
pass
# otherwise, this is an unrecognized lctool
else:
            LOGGER.error('lctool %s does not exist' % lctool)
resultdict['status'] = 'error'
resultdict['message'] = (
'lctool %s does not exist' % lctool
)
resultdict['result'] = {'objectid':cpobjectid}
self.write(resultdict)
raise tornado.web.Finish()
# if the cpfile doesn't exist
else:
LOGGER.error('could not find %s' % self.cpfile)
resultdict = {'status':'error',
'message':"This checkplot doesn't exist.",
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
# if no checkplot was provided to load
else:
resultdict = {'status':'error',
'message':'No checkplot provided to load.',
'readonly':self.readonly,
'result':None}
self.write(resultdict)
raise tornado.web.Finish()
|
def initialize(self, executor, secret):
'''
This handles initial setup of the `RequestHandler`.
'''
self.executor = executor
self.secret = secret
|
def get(self):
'''This handles GET requests.
Returns the requested checkplot pickle's information as JSON.
Requires a pre-shared secret `key` argument for the operation to
complete successfully. This is obtained from a command-line argument.
'''
provided_key = self.get_argument('key',default=None)
if not provided_key:
LOGGER.error('standalone URL hit but no secret key provided')
retdict = {'status':'error',
'message':('standalone URL hit but '
'no secret key provided'),
'result':None,
'readonly':True}
self.set_status(401)
self.write(retdict)
raise tornado.web.Finish()
else:
provided_key = xhtml_escape(provided_key)
if not _time_independent_equals(provided_key,
self.secret):
LOGGER.error('secret key provided does not match known key')
retdict = {'status':'error',
                           'message':('secret key provided does not '
                                      'match known key'),
'result':None,
'readonly':True}
self.set_status(401)
self.write(retdict)
raise tornado.web.Finish()
#
# actually start work here
#
LOGGER.info('key auth OK')
checkplotfname = self.get_argument('cp', default=None)
if checkplotfname:
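            # the 'cp' argument is expected to arrive as a URL-escaped,
            # base64-encoded checkplot file path; both encodings are
            # undone below before the path is used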
try:
# do the usual safing
cpfpath = xhtml_escape(
base64.b64decode(url_unescape(checkplotfname))
)
except Exception as e:
msg = 'could not decode the incoming payload'
LOGGER.error(msg)
resultdict = {'status':'error',
'message':msg,
'result':None,
'readonly':True}
self.set_status(400)
self.write(resultdict)
raise tornado.web.Finish()
LOGGER.info('loading %s...' % cpfpath)
if not os.path.exists(cpfpath):
msg = "couldn't find checkplot %s" % cpfpath
LOGGER.error(msg)
resultdict = {'status':'error',
'message':msg,
'result':None,
'readonly':True}
self.set_status(404)
self.write(resultdict)
raise tornado.web.Finish()
#
# load the checkplot
#
# this is the async call to the executor
cpdict = yield self.executor.submit(
_read_checkplot_picklefile, cpfpath
)
#####################################
## continue after we're good to go ##
#####################################
LOGGER.info('loaded %s' % cpfpath)
# break out the initial info
objectid = cpdict['objectid']
objectinfo = cpdict['objectinfo']
varinfo = cpdict['varinfo']
if 'pfmethods' in cpdict:
pfmethods = cpdict['pfmethods']
else:
pfmethods = []
for pfm in PFMETHODS:
if pfm in cpdict:
pfmethods.append(pfm)
# handle neighbors for this object
neighbors = []
if ('neighbors' in cpdict and
cpdict['neighbors'] is not None and
                    len(cpdict['neighbors']) > 0):
nbrlist = cpdict['neighbors']
# get each neighbor, its info, and its phased LCs
for nbr in nbrlist:
if 'magdiffs' in nbr:
nbrmagdiffs = nbr['magdiffs']
else:
nbrmagdiffs = None
if 'colordiffs' in nbr:
nbrcolordiffs = nbr['colordiffs']
else:
nbrcolordiffs = None
thisnbrdict = {
'objectid':nbr['objectid'],
'objectinfo':{
'ra':nbr['ra'],
'decl':nbr['decl'],
'xpix':nbr['xpix'],
'ypix':nbr['ypix'],
'distarcsec':nbr['dist'],
'magdiffs':nbrmagdiffs,
'colordiffs':nbrcolordiffs
}
}
try:
nbr_magseries = nbr['magseries']['plot']
thisnbrdict['magseries'] = nbr_magseries
except Exception as e:
LOGGER.error(
"could not load magseries plot for "
"neighbor %s for object %s"
% (nbr['objectid'],
cpdict['objectid'])
)
try:
for pfm in pfmethods:
if pfm in nbr:
thisnbrdict[pfm] = {
'plot':nbr[pfm][0]['plot'],
'period':nbr[pfm][0]['period'],
'epoch':nbr[pfm][0]['epoch']
}
except Exception as e:
LOGGER.error(
"could not load phased LC plots for "
"neighbor %s for object %s"
% (nbr['objectid'],
cpdict['objectid'])
)
neighbors.append(thisnbrdict)
# load object comments
if 'comments' in cpdict:
objectcomments = cpdict['comments']
else:
objectcomments = None
# load the xmatch results, if any
if 'xmatch' in cpdict:
objectxmatch = cpdict['xmatch']
else:
objectxmatch = None
# load the colormagdiagram object
if 'colormagdiagram' in cpdict:
colormagdiagram = cpdict['colormagdiagram']
else:
colormagdiagram = None
# these are base64 which can be provided directly to JS to
# generate images (neat!)
if 'finderchart' in cpdict:
finderchart = cpdict['finderchart']
else:
finderchart = None
if ('magseries' in cpdict and
isinstance(cpdict['magseries'], dict) and
'plot' in cpdict['magseries']):
magseries = cpdict['magseries']['plot']
time0 = cpdict['magseries']['times'].min()
magseries_ndet = cpdict['magseries']['times'].size
else:
magseries = None
time0 = 0.0
magseries_ndet = 0
LOGGER.warning(
"no 'magseries' key present in this "
"checkplot, some plots may be broken..."
)
if 'status' in cpdict:
cpstatus = cpdict['status']
else:
cpstatus = 'unknown, possibly incomplete checkplot'
# load the uifilters if present
if 'uifilters' in cpdict:
uifilters = cpdict['uifilters']
else:
uifilters = {'psearch_magfilters':None,
'psearch_sigclip':None,
'psearch_timefilters':None}
# this is the initial dict
resultdict = {
'status':'ok',
'message':'found checkplot %s' % os.path.basename(cpfpath),
'readonly':True,
'result':{
'time0':'%.3f' % time0,
'objectid':objectid,
'objectinfo':objectinfo,
'colormagdiagram':colormagdiagram,
'objectcomments':objectcomments,
'varinfo':varinfo,
'uifilters':uifilters,
'neighbors':neighbors,
'xmatch':objectxmatch,
'finderchart':finderchart,
'magseries':magseries,
# fallback in case objectinfo doesn't have ndet
'magseries_ndet':magseries_ndet,
'cpstatus':cpstatus,
'pfmethods':pfmethods
}
}
# now get the periodograms and phased LCs
for key in pfmethods:
# get the periodogram for this method
periodogram = cpdict[key]['periodogram']
# get the phased LC with best period
if 0 in cpdict[key] and isinstance(cpdict[key][0], dict):
phasedlc0plot = cpdict[key][0]['plot']
phasedlc0period = float(cpdict[key][0]['period'])
phasedlc0epoch = float(cpdict[key][0]['epoch'])
else:
phasedlc0plot = None
phasedlc0period = None
phasedlc0epoch = None
# get the associated fitinfo for this period if it
# exists
if (0 in cpdict[key] and
isinstance(cpdict[key][0], dict) and
'lcfit' in cpdict[key][0] and
isinstance(cpdict[key][0]['lcfit'], dict)):
phasedlc0fit = {
'method':(
cpdict[key][0]['lcfit']['fittype']
),
'redchisq':(
cpdict[key][0]['lcfit']['fitredchisq']
),
'chisq':(
cpdict[key][0]['lcfit']['fitchisq']
),
'params':(
cpdict[key][0][
'lcfit'
]['fitinfo']['finalparams'] if
'finalparams' in
cpdict[key][0]['lcfit']['fitinfo'] else None
)
}
else:
phasedlc0fit = None
# get the phased LC with 2nd best period
if 1 in cpdict[key] and isinstance(cpdict[key][1], dict):
phasedlc1plot = cpdict[key][1]['plot']
phasedlc1period = float(cpdict[key][1]['period'])
phasedlc1epoch = float(cpdict[key][1]['epoch'])
else:
phasedlc1plot = None
phasedlc1period = None
phasedlc1epoch = None
# get the associated fitinfo for this period if it
# exists
if (1 in cpdict[key] and
isinstance(cpdict[key][1], dict) and
'lcfit' in cpdict[key][1] and
isinstance(cpdict[key][1]['lcfit'], dict)):
phasedlc1fit = {
'method':(
cpdict[key][1]['lcfit']['fittype']
),
'redchisq':(
cpdict[key][1]['lcfit']['fitredchisq']
),
'chisq':(
cpdict[key][1]['lcfit']['fitchisq']
),
'params':(
cpdict[key][1][
'lcfit'
]['fitinfo']['finalparams'] if
'finalparams' in
cpdict[key][1]['lcfit']['fitinfo'] else None
)
}
else:
phasedlc1fit = None
# get the phased LC with 3rd best period
if 2 in cpdict[key] and isinstance(cpdict[key][2], dict):
phasedlc2plot = cpdict[key][2]['plot']
phasedlc2period = float(cpdict[key][2]['period'])
phasedlc2epoch = float(cpdict[key][2]['epoch'])
else:
phasedlc2plot = None
phasedlc2period = None
phasedlc2epoch = None
# get the associated fitinfo for this period if it
# exists
if (2 in cpdict[key] and
isinstance(cpdict[key][2], dict) and
'lcfit' in cpdict[key][2] and
isinstance(cpdict[key][2]['lcfit'], dict)):
phasedlc2fit = {
'method':(
cpdict[key][2]['lcfit']['fittype']
),
'redchisq':(
cpdict[key][2]['lcfit']['fitredchisq']
),
'chisq':(
cpdict[key][2]['lcfit']['fitchisq']
),
'params':(
cpdict[key][2][
'lcfit'
]['fitinfo']['finalparams'] if
'finalparams' in
cpdict[key][2]['lcfit']['fitinfo'] else None
)
}
else:
phasedlc2fit = None
resultdict['result'][key] = {
'nbestperiods':cpdict[key]['nbestperiods'],
'periodogram':periodogram,
'bestperiod':cpdict[key]['bestperiod'],
'phasedlc0':{
'plot':phasedlc0plot,
'period':phasedlc0period,
'epoch':phasedlc0epoch,
'lcfit':phasedlc0fit,
},
'phasedlc1':{
'plot':phasedlc1plot,
'period':phasedlc1period,
'epoch':phasedlc1epoch,
'lcfit':phasedlc1fit,
},
'phasedlc2':{
'plot':phasedlc2plot,
'period':phasedlc2period,
'epoch':phasedlc2epoch,
'lcfit':phasedlc2fit,
},
}
#
# end of processing per pfmethod
#
self.set_header('Content-Type','application/json; charset=UTF-8')
self.write(resultdict)
self.finish()
else:
LOGGER.error('no checkplot file requested')
resultdict = {'status':'error',
'message':"This checkplot doesn't exist.",
'readonly':True,
'result':None}
            self.set_status(400)
self.write(resultdict)
self.finish()
|
def extinction_query(lon, lat,
coordtype='equatorial',
sizedeg=5.0,
forcefetch=False,
cachedir='~/.astrobase/dust-cache',
verbose=True,
timeout=10.0,
jitter=5.0):
'''This queries the 2MASS DUST service to find the extinction parameters
for the given `lon`, `lat`.
Parameters
----------
lon,lat: float
These are decimal right ascension and declination if `coordtype =
    'equatorial'`. These are decimal Galactic longitude and latitude if
`coordtype = 'galactic'`.
coordtype : {'equatorial','galactic'}
Sets the type of coordinates passed in as `lon`, `lat`.
sizedeg : float
This is the width of the image returned by the DUST service. This can
usually be left as-is if you're interested in the extinction only.
forcefetch : bool
If this is True, the query will be retried even if cached results for
it exist.
cachedir : str
This points to the directory where results will be downloaded.
verbose : bool
If True, will indicate progress and warn of any issues.
timeout : float
This sets the amount of time in seconds to wait for the service to
respond to our request.
jitter : float
This is used to control the scale of the random wait in seconds before
starting the query. Useful in parallelized situations.
Returns
-------
dict
A dict of the following form is returned::
{'Amag':{dict of extinction A_v values for several mag systems},
'table': array containing the full extinction table,
'tablefile': the path to the full extinction table file on disk,
'provenance': 'cached' or 'new download',
'request': string repr of the request made to 2MASS DUST}
'''
dustparams = DUST_PARAMS.copy()
# convert the lon, lat to the required format
# and generate the param dict
if coordtype == 'equatorial':
locstr = '%.3f %.3f Equ J2000' % (lon, lat)
elif coordtype == 'galactic':
locstr = '%.3f %.3f gal' % (lon, lat)
else:
LOGERROR('unknown coordinate type: %s' % coordtype)
return None
dustparams['locstr'] = locstr
dustparams['regSize'] = '%.3f' % sizedeg
# see if the cachedir exists
if '~' in cachedir:
cachedir = os.path.expanduser(cachedir)
if not os.path.exists(cachedir):
os.makedirs(cachedir)
# generate the cachekey and cache filename
cachekey = '%s - %.1f' % (locstr, sizedeg)
cachekey = hashlib.sha256(cachekey.encode()).hexdigest()
cachefname = os.path.join(cachedir, '%s.txt' % cachekey)
provenance = 'cache'
# if this does not exist in cache or if we're forcefetching, do the query
if forcefetch or (not os.path.exists(cachefname)):
        time.sleep(random.uniform(1.0, jitter))
provenance = 'new download'
try:
if verbose:
LOGINFO('submitting 2MASS DUST request for '
'lon = %.3f, lat = %.3f, type = %s, size = %.1f' %
(lon, lat, coordtype, sizedeg))
req = requests.get(DUST_URL, dustparams, timeout=timeout)
req.raise_for_status()
resp = req.text
# see if we got an extinction table URL in the response
tableurl = DUST_REGEX.search(resp)
# if we did, download it to the cache directory
if tableurl:
tableurl = tableurl.group(0)
req2 = requests.get(tableurl, timeout=timeout)
# write the table to the cache directory
with open(cachefname,'wb') as outfd:
outfd.write(req2.content)
tablefname = cachefname
else:
LOGERROR('could not get extinction parameters for '
'%s (%.3f, %.3f) with size = %.1f' % (coordtype,
lon,lat,sizedeg))
LOGERROR('error from DUST service follows:\n%s' % resp)
return None
except requests.exceptions.Timeout as e:
LOGERROR('DUST request timed out for '
'%s (%.3f, %.3f) with size = %.1f' % (coordtype,
lon,lat,sizedeg))
return None
except Exception as e:
LOGEXCEPTION('DUST request failed for '
'%s (%.3f, %.3f) with size = %.1f' % (coordtype,
lon,lat,sizedeg))
return None
# if this result is available in the cache, get it from there
else:
if verbose:
LOGINFO('getting cached 2MASS DUST result for '
'lon = %.3f, lat = %.3f, coordtype = %s, size = %.1f' %
(lon, lat, coordtype, sizedeg))
tablefname = cachefname
#
# now we should have the extinction table in some form
#
# read and parse the extinction table using astropy.Table
extinction_table = Table.read(tablefname, format='ascii.ipac')
# get the columns we need
filters = np.array(extinction_table['Filter_name'])
a_sf11_byfilter = np.array(extinction_table['A_SandF'])
a_sfd98_byfilter = np.array(extinction_table['A_SFD'])
# generate the output dict
extdict = {'Amag':{x:{'sf11':y, 'sfd98':z} for
x,y,z in zip(filters,a_sf11_byfilter,a_sfd98_byfilter)},
'table':np.array(extinction_table),
'tablefile':os.path.abspath(cachefname),
'provenance':provenance,
'request':'%s (%.3f, %.3f) with size = %.1f' % (coordtype,
lon,lat,
sizedeg)}
return extdict
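
# Usage sketch for `extinction_query` (illustrative only: the coordinates
# below are arbitrary, and the exact filter-name keys in 'Amag' follow the
# Filter_name column of the 2MASS DUST extinction table):
if __name__ == '__main__':
    extresult = extinction_query(290.0, 45.0, coordtype='equatorial')
    if extresult is not None:
        print('provenance:', extresult['provenance'])
        # each 'Amag' entry maps a filter name to its SF11 and SFD98 A values
        for filtname, avals in extresult['Amag'].items():
            print(filtname, avals['sf11'], avals['sfd98'])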
|
def smooth_magseries_gaussfilt(mags, windowsize, windowfwhm=7):
'''This smooths the magseries with a Gaussian kernel.
Parameters
----------
mags : np.array
The input mags/flux time-series to smooth.
windowsize : int
        This is an odd integer containing the smoothing window size.
windowfwhm : int
This is an odd integer containing the FWHM of the applied Gaussian
window function.
Returns
-------
np.array
The smoothed mag/flux time-series array.
'''
convkernel = Gaussian1DKernel(windowfwhm, x_size=windowsize)
smoothed = convolve(mags, convkernel, boundary='extend')
return smoothed
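
# Minimal usage sketch for `smooth_magseries_gaussfilt` on a synthetic
# noisy mag series (the values below are made up for illustration):
if __name__ == '__main__':
    import numpy as np
    demo_mags = 12.0 + 0.05*np.random.randn(500)
    # a 21-point window with a Gaussian of stddev-parameter 7
    demo_smoothed = smooth_magseries_gaussfilt(demo_mags, 21, windowfwhm=7)
    print(demo_smoothed[:5])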
|
def smooth_magseries_savgol(mags, windowsize, polyorder=2):
    '''This smooths the magseries with a Savitzky-Golay filter.
Parameters
----------
mags : np.array
The input mags/flux time-series to smooth.
windowsize : int
        This is an odd integer containing the smoothing window size.
polyorder : int
        This is an integer containing the polynomial order to use when
        generating the Savitzky-Golay filter.
Returns
-------
np.array
The smoothed mag/flux time-series array.
'''
smoothed = savgol_filter(mags, windowsize, polyorder)
return smoothed
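
# Minimal usage sketch for `smooth_magseries_savgol` (same synthetic setup
# as the Gaussian-filter sketch above; the window size must be odd and
# larger than `polyorder`):
if __name__ == '__main__':
    import numpy as np
    demo_mags = 12.0 + 0.05*np.random.randn(500)
    demo_smoothed = smooth_magseries_savgol(demo_mags, 21, polyorder=2)
    print(demo_smoothed[:5])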
|
def _old_epd_diffmags(coeff, fsv, fdv, fkv, xcc, ycc, bgv, bge, mag):
'''
This calculates the difference in mags after EPD coefficients are
calculated.
final EPD mags = median(magseries) + epd_diffmags()
'''
return -(coeff[0]*fsv**2. +
coeff[1]*fsv +
coeff[2]*fdv**2. +
coeff[3]*fdv +
coeff[4]*fkv**2. +
coeff[5]*fkv +
coeff[6] +
coeff[7]*fsv*fdv +
coeff[8]*fsv*fkv +
coeff[9]*fdv*fkv +
coeff[10]*np.sin(2*np.pi*xcc) +
coeff[11]*np.cos(2*np.pi*xcc) +
coeff[12]*np.sin(2*np.pi*ycc) +
coeff[13]*np.cos(2*np.pi*ycc) +
coeff[14]*np.sin(4*np.pi*xcc) +
coeff[15]*np.cos(4*np.pi*xcc) +
coeff[16]*np.sin(4*np.pi*ycc) +
coeff[17]*np.cos(4*np.pi*ycc) +
coeff[18]*bgv +
coeff[19]*bge -
mag)
|
def _old_epd_magseries(times, mags, errs,
fsv, fdv, fkv, xcc, ycc, bgv, bge,
epdsmooth_windowsize=21,
epdsmooth_sigclip=3.0,
epdsmooth_func=smooth_magseries_signal_medfilt,
epdsmooth_extraparams=None):
'''
Detrends a magnitude series given in mag using accompanying values of S in
fsv, D in fdv, K in fkv, x coords in xcc, y coords in ycc, background in
bgv, and background error in bge. smooth is used to set a smoothing
parameter for the fit function. Does EPD voodoo.
'''
    # find all the finite values of the magnitudes
finiteind = np.isfinite(mags)
# calculate median and stdev
mags_median = np.median(mags[finiteind])
mags_stdev = np.nanstd(mags)
# if we're supposed to sigma clip, do so
if epdsmooth_sigclip:
excludeind = abs(mags - mags_median) < epdsmooth_sigclip*mags_stdev
finalind = finiteind & excludeind
else:
finalind = finiteind
final_mags = mags[finalind]
final_len = len(final_mags)
# smooth the signal
if isinstance(epdsmooth_extraparams, dict):
smoothedmags = epdsmooth_func(final_mags,
epdsmooth_windowsize,
**epdsmooth_extraparams)
else:
smoothedmags = epdsmooth_func(final_mags, epdsmooth_windowsize)
# make the linear equation matrix
epdmatrix = np.c_[fsv[finalind]**2.0,
fsv[finalind],
fdv[finalind]**2.0,
fdv[finalind],
fkv[finalind]**2.0,
fkv[finalind],
np.ones(final_len),
fsv[finalind]*fdv[finalind],
fsv[finalind]*fkv[finalind],
fdv[finalind]*fkv[finalind],
np.sin(2*np.pi*xcc[finalind]),
np.cos(2*np.pi*xcc[finalind]),
np.sin(2*np.pi*ycc[finalind]),
np.cos(2*np.pi*ycc[finalind]),
np.sin(4*np.pi*xcc[finalind]),
np.cos(4*np.pi*xcc[finalind]),
np.sin(4*np.pi*ycc[finalind]),
np.cos(4*np.pi*ycc[finalind]),
bgv[finalind],
bge[finalind]]
# solve the matrix equation [epdmatrix] . [x] = [smoothedmags]
# return the EPD differential magss if the solution succeeds
try:
coeffs, residuals, rank, singulars = lstsq(epdmatrix, smoothedmags,
rcond=None)
if DEBUG:
print('coeffs = %s, residuals = %s' % (coeffs, residuals))
retdict = {'times':times,
'mags':(mags_median +
_old_epd_diffmags(coeffs, fsv, fdv,
fkv, xcc, ycc, bgv, bge, mags)),
'errs':errs,
'fitcoeffs':coeffs,
'residuals':residuals}
return retdict
# if the solution fails, return nothing
except Exception as e:
LOGEXCEPTION('EPD solution did not converge')
retdict = {'times':times,
'mags':np.full_like(mags, np.nan),
'errs':errs,
                   'fitcoeffs':None,
                   'residuals':None}
return retdict
|
def _epd_function(coeffs, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd):
'''
This is the EPD function to fit using a smoothed mag-series.
'''
return (coeffs[0]*fsv*fsv +
coeffs[1]*fsv +
coeffs[2]*fdv*fdv +
coeffs[3]*fdv +
coeffs[4]*fkv*fkv +
coeffs[5]*fkv +
coeffs[6] +
coeffs[7]*fsv*fdv +
coeffs[8]*fsv*fkv +
coeffs[9]*fdv*fkv +
coeffs[10]*np.sin(2*pi_value*xcc) +
coeffs[11]*np.cos(2*pi_value*xcc) +
coeffs[12]*np.sin(2*pi_value*ycc) +
coeffs[13]*np.cos(2*pi_value*ycc) +
coeffs[14]*np.sin(4*pi_value*xcc) +
coeffs[15]*np.cos(4*pi_value*xcc) +
coeffs[16]*np.sin(4*pi_value*ycc) +
coeffs[17]*np.cos(4*pi_value*ycc) +
coeffs[18]*bgv +
coeffs[19]*bge +
coeffs[20]*iha +
coeffs[21]*izd)
|
def _epd_residual(coeffs, mags, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd):
'''
This is the residual function to minimize using scipy.optimize.leastsq.
'''
f = _epd_function(coeffs, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd)
residual = mags - f
return residual
|
def _epd_residual2(coeffs,
times, mags, errs,
fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd):
'''This is the residual function to minimize using
scipy.optimize.least_squares.
This variant is for :py:func:`.epd_magseries_extparams`.
'''
f = _epd_function(coeffs, fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd)
residual = mags - f
return residual
|
def epd_magseries(times, mags, errs,
fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd,
magsarefluxes=False,
epdsmooth_sigclip=3.0,
epdsmooth_windowsize=21,
epdsmooth_func=smooth_magseries_savgol,
epdsmooth_extraparams=None):
'''Detrends a magnitude series using External Parameter Decorrelation.
Requires a set of external parameters similar to those present in HAT light
curves. At the moment, the HAT light-curve-specific external parameters are:
- S: the 'fsv' column in light curves,
- D: the 'fdv' column in light curves,
- K: the 'fkv' column in light curves,
- x coords: the 'xcc' column in light curves,
- y coords: the 'ycc' column in light curves,
- background value: the 'bgv' column in light curves,
- background error: the 'bge' column in light curves,
- hour angle: the 'iha' column in light curves,
- zenith distance: the 'izd' column in light curves
S, D, and K are defined as follows:
    - S -> measure of PSF sharpness (~1/sigma^2 so smaller S = wider PSF)
- D -> measure of PSF ellipticity in xy direction
- K -> measure of PSF ellipticity in cross direction
S, D, K are related to the PSF's variance and covariance, see eqn 30-33 in
A. Pal's thesis: https://arxiv.org/abs/0906.3486
NOTE: The errs are completely ignored and returned unchanged (except for
sigclip and finite filtering).
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to detrend.
fsv : np.array
Array containing the external parameter `S` of the same length as times.
fdv : np.array
Array containing the external parameter `D` of the same length as times.
fkv : np.array
Array containing the external parameter `K` of the same length as times.
xcc : np.array
Array containing the external parameter `x-coords` of the same length as
times.
ycc : np.array
Array containing the external parameter `y-coords` of the same length as
times.
bgv : np.array
Array containing the external parameter `background value` of the same
length as times.
bge : np.array
Array containing the external parameter `background error` of the same
length as times.
iha : np.array
Array containing the external parameter `hour angle` of the same length
as times.
izd : np.array
Array containing the external parameter `zenith distance` of the same
length as times.
magsarefluxes : bool
Set this to True if `mags` actually contains fluxes.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before fitting the EPD
function to it.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
        This sets the smoothing filter function to use. A Savitzky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
Returns
-------
dict
Returns a dict of the following form::
{'times':the input times after non-finite elems removed,
'mags':the EPD detrended mag values (the EPD mags),
'errs':the errs after non-finite elems removed,
'fitcoeffs':EPD fit coefficient values,
'fitinfo':the full tuple returned by scipy.leastsq,
'fitmags':the EPD fit function evaluated at times,
'mags_median': this is median of the EPD mags,
'mags_mad': this is the MAD of EPD mags}
'''
finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
ftimes, fmags, ferrs = times[::][finind], mags[::][finind], errs[::][finind]
ffsv, ffdv, ffkv, fxcc, fycc, fbgv, fbge, fiha, fizd = (
fsv[::][finind],
fdv[::][finind],
fkv[::][finind],
xcc[::][finind],
ycc[::][finind],
bgv[::][finind],
bge[::][finind],
iha[::][finind],
izd[::][finind],
)
stimes, smags, serrs, separams = sigclip_magseries_with_extparams(
times, mags, errs,
[fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd],
sigclip=epdsmooth_sigclip,
magsarefluxes=magsarefluxes
)
sfsv, sfdv, sfkv, sxcc, sycc, sbgv, sbge, siha, sizd = separams
# smooth the signal
if isinstance(epdsmooth_extraparams, dict):
smoothedmags = epdsmooth_func(smags,
epdsmooth_windowsize,
**epdsmooth_extraparams)
else:
smoothedmags = epdsmooth_func(smags, epdsmooth_windowsize)
# initial fit coeffs
initcoeffs = np.zeros(22)
# fit the smoothed mags and find the EPD function coefficients
leastsqfit = leastsq(_epd_residual,
initcoeffs,
args=(smoothedmags,
sfsv, sfdv, sfkv, sxcc,
sycc, sbgv, sbge, siha, sizd),
full_output=True)
# if the fit succeeds, then get the EPD mags
if leastsqfit[-1] in (1,2,3,4):
fitcoeffs = leastsqfit[0]
epdfit = _epd_function(fitcoeffs,
ffsv, ffdv, ffkv, fxcc, fycc,
fbgv, fbge, fiha, fizd)
epdmags = npmedian(fmags) + fmags - epdfit
retdict = {'times':ftimes,
'mags':epdmags,
'errs':ferrs,
'fitcoeffs':fitcoeffs,
'fitinfo':leastsqfit,
'fitmags':epdfit,
'mags_median':npmedian(epdmags),
'mags_mad':npmedian(npabs(epdmags - npmedian(epdmags)))}
return retdict
# if the solution fails, return nothing
else:
LOGERROR('EPD fit did not converge')
return None
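
# Usage sketch for `epd_magseries` with synthetic data (all external
# parameter values below are random stand-ins, purely for illustration):
if __name__ == '__main__':
    import numpy as np
    npts = 1000
    demo_times = np.sort(np.random.uniform(0.0, 10.0, npts))
    demo_mags = 12.0 + 0.01*np.random.randn(npts)
    demo_errs = np.full(npts, 0.01)
    # stand-ins for fsv, fdv, fkv, xcc, ycc, bgv, bge, iha, izd
    demo_eps = [np.random.uniform(0.5, 1.5, npts) for _ in range(9)]
    demo_epd = epd_magseries(demo_times, demo_mags, demo_errs, *demo_eps)
    if demo_epd is not None:
        print(demo_epd['mags_median'], demo_epd['mags_mad'])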
|
def epd_magseries_extparams(
times,
mags,
errs,
externalparam_arrs,
initial_coeff_guess,
magsarefluxes=False,
epdsmooth_sigclip=3.0,
epdsmooth_windowsize=21,
epdsmooth_func=smooth_magseries_savgol,
epdsmooth_extraparams=None,
objective_func=_epd_residual2,
objective_kwargs=None,
optimizer_func=least_squares,
optimizer_kwargs=None,
):
'''This does EPD on a mag-series with arbitrary external parameters.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to run EPD on.
externalparam_arrs : list of np.arrays
This is a list of ndarrays of external parameters to decorrelate
against. These should all be the same size as `times`, `mags`, `errs`.
initial_coeff_guess : np.array
An array of initial fit coefficients to pass into the objective
function.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before smoothing it and
fitting the EPD function to it. The actual LC will not be sigma-clipped.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
        This sets the smoothing filter function to use. A Savitzky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
objective_func : Python function
The function that calculates residuals between the model and the
smoothed mag-series. This must have the following signature::
def objective_func(fit_coeffs,
times,
mags,
errs,
*external_params,
**objective_kwargs)
where `times`, `mags`, `errs` are arrays of the sigma-clipped and
smoothed time-series, `fit_coeffs` is an array of EPD fit coefficients,
`external_params` is a tuple of the passed in external parameter arrays,
and `objective_kwargs` is a dict of any optional kwargs to pass into the
objective function.
This should return the value of the residual based on evaluating the
model function (and any weights based on errs or times).
objective_kwargs : dict or None
A dict of kwargs to pass into the `objective_func` function.
optimizer_func : Python function
The function that minimizes the residual between the model and the
smoothed mag-series using the `objective_func`. This should have a
signature similar to one of the optimizer functions in `scipy.optimize
<https://docs.scipy.org/doc/scipy/reference/optimize.html>`_, i.e.::
def optimizer_func(objective_func, initial_coeffs, args=(), ...)
and return a `scipy.optimize.OptimizeResult
<https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.OptimizeResult.html>`_. We'll
rely on the ``.success`` attribute to determine if the EPD fit was
successful, and the ``.x`` attribute to get the values of the fit
coefficients.
optimizer_kwargs : dict or None
A dict of kwargs to pass into the `optimizer_func` function.
Returns
-------
dict
Returns a dict of the following form::
{'times':the input times after non-finite elems removed,
'mags':the EPD detrended mag values (the EPD mags),
'errs':the errs after non-finite elems removed,
'fitcoeffs':EPD fit coefficient values,
'fitinfo':the result returned by the optimizer function,
'mags_median': this is the median of the EPD mags,
'mags_mad': this is the MAD of EPD mags}
'''
# get finite times, mags, errs
finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
ftimes, fmags, ferrs = times[::][finind], mags[::][finind], errs[::][finind]
finalparam_arrs = []
for ep in externalparam_arrs:
finalparam_arrs.append(ep[::][finind])
# sigclip the LC to pass into the smoothing for EPD fit
stimes, smags, serrs, eparams = sigclip_magseries_with_extparams(
times.copy(), mags.copy(), errs.copy(),
[x.copy() for x in externalparam_arrs],
sigclip=epdsmooth_sigclip,
magsarefluxes=magsarefluxes
)
# smooth the signal before fitting the function to it
if isinstance(epdsmooth_extraparams, dict):
smoothedmags = epdsmooth_func(smags,
epdsmooth_windowsize,
**epdsmooth_extraparams)
else:
smoothedmags = epdsmooth_func(smags,
epdsmooth_windowsize)
# the initial coeffs are passed in here
initial_coeffs = initial_coeff_guess
# reform the objective function with any optional kwargs
if objective_kwargs is not None:
obj_func = partial(objective_func, **objective_kwargs)
else:
obj_func = objective_func
# run the optimizer function by passing in the objective function, the
# coeffs, and the smoothed mags and external params as part of the `args`
# tuple
if not optimizer_kwargs:
optimizer_kwargs = {}
fit_info = optimizer_func(
obj_func,
initial_coeffs,
args=(stimes, smoothedmags, serrs, *eparams),
**optimizer_kwargs
)
if fit_info.success:
fit_coeffs = fit_info.x
epd_mags = np.median(fmags) + obj_func(fit_coeffs,
ftimes,
fmags,
ferrs,
*finalparam_arrs)
retdict = {'times':ftimes,
'mags':epd_mags,
'errs':ferrs,
'fitcoeffs':fit_coeffs,
'fitinfo':fit_info,
'mags_median':npmedian(epd_mags),
'mags_mad':npmedian(npabs(epd_mags - npmedian(epd_mags)))}
return retdict
# if the solution fails, return nothing
else:
LOGERROR('EPD fit did not converge')
return None
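
# Usage sketch for `epd_magseries_extparams`, relying on the default
# `_epd_residual2` objective and `scipy.optimize.least_squares` optimizer,
# so nine external parameter arrays and 22 coefficients are expected (all
# values below are synthetic):
if __name__ == '__main__':
    import numpy as np
    npts = 500
    demo_times = np.sort(np.random.uniform(0.0, 10.0, npts))
    demo_mags = 12.0 + 0.01*np.random.randn(npts)
    demo_errs = np.full(npts, 0.01)
    demo_eps = [np.random.uniform(0.5, 1.5, npts) for _ in range(9)]
    demo_fit = epd_magseries_extparams(
        demo_times, demo_mags, demo_errs,
        demo_eps,
        np.zeros(22)  # initial guess for the 22 EPD coefficients
    )
    if demo_fit is not None:
        print(demo_fit['fitcoeffs'])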
|
def rfepd_magseries(times, mags, errs,
externalparam_arrs,
magsarefluxes=False,
epdsmooth=True,
epdsmooth_sigclip=3.0,
epdsmooth_windowsize=21,
epdsmooth_func=smooth_magseries_savgol,
epdsmooth_extraparams=None,
rf_subsample=1.0,
rf_ntrees=300,
rf_extraparams={'criterion':'mse',
'oob_score':False,
'n_jobs':-1}):
'''This uses a `RandomForestRegressor` to de-correlate the given magseries.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to run EPD on.
externalparam_arrs : list of np.arrays
This is a list of ndarrays of external parameters to decorrelate
against. These should all be the same size as `times`, `mags`, `errs`.
epdsmooth : bool
If True, sets the training LC for the RandomForestRegress to be a
smoothed version of the sigma-clipped light curve provided in `times`,
`mags`, `errs`.
epdsmooth_sigclip : float or int or sequence of two floats/ints or None
This specifies how to sigma-clip the input LC before smoothing it and
fitting the EPD function to it. The actual LC will not be sigma-clipped.
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
epdsmooth_windowsize : int
This is the number of LC points to smooth over to generate a smoothed
light curve that will be used to fit the EPD function.
epdsmooth_func : Python function
        This sets the smoothing filter function to use. A Savitzky-Golay filter
is used to smooth the light curve by default. The functions that can be
used with this kwarg are listed in `varbase.trends`. If you want to use
your own function, it MUST have the following signature::
def smoothfunc(mags_array, window_size, **extraparams)
and return a numpy array of the same size as `mags_array` with the
smoothed time-series. Any extra params can be provided using the
`extraparams` dict.
epdsmooth_extraparams : dict
This is a dict of any extra filter params to supply to the smoothing
function.
rf_subsample : float
Defines the fraction of the size of the `mags` array to use for
training the random forest regressor.
rf_ntrees : int
This is the number of trees to use for the `RandomForestRegressor`.
    rf_extraparams : dict
This is a dict of any extra kwargs to provide to the
`RandomForestRegressor` instance used.
Returns
-------
dict
Returns a dict with decorrelated mags and the usual info from the
`RandomForestRegressor`: variable importances, etc.
'''
# get finite times, mags, errs
finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
ftimes, fmags, ferrs = times[::][finind], mags[::][finind], errs[::][finind]
finalparam_arrs = []
for ep in externalparam_arrs:
finalparam_arrs.append(ep[::][finind])
stimes, smags, serrs, eparams = sigclip_magseries_with_extparams(
times, mags, errs,
externalparam_arrs,
sigclip=epdsmooth_sigclip,
magsarefluxes=magsarefluxes
)
# smoothing is optional for RFR because we train on a fraction of the mag
# series and so should not require a smoothed input to fit a function to
if epdsmooth:
# smooth the signal
if isinstance(epdsmooth_extraparams, dict):
smoothedmags = epdsmooth_func(smags,
epdsmooth_windowsize,
**epdsmooth_extraparams)
else:
smoothedmags = epdsmooth_func(smags,
epdsmooth_windowsize)
else:
smoothedmags = smags
# set up the regressor
if isinstance(rf_extraparams, dict):
RFR = RandomForestRegressor(n_estimators=rf_ntrees,
**rf_extraparams)
else:
RFR = RandomForestRegressor(n_estimators=rf_ntrees)
# collect the features
features = np.column_stack(eparams)
# fit, then generate the predicted values, then get corrected values
# we fit on a randomly selected subsample of all the mags
if rf_subsample < 1.0:
featureindices = np.arange(smoothedmags.size)
# these are sorted because time-order should be important
training_indices = np.sort(
npr.choice(featureindices,
size=int(rf_subsample*smoothedmags.size),
replace=False)
)
else:
training_indices = np.arange(smoothedmags.size)
RFR.fit(features[training_indices,:], smoothedmags[training_indices])
# predict on the full feature set
flux_corrections = RFR.predict(np.column_stack(finalparam_arrs))
corrected_fmags = npmedian(fmags) + fmags - flux_corrections
retdict = {'times':ftimes,
'mags':corrected_fmags,
'errs':ferrs,
'feature_importances':RFR.feature_importances_,
'regressor':RFR,
'mags_median':npmedian(corrected_fmags),
'mags_mad':npmedian(npabs(corrected_fmags -
npmedian(corrected_fmags)))}
return retdict
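
# Usage sketch for `rfepd_magseries` with a synthetic series correlated
# with one external parameter (all values illustrative; `rf_extraparams`
# is overridden here so the sketch does not depend on any particular
# scikit-learn criterion name):
if __name__ == '__main__':
    import numpy as np
    npts = 400
    demo_times = np.sort(np.random.uniform(0.0, 10.0, npts))
    demo_eps = [np.random.rand(npts) for _ in range(3)]
    # make the mags depend on the first external parameter plus noise
    demo_mags = 12.0 + 0.05*demo_eps[0] + 0.01*np.random.randn(npts)
    demo_errs = np.full(npts, 0.01)
    demo_rf = rfepd_magseries(demo_times, demo_mags, demo_errs,
                              demo_eps,
                              rf_ntrees=100,
                              rf_extraparams={'n_jobs': 1})
    print(demo_rf['feature_importances'])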
|
def lcfit_features(times, mags, errs, period,
fourierorder=5,
# these are depth, duration, ingress duration
transitparams=(-0.01,0.1,0.1),
# these are depth, duration, depth ratio, secphase
ebparams=(-0.2,0.3,0.7,0.5),
sigclip=10.0,
magsarefluxes=False,
fitfailure_means_featurenan=False,
verbose=True):
'''This calculates various features related to fitting models to light
curves.
This function:
- calculates `R_ij` and `phi_ij` ratios for Fourier fit amplitudes and
phases.
    - calculates the reduced chi-sq for Fourier, EB, and planet transit fits.
    - calculates the reduced chi-sq for an EB fit with 2 x the period.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to calculate periodic features for.
period : float
        The period of variability to use to phase the light curve.
fourierorder : int
The Fourier order to use to generate sinusoidal function and fit that to
the phased light curve.
transitparams : list of floats
The transit depth, duration, and ingress duration to use to generate a
trapezoid planet transit model fit to the phased light curve. The period
used is the one provided in `period`, while the epoch is automatically
obtained from a spline fit to the phased light curve.
ebparams : list of floats
The primary eclipse depth, eclipse duration, the primary-secondary depth
ratio, and the phase of the secondary eclipse to use to generate an
eclipsing binary model fit to the phased light curve. The period used is
the one provided in `period`, while the epoch is automatically obtained
from a spline fit to the phased light curve.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
Set this to True if the input measurements in `mags` are actually
fluxes.
fitfailure_means_featurenan : bool
If the planet, EB and EBx2 fits don't return standard errors because the
covariance matrix could not be generated, then the fit is suspicious and
the features calculated can't be trusted. If
`fitfailure_means_featurenan` is True, then the output features for
these fits will be set to nan.
verbose : bool
If True, will indicate progress while working.
Returns
-------
dict
A dict of all the features calculated is returned.
'''
# get the finite values
finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
ftimes, fmags, ferrs = times[finind], mags[finind], errs[finind]
# get nonzero errors
nzind = np.nonzero(ferrs)
ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]
# get the MAD of the unphased light curve
lightcurve_median = np.median(fmags)
lightcurve_mad = np.median(np.abs(fmags - lightcurve_median))
#
# fourier fit
#
# we fit a Fourier series to the light curve using the best period and
    # extract the amplitudes and phases up to `fourierorder` to fit the LC. The
    # various ratios R_ij of the amplitudes and the differences in the phases
# phi_ij are also used as periodic variability features
# do the fit
ffit = lcfit.fourier_fit_magseries(ftimes, fmags, ferrs, period,
fourierorder=fourierorder,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
verbose=verbose)
# get the coeffs and redchisq
fourier_fitcoeffs = ffit['fitinfo']['finalparams']
fourier_chisq = ffit['fitchisq']
fourier_redchisq = ffit['fitredchisq']
if fourier_fitcoeffs is not None:
fourier_modelmags, _, _, fpmags, _ = sinusoidal.fourier_sinusoidal_func(
[period,
ffit['fitinfo']['fitepoch'],
ffit['fitinfo']['finalparams'][:fourierorder],
ffit['fitinfo']['finalparams'][fourierorder:]],
ftimes,
fmags,
ferrs
)
fourier_residuals = fourier_modelmags - fpmags
fourier_residual_median = np.median(fourier_residuals)
fourier_residual_mad = np.median(np.abs(fourier_residuals -
fourier_residual_median))
# break them out into amps and phases
famplitudes = fourier_fitcoeffs[:fourierorder]
fphases = fourier_fitcoeffs[fourierorder:]
famp_combos = combinations(famplitudes,2)
famp_cinds = combinations(range(len(famplitudes)),2)
fpha_combos = combinations(fphases,2)
fpha_cinds = combinations(range(len(fphases)),2)
else:
LOGERROR('LC fit to sinusoidal series model failed, '
'using initial params')
initfourieramps = [0.6] + [0.2]*(fourierorder - 1)
initfourierphas = [0.1] + [0.1]*(fourierorder - 1)
fourier_modelmags, _, _, fpmags, _ = sinusoidal.fourier_sinusoidal_func(
[period,
ffit['fitinfo']['fitepoch'],
initfourieramps,
initfourierphas],
ftimes,
fmags,
ferrs
)
fourier_residuals = fourier_modelmags - fpmags
fourier_residual_median = np.median(fourier_residuals)
fourier_residual_mad = np.median(np.abs(fourier_residuals -
fourier_residual_median))
# break them out into amps and phases
famplitudes = initfourieramps
fphases = initfourierphas
famp_combos = combinations(famplitudes,2)
famp_cinds = combinations(range(len(famplitudes)),2)
fpha_combos = combinations(fphases,2)
fpha_cinds = combinations(range(len(fphases)),2)
fampratios = {}
fphadiffs = {}
# get the ratios for all fourier coeff combinations
for ampi, ampc, phai, phac in zip(famp_cinds,
famp_combos,
fpha_cinds,
fpha_combos):
ampratind = 'R_%s%s' % (ampi[1]+1, ampi[0]+1)
# this is R_ij
amprat = ampc[1]/ampc[0]
phadiffind = 'phi_%s%s' % (phai[1]+1, phai[0]+1)
        # this is phi_ij = phi_i - (i/j)*phi_j, the generalized relative
        # phase difference (indices here are 0-based, orders are 1-based)
        phadiff = phac[1] - ((phai[1]+1)/(phai[0]+1))*phac[0]
fampratios[ampratind] = amprat
fphadiffs[phadiffind] = phadiff
# update the outdict for the Fourier fit results
outdict = {
'fourier_ampratios':fampratios,
'fourier_phadiffs':fphadiffs,
'fourier_fitparams':fourier_fitcoeffs,
'fourier_redchisq':fourier_redchisq,
'fourier_chisq':fourier_chisq,
'fourier_residual_median':fourier_residual_median,
'fourier_residual_mad':fourier_residual_mad,
'fourier_residual_mad_over_lcmad':fourier_residual_mad/lightcurve_mad
}
# EB and planet fits will find the epoch automatically
planetfitparams = [period,
None,
transitparams[0],
transitparams[1],
transitparams[2]]
ebfitparams = [period,
None,
ebparams[0],
ebparams[1],
ebparams[2],
ebparams[3]]
# do the planet and EB fit with this period
planet_fit = lcfit.traptransit_fit_magseries(ftimes, fmags, ferrs,
planetfitparams,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
verbose=verbose)
planetfit_finalparams = planet_fit['fitinfo']['finalparams']
planetfit_finalparamerrs = planet_fit['fitinfo']['finalparamerrs']
if planetfit_finalparamerrs is None and fitfailure_means_featurenan:
LOGWARNING('planet fit: no standard errors available '
'for fit parameters, fit is bad, '
'setting fit chisq and red-chisq to np.nan')
planetfit_chisq = np.nan
planetfit_redchisq = np.nan
planet_residual_median = np.nan
planet_residual_mad = np.nan
planet_residual_mad_over_lcmad = np.nan
else:
planetfit_chisq = planet_fit['fitchisq']
planetfit_redchisq = planet_fit['fitredchisq']
if planetfit_finalparams is not None:
planet_modelmags, _, _, ppmags, _ = transits.trapezoid_transit_func(
planetfit_finalparams,
ftimes,
fmags,
ferrs
)
else:
LOGERROR('LC fit to transit planet model '
'failed, using initial params')
planet_modelmags, _, _, ppmags, _ = transits.trapezoid_transit_func(
planetfitparams,
ftimes,
fmags,
ferrs
)
planet_residuals = planet_modelmags - ppmags
planet_residual_median = np.median(planet_residuals)
planet_residual_mad = np.median(np.abs(planet_residuals -
planet_residual_median))
planet_residual_mad_over_lcmad = planet_residual_mad/lightcurve_mad
eb_fit = lcfit.gaussianeb_fit_magseries(ftimes, fmags, ferrs,
ebfitparams,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
verbose=verbose)
ebfit_finalparams = eb_fit['fitinfo']['finalparams']
ebfit_finalparamerrs = eb_fit['fitinfo']['finalparamerrs']
if ebfit_finalparamerrs is None and fitfailure_means_featurenan:
LOGWARNING('EB fit: no standard errors available '
'for fit parameters, fit is bad, '
'setting fit chisq and red-chisq to np.nan')
ebfit_chisq = np.nan
ebfit_redchisq = np.nan
eb_residual_median = np.nan
eb_residual_mad = np.nan
eb_residual_mad_over_lcmad = np.nan
else:
ebfit_chisq = eb_fit['fitchisq']
ebfit_redchisq = eb_fit['fitredchisq']
if ebfit_finalparams is not None:
eb_modelmags, _, _, ebpmags, _ = eclipses.invgauss_eclipses_func(
ebfit_finalparams,
ftimes,
fmags,
ferrs
)
else:
LOGERROR('LC fit to EB model failed, using initial params')
eb_modelmags, _, _, ebpmags, _ = eclipses.invgauss_eclipses_func(
ebfitparams,
ftimes,
fmags,
ferrs
)
eb_residuals = eb_modelmags - ebpmags
eb_residual_median = np.median(eb_residuals)
eb_residual_mad = np.median(np.abs(eb_residuals - eb_residual_median))
eb_residual_mad_over_lcmad = eb_residual_mad/lightcurve_mad
# do the EB fit with 2 x period
ebfitparams[0] = ebfitparams[0]*2.0
eb_fitx2 = lcfit.gaussianeb_fit_magseries(ftimes, fmags, ferrs,
ebfitparams,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
verbose=verbose)
ebfitx2_finalparams = eb_fitx2['fitinfo']['finalparams']
ebfitx2_finalparamerrs = eb_fitx2['fitinfo']['finalparamerrs']
if ebfitx2_finalparamerrs is None and fitfailure_means_featurenan:
LOGWARNING('EB x2 period fit: no standard errors available '
'for fit parameters, fit is bad, '
'setting fit chisq and red-chisq to np.nan')
ebfitx2_chisq = np.nan
ebfitx2_redchisq = np.nan
ebx2_residual_median = np.nan
ebx2_residual_mad = np.nan
ebx2_residual_mad_over_lcmad = np.nan
else:
ebfitx2_chisq = eb_fitx2['fitchisq']
ebfitx2_redchisq = eb_fitx2['fitredchisq']
if ebfitx2_finalparams is not None:
ebx2_modelmags, _, _, ebx2pmags, _ = (
eclipses.invgauss_eclipses_func(
ebfitx2_finalparams,
ftimes,
fmags,
ferrs
)
)
else:
LOGERROR('LC fit to EB model with 2xP failed, using initial params')
ebx2_modelmags, _, _, ebx2pmags, _ = (
eclipses.invgauss_eclipses_func(
ebfitparams,
ftimes,
fmags,
ferrs
)
)
ebx2_residuals = ebx2_modelmags - ebx2pmags
ebx2_residual_median = np.median(ebx2_residuals)
ebx2_residual_mad = np.median(np.abs(ebx2_residuals -
ebx2_residual_median))
ebx2_residual_mad_over_lcmad = ebx2_residual_mad/lightcurve_mad
# update the outdict
outdict.update({
'planet_fitparams':planetfit_finalparams,
'planet_chisq':planetfit_chisq,
'planet_redchisq':planetfit_redchisq,
'planet_residual_median':planet_residual_median,
'planet_residual_mad':planet_residual_mad,
'planet_residual_mad_over_lcmad':(
    planet_residual_mad_over_lcmad
),
'eb_fitparams':ebfit_finalparams,
'eb_chisq':ebfit_chisq,
'eb_redchisq':ebfit_redchisq,
'eb_residual_median':eb_residual_median,
'eb_residual_mad':eb_residual_mad,
'eb_residual_mad_over_lcmad':(
    eb_residual_mad_over_lcmad
),
'ebx2_fitparams':ebfitx2_finalparams,
'ebx2_chisq':ebfitx2_chisq,
'ebx2_redchisq':ebfitx2_redchisq,
'ebx2_residual_median':ebx2_residual_median,
'ebx2_residual_mad':ebx2_residual_mad,
'ebx2_residual_mad_over_lcmad':(
    ebx2_residual_mad_over_lcmad
),
})
return outdict
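# Hedged sketch of the amplitude-ratio / phase-difference convention used in
# the Fourier-feature loop above: R_ji = a_j/a_i and
# phi_ji = phi_j - (j/i)*phi_i (so phi_21 = phi_2 - 2*phi_1). The coefficient
# values below are made up for illustration.
from itertools import combinations

example_amps = [0.30, 0.12, 0.05]   # hypothetical a_1, a_2, a_3
example_phas = [0.10, 0.80, 1.70]   # hypothetical phi_1, phi_2, phi_3

for (i, j), (a_i, a_j), (p_i, p_j) in zip(
        combinations(range(1, 4), 2),
        combinations(example_amps, 2),
        combinations(example_phas, 2)):
    print('R_%s%s = %.3f' % (j, i, a_j/a_i))
    print('phi_%s%s = %.3f' % (j, i, p_j - (j/i)*p_i))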
def periodogram_features(pgramlist, times, mags, errs,
sigclip=10.0,
pdiff_threshold=1.0e-4,
sidereal_threshold=1.0e-4,
sampling_peak_multiplier=5.0,
sampling_startp=None,
sampling_endp=None,
verbose=True):
'''This calculates various periodogram features (for each periodogram).
The following features are obtained:
- For all best periods from all periodogram methods in `pgramlist`,
calculates the number of these with peaks that are at least
`sampling_peak_multiplier` x time-sampling periodogram peak at the same
period. This indicates how likely the `pgramlist` periodogram peaks are
to be real, as opposed to just being caused by the time-sampling
window-function of the observations.
- For all best periods from all periodogram methods in `pgramlist`,
calculates the number of best periods which are consistent with a sidereal
day (1.0027379 and 0.9972696), likely indicating that they're not real.
- For all best periods from all periodogram methods in `pgramlist`,
calculates the number of cross-wise period differences for all of these
that fall below the `pdiff_threshold` value. If this is high, most of the
period-finders in `pgramlist` agree on their best period results, so it's
likely the periods found are real.
Parameters
----------
pgramlist : list of dicts
This is a list of dicts returned by any of the periodfinding methods in
:py:mod:`astrobase.periodbase`. This can also be obtained from the
resulting pickle from the :py:func:`astrobase.lcproc.periodsearch.run_pf`
function. It's a good idea to make `pgramlist` a list of periodogram
dicts from all magnitude columns in the input light curve to test
periodic variability across all magnitude columns (e.g. period diffs
between EPD and TFA mags).
times,mags,errs : np.array
The input flux/mag time-series to use to calculate features. These are
used to recalculate the time-sampling L-S periodogram (using
:py:func:`astrobase.periodbase.zgls.specwindow_lsp`) if one is not
present in pgramlist. If it's present, these can all be set to None.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
pdiff_threshold : float
This is the max difference between periods to consider them the same.
sidereal_threshold : float
This is the max difference between any of the 'best' periods and the
sidereal day periods to consider them the same.
sampling_peak_multiplier : float
This is the minimum multiplicative factor of a 'best' period's
normalized periodogram peak over the sampling periodogram peak at the
same period required to accept the 'best' period as possibly real.
sampling_startp, sampling_endp : float
If the `pgramlist` doesn't have a time-sampling Lomb-Scargle
periodogram, it will be obtained automatically. Use these kwargs to
control the minimum and maximum period interval to be searched when
generating this periodogram.
verbose : bool
If True, will indicate progress and report errors.
Returns
-------
dict
Returns a dict with all of the periodogram features calculated.
'''
# run the sampling peak periodogram if necessary
pfmethodlist = [pgram['method'] for pgram in pgramlist]
if 'win' not in pfmethodlist:
# get the finite values
finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
ftimes, fmags, ferrs = times[finind], mags[finind], errs[finind]
# get nonzero errors
nzind = np.nonzero(ferrs)
ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]
sampling_lsp = specwindow_lsp(ftimes, fmags, ferrs,
startp=sampling_startp,
endp=sampling_endp,
sigclip=sigclip,
verbose=verbose)
else:
sampling_lsp = pgramlist[pfmethodlist.index('win')]
# get the normalized sampling periodogram peaks
normalized_sampling_lspvals = (
sampling_lsp['lspvals']/(np.nanmax(sampling_lsp['lspvals']) -
np.nanmin(sampling_lsp['lspvals']))
)
normalized_sampling_periods = sampling_lsp['periods']
# go through the periodograms and calculate normalized peak height of best
# periods over the normalized peak height of the sampling periodogram at the
# same periods
for pfm, pgram in zip(pfmethodlist, pgramlist):
if pfm == 'pdm':
best_peak_sampling_ratios = []
close_to_sidereal_flag = []
periods = pgram['periods']
peaks = pgram['lspvals']
normalized_peaks = (1.0 - peaks)/(np.nanmax(1.0 - peaks) -
np.nanmin(1.0 - peaks))
# get the best period normalized peaks
if pgram['nbestperiods'] is None:
LOGERROR('no period results for method: %s' % pfm)
continue
for bp in pgram['nbestperiods']:
if np.isfinite(bp):
#
# first, get the normalized peak ratio
#
thisp_norm_pgrampeak = normalized_peaks[periods == bp]
thisp_sampling_pgramind = (
np.abs(normalized_sampling_periods -
bp) < pdiff_threshold
)
thisp_sampling_peaks = normalized_sampling_lspvals[
thisp_sampling_pgramind
]
if thisp_sampling_peaks.size > 1:
thisp_sampling_ratio = (
thisp_norm_pgrampeak/np.mean(thisp_sampling_peaks)
)
elif thisp_sampling_peaks.size == 1:
thisp_sampling_ratio = (
thisp_norm_pgrampeak/thisp_sampling_peaks
)
else:
LOGERROR('sampling periodogram is not defined '
'at period %.5f, '
'skipping calculation of ratio' % bp)
thisp_sampling_ratio = np.nan
best_peak_sampling_ratios.append(thisp_sampling_ratio)
#
# next, see if the best periods are close to a sidereal day
# or any multiples thereof
#
sidereal_a_ratio = (bp - 1.0027379)/bp
sidereal_b_ratio = (bp - 0.9972696)/bp
if ((np.abs(sidereal_a_ratio) < sidereal_threshold) or
    (np.abs(sidereal_b_ratio) < sidereal_threshold)):
close_to_sidereal_flag.append(True)
else:
close_to_sidereal_flag.append(False)
else:
LOGERROR('period is nan')
best_peak_sampling_ratios.append(np.nan)
close_to_sidereal_flag.append(False)
# update the pgram with these
pgram['nbestpeakratios'] = best_peak_sampling_ratios
pgram['siderealflags'] = close_to_sidereal_flag
elif pfm != 'win':
best_peak_sampling_ratios = []
close_to_sidereal_flag = []
periods = pgram['periods']
peaks = pgram['lspvals']
normalized_peaks = peaks/(np.nanmax(peaks) - np.nanmin(peaks))
# get the best period normalized peaks
if pgram['nbestperiods'] is None:
LOGERROR('no period results for method: %s' % pfm)
continue
#
# first, get the best period normalized peaks
#
for bp in pgram['nbestperiods']:
if np.isfinite(bp):
thisp_norm_pgrampeak = normalized_peaks[periods == bp]
thisp_sampling_pgramind = (
np.abs(normalized_sampling_periods -
bp) < pdiff_threshold
)
thisp_sampling_peaks = normalized_sampling_lspvals[
thisp_sampling_pgramind
]
if thisp_sampling_peaks.size > 1:
thisp_sampling_ratio = (
thisp_norm_pgrampeak/np.mean(thisp_sampling_peaks)
)
elif thisp_sampling_peaks.size == 1:
thisp_sampling_ratio = (
thisp_norm_pgrampeak/thisp_sampling_peaks
)
else:
LOGERROR('sampling periodogram is not defined '
'at period %.5f, '
'skipping calculation of ratio' % bp)
thisp_sampling_ratio = np.nan
best_peak_sampling_ratios.append(thisp_sampling_ratio)
#
# next, see if the best periods are close to a sidereal day
# or any multiples thereof
#
sidereal_a_ratio = (bp - 1.0027379)/bp
sidereal_b_ratio = (bp - 0.9972696)/bp
if ((np.abs(sidereal_a_ratio) < sidereal_threshold) or
    (np.abs(sidereal_b_ratio) < sidereal_threshold)):
close_to_sidereal_flag.append(True)
else:
close_to_sidereal_flag.append(False)
else:
LOGERROR('period is nan')
best_peak_sampling_ratios.append(np.nan)
close_to_sidereal_flag.append(False)
# update the pgram with these
pgram['nbestpeakratios'] = best_peak_sampling_ratios
pgram['siderealflags'] = close_to_sidereal_flag
#
# done with calculations, get the features we need
#
# get the best periods across all the period finding methods
all_bestperiods = np.concatenate(
[x['nbestperiods']
for x in pgramlist if
(x['method'] != 'win' and x['nbestperiods'] is not None)]
)
all_bestperiod_diffs = np.array(
[abs(a-b) for a,b in combinations(all_bestperiods,2)]
)
all_sampling_ratios = np.concatenate(
[x['nbestpeakratios']
for x in pgramlist if
(x['method'] != 'win' and x['nbestperiods'] is not None)]
)
all_sidereal_flags = np.concatenate(
[x['siderealflags']
for x in pgramlist if
(x['method'] != 'win' and x['nbestperiods'] is not None)]
)
# bestperiods_n_abovesampling - number of top period estimates with peaks
# that are at least sampling_peak_multiplier x
# sampling peak height at the same period
bestperiods_n_abovesampling = (
all_sampling_ratios[all_sampling_ratios >
sampling_peak_multiplier]
).size
# bestperiods_n_sidereal - number of top period estimates that are
# consistent with a 1 day period (1.0027379 and
# 0.9972696 actually, for sidereal day period)
bestperiods_n_sidereal = all_sidereal_flags.sum()
# bestperiods_diffn_threshold - the number of cross-wise period diffs from
# all period finders that fall below the
# pdiff_threshold
bestperiods_diffn_threshold = (
    all_bestperiod_diffs < pdiff_threshold
).sum()
resdict = {
'bestperiods_n_abovesampling':bestperiods_n_abovesampling,
'bestperiods_n_sidereal':bestperiods_n_sidereal,
'bestperiods_diffn_threshold':bestperiods_diffn_threshold
}
return resdict
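# Minimal, self-contained sketch of the cross-method period agreement count
# computed by periodogram_features above; the three 'best' periods here are
# hypothetical results from three different period finders.
import numpy as np
from itertools import combinations

all_bestperiods = np.array([1.23450, 1.23455, 2.46900])
pdiff_threshold = 1.0e-4
all_diffs = np.array([abs(a - b) for a, b in
                      combinations(all_bestperiods, 2)])
n_agreeing = (all_diffs < pdiff_threshold).sum()
print(n_agreeing)   # -> 1: only the first two period finders agree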
def phasedlc_features(times,
mags,
errs,
period,
nbrtimes=None,
nbrmags=None,
nbrerrs=None):
'''This calculates various phased LC features for the object.
Some of the features calculated here come from:
Kim, D.-W., Protopapas, P., Bailer-Jones, C. A. L., et al. 2014, Astronomy
and Astrophysics, 566, A43, and references therein (especially Richards, et
al. 2011).
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to calculate the phased LC features for.
period : float
The period used to phase the input mag/flux time-series.
nbrtimes,nbrmags,nbrerrs : np.array or None
If `nbrtimes`, `nbrmags`, and `nbrerrs` are all provided, they should be
ndarrays with `times`, `mags`, `errs` of this object's closest neighbor
(close within some small number x FWHM of telescope to check for
blending). This function will then also calculate extra features based
on the neighbor's phased LC using the `period` provided for the target
object.
Returns
-------
dict
Returns a dict with phased LC features.
'''
# get the finite values
finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
ftimes, fmags, ferrs = times[finind], mags[finind], errs[finind]
# get nonzero errors
nzind = np.nonzero(ferrs)
ftimes, fmags, ferrs = ftimes[nzind], fmags[nzind], ferrs[nzind]
# only operate on LC if enough points
if ftimes.size > 49:
# get the MAD of the unphased light curve
lightcurve_median = np.median(fmags)
lightcurve_mad = np.median(np.abs(fmags - lightcurve_median))
# get p2p for raw lightcurve
p2p_unphasedlc = lightcurve_ptp_measures(ftimes, fmags, ferrs)
inveta_unphasedlc = 1.0/p2p_unphasedlc['eta_normal']
# phase the light curve with the given period, assume epoch is
# times.min()
phasedlc = lcmath.phase_magseries_with_errs(ftimes, fmags, ferrs,
period, ftimes.min(),
wrap=False)
phase = phasedlc['phase']
pmags = phasedlc['mags']
perrs = phasedlc['errs']
# get ptp measures for best period
ptp_bestperiod = lightcurve_ptp_measures(phase,pmags,perrs)
# phase the light curve with the given periodx2, assume epoch is
# times.min()
phasedlc = lcmath.phase_magseries_with_errs(ftimes, fmags, ferrs,
period*2.0, ftimes.min(),
wrap=False)
phasex2 = phasedlc['phase']
pmagsx2 = phasedlc['mags']
perrsx2 = phasedlc['errs']
# get ptp measures for best periodx2
ptp_bestperiodx2 = lightcurve_ptp_measures(phasex2,pmagsx2,perrsx2)
# eta_phasedlc_bestperiod - calculate eta for the phased LC with best
# period
inveta_bestperiod = 1.0/ptp_bestperiod['eta_normal']
# eta_phasedlc_bestperiodx2 - calculate eta for the phased LC with best
# period x 2
inveta_bestperiodx2 = 1.0/ptp_bestperiodx2['eta_normal']
# eta_phased_ratio_eta_raw - eta for best period phased LC / eta for raw
# LC
inveta_ratio_phased_unphased = inveta_bestperiod/inveta_unphasedlc
# eta_phasedx2_ratio_eta_raw - eta for best periodx2 phased LC/eta for
# raw LC
inveta_ratio_phasedx2_unphased = inveta_bestperiodx2/inveta_unphasedlc
# freq_model_max_delta_mags - absval of magdiff btw model phased LC
# maxima using period x 2. look at points
# more than 10 points away for maxima
phasedx2_maxval_ind = argrelmax(pmagsx2, order=10)
if phasedx2_maxval_ind[0].size > 1:
phasedx2_magdiff_maxval = (
np.max(np.abs(np.diff(pmagsx2[phasedx2_maxval_ind[0]])))
)
else:
phasedx2_magdiff_maxval = np.nan
# freq_model_min_delta_mags - absval of magdiff btw model phased LC
# minima using period x 2. look at points
# more than 10 points away for minima
phasedx2_minval_ind = argrelmin(pmagsx2, order=10)
if phasedx2_minval_ind[0].size > 1:
phasedx2_magdiff_minval = (
np.max(np.abs(np.diff(pmagsx2[phasedx2_minval_ind[0]])))
)
else:
phasedx2_magdiff_minval = np.nan
# p2p_scatter_pfold_over_mad - MAD of successive absolute mag diffs of
# the phased LC using best period divided
# by the MAD of the unphased LC
phased_magdiff = np.diff(pmags)
phased_magdiff_median = np.median(phased_magdiff)
phased_magdiff_mad = np.median(np.abs(phased_magdiff -
phased_magdiff_median))
phasedx2_magdiff = np.diff(pmagsx2)
phasedx2_magdiff_median = np.median(phasedx2_magdiff)
phasedx2_magdiff_mad = np.median(np.abs(phasedx2_magdiff -
phasedx2_magdiff_median))
phased_magdiffmad_unphased_mad_ratio = phased_magdiff_mad/lightcurve_mad
phasedx2_magdiffmad_unphased_mad_ratio = (
phasedx2_magdiff_mad/lightcurve_mad
)
# get the percentiles of the slopes of the adjacent mags for phasedx2
phasedx2_slopes = np.diff(pmagsx2)/np.diff(phasex2)
phasedx2_slope_percentiles = np.ravel(np.nanpercentile(phasedx2_slopes,
[10.0,90.0]))
phasedx2_slope_10percentile = phasedx2_slope_percentiles[0]
phasedx2_slope_90percentile = phasedx2_slope_percentiles[1]
# check if nbrtimes, _mags, _errs are available
if ((nbrtimes is not None) and
(nbrmags is not None) and
(nbrerrs is not None)):
# get the finite values
nfinind = (np.isfinite(nbrtimes) &
np.isfinite(nbrmags) &
np.isfinite(nbrerrs))
nftimes, nfmags, nferrs = (nbrtimes[nfinind],
nbrmags[nfinind],
nbrerrs[nfinind])
# get nonzero errors
nnzind = np.nonzero(nferrs)
nftimes, nfmags, nferrs = (nftimes[nnzind],
nfmags[nnzind],
nferrs[nnzind])
# only operate on LC if enough points
if nftimes.size > 49:
# get the phased light curve using the same period and epoch as
# the actual object
nphasedlc = lcmath.phase_magseries_with_errs(
nftimes, nfmags, nferrs,
period, ftimes.min(),
wrap=False
)
# normalize the object and neighbor phased mags
norm_pmags = pmags - np.median(pmags)
norm_npmags = nphasedlc['mags'] - np.median(nphasedlc['mags'])
# phase bin them both so we can compare LCs easily
phabinned_objectlc = lcmath.phase_bin_magseries(phase,
norm_pmags,
minbinelems=1)
phabinned_nbrlc = lcmath.phase_bin_magseries(nphasedlc['phase'],
norm_npmags,
minbinelems=1)
absdiffs = []
for pha, phamag in zip(phabinned_objectlc['binnedphases'],
phabinned_objectlc['binnedmags']):
try:
# get the matching phase from the neighbor phased LC
phadiffs = np.abs(pha - phabinned_nbrlc['binnedphases'])
minphadiffind = np.where(
(phadiffs < 1.0e-4) &
(phadiffs == np.min(phadiffs))
)
absmagdiff = np.abs(
phamag - phabinned_nbrlc['binnedmags'][
minphadiffind
]
)
if absmagdiff.size > 0:
absdiffs.append(absmagdiff.min())
except Exception as e:
continue
# sum of absdiff between the normalized to 0.0 phased LC of this
# object and that of the closest neighbor phased with the same
# period and epoch
if len(absdiffs) > 0:
sum_nbr_phasedlc_magdiff = sum(absdiffs)
else:
sum_nbr_phasedlc_magdiff = np.nan
else:
sum_nbr_phasedlc_magdiff = np.nan
else:
sum_nbr_phasedlc_magdiff = np.nan
return {
'inveta_unphasedlc':inveta_unphasedlc,
'inveta_bestperiod':inveta_bestperiod,
'inveta_bestperiodx2':inveta_bestperiodx2,
'inveta_ratio_phased_unphased':inveta_ratio_phased_unphased,
'inveta_ratio_phasedx2_unphased':inveta_ratio_phasedx2_unphased,
'phasedx2_magdiff_maxima':phasedx2_magdiff_maxval,
'phasedx2_magdiff_minima':phasedx2_magdiff_minval,
'phased_unphased_magdiff_mad_ratio':(
phased_magdiffmad_unphased_mad_ratio
),
'phasedx2_unphased_magdiff_mad_ratio':(
phasedx2_magdiffmad_unphased_mad_ratio
),
'phasedx2_slope_10percentile':phasedx2_slope_10percentile,
'phasedx2_slope_90percentile':phasedx2_slope_90percentile,
'sum_nbr_phasedlc_magdiff':sum_nbr_phasedlc_magdiff,
}
else:
return {
'inveta_unphasedlc':np.nan,
'inveta_bestperiod':np.nan,
'inveta_bestperiodx2':np.nan,
'inveta_ratio_phased_unphased':np.nan,
'inveta_ratio_phasedx2_unphased':np.nan,
'phasedx2_magdiff_maxima':np.nan,
'phasedx2_magdiff_minima':np.nan,
'phased_unphased_magdiff_mad_ratio':np.nan,
'phasedx2_unphased_magdiff_mad_ratio':np.nan,
'phasedx2_slope_10percentile':np.nan,
'phasedx2_slope_90percentile':np.nan,
'sum_nbr_phasedlc_magdiff':np.nan,
}
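# Hedged illustration of the 1/eta features above, assuming `eta_normal` in
# lightcurve_ptp_measures is the usual von Neumann statistic: the mean of
# squared successive differences divided by the variance. A good phase fold
# orders the mags smoothly, shrinking eta and boosting 1/eta.
import numpy as np

def example_inveta(mags):
    # this is 1/eta directly: variance over mean squared successive diff
    return np.nanvar(mags) / np.nanmean(np.diff(mags)**2.0)

rng = np.random.RandomState(42)
phase = np.sort(rng.uniform(size=300))
pmags = 0.05*np.sin(2.0*np.pi*phase) + 0.005*rng.randn(300)
print(example_inveta(pmags))                   # large: smooth phased sequence
print(example_inveta(rng.permutation(pmags)))  # ~0.5: scrambled order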
def stellingwerf_pdm_theta(times, mags, errs, frequency,
binsize=0.05, minbin=9):
'''
This calculates the Stellingwerf PDM theta value at a test frequency.
Parameters
----------
times,mags,errs : np.array
The input time-series and associated errors.
frequency : float
The test frequency to calculate the theta statistic at.
binsize : float
The phase bin size to use.
minbin : int
The minimum number of items in a phase bin to consider in the
calculation of the statistic.
Returns
-------
theta_pdm : float
The value of the theta statistic at the specified `frequency`.
'''
period = 1.0/frequency
fold_time = times[0]
phased = phase_magseries(times,
mags,
period,
fold_time,
wrap=False,
sort=True)
phases = phased['phase']
pmags = phased['mags']
bins = nparange(0.0, 1.0, binsize)
binnedphaseinds = npdigitize(phases, bins)
binvariances = []
binndets = []
goodbins = 0
for x in npunique(binnedphaseinds):
thisbin_inds = binnedphaseinds == x
thisbin_mags = pmags[thisbin_inds]
if thisbin_mags.size > minbin:
thisbin_variance = npvar(thisbin_mags,ddof=1)
binvariances.append(thisbin_variance)
binndets.append(thisbin_mags.size)
goodbins = goodbins + 1
# now calculate theta
binvariances = nparray(binvariances)
binndets = nparray(binndets)
theta_top = npsum(binvariances*(binndets - 1)) / (npsum(binndets) -
goodbins)
theta_bot = npvar(pmags,ddof=1)
theta = theta_top/theta_bot
return theta
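# Self-contained numpy-only illustration of the PDM theta statistic above
# (avoids the module's phase_magseries helper). theta stays near 1.0 at a
# wrong trial frequency and drops well below 1.0 at the true one.
import numpy as np

def example_pdm_theta(times, mags, frequency, binsize=0.05, minbin=9):
    phases = ((times - times.min())*frequency) % 1.0
    bininds = np.digitize(phases, np.arange(0.0, 1.0, binsize))
    variances, ndets = [], []
    for b in np.unique(bininds):
        binmags = mags[bininds == b]
        if binmags.size > minbin:
            variances.append(np.var(binmags, ddof=1))
            ndets.append(binmags.size)
    variances, ndets = np.array(variances), np.array(ndets)
    binned_var = (np.sum(variances*(ndets - 1)) /
                  (np.sum(ndets) - ndets.size))
    return binned_var/np.var(mags, ddof=1)

rng = np.random.RandomState(0)
t = np.sort(rng.uniform(0.0, 30.0, 600))
m = 12.0 + 0.10*np.sin(2.0*np.pi*t/1.7) + 0.01*rng.randn(600)
print(example_pdm_theta(t, m, 1.0/1.7))   # small: true frequency
print(example_pdm_theta(t, m, 1.0/2.9))   # near 1: wrong frequency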
def _stellingwerf_pdm_worker(task):
'''
This is a parallel worker for the function below.
Parameters
----------
task : tuple
This is of the form below::
task[0] = times
task[1] = mags
task[2] = errs
task[3] = frequency
task[4] = binsize
task[5] = minbin
Returns
-------
theta_pdm : float
The theta value at the specified frequency. nan if the calculation
fails.
'''
times, mags, errs, frequency, binsize, minbin = task
try:
theta = stellingwerf_pdm_theta(times, mags, errs, frequency,
binsize=binsize, minbin=minbin)
return theta
except Exception as e:
return npnan
def stellingwerf_pdm(times,
mags,
errs,
magsarefluxes=False,
startp=None,
endp=None,
stepsize=1.0e-4,
autofreq=True,
normalize=False,
phasebinsize=0.05,
mindetperbin=9,
nbestpeaks=5,
periodepsilon=0.1,
sigclip=10.0,
nworkers=None,
verbose=True):
'''This runs a parallelized Stellingwerf phase-dispersion minimization (PDM)
period search.
Parameters
----------
times,mags,errs : np.array
The mag/flux time-series with associated measurement errors to run the
period-finding on.
magsarefluxes : bool
If the input measurement values in `mags` and `errs` are in fluxes, set
this to True.
startp,endp : float or None
The minimum and maximum periods to consider for the transit search.
stepsize : float
The step-size in frequency to use when constructing a frequency grid for
the period search.
autofreq : bool
If this is True, the value of `stepsize` will be ignored and the
:py:func:`astrobase.periodbase.get_frequency_grid` function will be used
to generate a frequency grid based on `startp`, and `endp`. If these are
None as well, `startp` will be set to 0.1 and `endp` will be set to
`times.max() - times.min()`.
normalize : bool
This sets if the input time-series is normalized to 0.0 and rescaled
such that its variance = 1.0. This is the recommended procedure by
Schwarzenberg-Czerny 1996.
phasebinsize : float
The bin size in phase to use when calculating the PDM theta statistic at
a test frequency.
mindetperbin : int
The minimum number of elements in a phase bin to consider it valid when
calculating the PDM theta statistic at a test frequency.
nbestpeaks : int
The number of 'best' peaks to return from the periodogram results,
starting from the global maximum of the periodogram peak values.
periodepsilon : float
The fractional difference between successive values of 'best' periods
when sorting by periodogram power to consider them as separate periods
(as opposed to part of the same periodogram peak). This is used to avoid
broad peaks in the periodogram and make sure the 'best' periods returned
are all actually independent.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
nworkers : int
The number of parallel workers to use when calculating the periodogram.
verbose : bool
If this is True, will indicate progress and details about the frequency
grid used for the period search.
Returns
-------
dict
This function returns a dict, referred to as an `lspinfo` dict in other
astrobase functions that operate on periodogram results. This is a
standardized format across all astrobase period-finders, and is of the
form below::
{'bestperiod': the best period value in the periodogram,
'bestlspval': the periodogram peak associated with the best period,
'nbestpeaks': the input value of nbestpeaks,
'nbestlspvals': nbestpeaks-size list of best period peak values,
'nbestperiods': nbestpeaks-size list of best periods,
'lspvals': the full array of periodogram powers,
'periods': the full array of periods considered,
'method':'pdm' -> the name of the period-finder method,
'kwargs':{ dict of all of the input kwargs for record-keeping}}
'''
# get rid of nans first and sigclip
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=sigclip)
# make sure there are enough points to calculate a spectrum
if len(stimes) > 9 and len(smags) > 9 and len(serrs) > 9:
# get the frequencies to use
if startp:
endf = 1.0/startp
else:
# default start period is 0.1 day
endf = 1.0/0.1
if endp:
startf = 1.0/endp
else:
# default end period is length of time series
startf = 1.0/(stimes.max() - stimes.min())
# if we're not using autofreq, then use the provided frequencies
if not autofreq:
frequencies = nparange(startf, endf, stepsize)
if verbose:
LOGINFO(
'using %s frequency points, start P = %.3f, end P = %.3f' %
(frequencies.size, 1.0/endf, 1.0/startf)
)
else:
# this gets an automatic grid of frequencies to use
frequencies = get_frequency_grid(stimes,
minfreq=startf,
maxfreq=endf)
if verbose:
LOGINFO(
'using autofreq with %s frequency points, '
'start P = %.3f, end P = %.3f' %
(frequencies.size,
1.0/frequencies.max(),
1.0/frequencies.min())
)
# map to parallel workers
if (not nworkers) or (nworkers > NCPUS):
nworkers = NCPUS
if verbose:
LOGINFO('using %s workers...' % nworkers)
pool = Pool(nworkers)
# renormalize the working mags to zero and scale them so that the
# variance = 1 for use with our LSP functions
if normalize:
nmags = (smags - npmedian(smags))/npstd(smags)
else:
nmags = smags
tasks = [(stimes, nmags, serrs, x, phasebinsize, mindetperbin)
for x in frequencies]
lsp = pool.map(_stellingwerf_pdm_worker, tasks)
pool.close()
pool.join()
del pool
lsp = nparray(lsp)
periods = 1.0/frequencies
# find the nbestpeaks for the periodogram: 1. sort the lsp array by
# lowest value first 2. go down the values until we find nbestpeaks
# values that are separated by at least periodepsilon in period
# make sure to filter out the non-finite values of lsp
finitepeakind = npisfinite(lsp)
finlsp = lsp[finitepeakind]
finperiods = periods[finitepeakind]
# finlsp might not have any finite values if the period finding
# failed. if so, argmin will return a ValueError.
try:
bestperiodind = npargmin(finlsp)
except ValueError:
LOGERROR('no finite periodogram values for '
'this mag series, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'method':'pdm',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'normalize':normalize,
'phasebinsize':phasebinsize,
'mindetperbin':mindetperbin,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip}}
sortedlspind = npargsort(finlsp)
sortedlspperiods = finperiods[sortedlspind]
sortedlspvals = finlsp[sortedlspind]
# now get the nbestpeaks
nbestperiods, nbestlspvals, peakcount = (
[finperiods[bestperiodind]],
[finlsp[bestperiodind]],
1
)
prevperiod = sortedlspperiods[0]
# find the best nbestpeaks in the lsp and their periods
for period, lspval in zip(sortedlspperiods, sortedlspvals):
if peakcount == nbestpeaks:
break
perioddiff = abs(period - prevperiod)
bestperiodsdiff = [abs(period - x) for x in nbestperiods]
# this ensures that this period is different from the last
# period and from all the other existing best periods by
# periodepsilon to make sure we jump to an entire different peak
# in the periodogram
if (perioddiff > (periodepsilon*prevperiod) and
all(x > (periodepsilon*period) for x in bestperiodsdiff)):
nbestperiods.append(period)
nbestlspvals.append(lspval)
peakcount = peakcount + 1
prevperiod = period
return {'bestperiod':finperiods[bestperiodind],
'bestlspval':finlsp[bestperiodind],
'nbestpeaks':nbestpeaks,
'nbestlspvals':nbestlspvals,
'nbestperiods':nbestperiods,
'lspvals':lsp,
'periods':periods,
'method':'pdm',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'normalize':normalize,
'phasebinsize':phasebinsize,
'mindetperbin':mindetperbin,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip}}
else:
LOGERROR('no good detections for these times and mags, skipping...')
return {'bestperiod':npnan,
'bestlspval':npnan,
'nbestpeaks':nbestpeaks,
'nbestlspvals':None,
'nbestperiods':None,
'lspvals':None,
'periods':None,
'method':'pdm',
'kwargs':{'startp':startp,
'endp':endp,
'stepsize':stepsize,
'normalize':normalize,
'phasebinsize':phasebinsize,
'mindetperbin':mindetperbin,
'autofreq':autofreq,
'periodepsilon':periodepsilon,
'nbestpeaks':nbestpeaks,
'sigclip':sigclip}}
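# Hedged usage sketch for stellingwerf_pdm on a synthetic sinusoid. Assumes
# the released astrobase package is installed (the function lives in
# astrobase.periodbase.spdm there); the signal parameters are made up.
import numpy as np
from astrobase.periodbase.spdm import stellingwerf_pdm

rng = np.random.RandomState(7)
times = np.sort(rng.uniform(0.0, 60.0, 800))
mags = 12.0 + 0.08*np.sin(2.0*np.pi*times/3.217) + 0.01*rng.randn(800)
errs = np.full_like(mags, 0.01)

pdm = stellingwerf_pdm(times, mags, errs,
                       startp=0.5, endp=10.0,
                       sigclip=10.0, verbose=False)
print(pdm['bestperiod'])   # expected to land near 3.217 days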
def analytic_false_alarm_probability(lspinfo,
times,
conservative_nfreq_eff=True,
peakvals=None,
inplace=True):
'''This returns the analytic false alarm probabilities for periodogram
peak values.
FIXME: this doesn't actually work. Fix later.
The calculation follows that on page 3 of Zechmeister & Kurster (2009)::
FAP = 1 − [1 − Prob(z > z0)]**M
where::
M is the number of independent frequencies
Prob(z > z0) is the probability of peak with value > z0
z0 is the peak value we're evaluating
For PDM, the Prob(z > z0) is described by the beta distribution, according
to:
- Schwarzenberg-Czerny (1997;
https://ui.adsabs.harvard.edu/#abs/1997ApJ...489..941S)
- Zalian, Chadid, and Stellingwerf (2013;
http://adsabs.harvard.edu/abs/2014MNRAS.440...68Z)
This is given by::
beta( (N-B)/2, (B-1)/2; ((N-B)/(B-1))*theta_pdm )
Where::
N = number of observations
B = number of phase bins
This translates to a scipy.stats call to the beta distribution CDF::
x = ((N-B)/(B-1))*theta_pdm_best
prob_exceeds_val = scipy.stats.beta.cdf(x, (N-B)/2.0, (B-1.0)/2.0)
Which we can then plug into the false alarm prob eqn above with the
calculation of M.
Parameters
----------
lspinfo : dict
The dict returned by the
:py:func:`~astrobase.periodbase.spdm.stellingwerf_pdm` function.
times : np.array
The times for which the periodogram result in ``lspinfo`` was
calculated.
conservative_nfreq_eff : bool
If True, will follow the prescription given in Schwarzenberg-Czerny
(2003):
http://adsabs.harvard.edu/abs/2003ASPC..292..383S
and estimate the effective number of independent frequences M_eff as::
min(N_obs, N_freq, DELTA_f/delta_f)
peakvals : sequence or None
The peak values for which to evaluate the false-alarm probability. If
None, will calculate this for each of the peak values in the
``nbestpeaks`` key of the ``lspinfo`` dict.
inplace : bool
If True, puts the results of the FAP calculation into the ``lspinfo``
dict as a list available as ``lspinfo['falsealarmprob']``.
Returns
-------
list
The calculated false alarm probabilities for each of the peak values in
``peakvals``.
'''
from scipy.stats import beta
frequencies = 1.0/lspinfo['periods']
M = independent_freq_count(frequencies,
times,
conservative=conservative_nfreq_eff)
if peakvals is None:
peakvals = lspinfo['nbestlspvals']
nphasebins = nparange(0.0, 1.0, lspinfo['kwargs']['phasebinsize']).size
ndet = times.size
false_alarm_probs = []
for peakval in peakvals:
prob_xval = ((ndet-nphasebins)/(nphasebins-1.0))*peakval
prob_exceeds_val = beta.cdf(prob_xval,
(ndet-nphasebins)/2.0,
(nphasebins-1.0)/2.0)
false_alarm_probs.append(1.0 - (1.0 - prob_exceeds_val)**M)
if inplace:
lspinfo['falsealarmprob'] = false_alarm_probs
return false_alarm_probs
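# Standalone sketch of the beta-distribution tail probability spelled out in
# the docstring above (note the FIXME there; this only mirrors the stated
# equations). N, B, M, and the theta value below are made-up numbers.
from scipy.stats import beta

ndet, nphasebins, n_indep_freqs = 1000, 20, 500
theta_best = 0.35
x = ((ndet - nphasebins)/(nphasebins - 1.0))*theta_best
prob_exceeds = beta.cdf(x, (ndet - nphasebins)/2.0, (nphasebins - 1.0)/2.0)
fap = 1.0 - (1.0 - prob_exceeds)**n_indep_freqs
print(fap)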
def keplermag_to_sdssr(keplermag, kic_sdssg, kic_sdssr):
'''Converts magnitude measurements in Kepler band to SDSS r band.
Parameters
----------
keplermag : float or array-like
The Kepler magnitude value(s) to convert to fluxes.
kic_sdssg,kic_sdssr : float or array-like
The SDSS g and r magnitudes of the object(s) from the Kepler Input
Catalog. The .llc.fits MAST light curve file for a Kepler object
contains these values in the FITS extension 0 header.
Returns
-------
float or array-like
SDSS r band magnitude(s) converted from the Kepler band magnitude.
'''
kic_sdssgr = kic_sdssg - kic_sdssr
# np.where handles both scalar and array-like inputs, per the docstring
kepsdssr = np.where(kic_sdssgr < 0.8,
                    (keplermag - 0.2*kic_sdssg)/0.8,
                    (keplermag - 0.1*kic_sdssg)/0.9)
return kepsdssr
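# Quick check of the conversion above: the function inverts relations of the
# form Kp = 0.2*g + 0.8*r (for g - r < 0.8) and Kp = 0.1*g + 0.9*r otherwise.
# The magnitudes below are made up.
print(keplermag_to_sdssr(12.0, 12.5, 11.9))   # g-r = 0.6 -> (12.0 - 2.5)/0.8 = 11.875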
def read_kepler_fitslc(
lcfits,
headerkeys=LCHEADERKEYS,
datakeys=LCDATAKEYS,
sapkeys=LCSAPKEYS,
pdckeys=LCPDCKEYS,
topkeys=LCTOPKEYS,
apkeys=LCAPERTUREKEYS,
appendto=None,
normalize=False,
):
'''This extracts the light curve from a single Kepler or K2 LC FITS file.
This works on the light curves available at MAST:
- `kplr{kepid}-{somedatething}_llc.fits` files from the Kepler mission
- `ktwo{epicid}-c{campaign}_llc.fits` files from the K2 mission
Parameters
----------
lcfits : str
The filename of a MAST Kepler/K2 light curve FITS file.
headerkeys : list
A list of FITS header keys that will be extracted from the FITS light
curve file. These describe the observations. The default value for this
is given in `LCHEADERKEYS` above.
datakeys : list
A list of FITS column names that correspond to the auxiliary
measurements in the light curve. The default is `LCDATAKEYS` above.
sapkeys : list
A list of FITS column names that correspond to the SAP flux
measurements in the light curve. The default is `LCSAPKEYS` above.
pdckeys : list
A list of FITS column names that correspond to the PDC flux
measurements in the light curve. The default is `LCPDCKEYS` above.
topkeys : list
A list of FITS header keys that describe the object in the light
curve. The default is `LCTOPKEYS` above.
apkeys : list
A list of FITS header keys that describe the flux measurement apertures
used by the Kepler/K2 pipeline. The default is `LCAPERTUREKEYS` above.
appendto : lcdict or None
If appendto is an `lcdict`, will append measurements of this `lcdict` to
that `lcdict`. This is used for consolidating light curves for the same
object across different files (quarters). The appending does not care
about the time order. To consolidate light curves in time order, use
`consolidate_kepler_fitslc` below.
normalize : bool
If True, then each component light curve's SAP_FLUX and PDCSAP_FLUX
measurements will be normalized to 1.0 by dividing out the median flux
for the component light curve.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
'''
# read the fits file
hdulist = pyfits.open(lcfits)
lchdr, lcdata = hdulist[1].header, hdulist[1].data
lctophdr, lcaperturehdr, lcaperturedata = (hdulist[0].header,
hdulist[2].header,
hdulist[2].data)
hdulist.close()
hdrinfo = {}
# now get the values we want from the header
for key in headerkeys:
if key in lchdr and lchdr[key] is not None:
hdrinfo[key.lower()] = lchdr[key]
else:
hdrinfo[key.lower()] = None
# get the number of detections
ndet = lchdr['NAXIS2']
# get the info from the topheader
for key in topkeys:
if key in lctophdr and lctophdr[key] is not None:
hdrinfo[key.lower()] = lctophdr[key]
else:
hdrinfo[key.lower()] = None
# get the info from the lcaperturehdr using the aperture header keys
for key in apkeys:
if key in lcaperturehdr and lcaperturehdr[key] is not None:
hdrinfo[key.lower()] = lcaperturehdr[key]
else:
hdrinfo[key.lower()] = None
# if we're appending to another lcdict
if appendto and isinstance(appendto, dict):
lcdict = appendto
lcdict['quarter'].append(hdrinfo['quarter'])
lcdict['season'].append(hdrinfo['season'])
lcdict['datarelease'].append(hdrinfo['data_rel'])
lcdict['obsmode'].append(hdrinfo['obsmode'])
lcdict['campaign'].append(hdrinfo['campaign'])
# we don't update the objectid
# update lcinfo
lcdict['lcinfo']['timesys'].append(hdrinfo['timesys'])
lcdict['lcinfo']['bjdoffset'].append(
hdrinfo['bjdrefi'] + hdrinfo['bjdreff']
)
lcdict['lcinfo']['exptime'].append(hdrinfo['exposure'])
lcdict['lcinfo']['lcaperture'].append(lcaperturedata)
lcdict['lcinfo']['aperpixused'].append(hdrinfo['npixsap'])
lcdict['lcinfo']['aperpixunused'].append(hdrinfo['npixmiss'])
lcdict['lcinfo']['pixarcsec'].append(
(npabs(hdrinfo['cdelt1']) +
npabs(hdrinfo['cdelt2']))*3600.0/2.0
)
lcdict['lcinfo']['channel'].append(hdrinfo['channel'])
lcdict['lcinfo']['skygroup'].append(hdrinfo['skygroup'])
lcdict['lcinfo']['module'].append(hdrinfo['module'])
lcdict['lcinfo']['output'].append(hdrinfo['output'])
lcdict['lcinfo']['ndet'].append(ndet)
# the objectinfo is not updated for the same object when appending to a
# light curve. FIXME: maybe it should be?
# update the varinfo for this light curve
lcdict['varinfo']['cdpp3_0'].append(hdrinfo['cdpp3_0'])
lcdict['varinfo']['cdpp6_0'].append(hdrinfo['cdpp6_0'])
lcdict['varinfo']['cdpp12_0'].append(hdrinfo['cdpp12_0'])
lcdict['varinfo']['pdcvar'].append(hdrinfo['pdcvar'])
lcdict['varinfo']['pdcmethod'].append(hdrinfo['pdcmethd'])
lcdict['varinfo']['aper_target_total_ratio'].append(hdrinfo['crowdsap'])
lcdict['varinfo']['aper_target_frac'].append(hdrinfo['flfrcsap'])
# update the light curve columns now
for key in datakeys:
if key.lower() in lcdict:
lcdict[key.lower()] = (
npconcatenate((lcdict[key.lower()], lcdata[key]))
)
for key in sapkeys:
if key.lower() in lcdict['sap']:
sapflux_median = np.nanmedian(lcdata['SAP_FLUX'])
# normalize the current flux measurements if needed
if normalize and key in ('SAP_FLUX', 'SAP_FLUX_ERR',
                         'SAP_BKG', 'SAP_BKG_ERR'):
    thislcdata = lcdata[key] / sapflux_median
else:
    thislcdata = lcdata[key]
lcdict['sap'][key.lower()] = (
np.concatenate((lcdict['sap'][key.lower()], thislcdata))
)
for key in pdckeys:
if key.lower() in lcdict['pdc']:
pdcsap_flux_median = np.nanmedian(lcdata['PDCSAP_FLUX'])
# normalize the current flux measurements if needed
if normalize and key in ('PDCSAP_FLUX', 'PDCSAP_FLUX_ERR'):
    thislcdata = lcdata[key] / pdcsap_flux_median
else:
    thislcdata = lcdata[key]
lcdict['pdc'][key.lower()] = (
np.concatenate((lcdict['pdc'][key.lower()], thislcdata))
)
# append some of the light curve information into existing numpy arrays
# so we can sort on them later
lcdict['lc_channel'] = npconcatenate(
(lcdict['lc_channel'],
npfull_like(lcdata['TIME'],
hdrinfo['channel']))
)
lcdict['lc_skygroup'] = npconcatenate(
(lcdict['lc_skygroup'],
npfull_like(lcdata['TIME'],
hdrinfo['skygroup']))
)
lcdict['lc_module'] = npconcatenate(
(lcdict['lc_module'],
npfull_like(lcdata['TIME'],
hdrinfo['module']))
)
lcdict['lc_output'] = npconcatenate(
(lcdict['lc_output'],
npfull_like(lcdata['TIME'],
hdrinfo['output']))
)
lcdict['lc_quarter'] = npconcatenate(
(lcdict['lc_quarter'],
npfull_like(lcdata['TIME'],
hdrinfo['quarter']))
)
lcdict['lc_season'] = npconcatenate(
(lcdict['lc_season'],
npfull_like(lcdata['TIME'],
hdrinfo['season']))
)
lcdict['lc_campaign'] = npconcatenate(
(lcdict['lc_campaign'],
npfull_like(lcdata['TIME'],
hdrinfo['campaign']))
)
# otherwise, this is a new lcdict
else:
# form the lcdict
# the metadata is one-elem arrays because we might add on to them later
lcdict = {
'quarter':[hdrinfo['quarter']],
'season':[hdrinfo['season']],
'datarelease':[hdrinfo['data_rel']],
'campaign':[hdrinfo['campaign']], # this is None for KepPrime
'obsmode':[hdrinfo['obsmode']],
'objectid':hdrinfo['object'],
'lcinfo':{
'timesys':[hdrinfo['timesys']],
'bjdoffset':[hdrinfo['bjdrefi'] + hdrinfo['bjdreff']],
'exptime':[hdrinfo['exposure']],
'lcaperture':[lcaperturedata],
'aperpixused':[hdrinfo['npixsap']],
'aperpixunused':[hdrinfo['npixmiss']],
'pixarcsec':[(npabs(hdrinfo['cdelt1']) +
npabs(hdrinfo['cdelt2']))*3600.0/2.0],
'channel':[hdrinfo['channel']],
'skygroup':[hdrinfo['skygroup']],
'module':[hdrinfo['module']],
'output':[hdrinfo['output']],
'ndet':[ndet],
},
'objectinfo':{
'objectid':hdrinfo['object'], # repeated here for checkplot use
'keplerid':hdrinfo['keplerid'],
'ra':hdrinfo['ra_obj'],
'decl':hdrinfo['dec_obj'],
'pmra':hdrinfo['pmra'],
'pmdecl':hdrinfo['pmdec'],
'pmtotal':hdrinfo['pmtotal'],
'sdssg':hdrinfo['gmag'],
'sdssr':hdrinfo['rmag'],
'sdssi':hdrinfo['imag'],
'sdssz':hdrinfo['zmag'],
'kepmag':hdrinfo['kepmag'],
'teff':hdrinfo['teff'],
'logg':hdrinfo['logg'],
'feh':hdrinfo['feh'],
'ebminusv':hdrinfo['ebminusv'],
'extinction':hdrinfo['av'],
'starradius':hdrinfo['radius'],
'twomassuid':hdrinfo['tmindex'],
},
'varinfo':{
'cdpp3_0':[hdrinfo['cdpp3_0']],
'cdpp6_0':[hdrinfo['cdpp6_0']],
'cdpp12_0':[hdrinfo['cdpp12_0']],
'pdcvar':[hdrinfo['pdcvar']],
'pdcmethod':[hdrinfo['pdcmethd']],
'aper_target_total_ratio':[hdrinfo['crowdsap']],
'aper_target_frac':[hdrinfo['flfrcsap']],
},
'sap':{},
'pdc':{},
}
# get the LC columns
for key in datakeys:
lcdict[key.lower()] = lcdata[key]
for key in sapkeys:
lcdict['sap'][key.lower()] = lcdata[key]
for key in pdckeys:
lcdict['pdc'][key.lower()] = lcdata[key]
# turn some of the light curve information into numpy arrays so we can
# sort on them later
lcdict['lc_channel'] = npfull_like(lcdict['time'],
lcdict['lcinfo']['channel'][0])
lcdict['lc_skygroup'] = npfull_like(lcdict['time'],
lcdict['lcinfo']['skygroup'][0])
lcdict['lc_module'] = npfull_like(lcdict['time'],
lcdict['lcinfo']['module'][0])
lcdict['lc_output'] = npfull_like(lcdict['time'],
lcdict['lcinfo']['output'][0])
lcdict['lc_quarter'] = npfull_like(lcdict['time'],
lcdict['quarter'][0])
lcdict['lc_season'] = npfull_like(lcdict['time'],
lcdict['season'][0])
lcdict['lc_campaign'] = npfull_like(lcdict['time'],
lcdict['campaign'][0])
# normalize the SAP and PDCSAP fluxes if needed
if normalize:
sapflux_median = np.nanmedian(lcdict['sap']['sap_flux'])
pdcsap_flux_median = np.nanmedian(lcdict['pdc']['pdcsap_flux'])
lcdict['sap']['sap_flux'] = (
lcdict['sap']['sap_flux'] /
sapflux_median
)
lcdict['sap']['sap_flux_err'] = (
lcdict['sap']['sap_flux_err'] /
sapflux_median
)
lcdict['sap']['sap_bkg'] = (
lcdict['sap']['sap_bkg'] /
sapflux_median
)
lcdict['sap']['sap_bkg_err'] = (
lcdict['sap']['sap_bkg_err'] /
sapflux_median
)
lcdict['pdc']['pdcsap_flux'] = (
lcdict['pdc']['pdcsap_flux'] /
pdcsap_flux_median
)
lcdict['pdc']['pdcsap_flux_err'] = (
lcdict['pdc']['pdcsap_flux_err'] /
pdcsap_flux_median
)
## END OF LIGHT CURVE CONSTRUCTION ##
# update the lcdict columns with the actual columns
lcdict['columns'] = (
[x.lower() for x in datakeys] +
['sap.%s' % x.lower() for x in sapkeys] +
['pdc.%s' % x.lower() for x in pdckeys] +
['lc_channel','lc_skygroup','lc_module',
 'lc_output','lc_quarter','lc_season','lc_campaign']
)
# return the lcdict at the end
return lcdict
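# Hedged usage sketch for read_kepler_fitslc; the MAST-style filename below
# is hypothetical and must exist locally for this to run.
lcd = read_kepler_fitslc('kplr008462852-2013098041711_llc.fits',
                         normalize=True)
print(lcd['objectid'], lcd['lcinfo']['ndet'])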
def consolidate_kepler_fitslc(keplerid,
lcfitsdir,
normalize=True,
headerkeys=LCHEADERKEYS,
datakeys=LCDATAKEYS,
sapkeys=LCSAPKEYS,
pdckeys=LCPDCKEYS,
topkeys=LCTOPKEYS,
apkeys=LCAPERTUREKEYS):
'''This gets all Kepler/K2 light curves for the given `keplerid`
in `lcfitsdir`.
Searches recursively in `lcfitsdir` for all of the files belonging to the
specified `keplerid`. Sorts the light curves by time. Returns an
`lcdict`. This is meant to be used to consolidate light curves for a single
object across Kepler quarters.
NOTE: `keplerid` is an integer (without the leading zeros). This is usually
the KIC ID.
NOTE: if light curve time arrays contain `nans`, these and their associated
measurements will be sorted to the end of the final combined arrays.
Parameters
----------
keplerid : int
The Kepler ID of the object to consolidate LCs for, as an integer
without any leading zeros. This is usually the KIC or EPIC ID.
lcfitsdir : str
The directory to look in for LCs of the specified object.
normalize : bool
If True, then each component light curve's SAP_FLUX and PDCSAP_FLUX
measurements will be normalized to 1.0 by dividing out the median flux
for the component light curve.
headerkeys : list
A list of FITS header keys that will be extracted from the FITS light
curve file. These describe the observations. The default value for this
is given in `LCHEADERKEYS` above.
datakeys : list
A list of FITS column names that correspond to the auxiliary
measurements in the light curve. The default is `LCDATAKEYS` above.
sapkeys : list
A list of FITS column names that correspond to the SAP flux
measurements in the light curve. The default is `LCSAPKEYS` above.
pdckeys : list
A list of FITS column names that correspond to the PDC flux
measurements in the light curve. The default is `LCPDCKEYS` above.
topkeys : list
A list of FITS header keys that describe the object in the light
curve. The default is `LCTOPKEYS` above.
apkeys : list
A list of FITS header keys that describe the flux measurement apertures
used by the Kepler/K2 pipeline. The default is `LCAPERTUREKEYS` above.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
'''
LOGINFO('looking for Kepler light curve FITS in %s for %s...' % (lcfitsdir,
keplerid))
# for Python 3.5 and up, use recursive glob, it appears to be absurdly
# faster than os.walk
if sys.version_info[:2] > (3,4):
matching = glob.glob(os.path.join(lcfitsdir,
'**',
'kplr%09i-*_llc.fits' % keplerid),
recursive=True)
LOGINFO('found %s files: %s' % (len(matching), repr(matching)))
# for Python < 3.5, use os.walk and glob
else:
# use the os.walk function to start looking for files in lcfitsdir
walker = os.walk(lcfitsdir)
matching = []
for root, dirs, _files in walker:
for sdir in dirs:
searchpath = os.path.join(root,
sdir,
'kplr%09i-*_llc.fits' % keplerid)
foundfiles = glob.glob(searchpath)
if foundfiles:
matching.extend(foundfiles)
LOGINFO('found %s in dir: %s' % (repr(foundfiles),
os.path.join(root,sdir)))
# now that we've found everything, read them all in
if len(matching) > 0:
LOGINFO('consolidating...')
# the first file
consolidated = read_kepler_fitslc(matching[0],
headerkeys=headerkeys,
datakeys=datakeys,
sapkeys=sapkeys,
pdckeys=pdckeys,
topkeys=topkeys,
apkeys=apkeys,
normalize=normalize)
# get the rest of the files, appending each to the consolidated lcdict
for lcf in matching[1:]:
consolidated = read_kepler_fitslc(lcf,
appendto=consolidated,
headerkeys=headerkeys,
datakeys=datakeys,
sapkeys=sapkeys,
pdckeys=pdckeys,
topkeys=topkeys,
apkeys=apkeys,
normalize=normalize)
# get the sort indices
# we use time for the columns and quarters for the headers
LOGINFO('sorting by time...')
# NOTE: nans in time will be sorted to the end of the array
finiteind = npisfinite(consolidated['time'])
if npsum(finiteind) < consolidated['time'].size:
LOGWARNING('some time values are nan! '
'measurements at these times will be '
'sorted to the end of the column arrays.')
# get the sort index
column_sort_ind = npargsort(consolidated['time'])
# sort the columns by time
for col in consolidated['columns']:
if '.' in col:
key, subkey = col.split('.')
consolidated[key][subkey] = (
consolidated[key][subkey][column_sort_ind]
)
else:
consolidated[col] = consolidated[col][column_sort_ind]
# now sort the headers by quarters
header_sort_ind = npargsort(consolidated['quarter']).tolist()
# this is a bit convoluted, but whatever: list -> array -> list
for key in ('quarter', 'season', 'datarelease', 'obsmode'):
consolidated[key] = (
nparray(consolidated[key])[header_sort_ind].tolist()
)
for key in ('timesys','bjdoffset','exptime','lcaperture',
'aperpixused','aperpixunused','pixarcsec',
'channel','skygroup','module','output','ndet'):
consolidated['lcinfo'][key] = (
nparray(consolidated['lcinfo'][key])[header_sort_ind].tolist()
)
for key in ('cdpp3_0','cdpp6_0','cdpp12_0','pdcvar','pdcmethod',
'aper_target_total_ratio','aper_target_frac'):
consolidated['varinfo'][key] = (
nparray(consolidated['varinfo'][key])[header_sort_ind].tolist()
)
# finally, return the consolidated lcdict
return consolidated
# if we didn't find anything, complain
else:
LOGERROR('could not find any light curves '
'for %s in %s or its subdirectories' % (keplerid,
lcfitsdir))
return None
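# Hedged usage sketch for consolidate_kepler_fitslc; the KIC ID and local
# download directory below are hypothetical.
consolidated_lcd = consolidate_kepler_fitslc(8462852,
                                             '/data/kepler/lightcurves',
                                             normalize=True)
if consolidated_lcd is not None:
    print(consolidated_lcd['quarter'], consolidated_lcd['lcinfo']['ndet'])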
def read_k2sff_lightcurve(lcfits):
'''This reads a K2 SFF (Vanderburg & Johnson 2014) light curve into an `lcdict`.
Use this with the light curves from the K2 SFF project at MAST.
Parameters
----------
lcfits : str
The filename of the FITS light curve file downloaded from MAST.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
'''
# read the fits file
hdulist = pyfits.open(lcfits)
lchdr, lcdata = hdulist[1].header, hdulist[1].data
lctophdr = hdulist[0].header
hdulist.close()
hdrinfo = {}
# get the number of detections
ndet = lchdr['NAXIS2']
# get the info from the topheader
for key in SFFTOPKEYS:
if key in lctophdr and lctophdr[key] is not None:
hdrinfo[key.lower()] = lctophdr[key]
else:
hdrinfo[key.lower()] = None
# now get the values we want from the header
for key in SFFHEADERKEYS:
if key in lchdr and lchdr[key] is not None:
hdrinfo[key.lower()] = lchdr[key]
else:
hdrinfo[key.lower()] = None
# form the lcdict
# the metadata is one-elem arrays because we might add on to them later
lcdict = {
'quarter':[hdrinfo['quarter']],
'season':[hdrinfo['season']],
'datarelease':[hdrinfo['data_rel']],
'obsmode':[hdrinfo['obsmode']],
'objectid':hdrinfo['object'],
'campaign':[hdrinfo['campaign']],
'lcinfo':{
'timesys':[hdrinfo['timesys']],
'bjdoffset':[hdrinfo['bjdrefi'] + hdrinfo['bjdreff']],
'exptime':[hdrinfo['exposure']],
'lcapermaskidx':[hdrinfo['maskinde']],
'lcapermasktype':[hdrinfo['masktype']],
'aperpixused':[hdrinfo['npixsap']],
'aperpixunused':[None],
'pixarcsec':[None],
'channel':[hdrinfo['channel']],
'skygroup':[hdrinfo['skygroup']],
'module':[hdrinfo['module']],
'output':[hdrinfo['output']],
'ndet':[ndet],
},
'objectinfo':{
'keplerid':hdrinfo['keplerid'],
'ra':hdrinfo['ra_obj'],
'decl':hdrinfo['dec_obj'],
'pmra':hdrinfo['pmra'],
'pmdecl':hdrinfo['pmdec'],
'pmtotal':hdrinfo['pmtotal'],
'sdssg':hdrinfo['gmag'],
'sdssr':hdrinfo['rmag'],
'sdssi':hdrinfo['imag'],
'sdssz':hdrinfo['zmag'],
'kepmag':hdrinfo['kepmag'],
'teff':hdrinfo['teff'],
'logg':hdrinfo['logg'],
'feh':hdrinfo['feh'],
'ebminusv':hdrinfo['ebminusv'],
'extinction':hdrinfo['av'],
'starradius':hdrinfo['radius'],
'twomassuid':hdrinfo['tmindex'],
},
'varinfo':{
'cdpp3_0':[hdrinfo['cdpp3_0']],
'cdpp6_0':[hdrinfo['cdpp6_0']],
'cdpp12_0':[hdrinfo['cdpp12_0']],
'pdcvar':[hdrinfo['pdcvar']],
'pdcmethod':[hdrinfo['pdcmethd']],
'aptgttotrat':[hdrinfo['crowdsap']],
'aptgtfrac':[hdrinfo['flfrcsap']],
},
}
# get the LC columns
for key in SFFDATAKEYS:
lcdict[key.lower()] = lcdata[key]
# add some of the light curve information to the data arrays so we can sort
# on them later
lcdict['channel'] = npfull_like(lcdict['t'],
lcdict['lcinfo']['channel'][0])
lcdict['skygroup'] = npfull_like(lcdict['t'],
lcdict['lcinfo']['skygroup'][0])
lcdict['module'] = npfull_like(lcdict['t'],
lcdict['lcinfo']['module'][0])
lcdict['output'] = npfull_like(lcdict['t'],
lcdict['lcinfo']['output'][0])
lcdict['quarter'] = npfull_like(lcdict['t'],
lcdict['quarter'][0])
lcdict['season'] = npfull_like(lcdict['t'],
lcdict['season'][0])
lcdict['campaign'] = npfull_like(lcdict['t'],
lcdict['campaign'][0])
# update the lcdict columns with the actual columns
lcdict['columns'] = (
[x.lower() for x in SFFDATAKEYS] +
['channel','skygroup','module','output','quarter','season','campaign']
)
# return the lcdict at the end
return lcdict
|
def kepler_lcdict_to_pkl(lcdict, outfile=None):
'''This writes the `lcdict` to a Python pickle.
Parameters
----------
lcdict : lcdict
This is the input `lcdict` to write to a pickle.
outfile : str or None
If this is None, the object's Kepler ID/EPIC ID will be determined from
the `lcdict` and used to form the filename of the output pickle file. If
this is a `str`, the provided filename will be used.
Returns
-------
str
The absolute path to the written pickle file.
'''
if not outfile:
outfile = '%s-keplc.pkl' % lcdict['objectid'].replace(' ','-')
# we're using pickle.HIGHEST_PROTOCOL here, this will make Py3 pickles
# unreadable for Python 2.7
with open(outfile,'wb') as outfd:
pickle.dump(lcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
return os.path.abspath(outfile)
def read_kepler_pklc(picklefile):
'''This turns the pickled lightcurve file back into an `lcdict`.
Parameters
----------
picklefile : str
The path to a previously written Kepler LC picklefile generated by
`kepler_lcdict_to_pkl` above.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing).
'''
if picklefile.endswith('.gz'):
infd = gzip.open(picklefile, 'rb')
else:
infd = open(picklefile, 'rb')
try:
with infd:
lcdict = pickle.load(infd)
except UnicodeDecodeError:
with open(picklefile,'rb') as infd:
lcdict = pickle.load(infd, encoding='latin1')
LOGWARNING('pickle %s was probably from Python 2 '
'and failed to load without using "latin1" encoding. '
'This is probably a numpy issue: '
'http://stackoverflow.com/q/11305790' % picklefile)
return lcdict
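# Round-trip sketch for the two functions above: write an lcdict to a pickle
# and load it back. Assumes `lcd` is an lcdict from read_kepler_fitslc.
pklpath = kepler_lcdict_to_pkl(lcd)
lcd_again = read_kepler_pklc(pklpath)
print(lcd_again['objectid'] == lcd['objectid'])   # -> True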
def filter_kepler_lcdict(lcdict,
filterflags=True,
nanfilter='sap,pdc',
timestoignore=None):
'''This filters the Kepler `lcdict`, removing nans and bad
observations.
By default, this function removes points in the Kepler LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_kepler_fitslc` or
`read_kepler_fitslc`.
filterflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
nanfilter : {'sap','pdc','sap,pdc'}
Indicates the flux measurement type(s) to apply the filtering to.
timestoignore : list of tuples or None
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
Returns
-------
lcdict
Returns an `lcdict` (this is useable by most astrobase functions for LC
processing). The `lcdict` is filtered IN PLACE!
'''
cols = lcdict['columns']
# filter all bad LC points as noted by quality flags
if filterflags:
nbefore = lcdict['time'].size
filterind = lcdict['sap_quality'] == 0
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][filterind]
else:
lcdict[col] = lcdict[col][filterind]
nafter = lcdict['time'].size
LOGINFO('applied quality flag filter, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
if nanfilter and nanfilter == 'sap,pdc':
notnanind = (
npisfinite(lcdict['sap']['sap_flux']) &
npisfinite(lcdict['pdc']['pdcsap_flux']) &
npisfinite(lcdict['time'])
)
elif nanfilter and nanfilter == 'sap':
notnanind = (
npisfinite(lcdict['sap']['sap_flux']) &
npisfinite(lcdict['time'])
)
elif nanfilter and nanfilter == 'pdc':
    notnanind = (
        npisfinite(lcdict['pdc']['pdcsap_flux']) &
        npisfinite(lcdict['time'])
    )
elif nanfilter:
    raise ValueError("nanfilter must be one of 'sap', 'pdc', or 'sap,pdc'")
# remove nans from all columns
if nanfilter:
nbefore = lcdict['time'].size
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][notnanind]
else:
lcdict[col] = lcdict[col][notnanind]
nafter = lcdict['time'].size
LOGINFO('removed nans, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# exclude all times in timestoignore
if (timestoignore and
isinstance(timestoignore, list) and
len(timestoignore) > 0):
exclind = npfull_like(lcdict['time'], True, dtype=np.bool_)
nbefore = exclind.size
# get all the masks
for ignoretime in timestoignore:
time0, time1 = ignoretime[0], ignoretime[1]
thismask = ~((lcdict['time'] >= time0) & (lcdict['time'] <= time1))
exclind = exclind & thismask
# apply the masks
for col in cols:
if '.' in col:
key, subkey = col.split('.')
lcdict[key][subkey] = lcdict[key][subkey][exclind]
else:
lcdict[col] = lcdict[col][exclind]
nafter = lcdict['time'].size
LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
return lcdict
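# A minimal sketch showing typical use of the filter above: drop flagged and
# nan points, then mask a hypothetical bad time window. The window must be in
# the lcdict's native time units (BKJD = BJD - 2454833.0 for Kepler).
def _example_filter_kepler_lcdict(lcdict):
    return filter_kepler_lcdict(lcdict,
                                filterflags=True,
                                nanfilter='sap,pdc',
                                timestoignore=[(1862.0, 1863.5)])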
|
def _epd_function(coeffs, fluxes, xcc, ycc, bgv, bge):
'''This is the EPD function to fit.
Parameters
----------
coeffs : array-like of floats
Contains the EPD coefficients that will be used to generate the EPD fit
function.
fluxes : array-like
The flux measurement array being used.
xcc,ycc : array-like
Arrays of the x and y coordinates associated with each measurement in
`fluxes`.
bgv,bge : array-like
Arrays of the flux background value and the flux background error
associated with each measurement in `fluxes`.
Returns
-------
np.array
Contains the fit function evaluated at each flux measurement value.
'''
epdf = (
coeffs[0] +
coeffs[1]*npsin(2*MPI*xcc) + coeffs[2]*npcos(2*MPI*xcc) +
coeffs[3]*npsin(2*MPI*ycc) + coeffs[4]*npcos(2*MPI*ycc) +
coeffs[5]*npsin(4*MPI*xcc) + coeffs[6]*npcos(4*MPI*xcc) +
coeffs[7]*npsin(4*MPI*ycc) + coeffs[8]*npcos(4*MPI*ycc) +
coeffs[9]*bgv +
coeffs[10]*bge
)
return epdf
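# A minimal sketch evaluating the EPD function above on synthetic inputs; the
# coefficient vector must have 11 elements, matching the constant term, the
# four sin/cos pairs, and the two background terms.
def _example_epd_function_eval():
    import numpy as np
    npts = 100
    xcc = np.random.uniform(size=npts)
    ycc = np.random.uniform(size=npts)
    fluxes = np.random.normal(1000.0, 10.0, size=npts)
    bgv = np.random.normal(100.0, 5.0, size=npts)
    bge = np.abs(np.random.normal(1.0, 0.1, size=npts))
    coeffs = np.ones(11)
    return _epd_function(coeffs, fluxes, xcc, ycc, bgv, bge)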
|
def _epd_residual(coeffs, fluxes, xcc, ycc, bgv, bge):
'''This is the residual function to minimize using scipy.optimize.leastsq.
Parameters
----------
coeffs : array-like of floats
Contains the EPD coefficients that will be used to generate the EPD fit
function.
fluxes : array-like
The flux measurement array being used.
xcc,ycc : array-like
Arrays of the x and y coordinates associated with each measurement in
`fluxes`.
bgv,bge : array-like
Arrays of the flux background value and the flux background error
associated with each measurement in `fluxes`.
Returns
-------
np.array
Contains the fit function residual evaluated at each flux measurement
value.
'''
f = _epd_function(coeffs, fluxes, xcc, ycc, bgv, bge)
residual = fluxes - f
return residual
|
def epd_kepler_lightcurve(lcdict,
xccol='mom_centr1',
yccol='mom_centr2',
timestoignore=None,
filterflags=True,
writetodict=True,
epdsmooth=5):
'''This runs EPD on the Kepler light curve.
Following Huang et al. 2015, we fit the following EPD function to a smoothed
light curve, and then subtract it to obtain EPD corrected magnitudes::
f = c0 +
c1*sin(2*pi*x) + c2*cos(2*pi*x) + c3*sin(2*pi*y) + c4*cos(2*pi*y) +
c5*sin(4*pi*x) + c6*cos(4*pi*x) + c7*sin(4*pi*y) + c8*cos(4*pi*y) +
c9*bgv + c10*bge
By default, this function removes points in the Kepler LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_kepler_fitslc` or
`read_kepler_fitslc`.
    xccol,yccol : str
Indicates the x and y coordinate column names to use from the Kepler LC
in the EPD fit.
timestoignore : list of tuples
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
filterflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
writetodict : bool
If writetodict is True, adds the following columns to the lcdict::
epd_time = time array
epd_sapflux = uncorrected flux before EPD
epd_epdsapflux = corrected flux after EPD
epd_epdsapcorr = EPD flux corrections
epd_bkg = background array
epd_bkg_err = background errors array
epd_xcc = xcoord array
epd_ycc = ycoord array
epd_quality = quality flag array
and updates the 'columns' list in the lcdict as well.
epdsmooth : int
Sets the number of light curve points to smooth over when generating the
EPD fit function.
Returns
-------
tuple
Returns a tuple of the form: (times, epdfluxes, fitcoeffs, epdfit)
'''
times, fluxes, background, background_err = (lcdict['time'],
lcdict['sap']['sap_flux'],
lcdict['sap']['sap_bkg'],
lcdict['sap']['sap_bkg_err'])
xcc = lcdict[xccol]
ycc = lcdict[yccol]
flags = lcdict['sap_quality']
# filter all bad LC points as noted by quality flags
if filterflags:
nbefore = times.size
filterind = flags == 0
times = times[filterind]
fluxes = fluxes[filterind]
background = background[filterind]
background_err = background_err[filterind]
xcc = xcc[filterind]
ycc = ycc[filterind]
flags = flags[filterind]
nafter = times.size
LOGINFO('applied quality flag filter, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# remove nans
find = (npisfinite(xcc) & npisfinite(ycc) &
npisfinite(times) & npisfinite(fluxes) &
npisfinite(background) & npisfinite(background_err))
nbefore = times.size
times = times[find]
fluxes = fluxes[find]
background = background[find]
background_err = background_err[find]
xcc = xcc[find]
ycc = ycc[find]
flags = flags[find]
nafter = times.size
LOGINFO('removed nans, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# exclude all times in timestoignore
if (timestoignore and
isinstance(timestoignore, list) and
len(timestoignore) > 0):
        exclind = npfull_like(times, True, dtype=np.bool_)
nbefore = times.size
# apply all the masks
for ignoretime in timestoignore:
time0, time1 = ignoretime[0], ignoretime[1]
            thismask = ~((times >= time0) & (times <= time1))
exclind = exclind & thismask
# quantities after masks have been applied
times = times[exclind]
fluxes = fluxes[exclind]
background = background[exclind]
background_err = background_err[exclind]
xcc = xcc[exclind]
ycc = ycc[exclind]
flags = flags[exclind]
nafter = times.size
LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# now that we're all done, we can do EPD
# first, smooth the light curve
smoothedfluxes = median_filter(fluxes, size=epdsmooth)
# initial fit coeffs
initcoeffs = npones(11)
# fit the the smoothed mags and find better coeffs
leastsqfit = leastsq(_epd_residual,
initcoeffs,
args=(smoothedfluxes,
xcc, ycc,
background, background_err))
# if the fit succeeds, then get the EPD fluxes
if leastsqfit[-1] in (1,2,3,4):
fitcoeffs = leastsqfit[0]
epdfit = _epd_function(fitcoeffs,
fluxes,
xcc,
ycc,
background,
background_err)
epdfluxes = npmedian(fluxes) + fluxes - epdfit
# write these to the dictionary if requested
if writetodict:
lcdict['epd'] = {}
lcdict['epd']['time'] = times
lcdict['epd']['sapflux'] = fluxes
lcdict['epd']['epdsapflux'] = epdfluxes
lcdict['epd']['epdsapcorr'] = epdfit
lcdict['epd']['bkg'] = background
lcdict['epd']['bkg_err'] = background_err
lcdict['epd']['xcc'] = xcc
lcdict['epd']['ycc'] = ycc
lcdict['epd']['quality'] = flags
for newcol in ['epd.time','epd.sapflux',
'epd.epdsapflux','epd.epdsapcorr',
                           'epd.bkg','epd.bkg_err',
'epd.xcc','epd.ycc',
'epd.quality']:
if newcol not in lcdict['columns']:
lcdict['columns'].append(newcol)
return times, epdfluxes, fitcoeffs, epdfit
else:
LOGERROR('could not fit EPD function to light curve')
return None, None, None, None
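# A minimal usage sketch, assuming a single-quarter Kepler FITS LC read in by
# `read_kepler_fitslc` (defined elsewhere in this module); the file name is
# hypothetical.
def _example_epd_kepler_lightcurve():
    lcdict = read_kepler_fitslc('kplr001234567-2013011073258_llc.fits')
    times, epdfluxes, fitcoeffs, epdfit = epd_kepler_lightcurve(
        lcdict,
        filterflags=True,
        writetodict=True,
        epdsmooth=5
    )
    return times, epdfluxes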
|
def rfepd_kepler_lightcurve(
lcdict,
xccol='mom_centr1',
yccol='mom_centr2',
timestoignore=None,
filterflags=True,
writetodict=True,
epdsmooth=23,
decorr='xcc,ycc',
nrftrees=200
):
'''This uses a `RandomForestRegressor` to fit and decorrelate Kepler light
curves.
Fits the X and Y positions, the background, and background error.
By default, this function removes points in the Kepler LC that have ANY
quality flags set.
Parameters
----------
lcdict : lcdict
An `lcdict` produced by `consolidate_kepler_fitslc` or
`read_kepler_fitslc`.
    xccol,yccol : str
Indicates the x and y coordinate column names to use from the Kepler LC
in the EPD fit.
timestoignore : list of tuples
This is of the form::
[(time1_start, time1_end), (time2_start, time2_end), ...]
and indicates the start and end times to mask out of the final
lcdict. Use this to remove anything that wasn't caught by the quality
flags.
filterflags : bool
If True, will remove any measurements that have non-zero quality flags
present. This usually indicates an issue with the instrument or
spacecraft.
writetodict : bool
If writetodict is True, adds the following columns to the lcdict::
rfepd_time = time array
rfepd_sapflux = uncorrected flux before EPD
rfepd_epdsapflux = corrected flux after EPD
rfepd_epdsapcorr = EPD flux corrections
rfepd_bkg = background array
rfepd_bkg_err = background errors array
rfepd_xcc = xcoord array
rfepd_ycc = ycoord array
rfepd_quality = quality flag array
and updates the 'columns' list in the lcdict as well.
epdsmooth : int
Sets the number of light curve points to smooth over when generating the
EPD fit function.
    decorr : {'xcc,ycc','bgv,bge','xcc,ycc,bgv,bge'}
        Indicates whether to use the x,y coords alone; the background value
        and error alone; or the x,y coords and the background value and error
        together as the features to train the `RandomForestRegressor` on when
        performing the fit.
nrftrees : int
The number of trees to use in the `RandomForestRegressor`.
Returns
-------
tuple
Returns a tuple of the form: (times, corrected_fluxes, flux_corrections)
'''
times, fluxes, background, background_err = (
lcdict['time'],
lcdict['sap']['sap_flux'],
lcdict['sap']['sap_bkg'],
lcdict['sap']['sap_bkg_err']
)
xcc = lcdict[xccol]
ycc = lcdict[yccol]
flags = lcdict['sap_quality']
# filter all bad LC points as noted by quality flags
if filterflags:
nbefore = times.size
filterind = flags == 0
times = times[filterind]
fluxes = fluxes[filterind]
background = background[filterind]
background_err = background_err[filterind]
xcc = xcc[filterind]
ycc = ycc[filterind]
flags = flags[filterind]
nafter = times.size
LOGINFO('applied quality flag filter, ndet before = %s, '
'ndet after = %s'
% (nbefore, nafter))
# remove nans
find = (npisfinite(xcc) & npisfinite(ycc) &
npisfinite(times) & npisfinite(fluxes) &
npisfinite(background) & npisfinite(background_err))
nbefore = times.size
times = times[find]
fluxes = fluxes[find]
background = background[find]
background_err = background_err[find]
xcc = xcc[find]
ycc = ycc[find]
flags = flags[find]
nafter = times.size
LOGINFO('removed nans, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# exclude all times in timestoignore
if (timestoignore and
isinstance(timestoignore, list) and
len(timestoignore) > 0):
        exclind = npfull_like(times, True, dtype=np.bool_)
nbefore = times.size
# apply all the masks
for ignoretime in timestoignore:
time0, time1 = ignoretime[0], ignoretime[1]
            thismask = ~((times >= time0) & (times <= time1))
exclind = exclind & thismask
# quantities after masks have been applied
times = times[exclind]
fluxes = fluxes[exclind]
background = background[exclind]
background_err = background_err[exclind]
xcc = xcc[exclind]
ycc = ycc[exclind]
flags = flags[exclind]
nafter = times.size
LOGINFO('removed timestoignore, ndet before = %s, ndet after = %s'
% (nbefore, nafter))
# now that we're all done, we can do EPD
# set up the regressor
RFR = RandomForestRegressor(n_estimators=nrftrees)
if decorr == 'xcc,ycc,bgv,bge':
# collect the features and target variable
features = npcolumn_stack((xcc,ycc,background,background_err))
elif decorr == 'xcc,ycc':
# collect the features and target variable
features = npcolumn_stack((xcc,ycc))
elif decorr == 'bgv,bge':
# collect the features and target variable
features = npcolumn_stack((background,background_err))
else:
LOGERROR("couldn't understand decorr, not decorrelating...")
return None
# smooth the light curve
if epdsmooth:
smoothedfluxes = median_filter(fluxes, size=epdsmooth)
else:
smoothedfluxes = fluxes
# fit, then generate the predicted values, then get corrected values
RFR.fit(features, smoothedfluxes)
flux_corrections = RFR.predict(features)
corrected_fluxes = npmedian(fluxes) + fluxes - flux_corrections
# remove the random forest to save RAM
del RFR
# write these to the dictionary if requested
if writetodict:
lcdict['rfepd'] = {}
lcdict['rfepd']['time'] = times
lcdict['rfepd']['sapflux'] = fluxes
lcdict['rfepd']['epdsapflux'] = corrected_fluxes
lcdict['rfepd']['epdsapcorr'] = flux_corrections
lcdict['rfepd']['bkg'] = background
lcdict['rfepd']['bkg_err'] = background_err
lcdict['rfepd']['xcc'] = xcc
lcdict['rfepd']['ycc'] = ycc
lcdict['rfepd']['quality'] = flags
for newcol in ['rfepd.time','rfepd.sapflux',
'rfepd.epdsapflux','rfepd.epdsapcorr',
                           'rfepd.bkg','rfepd.bkg_err',
'rfepd.xcc','rfepd.ycc',
'rfepd.quality']:
if newcol not in lcdict['columns']:
lcdict['columns'].append(newcol)
return times, corrected_fluxes, flux_corrections
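# A minimal usage sketch for the random-forest decorrelation above; the FITS
# file name is hypothetical and `read_kepler_fitslc` is assumed to be defined
# elsewhere in this module.
def _example_rfepd_kepler_lightcurve():
    lcdict = read_kepler_fitslc('kplr001234567-2013011073258_llc.fits')
    times, corrected_fluxes, flux_corrections = rfepd_kepler_lightcurve(
        lcdict,
        decorr='xcc,ycc,bgv,bge',
        epdsmooth=23,
        nrftrees=200
    )
    return times, corrected_fluxes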
|
def detrend_centroid(lcd, detrend='legendre', sigclip=None, mingap=0.5):
'''Detrends the x and y coordinate centroids for a Kepler light curve.
Given an `lcdict` for a single quarter of Kepler data, returned by
`read_kepler_fitslc`, this function returns this same dictionary,
appending detrended centroid_x and centroid_y values.
Here "detrended" means "finite, SAP quality flag set to 0, sigma clipped,
timegroups selected based on `mingap` day gaps, then fit vs time by a
legendre polynomial of lowish degree".
Parameters
----------
lcd : lcdict
An `lcdict` generated by the `read_kepler_fitslc` function.
detrend : {'legendre'}
Method by which to detrend the LC. 'legendre' is the only thing
implemented at the moment.
sigclip : None or float or int or sequence of floats/ints
Determines the type and amount of sigma-clipping done on the light curve
to remove outliers. If None, no sigma-clipping is performed. If a two
element sequence of floats/ints, the first element corresponds to the
fainter sigma-clip limit, and the second element corresponds to the
brighter sigma-clip limit.
    mingap : float
        Number of days used to define "timegroups" (for individually fitting
        each timegroup, and to eliminate the "burn-in" period at the start of
        each quarter). For long cadence data, 0.5 days is typical.
Returns
-------
tuple
This is of the form `(lcd, errflag)`, where:
`lcd` : an `lcdict` with the new key `lcd['centroids']`, containing the
detrended times, (centroid_x, centroid_y) values, and their errors.
`errflag` : boolean error flag, could be raised at various points.
'''
qnum = npunique(lcd['quarter'])
try:
assert qnum.size == 1, 'lcd should be for a unique quarter'
assert detrend == 'legendre'
qnum = int(qnum)
    except Exception as e:
        # bail out early with the error flag set; everything below assumes a
        # single quarter and a legendre detrend
        return lcd, True
# Get finite, QUALITY_FLAG != 0 times, centroids, and their errors.
# Fraquelli & Thompson (2012), or perhaps also newer papers, give the list
# of exclusions for quality flags.
nbefore = lcd['time'].size
# "ctd" for centroid.
times = lcd['time'][lcd['sap_quality'] == 0]
# Kepler Archive Manual KDMC-10008-006, pg 18. MOM_CENTR1 is the *column*
# value for the flux-weighted centroid, MOM_CENTR2 is the row value.
ctd_x = lcd['mom_centr2'][lcd['sap_quality'] == 0]
ctd_y = lcd['mom_centr1'][lcd['sap_quality'] == 0]
ctd_x_err = lcd['mom_centr2_err'][lcd['sap_quality'] == 0]
ctd_y_err = lcd['mom_centr1_err'][lcd['sap_quality'] == 0]
find = npisfinite(times) & npisfinite(ctd_x) & npisfinite(ctd_y)
find &= (npisfinite(ctd_x_err)) & (npisfinite(ctd_y_err))
f_times, f_ctd_x, f_ctd_y = times[find], ctd_x[find], ctd_y[find]
f_ctd_x_err, f_ctd_y_err = ctd_x_err[find], ctd_y_err[find]
# Sigma clip whopping outliers. It'd be better to have a general purpose
# function for this, but sigclip_magseries works.
stimes_x, s_ctd_x, s_ctd_x_err = sigclip_magseries(
f_times,
f_ctd_x,
f_ctd_x_err,
magsarefluxes=True,
sigclip=30.0
)
stimes_y, s_ctd_y, s_ctd_y_err = sigclip_magseries(
f_times,
f_ctd_y,
f_ctd_y_err,
magsarefluxes=True,
sigclip=30.0
)
# Get times and centroids where everything is finite and sigma clipped.
mask_x = npin1d(stimes_x, stimes_y)
s_times, s_ctd_x, s_ctd_x_err = (
stimes_x[mask_x],
s_ctd_x[mask_x],
s_ctd_x_err[mask_x]
)
mask_y = npin1d(stimes_y, stimes_x)
tmp, s_ctd_y, s_ctd_y_err = (
stimes_y[mask_y],
s_ctd_y[mask_y],
s_ctd_y_err[mask_y]
)
try:
np.testing.assert_array_equal(s_times, tmp)
assert len(s_ctd_y) == len(s_times)
assert len(s_ctd_y_err) == len(s_times)
assert len(s_ctd_x) == len(s_times)
assert len(s_ctd_x_err) == len(s_times)
except AssertionError:
return lcd, True
nqflag = s_times.size
# Drop intra-quarter and interquarter gaps in the timeseries. These are the
# same limits set by Armstrong et al (2014): split each quarter's
# timegroups by whether points are within 0.5 day limits. Then drop points
# within 0.5 days of any boundary. Finally, since the interquarter burn-in
# time is more like 1 day, drop a further 0.5 days from the edges of each
# quarter. A nicer way to implement this would be with numpy masks, but
# this approach just constructs the full arrays for any given quarter.
ngroups, groups = find_lc_timegroups(s_times, mingap=mingap)
tmp_times, tmp_ctd_x, tmp_ctd_y = [], [], []
tmp_ctd_x_err, tmp_ctd_y_err = [], []
for group in groups:
tg_times = s_times[group]
tg_ctd_x = s_ctd_x[group]
tg_ctd_y = s_ctd_y[group]
tg_ctd_x_err = s_ctd_x_err[group]
tg_ctd_y_err = s_ctd_y_err[group]
try:
sel = ((tg_times > npmin(tg_times)+mingap) &
(tg_times < npmax(tg_times)-mingap))
except ValueError:
# If tgtimes is empty, continue to next timegroup.
continue
tmp_times.append(tg_times[sel])
tmp_ctd_x.append(tg_ctd_x[sel])
tmp_ctd_y.append(tg_ctd_y[sel])
tmp_ctd_x_err.append(tg_ctd_x_err[sel])
tmp_ctd_y_err.append(tg_ctd_y_err[sel])
s_times,s_ctd_x,s_ctd_y,s_ctd_x_err,s_ctd_y_err = (
nparray([]),nparray([]),nparray([]),nparray([]),nparray([])
)
# N.b.: works fine with empty arrays.
for ix, _ in enumerate(tmp_times):
s_times = npappend(s_times, tmp_times[ix])
s_ctd_x = npappend(s_ctd_x, tmp_ctd_x[ix])
s_ctd_y = npappend(s_ctd_y, tmp_ctd_y[ix])
s_ctd_x_err = npappend(s_ctd_x_err, tmp_ctd_x_err[ix])
s_ctd_y_err = npappend(s_ctd_y_err, tmp_ctd_y_err[ix])
# Extra inter-quarter burn-in of 0.5 days.
try:
s_ctd_x = s_ctd_x[(s_times > (npmin(s_times)+mingap)) &
(s_times < (npmax(s_times)-mingap))]
except Exception as e:
# Case: s_times is wonky, all across this quarter. (Implemented because
# of a rare bug with a singleton s_times array).
LOGERROR('DETREND FAILED, qnum {:d}'.format(qnum))
        return lcd, True
s_ctd_y = s_ctd_y[(s_times > (npmin(s_times)+mingap)) &
(s_times < (npmax(s_times)-mingap))]
s_ctd_x_err = s_ctd_x_err[(s_times > (npmin(s_times)+mingap)) &
(s_times < (npmax(s_times)-mingap))]
s_ctd_y_err = s_ctd_y_err[(s_times > (npmin(s_times)+mingap)) &
(s_times < (npmax(s_times)-mingap))]
# Careful to do this last...
s_times = s_times[(s_times > (npmin(s_times)+mingap)) &
(s_times < (npmax(s_times)-mingap))]
nafter = s_times.size
LOGINFO(
'CLIPPING (SAP), qnum: {:d}'.format(qnum) +
'\nndet before qflag & sigclip: {:d} ({:.3g}),'.format(
nbefore, 1.
) +
'\nndet after qflag & finite & sigclip: {:d} ({:.3g})'.format(
nqflag, nqflag/float(nbefore)
) +
'\nndet after dropping pts near gaps: {:d} ({:.3g})'.format(
nafter, nafter/float(nbefore)
)
)
# DETREND: fit a "low" order legendre series (see
# "legendredeg_vs_npts_per_timegroup_ctd.pdf"), and save it to the output
# dictionary. Save the fit (residuals to be computed after).
ctd_dtr = {}
if detrend == 'legendre':
        # use the same mingap as above to define timegroups for the fits
ngroups, groups = find_lc_timegroups(s_times, mingap=mingap)
tmpctdxlegfit, tmpctdylegfit, legdegs = [], [], []
for group in groups:
tg_times = s_times[group]
tg_ctd_x = s_ctd_x[group]
tg_ctd_x_err = s_ctd_x_err[group]
tg_ctd_y = s_ctd_y[group]
tg_ctd_y_err = s_ctd_y_err[group]
legdeg = _get_legendre_deg_ctd(len(tg_times))
tg_ctd_x_fit, _, _ = _legendre_dtr(tg_times,tg_ctd_x,tg_ctd_x_err,
legendredeg=legdeg)
tg_ctd_y_fit, _, _ = _legendre_dtr(tg_times,tg_ctd_y,tg_ctd_y_err,
legendredeg=legdeg)
tmpctdxlegfit.append(tg_ctd_x_fit)
tmpctdylegfit.append(tg_ctd_y_fit)
legdegs.append(legdeg)
fit_ctd_x, fit_ctd_y = nparray([]), nparray([])
for ix, _ in enumerate(tmpctdxlegfit):
fit_ctd_x = npappend(fit_ctd_x, tmpctdxlegfit[ix])
fit_ctd_y = npappend(fit_ctd_y, tmpctdylegfit[ix])
ctd_dtr = {'times':s_times,
'ctd_x':s_ctd_x,
'ctd_x_err':s_ctd_x_err,
'fit_ctd_x':fit_ctd_x,
'ctd_y':s_ctd_y,
'ctd_y_err':s_ctd_y_err,
'fit_ctd_y':fit_ctd_y}
lcd['ctd_dtr'] = ctd_dtr
return lcd, False
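# A minimal usage sketch: detrend the centroids of a single-quarter lcdict and
# check the error flag before using the results (file name hypothetical).
def _example_detrend_centroid():
    lcd = read_kepler_fitslc('kplr001234567-2013011073258_llc.fits')
    lcd, errflag = detrend_centroid(lcd, detrend='legendre', mingap=0.5)
    if not errflag:
        print('%s detrended centroid points' % lcd['ctd_dtr']['times'].size)
    return lcd, errflag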
|
def get_centroid_offsets(lcd, t_ing_egr, oot_buffer_time=0.1, sample_factor=3):
'''After running `detrend_centroid`, this gets positions of centroids during
transits, and outside of transits.
These positions can then be used in a false positive analysis.
This routine requires knowing the ingress and egress times for every
transit of interest within the quarter this routine is being called for.
There is currently no astrobase routine that automates this for periodic
transits (it must be done in a calling routine).
    To get the out-of-transit centroids, this routine takes points outside of
    the "buffer" set by `oot_buffer_time`, sampling a window `sample_factor`
    times the transit duration (3x by default) on either side of each transit.
Parameters
----------
lcd : lcdict
An `lcdict` generated by the `read_kepler_fitslc` function. We assume
that the `detrend_centroid` function has been run on this `lcdict`.
t_ing_egr : list of tuples
This is of the form::
[(ingress time of i^th transit, egress time of i^th transit)]
for i the transit number index in this quarter (starts at zero at the
beginning of every quarter). Assumes units of BJD.
    oot_buffer_time : float
        Number of days away from the ingress and egress times at which to
        begin sampling "out of transit" centroid points.
    sample_factor : float
        The size of the out-of-transit window sampled on either side of each
        transit, in units of the transit duration.
Returns
-------
dict
This is a dictionary keyed by transit number (i.e., the same index as
`t_ing_egr`), where each key contains the following value::
{'ctd_x_in_tra':ctd_x_in_tra,
'ctd_y_in_tra':ctd_y_in_tra,
'ctd_x_oot':ctd_x_oot,
'ctd_y_oot':ctd_y_oot,
'npts_in_tra':len(ctd_x_in_tra),
'npts_oot':len(ctd_x_oot),
'in_tra_times':in_tra_times,
'oot_times':oot_times}
'''
# NOTE:
# Bryson+ (2013) gives a more complicated and more correct approach to this
# problem, computing offsets relative to positions defined on the SKY. This
# requires using a Kepler focal plane geometry model. I don't have that
# model, or know how to get it. So I use a simpler approach.
qnum = int(np.unique(lcd['quarter']))
LOGINFO('Getting centroid offsets (qnum: {:d})...'.format(qnum))
# Kepler pixel scale, cf.
# https://keplerscience.arc.nasa.gov/the-kepler-space-telescope.html
arcsec_per_px = 3.98
# Get the residuals (units: pixel offset).
times = lcd['ctd_dtr']['times']
ctd_resid_x = lcd['ctd_dtr']['ctd_x'] - lcd['ctd_dtr']['fit_ctd_x']
ctd_resid_y = lcd['ctd_dtr']['ctd_y'] - lcd['ctd_dtr']['fit_ctd_y']
# Return results in "centroid dictionary" (has keys of transit number).
cd = {}
for ix,(t_ing,t_egr) in enumerate(t_ing_egr):
# We have in-transit times as input.
in_tra_times = times[(times > t_ing) & (times < t_egr)]
# Compute out of transit times on either side of the in-transit times.
transit_dur = t_egr - t_ing
oot_window_len = sample_factor * transit_dur
oot_before = times[
(times < (t_ing-oot_buffer_time)) &
(times > (t_ing-oot_buffer_time-oot_window_len))
]
oot_after = times[
(times > (t_egr+oot_buffer_time)) &
(times < (t_egr+oot_buffer_time+oot_window_len))
]
oot_times = npconcatenate([oot_before, oot_after])
mask_tra = npin1d(times, in_tra_times)
mask_oot = npin1d(times, oot_times)
# Convert to units of arcseconds.
ctd_x_in_tra = ctd_resid_x[mask_tra]*arcsec_per_px
ctd_y_in_tra = ctd_resid_y[mask_tra]*arcsec_per_px
ctd_x_oot = ctd_resid_x[mask_oot]*arcsec_per_px
ctd_y_oot = ctd_resid_y[mask_oot]*arcsec_per_px
cd[ix] = {'ctd_x_in_tra':ctd_x_in_tra,
'ctd_y_in_tra':ctd_y_in_tra,
'ctd_x_oot':ctd_x_oot,
'ctd_y_oot':ctd_y_oot,
'npts_in_tra':len(ctd_x_in_tra),
'npts_oot':len(ctd_x_oot),
'in_tra_times':in_tra_times,
'oot_times':oot_times}
LOGINFO('Got centroid offsets (qnum: {:d}).'.format(qnum))
return cd
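# A minimal usage sketch with two hypothetical transits; the ingress/egress
# times must be in the same time system as the lcdict (BJD, per the docstring)
# and `detrend_centroid` must have been run on `lcd` first.
def _example_get_centroid_offsets(lcd):
    t_ing_egr = [(2455020.10, 2455020.25),
                 (2455025.60, 2455025.75)]
    cd = get_centroid_offsets(lcd, t_ing_egr,
                              oot_buffer_time=0.1,
                              sample_factor=3)
    return cd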
|
def _get_legendre_deg_ctd(npts):
'''This is a helper function for centroid detrending.
'''
from scipy.interpolate import interp1d
degs = nparray([4,5,6,10,15])
pts = nparray([1e2,3e2,5e2,1e3,3e3])
fn = interp1d(pts, degs, kind='linear',
bounds_error=False,
fill_value=(min(degs), max(degs)))
legendredeg = int(npfloor(fn(npts)))
return legendredeg
|
def _legendre_dtr(x, y, y_err, legendredeg=10):
'''This calculates the residual and chi-sq values for a Legendre
function fit.
Parameters
----------
x : np.array
Array of the independent variable.
y : np.array
Array of the dependent variable.
    y_err : np.array
        Array of errors associated with each `y` value. Used to calculate the
        chi-squared value of the fit.
legendredeg : int
The degree of the Legendre function to use when fitting.
Returns
-------
tuple
The tuple returned is of the form: (fit_y, fitchisq, fitredchisq)
'''
    try:
        p = Legendre.fit(x, y, legendredeg)
        fit_y = p(x)
    except Exception as e:
        LOGEXCEPTION('Legendre fit failed, returning zeros for the fit')
        fit_y = npzeros_like(y)
fitchisq = npsum(
((fit_y - y)*(fit_y - y)) / (y_err*y_err)
)
nparams = legendredeg + 1
fitredchisq = fitchisq/(len(y) - nparams - 1)
LOGINFO(
'legendre detrend applied. chisq = %.5f, reduced chisq = %.5f' %
(fitchisq, fitredchisq)
)
return fit_y, fitchisq, fitredchisq
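# A minimal sketch exercising the Legendre detrender above on synthetic data:
# a smooth trend plus gaussian noise, with constant per-point errors.
def _example_legendre_dtr():
    import numpy as np
    x = np.linspace(0.0, 10.0, 500)
    y = 0.05*x*x - 0.2*x + 3.0 + np.random.normal(0.0, 0.1, x.size)
    y_err = np.full_like(x, 0.1)
    fit_y, fitchisq, fitredchisq = _legendre_dtr(x, y, y_err, legendredeg=4)
    return fit_y, fitchisq, fitredchisq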
|
def timebinlc(lcfile,
binsizesec,
outdir=None,
lcformat='hat-sql',
lcformatdir=None,
timecols=None,
magcols=None,
errcols=None,
minbinelems=7):
'''This bins the given light curve file in time using the specified bin size.
Parameters
----------
lcfile : str
The file name to process.
binsizesec : float
The time bin-size in seconds.
outdir : str or None
If this is a str, the output LC will be written to `outdir`. If this is
None, the output LC will be written to the same directory as `lcfile`.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve file.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
timecols,magcols,errcols : lists of str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as inputs to the binning process. If these are None,
the default values for `timecols`, `magcols`, and `errcols` for your
light curve format will be used here.
minbinelems : int
The minimum number of time-bin elements required to accept a time-bin as
valid for the output binned light curve.
Returns
-------
str
The name of the output pickle file with the binned LC.
Writes the output binned light curve to a pickle that contains the
lcdict with an added `lcdict['binned'][magcol]` key, which contains the
binned times, mags/fluxes, and errs as
`lcdict['binned'][magcol]['times']`, `lcdict['binned'][magcol]['mags']`,
        and `lcdict['binned'][magcol]['errs']` for each `magcol` provided in the
input or default `magcols` value for this light curve format.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
# get the LC into a dict
lcdict = readerfunc(lcfile)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
# skip already binned light curves
if 'binned' in lcdict:
LOGERROR('this light curve appears to be binned already, skipping...')
return None
lcdict['binned'] = {}
for tcol, mcol, ecol in zip(timecols, magcols, errcols):
# dereference the columns and get them from the lcdict
if '.' in tcol:
tcolget = tcol.split('.')
else:
tcolget = [tcol]
times = _dict_get(lcdict, tcolget)
if '.' in mcol:
mcolget = mcol.split('.')
else:
mcolget = [mcol]
mags = _dict_get(lcdict, mcolget)
if '.' in ecol:
ecolget = ecol.split('.')
else:
ecolget = [ecol]
errs = _dict_get(lcdict, ecolget)
# normalize here if not using special normalization
if normfunc is None:
ntimes, nmags = normalize_magseries(
times, mags,
magsarefluxes=magsarefluxes
)
times, mags, errs = ntimes, nmags, errs
# now bin the mag series as requested
binned = time_bin_magseries_with_errs(times,
mags,
errs,
binsize=binsizesec,
minbinelems=minbinelems)
# put this into the special binned key of the lcdict
lcdict['binned'][mcol] = {'times':binned['binnedtimes'],
'mags':binned['binnedmags'],
'errs':binned['binnederrs'],
'nbins':binned['nbins'],
'timebins':binned['jdbins'],
'binsizesec':binsizesec}
# done with binning for all magcols, now generate the output file
# this will always be a pickle
if outdir is None:
outdir = os.path.dirname(lcfile)
outfile = os.path.join(outdir, '%s-binned%.1fsec-%s.pkl' %
(squeeze(lcdict['objectid']).replace(' ','-'),
binsizesec, lcformat))
with open(outfile, 'wb') as outfd:
pickle.dump(lcdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
return outfile
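# A minimal usage sketch: bin a single hypothetical HAT sqlitecurve to
# 5-minute bins, writing the binned pickle next to the input file.
def _example_timebinlc():
    return timebinlc('HAT-123-0001234-V0-DR0-hatlc.sqlite.gz',
                     binsizesec=300.0,
                     lcformat='hat-sql',
                     minbinelems=7)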
|
def timebinlc_worker(task):
'''
This is a parallel worker for the function below.
Parameters
----------
task : tuple
This is of the form::
task[0] = lcfile
task[1] = binsizesec
            task[2] = {'outdir','lcformat','lcformatdir',
'timecols','magcols','errcols','minbinelems'}
Returns
-------
str
The output pickle file with the binned LC if successful. None otherwise.
'''
lcfile, binsizesec, kwargs = task
try:
binnedlc = timebinlc(lcfile, binsizesec, **kwargs)
LOGINFO('%s binned using %s sec -> %s OK' %
(lcfile, binsizesec, binnedlc))
return binnedlc
except Exception as e:
LOGEXCEPTION('failed to bin %s using binsizesec = %s' % (lcfile,
binsizesec))
return None
|
def parallel_timebin(lclist,
binsizesec,
maxobjects=None,
outdir=None,
lcformat='hat-sql',
lcformatdir=None,
timecols=None,
magcols=None,
errcols=None,
minbinelems=7,
nworkers=NCPUS,
maxworkertasks=1000):
'''This time-bins all the LCs in the list using the specified bin size.
Parameters
----------
lclist : list of str
The input LCs to process.
binsizesec : float
The time bin size to use in seconds.
maxobjects : int or None
If provided, LC processing will stop at `lclist[maxobjects]`.
outdir : str or None
The directory where output LCs will be written. If None, will write to
the same directory as the input LCs.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve file.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
timecols,magcols,errcols : lists of str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as inputs to the binning process. If these are None,
the default values for `timecols`, `magcols`, and `errcols` for your
light curve format will be used here.
minbinelems : int
The minimum number of time-bin elements required to accept a time-bin as
valid for the output binned light curve.
nworkers : int
Number of parallel workers to launch.
maxworkertasks : int
The maximum number of tasks a parallel worker will complete before being
replaced to guard against memory leaks.
Returns
-------
dict
The returned dict contains keys = input LCs, vals = output LCs.
'''
if outdir and not os.path.exists(outdir):
os.mkdir(outdir)
if maxobjects is not None:
lclist = lclist[:maxobjects]
tasks = [(x, binsizesec, {'outdir':outdir,
'lcformat':lcformat,
'lcformatdir':lcformatdir,
'timecols':timecols,
'magcols':magcols,
'errcols':errcols,
'minbinelems':minbinelems}) for x in lclist]
pool = mp.Pool(nworkers, maxtasksperchild=maxworkertasks)
results = pool.map(timebinlc_worker, tasks)
pool.close()
pool.join()
resdict = {os.path.basename(x):y for (x,y) in zip(lclist, results)}
return resdict
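# A minimal usage sketch: collect a hypothetical directory of HAT
# sqlitecurves and bin them all to 10-minute bins in parallel.
def _example_parallel_timebin():
    import glob
    import os.path
    lclist = sorted(glob.glob(os.path.join('/data/lcs',
                                           '*-hatlc.sqlite.gz')))
    return parallel_timebin(lclist,
                            binsizesec=600.0,
                            outdir='/data/binned-lcs',
                            lcformat='hat-sql',
                            nworkers=4)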
|
def parallel_timebin_lcdir(lcdir,
binsizesec,
maxobjects=None,
outdir=None,
lcformat='hat-sql',
lcformatdir=None,
timecols=None,
magcols=None,
errcols=None,
minbinelems=7,
nworkers=NCPUS,
maxworkertasks=1000):
'''
This time bins all the light curves in the specified directory.
Parameters
----------
lcdir : list of str
Directory containing the input LCs to process.
binsizesec : float
The time bin size to use in seconds.
maxobjects : int or None
If provided, LC processing will stop at `lclist[maxobjects]`.
outdir : str or None
The directory where output LCs will be written. If None, will write to
the same directory as the input LCs.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curve file.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
timecols,magcols,errcols : lists of str
The keys in the lcdict produced by your light curve reader function that
correspond to the times, mags/fluxes, and associated measurement errors
that will be used as inputs to the binning process. If these are None,
the default values for `timecols`, `magcols`, and `errcols` for your
light curve format will be used here.
minbinelems : int
The minimum number of time-bin elements required to accept a time-bin as
valid for the output binned light curve.
nworkers : int
Number of parallel workers to launch.
maxworkertasks : int
The maximum number of tasks a parallel worker will complete before being
replaced to guard against memory leaks.
Returns
-------
dict
The returned dict contains keys = input LCs, vals = output LCs.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(fileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
lclist = sorted(glob.glob(os.path.join(lcdir, fileglob)))
return parallel_timebin(lclist,
binsizesec,
maxobjects=maxobjects,
outdir=outdir,
                            lcformat=lcformat,
                            lcformatdir=lcformatdir,
timecols=timecols,
magcols=magcols,
errcols=errcols,
minbinelems=minbinelems,
nworkers=nworkers,
maxworkertasks=maxworkertasks)
|
def get_varfeatures(lcfile,
outdir,
timecols=None,
magcols=None,
errcols=None,
mindet=1000,
lcformat='hat-sql',
lcformatdir=None):
'''This runs :py:func:`astrobase.varclass.varfeatures.all_nonperiodic_features`
on a single LC file.
Parameters
----------
lcfile : str
The input light curve to process.
    outdir : str
        The directory where the output variable features pickle will be
        written.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the features.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the features.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the features.
mindet : int
The minimum number of LC points required to generate variability
features.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
Returns
-------
str
The generated variability features pickle for the input LC, with results
for each magcol in the input `magcol` or light curve format's default
`magcol` list.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
try:
# get the LC into a dict
lcdict = readerfunc(lcfile)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
resultdict = {'objectid':lcdict['objectid'],
'info':lcdict['objectinfo'],
'lcfbasename':os.path.basename(lcfile)}
# normalize using the special function if specified
if normfunc is not None:
lcdict = normfunc(lcdict)
for tcol, mcol, ecol in zip(timecols, magcols, errcols):
# dereference the columns and get them from the lcdict
if '.' in tcol:
tcolget = tcol.split('.')
else:
tcolget = [tcol]
times = _dict_get(lcdict, tcolget)
if '.' in mcol:
mcolget = mcol.split('.')
else:
mcolget = [mcol]
mags = _dict_get(lcdict, mcolget)
if '.' in ecol:
ecolget = ecol.split('.')
else:
ecolget = [ecol]
errs = _dict_get(lcdict, ecolget)
# normalize here if not using special normalization
if normfunc is None:
ntimes, nmags = normalize_magseries(
times, mags,
magsarefluxes=magsarefluxes
)
times, mags, errs = ntimes, nmags, errs
# make sure we have finite values
finind = np.isfinite(times) & np.isfinite(mags) & np.isfinite(errs)
# make sure we have enough finite values
if mags[finind].size < mindet:
LOGINFO('not enough LC points: %s in normalized %s LC: %s' %
(mags[finind].size, mcol, os.path.basename(lcfile)))
resultdict[mcol] = None
else:
# get the features for this magcol
lcfeatures = varfeatures.all_nonperiodic_features(
times, mags, errs
)
resultdict[mcol] = lcfeatures
# now that we've collected all the magcols, we can choose which is the
# "best" magcol. this is defined as the magcol that gives us the
# smallest LC MAD.
try:
magmads = np.zeros(len(magcols))
            for mind, mcol in enumerate(magcols):
                magmads[mind] = resultdict[mcol]['mad']
            # smallest MAD index
            bestmagcolind = np.argmin(magmads)
            resultdict['bestmagcol'] = magcols[bestmagcolind]
except Exception as e:
resultdict['bestmagcol'] = None
outfile = os.path.join(outdir,
'varfeatures-%s.pkl' %
squeeze(resultdict['objectid']).replace(' ','-'))
with open(outfile, 'wb') as outfd:
pickle.dump(resultdict, outfd, protocol=4)
return outfile
except Exception as e:
LOGEXCEPTION('failed to get LC features for %s because: %s' %
(os.path.basename(lcfile), e))
return None
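# A minimal usage sketch: get variability features for one hypothetical HAT
# sqlitecurve, writing the varfeatures pickle to /data/varfeatures.
def _example_get_varfeatures():
    return get_varfeatures('HAT-123-0001234-V0-DR0-hatlc.sqlite.gz',
                           '/data/varfeatures',
                           mindet=1000,
                           lcformat='hat-sql')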
|
def _varfeatures_worker(task):
'''
This wraps varfeatures.
'''
try:
(lcfile, outdir, timecols, magcols, errcols,
mindet, lcformat, lcformatdir) = task
return get_varfeatures(lcfile, outdir,
timecols=timecols,
magcols=magcols,
errcols=errcols,
mindet=mindet,
lcformat=lcformat,
lcformatdir=lcformatdir)
except Exception as e:
return None
|
def serial_varfeatures(lclist,
outdir,
maxobjects=None,
timecols=None,
magcols=None,
errcols=None,
mindet=1000,
lcformat='hat-sql',
lcformatdir=None):
'''This runs variability feature extraction for a list of LCs.
Parameters
----------
lclist : list of str
The list of light curve file names to process.
outdir : str
The directory where the output varfeatures pickle files will be written.
maxobjects : int
The number of LCs to process from `lclist`.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the features.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the features.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the features.
mindet : int
The minimum number of LC points required to generate variability
features.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
Returns
-------
list of str
List of the generated variability features pickles for the input LCs,
with results for each magcol in the input `magcol` or light curve
format's default `magcol` list.
'''
if maxobjects:
lclist = lclist[:maxobjects]
tasks = [(x, outdir, timecols, magcols, errcols,
mindet, lcformat, lcformatdir)
for x in lclist]
    results = []
    for task in tqdm(tasks):
        results.append(_varfeatures_worker(task))
    return results
|
def parallel_varfeatures(lclist,
outdir,
maxobjects=None,
timecols=None,
magcols=None,
errcols=None,
mindet=1000,
lcformat='hat-sql',
lcformatdir=None,
nworkers=NCPUS):
'''This runs variable feature extraction in parallel for all LCs in `lclist`.
Parameters
----------
lclist : list of str
The list of light curve file names to process.
outdir : str
The directory where the output varfeatures pickle files will be written.
maxobjects : int
The number of LCs to process from `lclist`.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the features.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the features.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the features.
mindet : int
The minimum number of LC points required to generate variability
features.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
nworkers : int
The number of parallel workers to launch.
Returns
-------
dict
A dict with key:val pairs of input LC file name : the generated
variability features pickles for each of the input LCs, with results for
each magcol in the input `magcol` or light curve format's default
`magcol` list.
'''
# make sure to make the output directory if it doesn't exist
if not os.path.exists(outdir):
os.makedirs(outdir)
if maxobjects:
lclist = lclist[:maxobjects]
tasks = [(x, outdir, timecols, magcols, errcols, mindet,
lcformat, lcformatdir) for x in lclist]
with ProcessPoolExecutor(max_workers=nworkers) as executor:
        resultfutures = executor.map(_varfeatures_worker, tasks)
results = [x for x in resultfutures]
resdict = {os.path.basename(x):y for (x,y) in zip(lclist, results)}
return resdict
|
def parallel_varfeatures_lcdir(lcdir,
outdir,
fileglob=None,
maxobjects=None,
timecols=None,
magcols=None,
errcols=None,
recursive=True,
mindet=1000,
lcformat='hat-sql',
lcformatdir=None,
nworkers=NCPUS):
'''This runs parallel variable feature extraction for a directory of LCs.
Parameters
----------
lcdir : str
The directory of light curve files to process.
outdir : str
The directory where the output varfeatures pickle files will be written.
fileglob : str or None
The file glob to use when looking for light curve files in `lcdir`. If
None, the default file glob associated for this LC format will be used.
maxobjects : int
The number of LCs to process from `lclist`.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the features.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the features.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the features.
mindet : int
The minimum number of LC points required to generate variability
features.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
nworkers : int
The number of parallel workers to launch.
Returns
-------
dict
A dict with key:val pairs of input LC file name : the generated
variability features pickles for each of the input LCs, with results for
each magcol in the input `magcol` or light curve format's default
`magcol` list.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
if not fileglob:
fileglob = dfileglob
# now find the files
LOGINFO('searching for %s light curves in %s ...' % (lcformat, lcdir))
if recursive is False:
matching = glob.glob(os.path.join(lcdir, fileglob))
else:
# use recursive glob for Python 3.5+
if sys.version_info[:2] > (3,4):
matching = glob.glob(os.path.join(lcdir,
'**',
fileglob),
recursive=True)
# otherwise, use os.walk and glob
else:
# use os.walk to go through the directories
walker = os.walk(lcdir)
matching = []
for root, dirs, _files in walker:
for sdir in dirs:
searchpath = os.path.join(root,
sdir,
fileglob)
foundfiles = glob.glob(searchpath)
if foundfiles:
matching.extend(foundfiles)
# now that we have all the files, process them
if matching and len(matching) > 0:
LOGINFO('found %s light curves, getting varfeatures...' %
len(matching))
return parallel_varfeatures(matching,
outdir,
maxobjects=maxobjects,
timecols=timecols,
magcols=magcols,
errcols=errcols,
mindet=mindet,
lcformat=lcformat,
lcformatdir=lcformatdir,
nworkers=nworkers)
else:
LOGERROR('no light curve files in %s format found in %s' % (lcformat,
lcdir))
return None
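# A minimal usage sketch: run parallel variability-feature extraction over a
# hypothetical directory tree of HAT sqlitecurves.
def _example_parallel_varfeatures_lcdir():
    return parallel_varfeatures_lcdir('/data/lcs',
                                      '/data/varfeatures',
                                      recursive=True,
                                      mindet=1000,
                                      lcformat='hat-sql',
                                      nworkers=4)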
|
def checkplot_pickle_to_png(
checkplotin,
outfile,
extrarows=None
):
'''This reads the checkplot pickle or dict provided, and writes out a PNG.
The output PNG contains most of the information in the input checkplot
pickle/dict, and can be used to quickly glance through the highlights
instead of having to review the checkplot with the `checkplotserver`
webapp. This is useful for exporting read-only views of finalized checkplots
from the `checkplotserver` as well, to share them with other people.
The PNG has 4 x N tiles::
[ finder ] [ objectinfo ] [ varinfo/comments ] [ unphased LC ]
[ periodogram1 ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
[ periodogram2 ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
.
.
[ periodogramN ] [ phased LC P1 ] [ phased LC P2 ] [ phased LC P3 ]
for N independent period-finding methods producing:
- periodogram1,2,3...N: the periodograms from each method
- phased LC P1,P2,P3: the phased lightcurves using the best 3 peaks in each
periodogram
Parameters
----------
checkplotin : dict or str
This is either a checkplotdict produced by
:py:func:`astrobase.checkplot.pkl.checkplot_dict` or a checkplot pickle
file produced by :py:func:`astrobase.checkplot.pkl.checkplot_pickle`.
outfile : str
The filename of the output PNG file to create.
extrarows : list of tuples
This is a list of 4-element tuples containing paths to PNG files that
will be added to the end of the rows generated from the checkplotin
pickle/dict. Each tuple represents a row in the final output PNG
        file. If there are fewer than 4 elements per tuple, the missing
        elements will be filled in with white space. If there are more than 4
        elements per tuple, only the first four will be used.
The purpose of this kwarg is to incorporate periodograms and phased LC
plots (in the form of PNGs) generated from an external period-finding
function or program (like VARTOOLS) to allow for comparison with
astrobase results.
        NOTE: the PNG files specified in `extrarows` here will be added to
        those already present in the input checkplotdict['externalplots'] if
        that is not None, i.e., if you passed in a similar list of external
        plots to the :py:func:`astrobase.checkplot.pkl.checkplot_pickle`
        function earlier. In this case, `extrarows` can be used to add even
        more external plots if desired.
Each external plot PNG will be resized to 750 x 480 pixels to fit into
an output image cell.
By convention, each 4-element tuple should contain:
        - a periodogram PNG
- phased LC PNG with 1st best peak period from periodogram
- phased LC PNG with 2nd best peak period from periodogram
- phased LC PNG with 3rd best peak period from periodogram
Example of extrarows::
[('/path/to/external/bls-periodogram.png',
'/path/to/external/bls-phasedlc-plot-bestpeak.png',
'/path/to/external/bls-phasedlc-plot-peak2.png',
'/path/to/external/bls-phasedlc-plot-peak3.png'),
('/path/to/external/pdm-periodogram.png',
'/path/to/external/pdm-phasedlc-plot-bestpeak.png',
'/path/to/external/pdm-phasedlc-plot-peak2.png',
'/path/to/external/pdm-phasedlc-plot-peak3.png'),
...]
Returns
-------
str
The absolute path to the generated checkplot PNG.
'''
# figure out if the checkplotpickle is a filename
# python 3
if sys.version_info[:2] > (3,2):
if (isinstance(checkplotin, str) and os.path.exists(checkplotin)):
cpd = _read_checkplot_picklefile(checkplotin)
elif isinstance(checkplotin, dict):
cpd = checkplotin
else:
LOGERROR('checkplotin: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(checkplotin), type(checkplotin)))
return None
# check for unicode in python 2.7
else:
# get the current checkplotdict
if ((isinstance(checkplotin, str) or
isinstance(checkplotin, unicode)) and
os.path.exists(checkplotin)):
cpd = _read_checkplot_picklefile(checkplotin)
elif isinstance(checkplotin,dict):
cpd = checkplotin
else:
LOGERROR('checkplotin: %s of type %s is not a '
'valid checkplot filename (or does not exist), or a dict' %
(os.path.abspath(checkplotin), type(checkplotin)))
return None
# figure out the dimensions of the output png
# each cell is 750 x 480 pixels
# a row is made of four cells
# - the first row is for object info
# - the rest are for periodograms and phased LCs, one row per method
# if there are more than three phased LC plots per method, we'll only plot 3
if 'pfmethods' in cpd:
cplspmethods = cpd['pfmethods']
else:
cplspmethods = []
for pfm in METHODSHORTLABELS:
if pfm in cpd:
cplspmethods.append(pfm)
cprows = len(cplspmethods)
# add in any extra rows from neighbors
if 'neighbors' in cpd and cpd['neighbors'] and len(cpd['neighbors']) > 0:
nbrrows = len(cpd['neighbors'])
else:
nbrrows = 0
# add in any extra rows from keyword arguments
if extrarows and len(extrarows) > 0:
erows = len(extrarows)
else:
erows = 0
# add in any extra rows from the checkplot dict
if ('externalplots' in cpd and
cpd['externalplots'] and
len(cpd['externalplots']) > 0):
cpderows = len(cpd['externalplots'])
else:
cpderows = 0
totalwidth = 3000
totalheight = 480 + (cprows + erows + nbrrows + cpderows)*480
# this is the output PNG
outimg = Image.new('RGBA',(totalwidth, totalheight),(255,255,255,255))
# now fill in the rows of the output png. we'll use Pillow to build up the
# output image from the already stored plots and stuff in the checkplot
# dict.
###############################
# row 1, cell 1: finder chart #
###############################
if cpd['finderchart']:
finder = Image.open(
_base64_to_file(cpd['finderchart'], None, writetostrio=True)
)
        # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
        bigfinder = finder.resize((450,450), Image.LANCZOS)
outimg.paste(bigfinder,(150,20))
#####################################
# row 1, cell 2: object information #
#####################################
# find the font we need from the package data
fontpath = os.path.abspath(
os.path.join(os.path.dirname(__file__),
'..',
'cpserver',
'cps-assets',
'DejaVuSans.ttf')
)
# load the font
if os.path.exists(fontpath):
cpfontnormal = ImageFont.truetype(fontpath, 20)
cpfontlarge = ImageFont.truetype(fontpath, 28)
else:
LOGWARNING('could not find bundled '
'DejaVu Sans font in the astrobase package '
'data, using ugly defaults...')
cpfontnormal = ImageFont.load_default()
cpfontlarge = ImageFont.load_default()
# the image draw object
objinfodraw = ImageDraw.Draw(outimg)
# write out the object information
# objectid
objinfodraw.text(
(625, 25),
cpd['objectid'] if cpd['objectid'] else 'no objectid',
font=cpfontlarge,
fill=(0,0,255,255)
)
# twomass id
if 'twomassid' in cpd['objectinfo']:
objinfodraw.text(
(625, 60),
('2MASS J%s' % cpd['objectinfo']['twomassid']
if cpd['objectinfo']['twomassid']
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
# ndet
if 'ndet' in cpd['objectinfo']:
objinfodraw.text(
(625, 85),
('LC points: %s' % cpd['objectinfo']['ndet']
if cpd['objectinfo']['ndet'] is not None
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
else:
objinfodraw.text(
(625, 85),
('LC points: %s' % cpd['magseries']['times'].size),
font=cpfontnormal,
fill=(0,0,0,255)
)
# coords and PM
objinfodraw.text(
(625, 125),
('Coords and PM'),
font=cpfontnormal,
fill=(0,0,0,255)
)
if 'ra' in cpd['objectinfo'] and 'decl' in cpd['objectinfo']:
objinfodraw.text(
(900, 125),
(('RA, Dec: %.3f, %.3f' %
(cpd['objectinfo']['ra'], cpd['objectinfo']['decl']))
if (cpd['objectinfo']['ra'] is not None and
cpd['objectinfo']['decl'] is not None)
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
else:
objinfodraw.text(
(900, 125),
'RA, Dec: nan, nan',
font=cpfontnormal,
fill=(0,0,0,255)
)
if 'propermotion' in cpd['objectinfo']:
objinfodraw.text(
(900, 150),
(('Total PM: %.5f mas/yr' % cpd['objectinfo']['propermotion'])
if (cpd['objectinfo']['propermotion'] is not None)
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
else:
objinfodraw.text(
(900, 150),
'Total PM: nan',
font=cpfontnormal,
fill=(0,0,0,255)
)
if 'rpmj' in cpd['objectinfo']:
objinfodraw.text(
(900, 175),
(('Reduced PM [Jmag]: %.3f' % cpd['objectinfo']['rpmj'])
if (cpd['objectinfo']['rpmj'] is not None)
else ''),
font=cpfontnormal,
fill=(0,0,0,255)
)
else:
objinfodraw.text(
(900, 175),
'Reduced PM [Jmag]: nan',
font=cpfontnormal,
fill=(0,0,0,255)
)
# here, we have to deal with two generations of objectinfo dicts
# first, deal with the new generation of objectinfo dicts
if 'available_dereddened_bands' in cpd['objectinfo']:
#
# first, we deal with the bands and mags
#
# magnitudes
objinfodraw.text(
(625, 200),
'Magnitudes',
font=cpfontnormal,
fill=(0,0,0,255)
)
# process the various bands
# if dereddened mags aren't available, use the observed mags
if len(cpd['objectinfo']['available_bands']) > 0:
# we'll get all the available mags
for bandind, band, label in zip(
range(len(cpd['objectinfo']['available_bands'])),
cpd['objectinfo']['available_bands'],
cpd['objectinfo']['available_band_labels']
):
thisbandmag = cpd['objectinfo'][band]
# we'll draw stuff in three rows depending on the number of
# bands we have to use
if bandind in (0,1,2,3,4):
thispos = (900+125*bandind, 200)
objinfodraw.text(
thispos,
'%s: %.3f' % (label, thisbandmag),
font=cpfontnormal,
fill=(0,0,0,255)
)
elif bandind in (5,6,7,8,9):
rowbandind = bandind - 5
thispos = (900+125*rowbandind, 225)
objinfodraw.text(
thispos,
'%s: %.3f' % (label, thisbandmag),
font=cpfontnormal,
fill=(0,0,0,255)
)
else:
rowbandind = bandind - 10
thispos = (900+125*rowbandind, 250)
objinfodraw.text(
thispos,
'%s: %.3f' % (label, thisbandmag),
font=cpfontnormal,
fill=(0,0,0,255)
)
#
# next, deal with the colors
#
# colors
if ('dereddened' in cpd['objectinfo'] and
cpd['objectinfo']['dereddened'] is True):
deredlabel = "(dereddened)"
else:
deredlabel = ""
objinfodraw.text(
(625, 275),
'Colors %s' % deredlabel,
font=cpfontnormal,
fill=(0,0,0,255)
)
if len(cpd['objectinfo']['available_colors']) > 0:
        # we'll get all the available colors (dereddened versions preferred)
for colorind, color, colorlabel in zip(
range(len(cpd['objectinfo']['available_colors'])),
cpd['objectinfo']['available_colors'],
cpd['objectinfo']['available_color_labels']
):
thiscolor = cpd['objectinfo'][color]
                # we'll draw stuff in up to four rows depending on the number
                # of colors we have to use
if colorind in (0,1,2,3,4):
thispos = (900+150*colorind, 275)
objinfodraw.text(
thispos,
'%s: %.3f' % (colorlabel, thiscolor),
font=cpfontnormal,
fill=(0,0,0,255)
)
elif colorind in (5,6,7,8,9):
thisrowind = colorind - 5
thispos = (900+150*thisrowind, 300)
objinfodraw.text(
thispos,
'%s: %.3f' % (colorlabel, thiscolor),
font=cpfontnormal,
fill=(0,0,0,255)
)
elif colorind in (10,11,12,13,14):
thisrowind = colorind - 10
thispos = (900+150*thisrowind, 325)
objinfodraw.text(
thispos,
'%s: %.3f' % (colorlabel, thiscolor),
font=cpfontnormal,
fill=(0,0,0,255)
)
else:
thisrowind = colorind - 15
thispos = (900+150*thisrowind, 350)
objinfodraw.text(
thispos,
'%s: %.3f' % (colorlabel, thiscolor),
font=cpfontnormal,
fill=(0,0,0,255)
)
# otherwise, deal with older generation of checkplots
else:
objinfodraw.text(
(625, 200),
('Magnitudes'),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(900, 200),
('gri: %.3f, %.3f, %.3f' %
((cpd['objectinfo']['sdssg'] if
('sdssg' in cpd['objectinfo'] and
cpd['objectinfo']['sdssg'] is not None)
else npnan),
(cpd['objectinfo']['sdssr'] if
('sdssr' in cpd['objectinfo'] and
cpd['objectinfo']['sdssr'] is not None)
else npnan),
(cpd['objectinfo']['sdssi'] if
('sdssi' in cpd['objectinfo'] and
cpd['objectinfo']['sdssi'] is not None)
else npnan))),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(900, 225),
('JHK: %.3f, %.3f, %.3f' %
((cpd['objectinfo']['jmag'] if
('jmag' in cpd['objectinfo'] and
cpd['objectinfo']['jmag'] is not None)
else npnan),
(cpd['objectinfo']['hmag'] if
('hmag' in cpd['objectinfo'] and
cpd['objectinfo']['hmag'] is not None)
else npnan),
(cpd['objectinfo']['kmag'] if
('kmag' in cpd['objectinfo'] and
cpd['objectinfo']['kmag'] is not None)
else npnan))),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(900, 250),
('BV: %.3f, %.3f' %
((cpd['objectinfo']['bmag'] if
('bmag' in cpd['objectinfo'] and
cpd['objectinfo']['bmag'] is not None)
else npnan),
(cpd['objectinfo']['vmag'] if
('vmag' in cpd['objectinfo'] and
cpd['objectinfo']['vmag'] is not None)
else npnan))),
font=cpfontnormal,
fill=(0,0,0,255)
)
# colors
if ('dereddened' in cpd['objectinfo'] and
cpd['objectinfo']['dereddened'] is True):
deredlabel = "(dereddened)"
else:
deredlabel = ""
objinfodraw.text(
(625, 275),
'Colors %s' % deredlabel,
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(900, 275),
('B - V: %.3f, V - K: %.3f' %
( (cpd['objectinfo']['bvcolor'] if
('bvcolor' in cpd['objectinfo'] and
cpd['objectinfo']['bvcolor'] is not None)
else npnan),
(cpd['objectinfo']['vkcolor'] if
('vkcolor' in cpd['objectinfo'] and
cpd['objectinfo']['vkcolor'] is not None)
else npnan) )),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(900, 300),
('i - J: %.3f, g - K: %.3f' %
( (cpd['objectinfo']['ijcolor'] if
('ijcolor' in cpd['objectinfo'] and
cpd['objectinfo']['ijcolor'] is not None)
else npnan),
(cpd['objectinfo']['gkcolor'] if
('gkcolor' in cpd['objectinfo'] and
cpd['objectinfo']['gkcolor'] is not None)
else npnan) )),
font=cpfontnormal,
fill=(0,0,0,255)
)
objinfodraw.text(
(900, 325),
('J - K: %.3f' %
( (cpd['objectinfo']['jkcolor'] if
('jkcolor' in cpd['objectinfo'] and
cpd['objectinfo']['jkcolor'] is not None)
else npnan),) ),
font=cpfontnormal,
fill=(0,0,0,255)
)
#
# rest of the object information
#
# color classification
if ('color_classes' in cpd['objectinfo'] and
cpd['objectinfo']['color_classes']):
objinfodraw.text(
(625, 375),
('star classification by color: %s' %
(', '.join(cpd['objectinfo']['color_classes']))),
font=cpfontnormal,
fill=(0,0,0,255)
)
# GAIA neighbors
if ( ('gaia_neighbors' in cpd['objectinfo']) and
(cpd['objectinfo']['gaia_neighbors'] is not None) and
(np.isfinite(cpd['objectinfo']['gaia_neighbors'])) and
('searchradarcsec' in cpd['objectinfo']) and
(cpd['objectinfo']['searchradarcsec']) ):
objinfodraw.text(
(625, 400),
('%s GAIA close neighbors within %.1f arcsec' %
(cpd['objectinfo']['gaia_neighbors'],
cpd['objectinfo']['searchradarcsec'])),
font=cpfontnormal,
fill=(0,0,0,255)
)
# closest GAIA neighbor
if ( ('gaia_closest_distarcsec' in cpd['objectinfo']) and
(cpd['objectinfo']['gaia_closest_distarcsec'] is not None) and
(np.isfinite(cpd['objectinfo']['gaia_closest_distarcsec'])) and
('gaia_closest_gmagdiff' in cpd['objectinfo']) and
(cpd['objectinfo']['gaia_closest_gmagdiff'] is not None) and
(np.isfinite(cpd['objectinfo']['gaia_closest_gmagdiff'])) ):
objinfodraw.text(
(625, 425),
('closest GAIA neighbor is %.1f arcsec away, '
'GAIA mag (obj-nbr): %.3f' %
(cpd['objectinfo']['gaia_closest_distarcsec'],
cpd['objectinfo']['gaia_closest_gmagdiff'])),
font=cpfontnormal,
fill=(0,0,0,255)
)
# object tags
if 'objecttags' in cpd['objectinfo'] and cpd['objectinfo']['objecttags']:
objtagsplit = cpd['objectinfo']['objecttags'].split(',')
# write three tags per line
nobjtaglines = int(np.ceil(len(objtagsplit)/3.0))
for objtagline in range(nobjtaglines):
objtagslice = ','.join(objtagsplit[objtagline*3:objtagline*3+3])
objinfodraw.text(
(625, 450+objtagline*25),
objtagslice,
font=cpfontnormal,
fill=(135, 54, 0, 255)
)
################################################
# row 1, cell 3: variability info and comments #
################################################
# objectisvar
objisvar = cpd['varinfo']['objectisvar']
if objisvar == '0':
objvarflag = 'Variable star flag not set'
elif objisvar == '1':
objvarflag = 'Object is probably a variable star'
elif objisvar == '2':
objvarflag = 'Object is probably not a variable star'
elif objisvar == '3':
objvarflag = 'Not sure if this object is a variable star'
elif objisvar is None:
objvarflag = 'Variable star flag not set'
elif objisvar is True:
objvarflag = 'Object is probably a variable star'
elif objisvar is False:
objvarflag = 'Object is probably not a variable star'
else:
objvarflag = 'Variable star flag: %s' % objisvar
objinfodraw.text(
(1650, 125),
objvarflag,
font=cpfontnormal,
fill=(0,0,0,255)
)
# period
objinfodraw.text(
(1650, 150),
('Period [days]: %.6f' %
(cpd['varinfo']['varperiod']
if cpd['varinfo']['varperiod'] is not None
else np.nan)),
font=cpfontnormal,
fill=(0,0,0,255)
)
# epoch
objinfodraw.text(
(1650, 175),
('Epoch [JD]: %.6f' %
(cpd['varinfo']['varepoch']
if cpd['varinfo']['varepoch'] is not None
else np.nan)),
font=cpfontnormal,
fill=(0,0,0,255)
)
# variability tags
if cpd['varinfo']['vartags']:
vartagsplit = cpd['varinfo']['vartags'].split(',')
# write three tags per line
nvartaglines = int(np.ceil(len(vartagsplit)/3.0))
for vartagline in range(nvartaglines):
vartagslice = ','.join(vartagsplit[vartagline*3:vartagline*3+3])
objinfodraw.text(
(1650, 225+vartagline*25),
vartagslice,
font=cpfontnormal,
fill=(135, 54, 0, 255)
)
# object comments
if 'comments' in cpd and cpd['comments']:
commentsplit = cpd['comments'].split(' ')
# write 10 words per line
ncommentlines = int(np.ceil(len(commentsplit)/10.0))
for commentline in range(ncommentlines):
commentslice = ' '.join(
commentsplit[commentline*10:commentline*10+10]
)
objinfodraw.text(
(1650, 325+commentline*25),
commentslice,
font=cpfontnormal,
fill=(0,0,0,255)
)
# this handles JSON-ified checkplots returned by LCC server
elif 'objectcomments' in cpd and cpd['objectcomments']:
commentsplit = cpd['objectcomments'].split(' ')
# write 10 words per line
ncommentlines = int(np.ceil(len(commentsplit)/10.0))
for commentline in range(ncommentlines):
commentslice = ' '.join(
commentsplit[commentline*10:commentline*10+10]
)
objinfodraw.text(
(1650, 325+commentline*25),
commentslice,
font=cpfontnormal,
fill=(0,0,0,255)
)
#######################################
# row 1, cell 4: unphased light curve #
#######################################
if (cpd['magseries'] and
'plot' in cpd['magseries'] and
cpd['magseries']['plot']):
magseries = Image.open(
_base64_to_file(cpd['magseries']['plot'], None, writetostrio=True)
)
outimg.paste(magseries,(750*3,0))
# this handles JSON-ified checkplots from LCC server
elif ('magseries' in cpd and isinstance(cpd['magseries'],str)):
magseries = Image.open(
_base64_to_file(cpd['magseries'], None, writetostrio=True)
)
outimg.paste(magseries,(750*3,0))
###############################
# the rest of the rows in cpd #
###############################
for lspmethodind, lspmethod in enumerate(cplspmethods):
###############################
# the periodogram comes first #
###############################
if (cpd[lspmethod] and cpd[lspmethod]['periodogram']):
pgram = Image.open(
_base64_to_file(cpd[lspmethod]['periodogram'], None,
writetostrio=True)
)
outimg.paste(pgram,(0,480 + 480*lspmethodind))
#############################
# best phased LC comes next #
#############################
if (cpd[lspmethod] and 0 in cpd[lspmethod] and cpd[lspmethod][0]):
plc1 = Image.open(
_base64_to_file(cpd[lspmethod][0]['plot'], None,
writetostrio=True)
)
outimg.paste(plc1,(750,480 + 480*lspmethodind))
# this handles JSON-ified checkplots from LCC server
elif (cpd[lspmethod] and 'phasedlc0' in cpd[lspmethod] and
isinstance(cpd[lspmethod]['phasedlc0']['plot'], str)):
plc1 = Image.open(
_base64_to_file(cpd[lspmethod]['phasedlc0']['plot'], None,
writetostrio=True)
)
outimg.paste(plc1,(750,480 + 480*lspmethodind))
#################################
# 2nd best phased LC comes next #
#################################
if (cpd[lspmethod] and 1 in cpd[lspmethod] and cpd[lspmethod][1]):
plc2 = Image.open(
_base64_to_file(cpd[lspmethod][1]['plot'], None,
writetostrio=True)
)
outimg.paste(plc2,(750*2,480 + 480*lspmethodind))
# this handles JSON-ified checkplots from LCC server
elif (cpd[lspmethod] and 'phasedlc1' in cpd[lspmethod] and
isinstance(cpd[lspmethod]['phasedlc1']['plot'], str)):
plc2 = Image.open(
_base64_to_file(cpd[lspmethod]['phasedlc1']['plot'], None,
writetostrio=True)
)
outimg.paste(plc2,(750*2,480 + 480*lspmethodind))
#################################
# 3rd best phased LC comes next #
#################################
if (cpd[lspmethod] and 2 in cpd[lspmethod] and cpd[lspmethod][2]):
plc3 = Image.open(
_base64_to_file(cpd[lspmethod][2]['plot'], None,
writetostrio=True)
)
outimg.paste(plc3,(750*3,480 + 480*lspmethodind))
# this handles JSON-ified checkplots from LCC server
elif (cpd[lspmethod] and 'phasedlc2' in cpd[lspmethod] and
isinstance(cpd[lspmethod]['phasedlc2']['plot'], str)):
plc3 = Image.open(
_base64_to_file(cpd[lspmethod]['phasedlc2']['plot'], None,
writetostrio=True)
)
outimg.paste(plc3,(750*3,480 + 480*lspmethodind))
################################
## ALL DONE WITH BUILDING PNG ##
################################
#########################
# add in any extra rows #
#########################
# from the keyword arguments
if erows > 0:
for erowind, erow in enumerate(extrarows):
# make sure we never go above 4 plots in a row
for ecolind, ecol in enumerate(erow[:4]):
eplot = Image.open(ecol)
eplotresized = eplot.resize((750,480), Image.ANTIALIAS)
outimg.paste(eplotresized,
(750*ecolind,
(cprows+1)*480 + 480*erowind))
# from the checkplotdict
if cpderows > 0:
for cpderowind, cpderow in enumerate(cpd['externalplots']):
# make sure we never go above 4 plots in a row
for cpdecolind, cpdecol in enumerate(cpderow[:4]):
cpdeplot = Image.open(cpdecol)
cpdeplotresized = cpdeplot.resize((750,480), Image.ANTIALIAS)
outimg.paste(cpdeplotresized,
(750*cpdecolind,
(cprows+1)*480 + (erows*480) + 480*cpderowind))
# from neighbors:
if nbrrows > 0:
# we have four tiles
# tile 1: neighbor objectid, ra, decl, distance, unphased LC
# tile 2: phased LC for gls
# tile 3: phased LC for pdm
# tile 4: phased LC for any other period finding method
# the priority is like so: ['bls','mav','aov','win']
for nbrind, nbr in enumerate(cpd['neighbors']):
# figure out which period finding methods are available for this
# neighbor. make sure to match the ones from the actual object in
# order of priority: 'gls','pdm','bls','aov','mav','acf','win'
nbrlspmethods = []
for lspmethod in cpd['pfmethods']:
if lspmethod in nbr:
nbrlspmethods.append(lspmethod)
# restrict to top three in priority
nbrlspmethods = nbrlspmethods[:3]
try:
# first panel: neighbor objectid, ra, decl, distance, unphased
# LC
nbrlc = Image.open(
_base64_to_file(
nbr['magseries']['plot'], None, writetostrio=True
)
)
outimg.paste(nbrlc,
(750*0,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind))
# overlay the objectinfo
objinfodraw.text(
(98,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind + 15),
('N%s: %s' % (nbrind + 1, nbr['objectid'])),
font=cpfontlarge,
fill=(0,0,255,255)
)
# overlay the objectinfo
objinfodraw.text(
(98,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind + 50),
('(RA, DEC) = (%.3f, %.3f), distance: %.1f arcsec' %
(nbr['ra'], nbr['decl'], nbr['dist'])),
font=cpfontnormal,
fill=(0,0,255,255)
)
# second panel: phased LC for gls
lsp1lc = Image.open(
_base64_to_file(
nbr[nbrlspmethods[0]][0]['plot'], None,
writetostrio=True
)
)
outimg.paste(lsp1lc,
(750*1,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind))
            # third panel: phased LC for the second-best period-finder method
lsp2lc = Image.open(
_base64_to_file(
nbr[nbrlspmethods[1]][0]['plot'], None,
writetostrio=True
)
)
outimg.paste(lsp2lc,
(750*2,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind))
            # fourth panel: phased LC for the third-best period-finder method
lsp3lc = Image.open(
_base64_to_file(
nbr[nbrlspmethods[2]][0]['plot'], None,
writetostrio=True
)
)
outimg.paste(lsp3lc,
(750*3,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind))
except Exception as e:
LOGERROR('neighbor %s does not have a magseries plot, '
'measurements are probably all nan' % nbr['objectid'])
# overlay the objectinfo
objinfodraw.text(
(98,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind + 15),
('N%s: %s' %
(nbrind + 1, nbr['objectid'])),
font=cpfontlarge,
fill=(0,0,255,255)
)
if 'ra' in nbr and 'decl' in nbr and 'dist' in nbr:
# overlay the objectinfo
objinfodraw.text(
(98,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind + 50),
('(RA, DEC) = (%.3f, %.3f), distance: %.1f arcsec' %
(nbr['ra'], nbr['decl'], nbr['dist'])),
font=cpfontnormal,
fill=(0,0,255,255)
)
elif 'objectinfo' in nbr:
# overlay the objectinfo
objinfodraw.text(
(98,
(cprows+1)*480 + (erows*480) + (cpderows*480) +
480*nbrind + 50),
('(RA, DEC) = (%.3f, %.3f), distance: %.1f arcsec' %
(nbr['objectinfo']['ra'],
nbr['objectinfo']['decl'],
nbr['objectinfo']['distarcsec'])),
font=cpfontnormal,
fill=(0,0,255,255)
)
#####################
## WRITE FINAL PNG ##
#####################
# check if the output filename is actually an instance of StringIO
if sys.version_info[:2] < (3,0):
is_strio = isinstance(outfile, cStringIO.InputType)
else:
is_strio = isinstance(outfile, StrIO)
if not is_strio:
# check if we've stupidly copied over the same filename as the input
# pickle to expected output file
if outfile.endswith('pkl'):
LOGWARNING('expected output PNG filename ends with .pkl, '
'changed to .png')
outfile = outfile.replace('.pkl','.png')
outimg.save(outfile, format='PNG', optimize=True)
if not is_strio:
if os.path.exists(outfile):
LOGINFO('checkplot pickle -> checkplot PNG: %s OK' % outfile)
return outfile
else:
LOGERROR('failed to write checkplot PNG')
return None
else:
LOGINFO('checkplot pickle -> StringIO instance OK')
return outfile
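# --- usage sketch (not part of the library) ---
# The in-memory output mode handled above: pass a BytesIO instance as
# `outfile` and the same instance is returned with the PNG written into it.
# This assumes the module's `StrIO` alias is `io.BytesIO` on Python 3; the
# pickle filename below is hypothetical.
from io import BytesIO
png_buffer = checkplot_pickle_to_png('checkplot-objectid1234.pkl', BytesIO())
png_bytes = png_buffer.getvalue()  # raw PNG bytes, e.g. for an HTTP response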
|
def cp2png(checkplotin, extrarows=None):
'''This is just a shortened form of the function above for convenience.
This only handles pickle files as input.
Parameters
----------
checkplotin : str
File name of a checkplot pickle file to convert to a PNG.
extrarows : list of tuples
This is a list of 4-element tuples containing paths to PNG files that
will be added to the end of the rows generated from the checkplotin
pickle/dict. Each tuple represents a row in the final output PNG
file. If there are less than 4 elements per tuple, the missing elements
will be filled in with white-space. If there are more than 4 elements
per tuple, only the first four will be used.
The purpose of this kwarg is to incorporate periodograms and phased LC
plots (in the form of PNGs) generated from an external period-finding
function or program (like VARTOOLS) to allow for comparison with
astrobase results.
        NOTE: the PNG files specified in `extrarows` here will be added to those
        already present in the input `checkplotdict['externalplots']` if that is
        not None, i.e. if you passed in a similar list of external plots to the
        :py:func:`astrobase.checkplot.pkl.checkplot_pickle` function earlier. In
        this case, `extrarows` can be used to add even more external plots if
        desired.
Each external plot PNG will be resized to 750 x 480 pixels to fit into
an output image cell.
By convention, each 4-element tuple should contain:
        - a periodogram PNG
- phased LC PNG with 1st best peak period from periodogram
- phased LC PNG with 2nd best peak period from periodogram
- phased LC PNG with 3rd best peak period from periodogram
Example of extrarows::
[('/path/to/external/bls-periodogram.png',
'/path/to/external/bls-phasedlc-plot-bestpeak.png',
'/path/to/external/bls-phasedlc-plot-peak2.png',
'/path/to/external/bls-phasedlc-plot-peak3.png'),
('/path/to/external/pdm-periodogram.png',
'/path/to/external/pdm-phasedlc-plot-bestpeak.png',
'/path/to/external/pdm-phasedlc-plot-peak2.png',
'/path/to/external/pdm-phasedlc-plot-peak3.png'),
...]
Returns
-------
str
The absolute path to the generated checkplot PNG.
'''
if checkplotin.endswith('.gz'):
outfile = checkplotin.replace('.pkl.gz','.png')
else:
outfile = checkplotin.replace('.pkl','.png')
return checkplot_pickle_to_png(checkplotin, outfile, extrarows=extrarows)
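# --- usage sketch (not part of the library) ---
# Convert a checkplot pickle to a PNG, appending one extra row of externally
# generated plots; all file paths here are hypothetical.
extra_row = [('bls-periodogram.png',
              'bls-phasedlc-peak1.png',
              'bls-phasedlc-peak2.png',
              'bls-phasedlc-peak3.png')]
png_file = cp2png('checkplot-objectid1234.pkl', extrarows=extra_row)
print('wrote checkplot PNG to: %s' % png_file)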
|
def flare_model(flareparams, times, mags, errs):
'''This is a flare model function, similar to Kowalski+ 2011.
From the paper by Pitkin+ 2014:
http://adsabs.harvard.edu/abs/2014MNRAS.445.2268P
Parameters
----------
flareparams : list of float
This defines the flare model::
[amplitude,
flare_peak_time,
rise_gaussian_stdev,
decay_time_constant]
where:
`amplitude`: the maximum flare amplitude in mags or flux. If flux, then
amplitude should be positive. If mags, amplitude should be negative.
`flare_peak_time`: time at which the flare maximum happens.
`rise_gaussian_stdev`: the stdev of the gaussian describing the rise of
the flare.
`decay_time_constant`: the time constant of the exponential fall of the
flare.
times,mags,errs : np.array
The input time-series of measurements and associated errors for which
the model will be generated. The times will be used to generate
model mags.
Returns
-------
(modelmags, times, mags, errs) : tuple
Returns the model mags evaluated at the input time values. Also returns
the input `times`, `mags`, and `errs`.
'''
(amplitude, flare_peak_time,
rise_gaussian_stdev, decay_time_constant) = flareparams
zerolevel = np.median(mags)
modelmags = np.full_like(times, zerolevel)
    # the model is the flare on top of the median zero level; adding the flare
    # to the observed mags instead would make the fit residual independent of
    # the data
    # before the peak: gaussian rise...
    modelmags[times < flare_peak_time] = (
        zerolevel +
        amplitude * np.exp(
            -((times[times < flare_peak_time] -
               flare_peak_time) *
              (times[times < flare_peak_time] -
               flare_peak_time)) /
            (2.0*rise_gaussian_stdev*rise_gaussian_stdev)
        )
    )
    # after the peak: exponential decay...
    modelmags[times > flare_peak_time] = (
        zerolevel +
        amplitude * np.exp(
            -((times[times > flare_peak_time] -
               flare_peak_time)) /
            (decay_time_constant)
        )
    )
return modelmags, times, mags, errs
|
def flare_model_residual(flareparams, times, mags, errs):
'''
This returns the residual between model mags and the actual mags.
Parameters
----------
flareparams : list of float
This defines the flare model::
[amplitude,
flare_peak_time,
rise_gaussian_stdev,
decay_time_constant]
where:
`amplitude`: the maximum flare amplitude in mags or flux. If flux, then
amplitude should be positive. If mags, amplitude should be negative.
`flare_peak_time`: time at which the flare maximum happens.
`rise_gaussian_stdev`: the stdev of the gaussian describing the rise of
the flare.
`decay_time_constant`: the time constant of the exponential fall of the
flare.
times,mags,errs : np.array
The input time-series of measurements and associated errors for which
the model will be generated. The times will be used to generate
model mags.
Returns
-------
np.array
The residuals between the input `mags` and generated `modelmags`,
weighted by the measurement errors in `errs`.
'''
modelmags, _, _, _ = flare_model(flareparams, times, mags, errs)
return (mags - modelmags)/errs
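# --- usage sketch (not part of the library) ---
# Fit the flare model above to a synthetic light curve using
# scipy.optimize.leastsq with `flare_model_residual` as the objective. The
# synthetic data below are made up purely for illustration.
import numpy as np
from scipy.optimize import leastsq
sketch_times = np.linspace(0.0, 1.0, 500)
sketch_errs = np.full_like(sketch_times, 0.01)
baseline = np.full_like(sketch_times, 12.0)  # quiescent mags -> zero level
# true parameters: [amplitude, peak_time, rise_stdev, decay_time_constant]
trueparams = [-0.8, 0.3, 0.01, 0.05]
modelmags, _, _, _ = flare_model(trueparams, sketch_times,
                                 baseline, sketch_errs)
sketch_mags = modelmags + np.random.normal(0.0, 0.01, sketch_times.size)
# least-squares fit from a perturbed initial guess
initparams = [-0.5, 0.28, 0.02, 0.08]
fitparams, ier = leastsq(flare_model_residual, initparams,
                         args=(sketch_times, sketch_mags, sketch_errs))
print('fit converged:', ier in (1, 2, 3, 4), 'params:', fitparams)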
|
def cache_clean_handler(min_age_hours=1):
"""This periodically cleans up the ~/.astrobase cache to save us from
disk-space doom.
Parameters
----------
min_age_hours : int
Files older than this number of hours from the current time will be
deleted.
Returns
-------
Nothing.
"""
    # find the files to delete
    cmd = (
        "find ~ec2-user/.astrobase -type f -mmin +{mmin} "
        "-exec rm -v '{{}}' \\;"
    )
    mmin = '%.1f' % (min_age_hours*60.0)
    cmd = cmd.format(mmin=mmin)
    try:
        proc = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE)
        # `rm -v` prints one line per deleted file; drop the trailing empty
        # string from the split before counting
        ndeleted = len([x for x in proc.stdout.decode().split('\n') if x])
        LOGWARNING('cache clean: %s files older than %s hours deleted' %
                   (ndeleted, min_age_hours))
except Exception as e:
LOGEXCEPTION('cache clean: could not delete old files')
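# --- usage sketch (not part of the library) ---
# Delete cache files older than 12 hours; with min_age_hours=12 the function
# effectively runs:
#   find ~ec2-user/.astrobase -type f -mmin +720.0 -exec rm -v '{}' \;
cache_clean_handler(min_age_hours=12)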
|
def shutdown_check_handler():
"""This checks the AWS instance data URL to see if there's a pending
shutdown for the instance.
    This is useful for AWS spot instances. If there is a pending shutdown posted
    to the instance data URL, we'll use the result of this function to break out
    of the processing loop and shut everything down ASAP before the instance
    dies.
Returns
-------
bool
- True if the instance is going to die soon.
- False if the instance is still safe.
"""
url = 'http://169.254.169.254/latest/meta-data/spot/instance-action'
try:
resp = requests.get(url, timeout=1.0)
resp.raise_for_status()
stopinfo = resp.json()
if 'action' in stopinfo and stopinfo['action'] in ('stop',
'terminate',
'hibernate'):
stoptime = stopinfo['time']
LOGWARNING('instance is going to %s at %s' % (stopinfo['action'],
stoptime))
resp.close()
return True
else:
resp.close()
return False
    except HTTPError as e:
        # raise_for_status() raised, so `resp` exists here; a 404 means no
        # shutdown is scheduled yet
        resp.close()
        return False
    except Exception as e:
        # `resp` may not exist if requests.get itself failed (e.g. timed out)
        if 'resp' in locals():
            resp.close()
        return False
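# --- usage sketch (not part of the library) ---
# Guard a long-running loop on a spot instance with the handler above,
# mirroring how the consumer loops below use it; `do_one_work_item` is a
# hypothetical stand-in for a real unit of work.
import time
def do_one_work_item():
    time.sleep(1.0)  # stand-in for real work
shutdown_last_check = time.monotonic()
while True:
    if (time.monotonic() - shutdown_last_check) > 60.0:
        if shutdown_check_handler():
            break  # instance is about to be reclaimed; stop taking new work
        shutdown_last_check = time.monotonic()
    do_one_work_item()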
|
def runcp_producer_loop(
lightcurve_list,
input_queue,
input_bucket,
result_queue,
result_bucket,
pfresult_list=None,
runcp_kwargs=None,
process_list_slice=None,
purge_queues_when_done=False,
delete_queues_when_done=False,
download_when_done=True,
save_state_when_done=True,
s3_client=None,
sqs_client=None
):
"""This sends checkplot making tasks to the input queue and monitors the
result queue for task completion.
Parameters
----------
lightcurve_list : str or list of str
This is either a string pointing to a file containing a list of light
curves filenames to process or the list itself. The names must
correspond to the full filenames of files stored on S3, including all
prefixes, but not include the 's3://<bucket name>/' bit (these will be
added automatically).
input_queue : str
This is the name of the SQS queue which will receive processing tasks
generated by this function. The queue URL will automatically be obtained
from AWS.
input_bucket : str
The name of the S3 bucket containing the light curve files to process.
result_queue : str
This is the name of the SQS queue that this function will listen to for
messages from the workers as they complete processing on their input
elements. This function will attempt to match input sent to the
`input_queue` with results coming into the `result_queue` so it knows
how many objects have been successfully processed. If this function
receives task results that aren't in its own input queue, it will
acknowledge them so they complete successfully, but not download them
automatically. This handles leftover tasks completing from a previous
run of this function.
result_bucket : str
The name of the S3 bucket which will receive the results from the
workers.
pfresult_list : list of str or None
This is a list of periodfinder result pickle S3 URLs associated with
each light curve. If provided, this will be used to add in phased light
curve plots to each checkplot pickle. If this is None, the worker loop
will produce checkplot pickles that only contain object information,
neighbor information, and unphased light curves.
runcp_kwargs : dict
This is a dict used to pass any extra keyword arguments to the
`lcproc.checkplotgen.runcp` function that will be run by the worker
loop.
process_list_slice : list
This is used to index into the input light curve list so a subset of the
full list can be processed in this specific run of this function.
        Use None for a slice boundary to emulate standard slice behavior, e.g.:
        process_list_slice = [10, None]  -> lightcurve_list[10:]
        process_list_slice = [None, 500] -> lightcurve_list[:500]
purge_queues_when_done : bool
If this is True, and this function exits (either when all done, or when
it is interrupted with a Ctrl+C), all outstanding elements in the
input/output queues that have not yet been acknowledged by workers or by
this function will be purged. This effectively cancels all outstanding
work.
delete_queues_when_done : bool
        If this is True, and this function exits (either when all done, or when
        it is interrupted with a Ctrl+C), all outstanding work items will be
        purged from the input/output queues and the queues themselves will be
        deleted.
download_when_done : bool
If this is True, the generated checkplot pickle for each input work item
will be downloaded immediately to the current working directory when the
worker functions report they're done with it.
save_state_when_done : bool
If this is True, will save the current state of the work item queue and
the work items acknowledged as completed to a pickle in the current
working directory. Call the `runcp_producer_loop_savedstate` function
below to resume processing from this saved state later.
s3_client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its S3 download operations. Alternatively, pass in an existing
`boto3.Client` instance to re-use it here.
sqs_client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its SQS operations. Alternatively, pass in an existing
`boto3.Client` instance to re-use it here.
Returns
-------
dict or str
Returns the current work state as a dict or str path to the generated
work state pickle depending on if `save_state_when_done` is True.
"""
if not sqs_client:
sqs_client = boto3.client('sqs')
if not s3_client:
s3_client = boto3.client('s3')
if isinstance(lightcurve_list, str) and os.path.exists(lightcurve_list):
# get the LC list
with open(lightcurve_list, 'r') as infd:
lclist = infd.readlines()
lclist = [x.replace('\n','') for x in lclist if len(x) > 0]
if process_list_slice is not None:
lclist = lclist[process_list_slice[0]:process_list_slice[1]]
        # strip any leading '/' but keep entries without one as-is
        lclist = [x[1:] if x.startswith('/') else x for x in lclist]
lclist = ['s3://%s/%s' % (input_bucket, x) for x in lclist]
    # this handles direct invocation using lists of s3:// urls of light curves
    elif isinstance(lightcurve_list, list):
        lclist = lightcurve_list
    else:
        raise ValueError('lightcurve_list must be a path to an existing '
                         'light curve list file or a list of S3 URLs')
# set up the input and output queues
# check if the queues by the input and output names given exist already
# if they do, go ahead and use them
# if they don't, make new ones.
try:
inq = sqs_client.get_queue_url(QueueName=input_queue)
inq_url = inq['QueueUrl']
LOGINFO('input queue already exists, skipping creation...')
except ClientError as e:
inq = awsutils.sqs_create_queue(input_queue, client=sqs_client)
inq_url = inq['url']
try:
outq = sqs_client.get_queue_url(QueueName=result_queue)
outq_url = outq['QueueUrl']
LOGINFO('result queue already exists, skipping creation...')
except ClientError as e:
outq = awsutils.sqs_create_queue(result_queue, client=sqs_client)
outq_url = outq['url']
LOGINFO('input queue: %s' % inq_url)
LOGINFO('output queue: %s' % outq_url)
# wait until queues are up
LOGINFO('waiting for queues to become ready...')
time.sleep(10.0)
# for each item in the lightcurve_list, send it to the input queue and wait
# until it's done to send another one
if pfresult_list is None:
pfresult_list = [None for x in lclist]
for lc, pf in zip(lclist, pfresult_list):
this_item = {
'target': lc,
'action': 'runcp',
'args': (pf,),
'kwargs':runcp_kwargs if runcp_kwargs is not None else {},
'outbucket': result_bucket,
'outqueue': outq_url
}
resp = awsutils.sqs_put_item(inq_url, this_item, client=sqs_client)
if resp:
LOGINFO('sent %s to queue: %s' % (lc,inq_url))
# now block until all objects are done
done_objects = {}
LOGINFO('all items queued, waiting for results...')
# listen to the kill and term signals and raise KeyboardInterrupt when
# called
signal.signal(signal.SIGINT, kill_handler)
signal.signal(signal.SIGTERM, kill_handler)
while len(list(done_objects.keys())) < len(lclist):
try:
result = awsutils.sqs_get_item(outq_url, client=sqs_client)
if result is not None and len(result) > 0:
recv = result[0]
try:
processed_object = recv['item']['target']
except KeyError:
LOGWARNING('unknown target in received item: %s' % recv)
processed_object = 'unknown-lc'
cpf = recv['item']['cpf']
receipt = recv['receipt_handle']
if processed_object in lclist:
if processed_object not in done_objects:
done_objects[processed_object] = [cpf]
else:
done_objects[processed_object].append(cpf)
LOGINFO('done with %s -> %s' % (processed_object, cpf))
if download_when_done:
                        getobj = awsutils.s3_get_url(
                            cpf,
                            client=s3_client
                        )
LOGINFO('downloaded %s -> %s' % (cpf, getobj))
else:
LOGWARNING('processed object returned is not in '
'queued target list, probably from an '
'earlier run. accepting but not downloading.')
awsutils.sqs_delete_item(outq_url, receipt)
except KeyboardInterrupt as e:
LOGWARNING('breaking out of producer wait-loop')
break
# delete the input and output queues when we're done
LOGINFO('done with processing.')
time.sleep(1.0)
if purge_queues_when_done:
LOGWARNING('purging queues at exit, please wait 10 seconds...')
sqs_client.purge_queue(QueueUrl=inq_url)
sqs_client.purge_queue(QueueUrl=outq_url)
time.sleep(10.0)
if delete_queues_when_done:
LOGWARNING('deleting queues at exit')
awsutils.sqs_delete_queue(inq_url)
awsutils.sqs_delete_queue(outq_url)
work_state = {
'done': done_objects,
'in_progress': list(set(lclist) - set(done_objects.keys())),
'args':((os.path.abspath(lightcurve_list) if
isinstance(lightcurve_list, str) else lightcurve_list),
input_queue,
input_bucket,
result_queue,
result_bucket),
'kwargs':{'pfresult_list':pfresult_list,
'runcp_kwargs':runcp_kwargs,
'process_list_slice':process_list_slice,
'download_when_done':download_when_done,
'purge_queues_when_done':purge_queues_when_done,
'save_state_when_done':save_state_when_done,
'delete_queues_when_done':delete_queues_when_done}
}
if save_state_when_done:
with open('runcp-queue-producer-loop-state.pkl','wb') as outfd:
pickle.dump(work_state, outfd, pickle.HIGHEST_PROTOCOL)
# at the end, return the done_objects dict
# also return the list of unprocessed items if any
return work_state
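# --- usage sketch (not part of the library) ---
# Queue the first 100 light curves from a list file for checkplot-making and
# wait for the workers; bucket and queue names here are hypothetical.
work_state = runcp_producer_loop(
    'lightcurves-to-process.txt',
    'runcp-input-queue',
    'my-lightcurve-bucket',
    'runcp-result-queue',
    'my-checkplot-bucket',
    process_list_slice=[None, 100],
    download_when_done=True,
    save_state_when_done=True
)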
|
def runcp_producer_loop_savedstate(
use_saved_state=None,
lightcurve_list=None,
input_queue=None,
input_bucket=None,
result_queue=None,
result_bucket=None,
pfresult_list=None,
runcp_kwargs=None,
process_list_slice=None,
download_when_done=True,
purge_queues_when_done=True,
save_state_when_done=True,
delete_queues_when_done=False,
s3_client=None,
sqs_client=None
):
"""This wraps the function above to allow for loading previous state from a
file.
Parameters
----------
use_saved_state : str or None
This is the path to the saved state pickle file produced by a previous
run of `runcp_producer_loop`. Will get all of the arguments to run
another instance of the loop from that pickle file. If this is None, you
MUST provide all of the appropriate arguments to that function.
lightcurve_list : str or list of str or None
This is either a string pointing to a file containing a list of light
curves filenames to process or the list itself. The names must
correspond to the full filenames of files stored on S3, including all
prefixes, but not include the 's3://<bucket name>/' bit (these will be
added automatically).
input_queue : str or None
This is the name of the SQS queue which will receive processing tasks
generated by this function. The queue URL will automatically be obtained
from AWS.
input_bucket : str or None
The name of the S3 bucket containing the light curve files to process.
result_queue : str or None
This is the name of the SQS queue that this function will listen to for
messages from the workers as they complete processing on their input
elements. This function will attempt to match input sent to the
`input_queue` with results coming into the `result_queue` so it knows
how many objects have been successfully processed. If this function
receives task results that aren't in its own input queue, it will
acknowledge them so they complete successfully, but not download them
automatically. This handles leftover tasks completing from a previous
run of this function.
result_bucket : str or None
The name of the S3 bucket which will receive the results from the
workers.
pfresult_list : list of str or None
This is a list of periodfinder result pickle S3 URLs associated with
each light curve. If provided, this will be used to add in phased light
curve plots to each checkplot pickle. If this is None, the worker loop
will produce checkplot pickles that only contain object information,
neighbor information, and unphased light curves.
runcp_kwargs : dict or None
This is a dict used to pass any extra keyword arguments to the
`lcproc.checkplotgen.runcp` function that will be run by the worker
loop.
process_list_slice : list or None
This is used to index into the input light curve list so a subset of the
full list can be processed in this specific run of this function.
        Use None for a slice boundary to emulate standard slice behavior, e.g.:
        process_list_slice = [10, None]  -> lightcurve_list[10:]
        process_list_slice = [None, 500] -> lightcurve_list[:500]
purge_queues_when_done : bool or None
If this is True, and this function exits (either when all done, or when
it is interrupted with a Ctrl+C), all outstanding elements in the
input/output queues that have not yet been acknowledged by workers or by
this function will be purged. This effectively cancels all outstanding
work.
delete_queues_when_done : bool or None
        If this is True, and this function exits (either when all done, or when
        it is interrupted with a Ctrl+C), all outstanding work items will be
        purged from the input/output queues and the queues themselves will be
        deleted.
download_when_done : bool or None
If this is True, the generated checkplot pickle for each input work item
will be downloaded immediately to the current working directory when the
worker functions report they're done with it.
save_state_when_done : bool or None
If this is True, will save the current state of the work item queue and
the work items acknowledged as completed to a pickle in the current
working directory. Call the `runcp_producer_loop_savedstate` function
below to resume processing from this saved state later.
s3_client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its S3 download operations. Alternatively, pass in an existing
`boto3.Client` instance to re-use it here.
sqs_client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its SQS operations. Alternatively, pass in an existing
`boto3.Client` instance to re-use it here.
Returns
-------
dict or str
Returns the current work state as a dict or str path to the generated
work state pickle depending on if `save_state_when_done` is True.
"""
if use_saved_state is not None and os.path.exists(use_saved_state):
with open(use_saved_state,'rb') as infd:
saved_state = pickle.load(infd)
# run the producer loop using the saved state's todo list
return runcp_producer_loop(
saved_state['in_progress'],
saved_state['args'][1],
saved_state['args'][2],
saved_state['args'][3],
saved_state['args'][4],
**saved_state['kwargs']
)
else:
return runcp_producer_loop(
lightcurve_list,
input_queue,
input_bucket,
result_queue,
result_bucket,
pfresult_list=pfresult_list,
runcp_kwargs=runcp_kwargs,
process_list_slice=process_list_slice,
download_when_done=download_when_done,
purge_queues_when_done=purge_queues_when_done,
save_state_when_done=save_state_when_done,
delete_queues_when_done=delete_queues_when_done,
s3_client=s3_client,
sqs_client=sqs_client
)
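# --- usage sketch (not part of the library) ---
# Resume a previous producer run: all other arguments are recovered from the
# state pickle written by `runcp_producer_loop`.
work_state = runcp_producer_loop_savedstate(
    use_saved_state='runcp-queue-producer-loop-state.pkl'
)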
|
def runcp_consumer_loop(
in_queue_url,
workdir,
lclist_pkl_s3url,
lc_altexts=('',),
wait_time_seconds=5,
cache_clean_timer_seconds=3600.0,
shutdown_check_timer_seconds=60.0,
sqs_client=None,
s3_client=None
):
"""This runs checkplot pickle making in a loop until interrupted.
Consumes work task items from an input queue set up by `runcp_producer_loop`
above. For the moment, we don't generate neighbor light curves since this
would require a lot more S3 calls.
Parameters
----------
in_queue_url : str
The SQS URL of the input queue to listen to for work assignment
messages. The task orders will include the input and output S3 bucket
names, as well as the URL of the output queue to where this function
will report its work-complete or work-failed status.
workdir : str
The directory on the local machine where this worker loop will download
the input light curves and associated period-finder results (if any),
process them, and produce its output checkplot pickles. These will then
be uploaded to the specified S3 output bucket and then deleted from the
workdir when the upload is confirmed to make it safely to S3.
    lclist_pkl_s3url : str
S3 URL of a catalog pickle generated by `lcproc.catalogs.make_lclist`
that contains objectids and coordinates, as well as a kdtree for all of
the objects in the current light curve collection being processed. This
is used to look up neighbors for each object being processed.
lc_altexts : sequence of str
If not None, this is a sequence of alternate extensions to try for the
input light curve file other than the one provided in the input task
order. For example, to get anything that's an .sqlite where .sqlite.gz
        is expected, use `lc_altexts=['']` to strip the .gz.
wait_time_seconds : int
The amount of time to wait in the input SQS queue for an input task
order. If this timeout expires and no task has been received, this
function goes back to the top of the work loop.
cache_clean_timer_seconds : float
The amount of time in seconds to wait before periodically removing old
files (such as finder chart FITS, external service result pickles) from
the astrobase cache directory. These accumulate as the work items are
processed, and take up significant space, so must be removed
periodically.
shutdown_check_timer_seconds : float
The amount of time to wait before checking for a pending EC2 shutdown
message for the instance this worker loop is operating on. If a shutdown
is noticed, the worker loop is cancelled in preparation for instance
shutdown.
sqs_client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its SQS operations. Alternatively, pass in an existing
`boto3.Client` instance to re-use it here.
s3_client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its S3 operations. Alternatively, pass in an existing
`boto3.Client` instance to re-use it here.
Returns
-------
Nothing.
"""
if not sqs_client:
sqs_client = boto3.client('sqs')
if not s3_client:
s3_client = boto3.client('s3')
lclist_pklf = lclist_pkl_s3url.split('/')[-1]
if not os.path.exists(lclist_pklf):
# get the lclist pickle from S3 to help with neighbor queries
lclist_pklf = awsutils.s3_get_url(
lclist_pkl_s3url,
client=s3_client
)
with open(lclist_pklf,'rb') as infd:
lclistpkl = pickle.load(infd)
# listen to the kill and term signals and raise KeyboardInterrupt when
# called
signal.signal(signal.SIGINT, kill_handler)
signal.signal(signal.SIGTERM, kill_handler)
shutdown_last_time = time.monotonic()
diskspace_last_time = time.monotonic()
while True:
curr_time = time.monotonic()
if (curr_time - shutdown_last_time) > shutdown_check_timer_seconds:
shutdown_check = shutdown_check_handler()
if shutdown_check:
LOGWARNING('instance will die soon, breaking loop')
break
shutdown_last_time = time.monotonic()
if (curr_time - diskspace_last_time) > cache_clean_timer_seconds:
cache_clean_handler()
diskspace_last_time = time.monotonic()
try:
# receive a single message from the inqueue
work = awsutils.sqs_get_item(in_queue_url,
client=sqs_client,
raiseonfail=True)
# JSON deserialize the work item
if work is not None and len(work) > 0:
recv = work[0]
# skip any messages that don't tell us to runcp
# FIXME: use the MessageAttributes for setting topics instead
action = recv['item']['action']
if action != 'runcp':
continue
target = recv['item']['target']
args = recv['item']['args']
kwargs = recv['item']['kwargs']
outbucket = recv['item']['outbucket']
if 'outqueue' in recv['item']:
out_queue_url = recv['item']['outqueue']
else:
out_queue_url = None
receipt = recv['receipt_handle']
# download the target from S3 to a file in the work directory
try:
lc_filename = awsutils.s3_get_url(
target,
altexts=lc_altexts,
client=s3_client,
)
# get the period-finder pickle if present in args
if len(args) > 0 and args[0] is not None:
pf_pickle = awsutils.s3_get_url(
args[0],
client=s3_client
)
else:
pf_pickle = None
# now runcp
cpfs = runcp(
pf_pickle,
workdir,
workdir,
lcfname=lc_filename,
lclistpkl=lclistpkl,
makeneighborlcs=False,
**kwargs
)
if cpfs and all(os.path.exists(x) for x in cpfs):
LOGINFO('runcp OK for LC: %s, PF: %s -> %s' %
(lc_filename, pf_pickle, cpfs))
# check if the file exists already because it's been
# processed somewhere else
resp = s3_client.list_objects_v2(
Bucket=outbucket,
MaxKeys=1,
Prefix=cpfs[0]
)
outbucket_list = resp.get('Contents',[])
if outbucket_list and len(outbucket_list) > 0:
LOGWARNING(
'not uploading runcp results for %s because '
'they exist in the output bucket already'
% target
)
awsutils.sqs_delete_item(in_queue_url, receipt)
continue
for cpf in cpfs:
put_url = awsutils.s3_put_file(cpf,
outbucket,
client=s3_client)
if put_url is not None:
LOGINFO('result uploaded to %s' % put_url)
# put the S3 URL of the output into the output
# queue if requested
if out_queue_url is not None:
awsutils.sqs_put_item(
out_queue_url,
{'cpf':put_url,
'target': target,
'lc_filename':lc_filename,
'lclistpkl':lclist_pklf,
'kwargs':kwargs},
raiseonfail=True
)
# delete the result from the local directory
os.remove(cpf)
# if the upload fails, don't acknowledge the
# message. might be a temporary S3 failure, so
# another worker might succeed later.
else:
LOGERROR('failed to upload %s to S3' % cpf)
# delete the input item from the input queue to
# acknowledge its receipt and indicate that
# processing is done and successful
awsutils.sqs_delete_item(in_queue_url,
receipt)
# delete the light curve file when we're done with it
if ( (lc_filename is not None) and
(os.path.exists(lc_filename)) ):
os.remove(lc_filename)
# if runcp failed outright, don't requeue. instead, write a
# ('failed-checkplot-%s.pkl' % lc_filename) file to the
# output S3 bucket.
else:
LOGWARNING('runcp failed for LC: %s, PF: %s' %
(lc_filename, pf_pickle))
with open('failed-checkplot-%s.pkl' %
lc_filename, 'wb') as outfd:
pickle.dump(
{'in_queue_url':in_queue_url,
'target':target,
'lc_filename':lc_filename,
'lclistpkl':lclist_pklf,
'kwargs':kwargs,
'outbucket':outbucket,
'out_queue_url':out_queue_url},
outfd, pickle.HIGHEST_PROTOCOL
)
put_url = awsutils.s3_put_file(
'failed-checkplot-%s.pkl' % lc_filename,
outbucket,
client=s3_client
)
# put the S3 URL of the output into the output
# queue if requested
if out_queue_url is not None:
awsutils.sqs_put_item(
out_queue_url,
{'cpf':put_url,
'lc_filename':lc_filename,
'lclistpkl':lclist_pklf,
'kwargs':kwargs},
raiseonfail=True
)
# delete the input item from the input queue to
# acknowledge its receipt and indicate that
# processing is done
awsutils.sqs_delete_item(in_queue_url,
receipt,
raiseonfail=True)
# delete the light curve file when we're done with it
if ( (lc_filename is not None) and
(os.path.exists(lc_filename)) ):
os.remove(lc_filename)
except ClientError as e:
LOGWARNING('queues have disappeared. stopping worker loop')
break
# if there's any other exception, put a failed response into the
# output bucket and queue
except Exception as e:
LOGEXCEPTION('could not process input from queue')
if 'lc_filename' in locals():
with open('failed-checkplot-%s.pkl' %
lc_filename,'wb') as outfd:
pickle.dump(
{'in_queue_url':in_queue_url,
'target':target,
'lc_filename':lc_filename,
'lclistpkl':lclist_pklf,
'kwargs':kwargs,
'outbucket':outbucket,
'out_queue_url':out_queue_url},
outfd, pickle.HIGHEST_PROTOCOL
)
put_url = awsutils.s3_put_file(
'failed-checkplot-%s.pkl' % lc_filename,
outbucket,
client=s3_client
)
# put the S3 URL of the output into the output
# queue if requested
if out_queue_url is not None:
awsutils.sqs_put_item(
out_queue_url,
{'cpf':put_url,
'lc_filename':lc_filename,
'lclistpkl':lclist_pklf,
'kwargs':kwargs},
raiseonfail=True
)
if ( (lc_filename is not None) and
(os.path.exists(lc_filename)) ):
os.remove(lc_filename)
# delete the input item from the input queue to
# acknowledge its receipt and indicate that
# processing is done
awsutils.sqs_delete_item(in_queue_url,
receipt,
raiseonfail=True)
# a keyboard interrupt kills the loop
except KeyboardInterrupt:
LOGWARNING('breaking out of the processing loop.')
break
# if the queues disappear, then the producer loop is done and we should
# exit
except ClientError as e:
LOGWARNING('queues have disappeared. stopping worker loop')
break
        # any other exception continues the loop. we'll write the output file
        # to the output S3 bucket (and any optional output queue), but add a
        # failed-* prefix to it to indicate that processing failed. FIXME:
        # could use a dead-letter queue for this instead
except Exception as e:
LOGEXCEPTION('could not process input from queue')
if 'lc_filename' in locals():
with open('failed-checkplot-%s.pkl' %
lc_filename,'wb') as outfd:
pickle.dump(
{'in_queue_url':in_queue_url,
'target':target,
'lclistpkl':lclist_pklf,
'kwargs':kwargs,
'outbucket':outbucket,
'out_queue_url':out_queue_url},
outfd, pickle.HIGHEST_PROTOCOL
)
put_url = awsutils.s3_put_file(
'failed-checkplot-%s.pkl' % lc_filename,
outbucket,
client=s3_client
)
# put the S3 URL of the output into the output
# queue if requested
if out_queue_url is not None:
awsutils.sqs_put_item(
out_queue_url,
{'cpf':put_url,
'lclistpkl':lclist_pklf,
'kwargs':kwargs},
raiseonfail=True
)
if ( (lc_filename is not None) and
(os.path.exists(lc_filename)) ):
os.remove(lc_filename)
# delete the input item from the input queue to
# acknowledge its receipt and indicate that
# processing is done
awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True)
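# --- usage sketch (not part of the library) ---
# Worker-side launch; the queue URL and catalog pickle S3 URL here are
# hypothetical. This blocks until interrupted, until the queues disappear, or
# until a spot-instance shutdown is detected.
runcp_consumer_loop(
    'https://sqs.us-east-1.amazonaws.com/XXXXXXXXXXXX/runcp-input-queue',
    '/home/ec2-user/work',
    's3://my-lightcurve-bucket/lclist-catalog.pkl'
)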
|
def runpf_consumer_loop(
in_queue_url,
workdir,
lc_altexts=('',),
wait_time_seconds=5,
shutdown_check_timer_seconds=60.0,
sqs_client=None,
s3_client=None
):
"""This runs period-finding in a loop until interrupted.
Consumes work task items from an input queue set up by `runpf_producer_loop`
above.
Parameters
----------
in_queue_url : str
The SQS URL of the input queue to listen to for work assignment
messages. The task orders will include the input and output S3 bucket
names, as well as the URL of the output queue to where this function
will report its work-complete or work-failed status.
workdir : str
The directory on the local machine where this worker loop will download
the input light curves, process them, and produce its output
periodfinding result pickles. These will then be uploaded to the
specified S3 output bucket, and then deleted from the local disk.
lc_altexts : sequence of str
If not None, this is a sequence of alternate extensions to try for the
input light curve file other than the one provided in the input task
order. For example, to get anything that's an .sqlite where .sqlite.gz
        is expected, use `lc_altexts=['']` to strip the .gz.
wait_time_seconds : int
The amount of time to wait in the input SQS queue for an input task
order. If this timeout expires and no task has been received, this
function goes back to the top of the work loop.
shutdown_check_timer_seconds : float
The amount of time to wait before checking for a pending EC2 shutdown
message for the instance this worker loop is operating on. If a shutdown
is noticed, the worker loop is cancelled in preparation for instance
shutdown.
sqs_client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its SQS operations. Alternatively, pass in an existing
`boto3.Client` instance to re-use it here.
s3_client : boto3.Client or None
If None, this function will instantiate a new `boto3.Client` object to
use in its S3 operations. Alternatively, pass in an existing
`boto3.Client` instance to re-use it here.
Returns
-------
Nothing.
"""
if not sqs_client:
sqs_client = boto3.client('sqs')
if not s3_client:
s3_client = boto3.client('s3')
# listen to the kill and term signals and raise KeyboardInterrupt when
# called
signal.signal(signal.SIGINT, kill_handler)
signal.signal(signal.SIGTERM, kill_handler)
shutdown_last_time = time.monotonic()
while True:
curr_time = time.monotonic()
if (curr_time - shutdown_last_time) > shutdown_check_timer_seconds:
shutdown_check = shutdown_check_handler()
if shutdown_check:
LOGWARNING('instance will die soon, breaking loop')
break
shutdown_last_time = time.monotonic()
try:
# receive a single message from the inqueue
work = awsutils.sqs_get_item(in_queue_url,
client=sqs_client,
raiseonfail=True)
# JSON deserialize the work item
if work is not None and len(work) > 0:
recv = work[0]
# skip any messages that don't tell us to runpf
action = recv['item']['action']
if action != 'runpf':
continue
target = recv['item']['target']
args = recv['item']['args']
kwargs = recv['item']['kwargs']
outbucket = recv['item']['outbucket']
if 'outqueue' in recv['item']:
out_queue_url = recv['item']['outqueue']
else:
out_queue_url = None
receipt = recv['receipt_handle']
# download the target from S3 to a file in the work directory
try:
lc_filename = awsutils.s3_get_url(
target,
altexts=lc_altexts,
client=s3_client
)
runpf_args = (lc_filename, args[0])
# now runpf
pfresult = runpf(
*runpf_args,
**kwargs
)
if pfresult and os.path.exists(pfresult):
LOGINFO('runpf OK for LC: %s -> %s' %
(lc_filename, pfresult))
# check if the file exists already because it's been
# processed somewhere else
resp = s3_client.list_objects_v2(
Bucket=outbucket,
MaxKeys=1,
Prefix=pfresult
)
outbucket_list = resp.get('Contents',[])
if outbucket_list and len(outbucket_list) > 0:
LOGWARNING(
'not uploading pfresult for %s because '
'it exists in the output bucket already'
% target
)
awsutils.sqs_delete_item(in_queue_url, receipt)
continue
put_url = awsutils.s3_put_file(pfresult,
outbucket,
client=s3_client)
if put_url is not None:
LOGINFO('result uploaded to %s' % put_url)
# put the S3 URL of the output into the output
# queue if requested
if out_queue_url is not None:
awsutils.sqs_put_item(
out_queue_url,
{'pfresult':put_url,
'target': target,
'lc_filename':lc_filename,
'kwargs':kwargs},
raiseonfail=True
)
# delete the result from the local directory
os.remove(pfresult)
# if the upload fails, don't acknowledge the
# message. might be a temporary S3 failure, so
# another worker might succeed later.
# FIXME: add SNS bits to warn us of failures
else:
LOGERROR('failed to upload %s to S3' % pfresult)
os.remove(pfresult)
# delete the input item from the input queue to
# acknowledge its receipt and indicate that
# processing is done and successful
awsutils.sqs_delete_item(in_queue_url, receipt)
# delete the light curve file when we're done with it
if ( (lc_filename is not None) and
(os.path.exists(lc_filename)) ):
os.remove(lc_filename)
                    # if runpf failed outright, don't requeue. instead, write a
                    # ('failed-periodfinding-%s.pkl' % lc_filename) file to the
                    # output S3 bucket.
else:
LOGWARNING('runpf failed for LC: %s' %
(lc_filename,))
with open('failed-periodfinding-%s.pkl' %
lc_filename, 'wb') as outfd:
pickle.dump(
{'in_queue_url':in_queue_url,
'target':target,
'lc_filename':lc_filename,
'kwargs':kwargs,
'outbucket':outbucket,
'out_queue_url':out_queue_url},
outfd, pickle.HIGHEST_PROTOCOL
)
put_url = awsutils.s3_put_file(
'failed-periodfinding-%s.pkl' % lc_filename,
outbucket,
client=s3_client
)
# put the S3 URL of the output into the output
# queue if requested
if out_queue_url is not None:
awsutils.sqs_put_item(
out_queue_url,
{'pfresult':put_url,
'lc_filename':lc_filename,
'kwargs':kwargs},
raiseonfail=True
)
# delete the input item from the input queue to
# acknowledge its receipt and indicate that
# processing is done
awsutils.sqs_delete_item(in_queue_url,
receipt,
raiseonfail=True)
# delete the light curve file when we're done with it
if ( (lc_filename is not None) and
(os.path.exists(lc_filename)) ):
os.remove(lc_filename)
except ClientError as e:
LOGWARNING('queues have disappeared. stopping worker loop')
break
# if there's any other exception, put a failed response into the
# output bucket and queue
except Exception as e:
LOGEXCEPTION('could not process input from queue')
if 'lc_filename' in locals():
with open('failed-periodfinding-%s.pkl' %
lc_filename,'wb') as outfd:
pickle.dump(
{'in_queue_url':in_queue_url,
'target':target,
'lc_filename':lc_filename,
'kwargs':kwargs,
'outbucket':outbucket,
'out_queue_url':out_queue_url},
outfd, pickle.HIGHEST_PROTOCOL
)
put_url = awsutils.s3_put_file(
'failed-periodfinding-%s.pkl' % lc_filename,
outbucket,
client=s3_client
)
# put the S3 URL of the output into the output
# queue if requested
if out_queue_url is not None:
awsutils.sqs_put_item(
out_queue_url,
{'pfresult':put_url,
'lc_filename':lc_filename,
'kwargs':kwargs},
raiseonfail=True
)
# delete the light curve file when we're done with it
if ( (lc_filename is not None) and
(os.path.exists(lc_filename)) ):
os.remove(lc_filename)
# delete the input item from the input queue to
# acknowledge its receipt and indicate that
# processing is done
awsutils.sqs_delete_item(in_queue_url,
receipt,
raiseonfail=True)
# a keyboard interrupt kills the loop
except KeyboardInterrupt:
LOGWARNING('breaking out of the processing loop.')
break
# if the queues disappear, then the producer loop is done and we should
# exit
except ClientError as e:
LOGWARNING('queues have disappeared. stopping worker loop')
break
        # any other exception continues the loop. we'll write the output file
        # to the output S3 bucket (and any optional output queue), but add a
        # failed-* prefix to it to indicate that processing failed. FIXME:
        # could use a dead-letter queue for this instead
except Exception as e:
LOGEXCEPTION('could not process input from queue')
if 'lc_filename' in locals():
with open('failed-periodfinding-%s.pkl' %
lc_filename,'wb') as outfd:
pickle.dump(
{'in_queue_url':in_queue_url,
'target':target,
'kwargs':kwargs,
'outbucket':outbucket,
'out_queue_url':out_queue_url},
outfd, pickle.HIGHEST_PROTOCOL
)
put_url = awsutils.s3_put_file(
'failed-periodfinding-%s.pkl' % lc_filename,
outbucket,
client=s3_client
)
# put the S3 URL of the output into the output
# queue if requested
if out_queue_url is not None:
awsutils.sqs_put_item(
out_queue_url,
{'cpf':put_url,
'kwargs':kwargs},
raiseonfail=True
)
if ( (lc_filename is not None) and
(os.path.exists(lc_filename)) ):
os.remove(lc_filename)
# delete the input item from the input queue to
# acknowledge its receipt and indicate that
# processing is done
awsutils.sqs_delete_item(in_queue_url, receipt, raiseonfail=True)
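# --- usage sketch (not part of the library) ---
# Worker-side launch for period-finding; the queue URL is hypothetical.
runpf_consumer_loop(
    'https://sqs.us-east-1.amazonaws.com/XXXXXXXXXXXX/runpf-input-queue',
    '/home/ec2-user/work'
)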
|
def gaussianeb_fit_magseries(times, mags, errs,
ebparams,
sigclip=10.0,
plotfit=False,
magsarefluxes=False,
verbose=True):
'''This fits a double inverted gaussian EB model to a magnitude time series.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to fit the EB model to.
ebparams : list of float
This is a list containing the eclipsing binary parameters::
ebparams = [period (time),
epoch (time),
pdepth (mags),
pduration (phase),
psdepthratio,
secondaryphase]
`period` is the period in days.
`epoch` is the time of primary minimum in JD.
`pdepth` is the depth of the primary eclipse:
- for magnitudes -> `pdepth` should be < 0
- for fluxes -> `pdepth` should be > 0
`pduration` is the length of the primary eclipse in phase.
`psdepthratio` is the ratio of the secondary eclipse depth to that of
the primary eclipse.
`secondaryphase` is the phase at which the minimum of the secondary
eclipse is located. This effectively parameterizes eccentricity.
If `epoch` is None, this function will do an initial spline fit to find
an approximate minimum of the phased light curve using the given period.
        The `pdepth` provided is checked against the value of
        `magsarefluxes`. If `magsarefluxes = True`, the `pdepth` is forced to
        be > 0; if `magsarefluxes = False`, the `pdepth` is forced to be < 0.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or False
If this is a string, this function will make a plot for the fit to the
mag/flux time-series and writes the plot to the path specified here.
verbose : bool
If True, will indicate progress and warn of any problems.
Returns
-------
dict
This function returns a dict containing the model fit parameters, the
minimized chi-sq value and the reduced chi-sq value. The form of this
dict is mostly standardized across all functions in this module::
{
'fittype':'gaussianeb',
'fitinfo':{
'initialparams':the initial EB params provided,
'finalparams':the final model fit EB params,
'finalparamerrs':formal errors in the params,
'leastsqfit':the full tuple returned by scipy.leastsq,
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
},
'fitchisq': the minimized value of the fit's chi-sq,
'fitredchisq':the reduced chi-sq value,
'fitplotfile': the output fit plot if fitplot is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
# get rid of zero errs
nzind = npnonzero(serrs)
stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
# check the ebparams
ebperiod, ebepoch, ebdepth = ebparams[0:3]
# check if we have a ebepoch to use
if ebepoch is None:
if verbose:
LOGWARNING('no ebepoch given in ebparams, '
'trying to figure it out automatically...')
# do a spline fit to figure out the approximate min of the LC
try:
spfit = spline_fit_magseries(times, mags, errs, ebperiod,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
verbose=verbose)
ebepoch = spfit['fitinfo']['fitepoch']
# if the spline-fit fails, try a savgol fit instead
except Exception as e:
sgfit = savgol_fit_magseries(times, mags, errs, ebperiod,
sigclip=sigclip,
magsarefluxes=magsarefluxes,
verbose=verbose)
ebepoch = sgfit['fitinfo']['fitepoch']
# if everything failed, then bail out and ask for the ebepoch
finally:
if ebepoch is None:
LOGERROR("couldn't automatically figure out the eb epoch, "
"can't continue. please provide it in ebparams.")
# assemble the returndict
returndict = {
'fittype':'gaussianeb',
'fitinfo':{
'initialparams':ebparams,
'finalparams':None,
'leastsqfit':None,
'fitmags':None,
'fitepoch':None,
},
'fitchisq':npnan,
'fitredchisq':npnan,
'fitplotfile':None,
'magseries':{
'phase':None,
'times':None,
'mags':None,
'errs':None,
'magsarefluxes':magsarefluxes,
},
}
return returndict
else:
if ebepoch.size > 1:
if verbose:
LOGWARNING('could not auto-find a single minimum '
'for ebepoch, using the first one returned')
ebparams[1] = ebepoch[0]
else:
if verbose:
LOGWARNING(
'using automatically determined ebepoch = %.5f'
% ebepoch
)
ebparams[1] = ebepoch.item()
# next, check the ebdepth and fix it to the form required
if magsarefluxes:
if ebdepth < 0.0:
            ebparams[2] = -ebdepth
else:
if ebdepth > 0.0:
            ebparams[2] = -ebdepth
# finally, do the fit
try:
leastsqfit = spleastsq(eclipses.invgauss_eclipses_residual,
ebparams,
args=(stimes, smags, serrs),
full_output=True)
except Exception as e:
leastsqfit = None
# if the fit succeeded, then we can return the final parameters
if leastsqfit and leastsqfit[-1] in (1,2,3,4):
finalparams = leastsqfit[0]
covxmatrix = leastsqfit[1]
# calculate the chisq and reduced chisq
fitmags, phase, ptimes, pmags, perrs = eclipses.invgauss_eclipses_func(
finalparams,
stimes, smags, serrs
)
fitchisq = npsum(
((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)
)
fitredchisq = fitchisq/(len(pmags) - len(finalparams) - 1)
# get the residual variance and calculate the formal 1-sigma errs on the
# final parameters
residuals = leastsqfit[2]['fvec']
residualvariance = (
npsum(residuals*residuals)/(pmags.size - finalparams.size)
)
if covxmatrix is not None:
covmatrix = residualvariance*covxmatrix
stderrs = npsqrt(npdiag(covmatrix))
else:
LOGERROR('covxmatrix not available, fit probably failed!')
stderrs = None
if verbose:
LOGINFO(
'final fit done. chisq = %.5f, reduced chisq = %.5f' %
(fitchisq, fitredchisq)
)
# get the fit epoch
fperiod, fepoch = finalparams[:2]
# assemble the returndict
returndict = {
'fittype':'gaussianeb',
'fitinfo':{
'initialparams':ebparams,
'finalparams':finalparams,
'finalparamerrs':stderrs,
'leastsqfit':leastsqfit,
'fitmags':fitmags,
'fitepoch':fepoch,
},
'fitchisq':fitchisq,
'fitredchisq':fitredchisq,
'fitplotfile':None,
'magseries':{
'phase':phase,
'times':ptimes,
'mags':pmags,
'errs':perrs,
'magsarefluxes':magsarefluxes,
},
}
# make the fit plot if required
if plotfit and isinstance(plotfit, str):
make_fit_plot(phase, pmags, perrs, fitmags,
fperiod, ptimes.min(), fepoch,
plotfit,
magsarefluxes=magsarefluxes)
returndict['fitplotfile'] = plotfit
return returndict
# if the leastsq fit failed, return nothing
else:
LOGERROR('eb-fit: least-squared fit to the light curve failed!')
# assemble the returndict
returndict = {
'fittype':'gaussianeb',
'fitinfo':{
'initialparams':ebparams,
'finalparams':None,
'finalparamerrs':None,
'leastsqfit':leastsqfit,
'fitmags':None,
'fitepoch':None,
},
'fitchisq':npnan,
'fitredchisq':npnan,
'fitplotfile':None,
'magseries':{
'phase':None,
'times':None,
'mags':None,
'errs':None,
'magsarefluxes':magsarefluxes,
},
}
return returndict
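
# A runnable usage sketch for `gaussianeb_fit_magseries` on a synthetic double
# inverted-Gaussian EB light curve. All numbers below (period, depths, noise
# level) are illustrative assumptions, not taken from any real object.
def _gaussianeb_fit_example():
    '''Fits the EB model above to a fake EB light curve; a minimal sketch.'''
    import numpy as np

    rng = np.random.RandomState(42)
    period, epoch = 3.2147, 2455000.1
    times = np.sort(rng.uniform(2455000.0, 2455060.0, size=2000))
    phase = ((times - epoch)/period) % 1.0
    # eclipses make the object fainter (mags go up): primary at phase 0/1,
    # secondary at phase 0.5
    mags = (12.0 +
            0.15*np.exp(-0.5*((phase - 0.0)/0.02)**2) +
            0.15*np.exp(-0.5*((phase - 1.0)/0.02)**2) +
            0.075*np.exp(-0.5*((phase - 0.5)/0.02)**2) +
            rng.normal(0.0, 0.005, size=times.size))
    errs = np.full_like(mags, 0.005)
    ebparams = [period,  # period in days
                epoch,   # time of primary minimum
                -0.15,   # primary depth: < 0 because these are magnitudes
                0.08,    # primary eclipse duration in phase units
                0.5,     # secondary-to-primary depth ratio
                0.5]     # phase of the secondary minimum
    return gaussianeb_fit_magseries(times, mags, errs, ebparams,
                                    sigclip=10.0, magsarefluxes=False)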
|
def spline_fit_magseries(times, mags, errs, period,
knotfraction=0.01,
maxknots=30,
sigclip=30.0,
plotfit=False,
ignoreinitfail=False,
magsarefluxes=False,
verbose=True):
'''This fits a univariate cubic spline to the phased light curve.
This fit may be better than the Fourier fit for sharply variable objects,
like EBs, so can be used to distinguish them from other types of variables.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to fit a spline to.
period : float
The period to use for the spline fit.
knotfraction : float
The knot fraction is the number of internal knots to use for the
spline. A value of 0.01 (or 1%) of the total number of non-nan
observations appears to work quite well, without over-fitting. maxknots
controls the maximum number of knots that will be allowed.
maxknots : int
The maximum number of knots that will be used even if `knotfraction`
gives a value to use larger than `maxknots`. This helps dealing with
over-fitting to short time-scale variations.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or False
If this is a string, this function will make a plot for the fit to the
mag/flux time-series and writes the plot to the path specified here.
ignoreinitfail : bool
If this is True, ignores the initial failure to find a set of optimized
Fourier parameters using the global optimization function and proceeds
to do a least-squares fit anyway.
verbose : bool
If True, will indicate progress and warn of any problems.
Returns
-------
dict
This function returns a dict containing the model fit parameters, the
minimized chi-sq value and the reduced chi-sq value. The form of this
dict is mostly standardized across all functions in this module::
{
'fittype':'spline',
'fitinfo':{
'nknots': the number of knots used for the fit
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
},
'fitchisq': the minimized value of the fit's chi-sq,
'fitredchisq':the reduced chi-sq value,
'fitplotfile': the output fit plot if fitplot is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
'''
# this is required to fit the spline correctly
if errs is None:
errs = npfull_like(mags, 0.005)
# sigclip the magnitude time series
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
# get rid of zero errs
nzind = npnonzero(serrs)
stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
# phase the mag series
phase, pmags, perrs, ptimes, mintime = (
get_phased_quantities(stimes, smags, serrs, period)
)
    # now figure out the number of knots, capped at maxknots
nobs = len(phase)
nknots = int(npfloor(knotfraction*nobs))
nknots = maxknots if nknots > maxknots else nknots
splineknots = nplinspace(phase[0] + 0.01,
phase[-1] - 0.01,
num=nknots)
# NOTE: newer scipy needs x to be strictly increasing. this means we should
# filter out anything that doesn't have np.diff(phase) > 0.0
# FIXME: this needs to be tested
phase_diffs_ind = npdiff(phase) > 0.0
incphase_ind = npconcatenate((nparray([True]), phase_diffs_ind))
phase, pmags, perrs = (phase[incphase_ind],
pmags[incphase_ind],
perrs[incphase_ind])
# generate and fit the spline
spl = LSQUnivariateSpline(phase, pmags, t=splineknots, w=1.0/perrs)
# calculate the spline fit to the actual phases, the chisq and red-chisq
fitmags = spl(phase)
fitchisq = npsum(
((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)
)
fitredchisq = fitchisq/(len(pmags) - nknots - 1)
if verbose:
LOGINFO(
'spline fit done. nknots = %s, '
'chisq = %.5f, reduced chisq = %.5f' %
(nknots, fitchisq, fitredchisq)
)
# figure out the time of light curve minimum (i.e. the fit epoch)
# this is when the fit mag is maximum (i.e. the faintest)
# or if magsarefluxes = True, then this is when fit flux is minimum
if not magsarefluxes:
fitmagminind = npwhere(fitmags == npmax(fitmags))
else:
fitmagminind = npwhere(fitmags == npmin(fitmags))
if len(fitmagminind[0]) > 1:
fitmagminind = (fitmagminind[0][0],)
magseriesepoch = ptimes[fitmagminind]
# assemble the returndict
returndict = {
'fittype':'spline',
'fitinfo':{
'nknots':nknots,
'fitmags':fitmags,
'fitepoch':magseriesepoch
},
'fitchisq':fitchisq,
'fitredchisq':fitredchisq,
'fitplotfile':None,
'magseries':{
'times':ptimes,
'phase':phase,
'mags':pmags,
'errs':perrs,
'magsarefluxes':magsarefluxes
},
}
# make the fit plot if required
if plotfit and isinstance(plotfit, str):
make_fit_plot(phase, pmags, perrs, fitmags,
period, mintime, magseriesepoch,
plotfit,
magsarefluxes=magsarefluxes)
returndict['fitplotfile'] = plotfit
return returndict
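
# A minimal usage sketch for `spline_fit_magseries`. The knot settings are
# just the documented defaults made explicit; `times`, `mags`, `errs`, and
# `period` are assumed to come from earlier sigma-clipping/period-finding.
def _spline_fit_example(times, mags, errs, period):
    '''Runs the spline fit and returns the fit epoch and reduced chi-sq.'''
    spfit = spline_fit_magseries(times, mags, errs, period,
                                 knotfraction=0.01,  # 1% of points as knots
                                 maxknots=30,        # hard cap on the knots
                                 sigclip=30.0,
                                 magsarefluxes=False)
    # the epoch of minimum light from the fit is often used as the EB epoch
    return spfit['fitinfo']['fitepoch'], spfit['fitredchisq']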
|
def savgol_fit_magseries(times, mags, errs, period,
windowlength=None,
polydeg=2,
sigclip=30.0,
plotfit=False,
magsarefluxes=False,
verbose=True):
'''Fit a Savitzky-Golay filter to the magnitude/flux time series.
SG fits successive sub-sets (windows) of adjacent data points with a
    low-order polynomial via least squares. At each point, it returns the value
    of the locally fit polynomial evaluated at that point's time. This is made
significantly cheaper than *actually* performing least squares for each
window through linear algebra tricks that are possible when specifying the
window size and polynomial order beforehand. Numerical Recipes Ch 14.8
gives an overview, Eq. 14.8.6 is what Scipy has implemented.
The idea behind Savitzky-Golay is to preserve higher moments (>=2) of the
input data series than would be done by a simple moving window average.
Note that the filter assumes evenly spaced data, which magnitude time series
are not. By *pretending* the data points are evenly spaced, we introduce an
additional noise source in the function values. This is a relatively small
    noise source provided that the change in the magnitude values across the
    full width of the N=windowlength point window is < sqrt(N/2) times the
    measurement noise on a single point.
TODO:
- Find correct dof for reduced chi squared in savgol_fit_magseries
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to fit the Savitsky-Golay model to.
period : float
The period to use for the model fit.
windowlength : None or int
The length of the filter window (the number of coefficients). Must be
        either positive and odd, or None. (The window spans the point currently
        having a polynomial fit to it locally, plus the points to its left and
        right.) Bigger windows at fixed polynomial order risk lowering
the amplitude of sharp features. If None, this routine (arbitrarily)
sets the `windowlength` for phased LCs to be either the number of finite
data points divided by 300, or polydeg+3, whichever is bigger.
polydeg : int
This is the order of the polynomial used to fit the samples. Must be
less than `windowlength`. "Higher-order filters do better at preserving
feature heights and widths, but do less smoothing on broader features."
(Numerical Recipes).
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or False
If this is a string, this function will make a plot for the fit to the
mag/flux time-series and writes the plot to the path specified here.
verbose : bool
If True, will indicate progress and warn of any problems.
Returns
-------
dict
This function returns a dict containing the model fit parameters, the
minimized chi-sq value and the reduced chi-sq value. The form of this
dict is mostly standardized across all functions in this module::
{
'fittype':'savgol',
'fitinfo':{
'windowlength': the window length used for the fit,
'polydeg':the polynomial degree used for the fit,
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
},
'fitchisq': the minimized value of the fit's chi-sq,
'fitredchisq':the reduced chi-sq value,
'fitplotfile': the output fit plot if fitplot is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
# get rid of zero errs
nzind = npnonzero(serrs)
stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
phase, pmags, perrs, ptimes, mintime = (
get_phased_quantities(stimes, smags, serrs, period)
)
if not isinstance(windowlength, int):
windowlength = max(
polydeg + 3,
int(len(phase)/300)
)
if windowlength % 2 == 0:
windowlength += 1
if verbose:
LOGINFO('applying Savitzky-Golay filter with '
'window length %s and polynomial degree %s to '
'mag series with %s observations, '
'using period %.6f, folded at %.6f' % (windowlength,
polydeg,
len(pmags),
period,
mintime))
# generate the function values obtained by applying the SG filter. The
# "wrap" option is best for phase-folded LCs.
sgf = savgol_filter(pmags, windowlength, polydeg, mode='wrap')
# here the "fit" to the phases is the function produced by the
# Savitzky-Golay filter. then compute the chisq and red-chisq.
fitmags = sgf
fitchisq = npsum(
((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)
)
    # TODO: quantify dof for SG filter. the nparams estimate below is rough,
    # so the reduced chi-sq is deliberately overwritten with -99. to mark it
    # as not meaningful yet
nparams = int(len(pmags)/windowlength) * polydeg
fitredchisq = fitchisq/(len(pmags) - nparams - 1)
fitredchisq = -99.
if verbose:
LOGINFO(
'SG filter applied. chisq = %.5f, reduced chisq = %.5f' %
(fitchisq, fitredchisq)
)
# figure out the time of light curve minimum (i.e. the fit epoch)
# this is when the fit mag is maximum (i.e. the faintest)
# or if magsarefluxes = True, then this is when fit flux is minimum
if not magsarefluxes:
fitmagminind = npwhere(fitmags == npmax(fitmags))
else:
fitmagminind = npwhere(fitmags == npmin(fitmags))
if len(fitmagminind[0]) > 1:
fitmagminind = (fitmagminind[0][0],)
magseriesepoch = ptimes[fitmagminind]
# assemble the returndict
returndict = {
'fittype':'savgol',
'fitinfo':{
'windowlength':windowlength,
'polydeg':polydeg,
'fitmags':fitmags,
'fitepoch':magseriesepoch
},
'fitchisq':fitchisq,
'fitredchisq':fitredchisq,
'fitplotfile':None,
'magseries':{
'times':ptimes,
'phase':phase,
'mags':pmags,
'errs':perrs,
'magsarefluxes':magsarefluxes
}
}
# make the fit plot if required
if plotfit and isinstance(plotfit, str):
make_fit_plot(phase, pmags, perrs, fitmags,
period, mintime, magseriesepoch,
plotfit,
magsarefluxes=magsarefluxes)
returndict['fitplotfile'] = plotfit
return returndict
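
# A minimal usage sketch for `savgol_fit_magseries`. Note that the returned
# 'fitredchisq' is -99. by construction (see the dof TODO in the function),
# so only the raw chi-sq is meaningful here.
def _savgol_fit_example(times, mags, errs, period):
    '''Runs the SG filter fit and returns the fit epoch and raw chi-sq.'''
    sgfit = savgol_fit_magseries(times, mags, errs, period,
                                 windowlength=None,  # let the routine choose
                                 polydeg=2,
                                 sigclip=30.0,
                                 magsarefluxes=False)
    return sgfit['fitinfo']['fitepoch'], sgfit['fitchisq']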
|
def legendre_fit_magseries(times, mags, errs, period,
legendredeg=10,
sigclip=30.0,
plotfit=False,
magsarefluxes=False,
verbose=True):
'''Fit an arbitrary-order Legendre series, via least squares, to the
magnitude/flux time series.
This is a series of the form::
p(x) = c_0*L_0(x) + c_1*L_1(x) + c_2*L_2(x) + ... + c_n*L_n(x)
where L_i's are Legendre polynomials (also called "Legendre functions of the
first kind") and c_i's are the coefficients being fit.
This function is mainly just a wrapper to
`numpy.polynomial.legendre.Legendre.fit`.
Parameters
----------
times,mags,errs : np.array
The input mag/flux time-series to fit a Legendre series polynomial to.
period : float
The period to use for the Legendre fit.
legendredeg : int
This is `n` in the equation above, e.g. if you give `n=5`, you will
get 6 coefficients. This number should be much less than the number of
data points you are fitting.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
magsarefluxes : bool
If True, will treat the input values of `mags` as fluxes for purposes of
plotting the fit and sig-clipping.
plotfit : str or False
If this is a string, this function will make a plot for the fit to the
mag/flux time-series and writes the plot to the path specified here.
verbose : bool
If True, will indicate progress and warn of any problems.
Returns
-------
dict
This function returns a dict containing the model fit parameters, the
minimized chi-sq value and the reduced chi-sq value. The form of this
dict is mostly standardized across all functions in this module::
{
'fittype':'legendre',
'fitinfo':{
'legendredeg': the Legendre polynomial degree used,
'fitmags': the model fit mags,
'fitepoch': the epoch of minimum light for the fit,
},
'fitchisq': the minimized value of the fit's chi-sq,
'fitredchisq':the reduced chi-sq value,
'fitplotfile': the output fit plot if fitplot is not None,
'magseries':{
'times':input times in phase order of the model,
'phase':the phases of the model mags,
'mags':input mags/fluxes in the phase order of the model,
'errs':errs in the phase order of the model,
'magsarefluxes':input value of magsarefluxes kwarg
}
}
'''
stimes, smags, serrs = sigclip_magseries(times, mags, errs,
sigclip=sigclip,
magsarefluxes=magsarefluxes)
# get rid of zero errs
nzind = npnonzero(serrs)
stimes, smags, serrs = stimes[nzind], smags[nzind], serrs[nzind]
phase, pmags, perrs, ptimes, mintime = (
get_phased_quantities(stimes, smags, serrs, period)
)
if verbose:
LOGINFO('fitting Legendre series with '
'maximum Legendre polynomial order %s to '
'mag series with %s observations, '
'using period %.6f, folded at %.6f' % (legendredeg,
len(pmags),
period,
mintime))
# Least squares fit of Legendre polynomial series to the data. The window
# and domain (see "Using the Convenience Classes" in the numpy
    # documentation) are handled automatically, scaling the phases to a minimal
# domain in [-1,1], in which Legendre polynomials are a complete basis.
p = Legendre.fit(phase, pmags, legendredeg)
coeffs = p.coef
fitmags = p(phase)
# Now compute the chisq and red-chisq.
fitchisq = npsum(
((fitmags - pmags)*(fitmags - pmags)) / (perrs*perrs)
)
nparams = legendredeg + 1
fitredchisq = fitchisq/(len(pmags) - nparams - 1)
if verbose:
LOGINFO(
'Legendre fit done. chisq = %.5f, reduced chisq = %.5f' %
(fitchisq, fitredchisq)
)
# figure out the time of light curve minimum (i.e. the fit epoch)
# this is when the fit mag is maximum (i.e. the faintest)
# or if magsarefluxes = True, then this is when fit flux is minimum
if not magsarefluxes:
fitmagminind = npwhere(fitmags == npmax(fitmags))
else:
fitmagminind = npwhere(fitmags == npmin(fitmags))
if len(fitmagminind[0]) > 1:
fitmagminind = (fitmagminind[0][0],)
magseriesepoch = ptimes[fitmagminind]
# assemble the returndict
returndict = {
'fittype':'legendre',
'fitinfo':{
'legendredeg':legendredeg,
'fitmags':fitmags,
'fitepoch':magseriesepoch,
'finalparams':coeffs,
},
'fitchisq':fitchisq,
'fitredchisq':fitredchisq,
'fitplotfile':None,
'magseries':{
'times':ptimes,
'phase':phase,
'mags':pmags,
'errs':perrs,
'magsarefluxes':magsarefluxes
}
}
# make the fit plot if required
if plotfit and isinstance(plotfit, str):
make_fit_plot(phase, pmags, perrs, fitmags,
period, mintime, magseriesepoch,
plotfit,
magsarefluxes=magsarefluxes)
returndict['fitplotfile'] = plotfit
return returndict
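
# A minimal usage sketch for `legendre_fit_magseries`. With legendredeg=10,
# the fit returns 11 coefficients (the c_i in the series above), which this
# sketch pulls out of the standardized returndict.
def _legendre_fit_example(times, mags, errs, period):
    '''Runs the Legendre fit; returns the coefficients and reduced chi-sq.'''
    lfit = legendre_fit_magseries(times, mags, errs, period,
                                  legendredeg=10,
                                  sigclip=30.0,
                                  magsarefluxes=False)
    return lfit['fitinfo']['finalparams'], lfit['fitredchisq']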
|
def update_checkplotdict_nbrlcs(
checkplotdict,
timecol, magcol, errcol,
lcformat='hat-sql',
lcformatdir=None,
verbose=True,
):
'''For all neighbors in a checkplotdict, make LCs and phased LCs.
Parameters
----------
checkplotdict : dict
This is the checkplot to process. The light curves for the neighbors to
the object here will be extracted from the stored file paths, and this
function will make plots of these time-series. If the object has 'best'
periods and epochs generated by period-finder functions in this
checkplotdict, phased light curve plots of each neighbor will be made
using these to check the effects of blending.
timecol,magcol,errcol : str
The timecol, magcol, and errcol keys used to generate this object's
checkplot. This is used to extract the correct times-series from the
neighbors' light curves.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
Returns
-------
dict
        The input checkplotdict is returned with the neighbor light curve plots
added in.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return checkplotdict
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return checkplotdict
if not ('neighbors' in checkplotdict and
checkplotdict['neighbors'] and
len(checkplotdict['neighbors']) > 0):
LOGERROR('no neighbors for %s, not updating...' %
(checkplotdict['objectid']))
return checkplotdict
# get our object's magkeys to compare to the neighbor
objmagkeys = {}
# handle diff generations of checkplots
if 'available_bands' in checkplotdict['objectinfo']:
mclist = checkplotdict['objectinfo']['available_bands']
else:
mclist = ('bmag','vmag','rmag','imag','jmag','hmag','kmag',
'sdssu','sdssg','sdssr','sdssi','sdssz')
for mc in mclist:
if (mc in checkplotdict['objectinfo'] and
checkplotdict['objectinfo'][mc] is not None and
np.isfinite(checkplotdict['objectinfo'][mc])):
objmagkeys[mc] = checkplotdict['objectinfo'][mc]
# if there are actually neighbors, go through them in order
for nbr in checkplotdict['neighbors']:
objectid, lcfpath = (nbr['objectid'],
nbr['lcfpath'])
# get the light curve
if not os.path.exists(lcfpath):
LOGERROR('objectid: %s, neighbor: %s, '
'lightcurve: %s not found, skipping...' %
(checkplotdict['objectid'], objectid, lcfpath))
continue
lcdict = readerfunc(lcfpath)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
# 0. get this neighbor's magcols and get the magdiff and colordiff
# between it and the object
nbrmagkeys = {}
for mc in objmagkeys:
if (('objectinfo' in lcdict) and
(isinstance(lcdict['objectinfo'], dict)) and
(mc in lcdict['objectinfo']) and
(lcdict['objectinfo'][mc] is not None) and
(np.isfinite(lcdict['objectinfo'][mc]))):
nbrmagkeys[mc] = lcdict['objectinfo'][mc]
# now calculate the magdiffs
magdiffs = {}
for omc in objmagkeys:
if omc in nbrmagkeys:
magdiffs[omc] = objmagkeys[omc] - nbrmagkeys[omc]
# calculate colors and colordiffs
colordiffs = {}
# generate the list of colors to get
# NOTE: here, we don't really bother with new/old gen checkplots
# maybe change this later to handle arbitrary colors
for ctrio in (['bmag','vmag','bvcolor'],
['vmag','kmag','vkcolor'],
['jmag','kmag','jkcolor'],
['sdssi','jmag','ijcolor'],
['sdssg','kmag','gkcolor'],
['sdssg','sdssr','grcolor']):
m1, m2, color = ctrio
if (m1 in objmagkeys and
m2 in objmagkeys and
m1 in nbrmagkeys and
m2 in nbrmagkeys):
objcolor = objmagkeys[m1] - objmagkeys[m2]
nbrcolor = nbrmagkeys[m1] - nbrmagkeys[m2]
colordiffs[color] = objcolor - nbrcolor
# finally, add all the color and magdiff info to the nbr dict
nbr.update({'magdiffs':magdiffs,
'colordiffs':colordiffs})
#
# process magcols
#
# normalize using the special function if specified
if normfunc is not None:
lcdict = normfunc(lcdict)
try:
# get the times, mags, and errs
# dereference the columns and get them from the lcdict
if '.' in timecol:
timecolget = timecol.split('.')
else:
timecolget = [timecol]
times = _dict_get(lcdict, timecolget)
if '.' in magcol:
magcolget = magcol.split('.')
else:
magcolget = [magcol]
mags = _dict_get(lcdict, magcolget)
if '.' in errcol:
errcolget = errcol.split('.')
else:
errcolget = [errcol]
errs = _dict_get(lcdict, errcolget)
except KeyError:
LOGERROR('LC for neighbor: %s (target object: %s) does not '
'have one or more of the required columns: %s, '
'skipping...' %
(objectid, checkplotdict['objectid'],
', '.join([timecol, magcol, errcol])))
continue
# filter the input times, mags, errs; do sigclipping and normalization
stimes, smags, serrs = sigclip_magseries(times,
mags,
errs,
magsarefluxes=magsarefluxes,
sigclip=4.0)
# normalize here if not using special normalization
if normfunc is None:
ntimes, nmags = normalize_magseries(
stimes, smags,
magsarefluxes=magsarefluxes
)
xtimes, xmags, xerrs = ntimes, nmags, serrs
else:
xtimes, xmags, xerrs = stimes, smags, serrs
# check if this neighbor has enough finite points in its LC
# fail early if not enough light curve points
if ((xtimes is None) or (xmags is None) or (xerrs is None) or
(xtimes.size < 49) or (xmags.size < 49) or (xerrs.size < 49)):
LOGERROR("one or more of times, mags, errs appear to be None "
"after sig-clipping. are the measurements all nan? "
"can't make neighbor light curve plots "
"for target: %s, neighbor: %s, neighbor LC: %s" %
(checkplotdict['objectid'],
nbr['objectid'],
nbr['lcfpath']))
continue
#
# now we can start doing stuff if everything checks out
#
# make an unphased mag-series plot
nbrdict = _pkl_magseries_plot(xtimes,
xmags,
xerrs,
magsarefluxes=magsarefluxes)
# update the nbr
nbr.update(nbrdict)
# for each lspmethod in the checkplot, make a corresponding plot for
# this neighbor
# figure out the period finder methods present
if 'pfmethods' in checkplotdict:
pfmethods = checkplotdict['pfmethods']
else:
pfmethods = []
for cpkey in checkplotdict:
for pfkey in PFMETHODS:
if pfkey in cpkey:
pfmethods.append(pfkey)
for lspt in pfmethods:
# initialize this lspmethod entry
nbr[lspt] = {}
# we only care about the best period and its options
operiod, oepoch = (checkplotdict[lspt][0]['period'],
checkplotdict[lspt][0]['epoch'])
(ophasewrap, ophasesort, ophasebin,
ominbinelems, oplotxlim) = (
checkplotdict[lspt][0]['phasewrap'],
checkplotdict[lspt][0]['phasesort'],
checkplotdict[lspt][0]['phasebin'],
checkplotdict[lspt][0]['minbinelems'],
checkplotdict[lspt][0]['plotxlim'],
)
# make the phasedlc plot for this period
nbr = _pkl_phased_magseries_plot(
nbr,
lspt.split('-')[1], # this splits '<pfindex>-<pfmethod>'
0,
xtimes, xmags, xerrs,
operiod, oepoch,
phasewrap=ophasewrap,
phasesort=ophasesort,
phasebin=ophasebin,
minbinelems=ominbinelems,
plotxlim=oplotxlim,
magsarefluxes=magsarefluxes,
verbose=verbose,
override_pfmethod=lspt
)
# at this point, this neighbor's dict should be up to date with all
# info, magseries plot, and all phased LC plots
# return the updated checkplotdict
return checkplotdict
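
# For reference, the dotted-column dereferencing used above (e.g. a timecol
# like 'aperturephotometry.ap1.times' split on '.') amounts to a nested-dict
# walk. A minimal sketch of what `_dict_get` is assumed to do:
def _dict_get_sketch(d, keylist):
    '''Walks the nested dict `d` following `keylist` and returns the value at
    the end, e.g. _dict_get_sketch(lcdict, ['ap1', 'times']).
    '''
    for key in keylist:
        d = d[key]
    return d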
|
def runcp(
pfpickle,
outdir,
lcbasedir,
lcfname=None,
cprenorm=False,
lclistpkl=None,
nbrradiusarcsec=60.0,
maxnumneighbors=5,
makeneighborlcs=True,
fast_mode=False,
gaia_max_timeout=60.0,
gaia_mirror=None,
xmatchinfo=None,
xmatchradiusarcsec=3.0,
minobservations=99,
sigclip=10.0,
lcformat='hat-sql',
lcformatdir=None,
timecols=None,
magcols=None,
errcols=None,
skipdone=False,
done_callback=None,
done_callback_args=None,
done_callback_kwargs=None
):
'''This makes a checkplot pickle for the given period-finding result pickle
produced by `lcproc.periodfinding.runpf`.
Parameters
----------
pfpickle : str or None
This is the filename of the period-finding result pickle file created by
`lcproc.periodfinding.runpf`. If this is None, the checkplot will be
made anyway, but no phased LC information will be collected into the
output checkplot pickle. This can be useful for just collecting GAIA and
other external information and making LC plots for an object.
outdir : str
This is the directory to which the output checkplot pickle will be
written.
lcbasedir : str
The base directory where this function will look for the light curve
file associated with the object in the input period-finding result
pickle file.
lcfname : str or None
This is usually None because we'll get the path to the light curve
associated with this period-finding pickle from the pickle itself. If
`pfpickle` is None, however, this function will use `lcfname` to look up
the light curve file instead. If both are provided, the value of
`lcfname` takes precedence.
Providing the light curve file name in this kwarg is useful when you're
making checkplots directly from light curve files and not including
period-finder results (perhaps because period-finding takes a long time
for large collections of LCs).
cprenorm : bool
Set this to True if the light curves should be renormalized by
`checkplot.checkplot_pickle`. This is set to False by default because we
do our own normalization in this function using the light curve's
registered normalization function and pass the normalized times, mags,
errs to the `checkplot.checkplot_pickle` function.
lclistpkl : str or dict
This is either the filename of a pickle or the actual dict produced by
lcproc.make_lclist. This is used to gather neighbor information.
nbrradiusarcsec : float
The radius in arcseconds to use for a search conducted around the
coordinates of this object to look for any potential confusion and
blending of variability amplitude caused by their proximity.
maxnumneighbors : int
The maximum number of neighbors that will have their light curves and
magnitudes noted in this checkplot as potential blends with the target
object.
makeneighborlcs : bool
If True, will make light curve and phased light curve plots for all
neighbors to the current object found in the catalog passed in using
`lclistpkl`.
fast_mode : bool or float
This runs the external catalog operations in a "fast" mode, with short
timeouts and not trying to hit external catalogs that take a long time
to respond.
If this is set to True, the default settings for the external requests
will then become::
skyview_lookup = False
skyview_timeout = 10.0
skyview_retry_failed = False
dust_timeout = 10.0
gaia_submit_timeout = 7.0
gaia_max_timeout = 10.0
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
If this is a float, will run in "fast" mode with the provided timeout
value in seconds and the following settings::
skyview_lookup = True
skyview_timeout = fast_mode
skyview_retry_failed = False
dust_timeout = fast_mode
gaia_submit_timeout = 0.66*fast_mode
gaia_max_timeout = fast_mode
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str or None
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
xmatchinfo : str or dict
This is either the xmatch dict produced by the function
`load_xmatch_external_catalogs` above, or the path to the xmatch info
pickle file produced by that function.
xmatchradiusarcsec : float
This is the cross-matching radius to use in arcseconds.
minobservations : int
        The minimum number of observations the input object's mag/flux
        time-series must have for this function to plot its light curve and
        phased light curve. With fewer than this, no light curves will be
plotted, but the checkplotdict will still contain all of the other
information.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
timecols : list of str or None
The timecol keys to use from the lcdict in generating this checkplot.
magcols : list of str or None
The magcol keys to use from the lcdict in generating this checkplot.
errcols : list of str or None
The errcol keys to use from the lcdict in generating this checkplot.
skipdone : bool
This indicates if this function will skip creating checkplots that
already exist corresponding to the current `objectid` and `magcol`. If
        `skipdone` is set to True, existing checkplots will be skipped.
done_callback : Python function or None
This is used to provide a function to execute after the checkplot
pickles are generated. This is useful if you want to stream the results
of checkplot making to some other process, e.g. directly running an
ingestion into an LCC-Server collection. The function will always get
the list of the generated checkplot pickles as its first arg, and all of
the kwargs for runcp in the kwargs dict. Additional args and kwargs can
be provided by giving a list in the `done_callbacks_args` kwarg and a
dict in the `done_callbacks_kwargs` kwarg.
NOTE: the function you pass in here should be pickleable by normal
Python if you want to use it with the parallel_cp and parallel_cp_lcdir
functions below.
done_callback_args : tuple or None
If not None, contains any args to pass into the `done_callback`
function.
done_callback_kwargs : dict or None
If not None, contains any kwargs to pass into the `done_callback`
function.
Returns
-------
list of str
This returns a list of checkplot pickle filenames with one element for
each (timecol, magcol, errcol) combination provided in the default
lcformat config or in the timecols, magcols, errcols kwargs.
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(fileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
if pfpickle is not None:
if pfpickle.endswith('.gz'):
infd = gzip.open(pfpickle,'rb')
else:
infd = open(pfpickle,'rb')
pfresults = pickle.load(infd)
infd.close()
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
    if (lcfname is not None) and os.path.exists(lcfname):
lcfpath = lcfname
objectid = None
else:
if pfpickle is not None:
objectid = pfresults['objectid']
lcfbasename = pfresults['lcfbasename']
lcfsearchpath = os.path.join(lcbasedir, lcfbasename)
if os.path.exists(lcfsearchpath):
lcfpath = lcfsearchpath
elif lcfname is not None and os.path.exists(lcfname):
lcfpath = lcfname
else:
LOGERROR('could not find light curve for '
'pfresult %s, objectid %s, '
'used search path: %s, lcfname kwarg: %s' %
(pfpickle, objectid, lcfsearchpath, lcfname))
return None
else:
LOGERROR("no light curve provided and pfpickle is None, "
"can't continue")
return None
lcdict = readerfunc(lcfpath)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
# get the object ID from the light curve if pfpickle is None or we used
# lcfname directly
if objectid is None:
if 'objectid' in lcdict:
objectid = lcdict['objectid']
elif ('objectid' in lcdict['objectinfo'] and
lcdict['objectinfo']['objectid']):
objectid = lcdict['objectinfo']['objectid']
elif 'hatid' in lcdict['objectinfo'] and lcdict['objectinfo']['hatid']:
objectid = lcdict['objectinfo']['hatid']
else:
objectid = uuid.uuid4().hex[:5]
LOGWARNING('no objectid found for this object, '
'generated a random one: %s' % objectid)
# normalize using the special function if specified
if normfunc is not None:
lcdict = normfunc(lcdict)
cpfs = []
for tcol, mcol, ecol in zip(timecols, magcols, errcols):
# dereference the columns and get them from the lcdict
if '.' in tcol:
tcolget = tcol.split('.')
else:
tcolget = [tcol]
times = _dict_get(lcdict, tcolget)
if '.' in mcol:
mcolget = mcol.split('.')
else:
mcolget = [mcol]
mags = _dict_get(lcdict, mcolget)
if '.' in ecol:
ecolget = ecol.split('.')
else:
ecolget = [ecol]
errs = _dict_get(lcdict, ecolget)
# get all the period-finder results from this magcol
if pfpickle is not None:
if 'pfmethods' in pfresults[mcol]:
pflist = [
pfresults[mcol][x] for x in
pfresults[mcol]['pfmethods'] if
len(pfresults[mcol][x].keys()) > 0
]
else:
pflist = []
for pfm in PFMETHODS:
if (pfm in pfresults[mcol] and
len(pfresults[mcol][pfm].keys()) > 0):
pflist.append(pfresults[mcol][pfm])
# special case of generating a checkplot with no phased LCs
else:
pflist = []
# generate the output filename
outfile = os.path.join(outdir,
'checkplot-%s-%s.pkl' % (
squeeze(objectid).replace(' ','-'),
mcol
))
if skipdone and os.path.exists(outfile):
LOGWARNING('skipdone = True and '
'checkplot for this objectid/magcol combination '
'exists already: %s, skipping...' % outfile)
            cpfs.append(outfile)
            continue
# make sure the checkplot has a valid objectid
if 'objectid' not in lcdict['objectinfo']:
lcdict['objectinfo']['objectid'] = objectid
# normalize here if not using special normalization
if normfunc is None:
ntimes, nmags = normalize_magseries(
times, mags,
magsarefluxes=magsarefluxes
)
xtimes, xmags, xerrs = ntimes, nmags, errs
else:
xtimes, xmags, xerrs = times, mags, errs
# generate the checkplotdict
cpd = checkplot_dict(
pflist,
xtimes, xmags, xerrs,
objectinfo=lcdict['objectinfo'],
gaia_max_timeout=gaia_max_timeout,
gaia_mirror=gaia_mirror,
lclistpkl=lclistpkl,
nbrradiusarcsec=nbrradiusarcsec,
maxnumneighbors=maxnumneighbors,
xmatchinfo=xmatchinfo,
xmatchradiusarcsec=xmatchradiusarcsec,
sigclip=sigclip,
mindet=minobservations,
verbose=False,
fast_mode=fast_mode,
magsarefluxes=magsarefluxes,
normto=cprenorm # we've done the renormalization already, so this
# should be False by default. just messes up the
# plots otherwise, destroying LPVs in particular
)
if makeneighborlcs:
# include any neighbor information as well
cpdupdated = update_checkplotdict_nbrlcs(
cpd,
tcol, mcol, ecol,
lcformat=lcformat,
verbose=False
)
else:
cpdupdated = cpd
        # write the updated checkplot dict to disk
cpf = _write_checkplot_picklefile(
cpdupdated,
outfile=outfile,
protocol=pickle.HIGHEST_PROTOCOL,
outgzip=False
)
cpfs.append(cpf)
#
# done with checkplot making
#
LOGINFO('done with %s -> %s' % (objectid, repr(cpfs)))
if done_callback is not None:
if (done_callback_args is not None and
isinstance(done_callback_args,list)):
done_callback_args = tuple([cpfs] + done_callback_args)
else:
done_callback_args = (cpfs,)
if (done_callback_kwargs is not None and
isinstance(done_callback_kwargs, dict)):
done_callback_kwargs.update(dict(
fast_mode=fast_mode,
lcfname=lcfname,
cprenorm=cprenorm,
lclistpkl=lclistpkl,
nbrradiusarcsec=nbrradiusarcsec,
maxnumneighbors=maxnumneighbors,
gaia_max_timeout=gaia_max_timeout,
gaia_mirror=gaia_mirror,
xmatchinfo=xmatchinfo,
xmatchradiusarcsec=xmatchradiusarcsec,
minobservations=minobservations,
sigclip=sigclip,
lcformat=lcformat,
fileglob=fileglob,
readerfunc=readerfunc,
normfunc=normfunc,
magsarefluxes=magsarefluxes,
timecols=timecols,
magcols=magcols,
errcols=errcols,
skipdone=skipdone,
))
else:
done_callback_kwargs = dict(
fast_mode=fast_mode,
lcfname=lcfname,
cprenorm=cprenorm,
lclistpkl=lclistpkl,
nbrradiusarcsec=nbrradiusarcsec,
maxnumneighbors=maxnumneighbors,
gaia_max_timeout=gaia_max_timeout,
gaia_mirror=gaia_mirror,
xmatchinfo=xmatchinfo,
xmatchradiusarcsec=xmatchradiusarcsec,
minobservations=minobservations,
sigclip=sigclip,
lcformat=lcformat,
fileglob=fileglob,
readerfunc=readerfunc,
normfunc=normfunc,
magsarefluxes=magsarefluxes,
timecols=timecols,
magcols=magcols,
errcols=errcols,
skipdone=skipdone,
)
# fire the callback
try:
done_callback(*done_callback_args, **done_callback_kwargs)
LOGINFO('callback fired successfully for %r' % cpfs)
except Exception as e:
LOGEXCEPTION('callback function failed for %r' % cpfs)
# at the end, return the list of checkplot files generated
return cpfs
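
# A minimal usage sketch for `runcp`. The paths, pickle name, and output
# directories below are illustrative placeholders, not real files.
def _runcp_example():
    '''Makes checkplot pickles for one period-finding result.'''
    cpfs = runcp(
        'periodfinding-HAT-123-0001234.pkl',  # from lcproc.periodfinding.runpf
        '/path/to/checkplot-output',
        '/path/to/lightcurves',
        lcformat='hat-sql',
        minobservations=99,
        skipdone=True
    )
    # one checkplot pickle per (timecol, magcol, errcol) combination
    return cpfs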
|
def runcp_worker(task):
'''
This is the worker for running checkplots.
Parameters
----------
task : tuple
This is of the form: (pfpickle, outdir, lcbasedir, kwargs).
Returns
-------
list of str
The list of checkplot pickles returned by the `runcp` function.
'''
pfpickle, outdir, lcbasedir, kwargs = task
try:
return runcp(pfpickle, outdir, lcbasedir, **kwargs)
except Exception as e:
        LOGEXCEPTION('could not make checkplots for %s: %s' % (pfpickle, e))
return None
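
# Shows how a `runcp_worker` task tuple is packed; the paths and kwargs below
# are placeholders.
def _runcp_worker_example():
    '''Packs and runs a single checkplot-making task.'''
    task = ('periodfinding-HAT-123-0001234.pkl',  # pfpickle
            '/path/to/checkplot-output',          # outdir
            '/path/to/lightcurves',               # lcbasedir
            {'lcformat': 'hat-sql',
             'skipdone': True})                   # kwargs passed to runcp
    return runcp_worker(task)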
|
def parallel_cp(
pfpicklelist,
outdir,
lcbasedir,
fast_mode=False,
lcfnamelist=None,
cprenorm=False,
lclistpkl=None,
gaia_max_timeout=60.0,
gaia_mirror=None,
nbrradiusarcsec=60.0,
maxnumneighbors=5,
makeneighborlcs=True,
xmatchinfo=None,
xmatchradiusarcsec=3.0,
sigclip=10.0,
minobservations=99,
lcformat='hat-sql',
lcformatdir=None,
timecols=None,
magcols=None,
errcols=None,
skipdone=False,
done_callback=None,
done_callback_args=None,
done_callback_kwargs=None,
liststartindex=None,
maxobjects=None,
nworkers=NCPUS,
):
'''This drives the parallel execution of `runcp` for a list of periodfinding
result pickles.
Parameters
----------
pfpicklelist : list of str or list of Nones
This is the list of the filenames of the period-finding result pickles
to process. To make checkplots using the light curves directly, set this
to a list of Nones with the same length as the list of light curve files
that you provide in `lcfnamelist`.
outdir : str
The directory the checkplot pickles will be written to.
lcbasedir : str
The base directory that this function will look in to find the light
curves pointed to by the period-finding result files. If you're using
`lcfnamelist` to provide a list of light curve filenames directly, this
arg is ignored.
lcfnamelist : list of str or None
If this is provided, it must be a list of the input light curve
filenames to process. These can either be associated with each input
period-finder result pickle, or can be provided standalone to make
checkplots without phased LC plots in them. In the second case, you must
set `pfpicklelist` to a list of Nones that matches the length of
`lcfnamelist`.
cprenorm : bool
Set this to True if the light curves should be renormalized by
`checkplot.checkplot_pickle`. This is set to False by default because we
do our own normalization in this function using the light curve's
registered normalization function and pass the normalized times, mags,
errs to the `checkplot.checkplot_pickle` function.
lclistpkl : str or dict
This is either the filename of a pickle or the actual dict produced by
lcproc.make_lclist. This is used to gather neighbor information.
nbrradiusarcsec : float
The radius in arcseconds to use for a search conducted around the
coordinates of this object to look for any potential confusion and
blending of variability amplitude caused by their proximity.
maxnumneighbors : int
The maximum number of neighbors that will have their light curves and
magnitudes noted in this checkplot as potential blends with the target
object.
makeneighborlcs : bool
If True, will make light curve and phased light curve plots for all
neighbors found in the object collection for each input object.
fast_mode : bool or float
This runs the external catalog operations in a "fast" mode, with short
timeouts and not trying to hit external catalogs that take a long time
to respond.
If this is set to True, the default settings for the external requests
will then become::
skyview_lookup = False
skyview_timeout = 10.0
skyview_retry_failed = False
dust_timeout = 10.0
gaia_submit_timeout = 7.0
gaia_max_timeout = 10.0
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
If this is a float, will run in "fast" mode with the provided timeout
value in seconds and the following settings::
skyview_lookup = True
skyview_timeout = fast_mode
skyview_retry_failed = False
dust_timeout = fast_mode
gaia_submit_timeout = 0.66*fast_mode
gaia_max_timeout = fast_mode
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str or None
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
xmatchinfo : str or dict
This is either the xmatch dict produced by the function
`load_xmatch_external_catalogs` above, or the path to the xmatch info
pickle file produced by that function.
xmatchradiusarcsec : float
This is the cross-matching radius to use in arcseconds.
minobservations : int
        The minimum number of observations the input object's mag/flux
        time-series must have for this function to plot its light curve and
        phased light curve. With fewer than this, no light curves will be
plotted, but the checkplotdict will still contain all of the other
information.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
timecols : list of str or None
The timecol keys to use from the lcdict in generating this checkplot.
magcols : list of str or None
The magcol keys to use from the lcdict in generating this checkplot.
errcols : list of str or None
The errcol keys to use from the lcdict in generating this checkplot.
skipdone : bool
This indicates if this function will skip creating checkplots that
already exist corresponding to the current `objectid` and `magcol`. If
        `skipdone` is set to True, existing checkplots will be skipped.
done_callback : Python function or None
This is used to provide a function to execute after the checkplot
pickles are generated. This is useful if you want to stream the results
of checkplot making to some other process, e.g. directly running an
ingestion into an LCC-Server collection. The function will always get
the list of the generated checkplot pickles as its first arg, and all of
the kwargs for runcp in the kwargs dict. Additional args and kwargs can
be provided by giving a list in the `done_callbacks_args` kwarg and a
dict in the `done_callbacks_kwargs` kwarg.
NOTE: the function you pass in here should be pickleable by normal
Python if you want to use it with the parallel_cp and parallel_cp_lcdir
functions below.
done_callback_args : tuple or None
If not None, contains any args to pass into the `done_callback`
function.
done_callback_kwargs : dict or None
If not None, contains any kwargs to pass into the `done_callback`
function.
liststartindex : int
The index of the `pfpicklelist` (and `lcfnamelist` if provided) to start
working at.
maxobjects : int
The maximum number of objects to process in this run. Use this with
`liststartindex` to effectively distribute working on a large list of
input period-finding result pickles (and light curves if `lcfnamelist`
is also provided) over several sessions or machines.
nworkers : int
The number of parallel workers that will work on the checkplot
generation process.
Returns
-------
dict
This returns a dict with keys = input period-finding pickles and vals =
list of the corresponding checkplot pickles produced.
'''
# work around the Darwin segfault after fork if no network activity in
# main thread bug: https://bugs.python.org/issue30385#msg293958
if sys.platform == 'darwin':
import requests
requests.get('http://captive.apple.com/hotspot-detect.html')
if not os.path.exists(outdir):
os.mkdir(outdir)
# handle the start and end indices
if (liststartindex is not None) and (maxobjects is None):
pfpicklelist = pfpicklelist[liststartindex:]
if lcfnamelist is not None:
lcfnamelist = lcfnamelist[liststartindex:]
elif (liststartindex is None) and (maxobjects is not None):
pfpicklelist = pfpicklelist[:maxobjects]
if lcfnamelist is not None:
lcfnamelist = lcfnamelist[:maxobjects]
elif (liststartindex is not None) and (maxobjects is not None):
pfpicklelist = (
pfpicklelist[liststartindex:liststartindex+maxobjects]
)
if lcfnamelist is not None:
lcfnamelist = lcfnamelist[liststartindex:liststartindex+maxobjects]
# if the lcfnamelist is not provided, create a dummy
if lcfnamelist is None:
lcfnamelist = [None]*len(pfpicklelist)
tasklist = [(x, outdir, lcbasedir,
{'lcformat':lcformat,
'lcformatdir':lcformatdir,
'lcfname':y,
'timecols':timecols,
'magcols':magcols,
'errcols':errcols,
'lclistpkl':lclistpkl,
'gaia_max_timeout':gaia_max_timeout,
'gaia_mirror':gaia_mirror,
'nbrradiusarcsec':nbrradiusarcsec,
'maxnumneighbors':maxnumneighbors,
'makeneighborlcs':makeneighborlcs,
'xmatchinfo':xmatchinfo,
'xmatchradiusarcsec':xmatchradiusarcsec,
'sigclip':sigclip,
'minobservations':minobservations,
'skipdone':skipdone,
'cprenorm':cprenorm,
'fast_mode':fast_mode,
'done_callback':done_callback,
'done_callback_args':done_callback_args,
'done_callback_kwargs':done_callback_kwargs}) for
x,y in zip(pfpicklelist, lcfnamelist)]
resultfutures = []
results = []
with ProcessPoolExecutor(max_workers=nworkers) as executor:
resultfutures = executor.map(runcp_worker, tasklist)
results = [x for x in resultfutures]
executor.shutdown()
return results
def parallel_cp_pfdir(pfpickledir,
outdir,
lcbasedir,
pfpickleglob='periodfinding-*.pkl*',
lclistpkl=None,
cprenorm=False,
nbrradiusarcsec=60.0,
maxnumneighbors=5,
makeneighborlcs=True,
fast_mode=False,
gaia_max_timeout=60.0,
gaia_mirror=None,
xmatchinfo=None,
xmatchradiusarcsec=3.0,
minobservations=99,
sigclip=10.0,
lcformat='hat-sql',
lcformatdir=None,
timecols=None,
magcols=None,
errcols=None,
skipdone=False,
done_callback=None,
done_callback_args=None,
done_callback_kwargs=None,
maxobjects=None,
nworkers=32):
'''This drives the parallel execution of `runcp` for a directory of
    period-finding pickles.
Parameters
----------
pfpickledir : str
This is the directory containing all of the period-finding pickles to
process.
outdir : str
The directory the checkplot pickles will be written to.
lcbasedir : str
The base directory that this function will look in to find the light
curves pointed to by the period-finding result files. If you're using
`lcfnamelist` to provide a list of light curve filenames directly, this
arg is ignored.
    pfpickleglob : str
This is a UNIX file glob to select period-finding result pickles in the
specified `pfpickledir`.
lclistpkl : str or dict
This is either the filename of a pickle or the actual dict produced by
lcproc.make_lclist. This is used to gather neighbor information.
cprenorm : bool
Set this to True if the light curves should be renormalized by
`checkplot.checkplot_pickle`. This is set to False by default because we
do our own normalization in this function using the light curve's
registered normalization function and pass the normalized times, mags,
errs to the `checkplot.checkplot_pickle` function.
nbrradiusarcsec : float
The radius in arcseconds to use for a search conducted around the
coordinates of this object to look for any potential confusion and
blending of variability amplitude caused by their proximity.
maxnumneighbors : int
The maximum number of neighbors that will have their light curves and
magnitudes noted in this checkplot as potential blends with the target
object.
makeneighborlcs : bool
If True, will make light curve and phased light curve plots for all
neighbors found in the object collection for each input object.
fast_mode : bool or float
This runs the external catalog operations in a "fast" mode, with short
timeouts and not trying to hit external catalogs that take a long time
to respond.
If this is set to True, the default settings for the external requests
will then become::
skyview_lookup = False
skyview_timeout = 10.0
skyview_retry_failed = False
dust_timeout = 10.0
gaia_submit_timeout = 7.0
gaia_max_timeout = 10.0
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
If this is a float, will run in "fast" mode with the provided timeout
value in seconds and the following settings::
skyview_lookup = True
skyview_timeout = fast_mode
skyview_retry_failed = False
dust_timeout = fast_mode
gaia_submit_timeout = 0.66*fast_mode
gaia_max_timeout = fast_mode
gaia_submit_tries = 2
complete_query_later = False
search_simbad = False
gaia_max_timeout : float
Sets the timeout in seconds to use when waiting for the GAIA service to
respond to our request for the object's information. Note that if
`fast_mode` is set, this is ignored.
gaia_mirror : str or None
This sets the GAIA mirror to use. This is a key in the
`services.gaia.GAIA_URLS` dict which defines the URLs to hit for each
mirror.
xmatchinfo : str or dict
This is either the xmatch dict produced by the function
`load_xmatch_external_catalogs` above, or the path to the xmatch info
pickle file produced by that function.
xmatchradiusarcsec : float
This is the cross-matching radius to use in arcseconds.
minobservations : int
        The minimum number of observations the input object's mag/flux
        time-series must have for this function to plot its light curve and
        phased light curve. If the object has fewer than this number, no
        light curves will be
plotted, but the checkplotdict will still contain all of the other
information.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
timecols : list of str or None
The timecol keys to use from the lcdict in generating this checkplot.
magcols : list of str or None
The magcol keys to use from the lcdict in generating this checkplot.
errcols : list of str or None
The errcol keys to use from the lcdict in generating this checkplot.
skipdone : bool
        If True, this function will skip creating checkplots that already
        exist and correspond to the current `objectid` and `magcol`.
done_callback : Python function or None
This is used to provide a function to execute after the checkplot
pickles are generated. This is useful if you want to stream the results
of checkplot making to some other process, e.g. directly running an
ingestion into an LCC-Server collection. The function will always get
the list of the generated checkplot pickles as its first arg, and all of
        the kwargs for `runcp` in the kwargs dict. Additional args and kwargs
        can be provided by giving a tuple in the `done_callback_args` kwarg
        and a dict in the `done_callback_kwargs` kwarg.
NOTE: the function you pass in here should be pickleable by normal
Python if you want to use it with the parallel_cp and parallel_cp_lcdir
functions below.
done_callback_args : tuple or None
If not None, contains any args to pass into the `done_callback`
function.
done_callback_kwargs : dict or None
If not None, contains any kwargs to pass into the `done_callback`
function.
maxobjects : int
The maximum number of objects to process in this run.
nworkers : int
The number of parallel workers that will work on the checkplot
generation process.
Returns
-------
    list
        This returns a list of `runcp` results, one per input period-finding
        pickle; each result is the list of checkplot pickles produced for
        that input.
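
    Examples
    --------
    A minimal sketch of a typical call (the paths below are hypothetical)::

        results = parallel_cp_pfdir(
            '/data/pfresults',
            '/data/checkplots',
            '/data/lightcurves',
            pfpickleglob='periodfinding-*.pkl*',
            lcformat='hat-sql',
            nworkers=16
        )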
'''
pfpicklelist = sorted(glob.glob(os.path.join(pfpickledir, pfpickleglob)))
LOGINFO('found %s period-finding pickles, running cp...' %
len(pfpicklelist))
return parallel_cp(pfpicklelist,
outdir,
lcbasedir,
fast_mode=fast_mode,
lclistpkl=lclistpkl,
nbrradiusarcsec=nbrradiusarcsec,
gaia_max_timeout=gaia_max_timeout,
gaia_mirror=gaia_mirror,
maxnumneighbors=maxnumneighbors,
makeneighborlcs=makeneighborlcs,
xmatchinfo=xmatchinfo,
xmatchradiusarcsec=xmatchradiusarcsec,
sigclip=sigclip,
minobservations=minobservations,
cprenorm=cprenorm,
maxobjects=maxobjects,
lcformat=lcformat,
lcformatdir=lcformatdir,
timecols=timecols,
magcols=magcols,
errcols=errcols,
skipdone=skipdone,
nworkers=nworkers,
done_callback=done_callback,
done_callback_args=done_callback_args,
done_callback_kwargs=done_callback_kwargs)
def runpf(lcfile,
outdir,
timecols=None,
magcols=None,
errcols=None,
lcformat='hat-sql',
lcformatdir=None,
pfmethods=('gls','pdm','mav','win'),
pfkwargs=({},{},{},{}),
sigclip=10.0,
getblssnr=False,
nworkers=NCPUS,
minobservations=500,
excludeprocessed=False,
raiseonfail=False):
'''This runs the period-finding for a single LC.
Parameters
----------
lcfile : str
The light curve file to run period-finding on.
outdir : str
The output directory where the result pickle will go.
timecols : list of str or None
The timecol keys to use from the lcdict in calculating the features.
magcols : list of str or None
The magcol keys to use from the lcdict in calculating the features.
errcols : list of str or None
The errcol keys to use from the lcdict in calculating the features.
lcformat : str
This is the `formatkey` associated with your light curve format, which
you previously passed in to the `lcproc.register_lcformat`
function. This will be used to look up how to find and read the light
curves specified in `basedir` or `use_list_of_filenames`.
lcformatdir : str or None
        If this is provided, gives the path to a directory where you've stored
your lcformat description JSONs, other than the usual directories lcproc
knows to search for them in. Use this along with `lcformat` to specify
an LC format JSON file that's not currently registered with lcproc.
pfmethods : list of str
This is a list of period finding methods to run. Each element is a
string matching the keys of the `PFMETHODS` dict above. By default, this
runs GLS, PDM, AoVMH, and the spectral window Lomb-Scargle periodogram.
pfkwargs : list of dicts
This is used to provide any special kwargs as dicts to each
period-finding method function specified in `pfmethods`.
sigclip : float or int or sequence of two floats/ints or None
If a single float or int, a symmetric sigma-clip will be performed using
the number provided as the sigma-multiplier to cut out from the input
time-series.
If a list of two ints/floats is provided, the function will perform an
'asymmetric' sigma-clip. The first element in this list is the sigma
value to use for fainter flux/mag values; the second element in this
list is the sigma value to use for brighter flux/mag values. For
example, `sigclip=[10., 3.]`, will sigclip out greater than 10-sigma
dimmings and greater than 3-sigma brightenings. Here the meaning of
"dimming" and "brightening" is set by *physics* (not the magnitude
system), which is why the `magsarefluxes` kwarg must be correctly set.
If `sigclip` is None, no sigma-clipping will be performed, and the
time-series (with non-finite elems removed) will be passed through to
the output.
getblssnr : bool
If this is True and BLS is one of the methods specified in `pfmethods`,
will also calculate the stats for each best period in the BLS results:
transit depth, duration, ingress duration, refit period and epoch, and
the SNR of the transit.
nworkers : int
The number of parallel period-finding workers to launch.
minobservations : int
The minimum number of finite LC points required to process a light
curve.
excludeprocessed : bool
If this is True, light curves that have existing period-finding result
pickles in `outdir` will not be processed.
FIXME: currently, this uses a dumb method of excluding already-processed
files. A smarter way to do this is to (i) generate a SHA512 cachekey
based on a repr of `{'lcfile', 'timecols', 'magcols', 'errcols',
'lcformat', 'pfmethods', 'sigclip', 'getblssnr', 'pfkwargs'}`, (ii) make
sure all list kwargs in the dict are sorted, (iii) check if the output
file has the same cachekey in its filename (last 8 chars of cachekey
should work), so the result was processed in exactly the same way as
        specified in the input to this function, and can therefore be
ignored. Will implement this later.
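        A rough sketch of that cachekey scheme (not implemented; the names
        below are illustrative only)::

            import hashlib
            keyspec = repr({'lcfile':lcfile,
                            'timecols':sorted(timecols),
                            'magcols':sorted(magcols),
                            'errcols':sorted(errcols),
                            'lcformat':lcformat,
                            'pfmethods':sorted(pfmethods),
                            'sigclip':sigclip,
                            'getblssnr':getblssnr,
                            'pfkwargs':pfkwargs})
            cachekey = hashlib.sha512(keyspec.encode('utf-8')).hexdigest()
            # then append the last 8 chars of cachekey to the output filename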
raiseonfail : bool
If something fails and this is True, will raise an Exception instead of
returning None at the end.
Returns
-------
str
The path to the output period-finding result pickle.
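
    Examples
    --------
    A minimal sketch of a typical call (the paths below are hypothetical)::

        pfresult = runpf(
            '/data/lightcurves/HAT-123-0001234-lc.sqlite.gz',
            '/data/pfresults',
            lcformat='hat-sql',
            pfmethods=('gls','bls'),
            pfkwargs=({}, {'startp':0.5, 'endp':100.0}),
            getblssnr=True
        )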
'''
try:
formatinfo = get_lcformat(lcformat,
use_lcformat_dir=lcformatdir)
if formatinfo:
(dfileglob, readerfunc,
dtimecols, dmagcols, derrcols,
magsarefluxes, normfunc) = formatinfo
else:
LOGERROR("can't figure out the light curve format")
return None
except Exception as e:
LOGEXCEPTION("can't figure out the light curve format")
return None
# override the default timecols, magcols, and errcols
# using the ones provided to the function
if timecols is None:
timecols = dtimecols
if magcols is None:
magcols = dmagcols
if errcols is None:
errcols = derrcols
try:
# get the LC into a dict
lcdict = readerfunc(lcfile)
# this should handle lists/tuples being returned by readerfunc
# we assume that the first element is the actual lcdict
# FIXME: figure out how to not need this assumption
if ( (isinstance(lcdict, (list, tuple))) and
(isinstance(lcdict[0], dict)) ):
lcdict = lcdict[0]
outfile = os.path.join(outdir, 'periodfinding-%s.pkl' %
squeeze(lcdict['objectid']).replace(' ', '-'))
# if excludeprocessed is True, return the output file if it exists and
# has a size that is at least 100 kilobytes (this should be enough to
# contain the minimal results of this function).
if excludeprocessed:
test_outfile = os.path.exists(outfile)
test_outfile_gz = os.path.exists(outfile+'.gz')
if (test_outfile and os.stat(outfile).st_size > 102400):
LOGWARNING('periodfinding result for %s already exists at %s, '
'skipping because excludeprocessed=True'
% (lcfile, outfile))
return outfile
elif (test_outfile_gz and os.stat(outfile+'.gz').st_size > 102400):
LOGWARNING(
'gzipped periodfinding result for %s already '
'exists at %s, skipping because excludeprocessed=True'
% (lcfile, outfile+'.gz')
)
return outfile+'.gz'
# this is the final returndict
resultdict = {
'objectid':lcdict['objectid'],
'lcfbasename':os.path.basename(lcfile),
'kwargs':{'timecols':timecols,
'magcols':magcols,
'errcols':errcols,
'lcformat':lcformat,
'lcformatdir':lcformatdir,
'pfmethods':pfmethods,
'pfkwargs':pfkwargs,
'sigclip':sigclip,
'getblssnr':getblssnr}
}
# normalize using the special function if specified
if normfunc is not None:
lcdict = normfunc(lcdict)
for tcol, mcol, ecol in zip(timecols, magcols, errcols):
# dereference the columns and get them from the lcdict
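            # a column key containing '.' denotes a nested lookup, e.g. a
            # tcol of 'aperture1.rjd' (illustrative) maps to
            # lcdict['aperture1']['rjd']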
if '.' in tcol:
tcolget = tcol.split('.')
else:
tcolget = [tcol]
times = _dict_get(lcdict, tcolget)
if '.' in mcol:
mcolget = mcol.split('.')
else:
mcolget = [mcol]
mags = _dict_get(lcdict, mcolget)
if '.' in ecol:
ecolget = ecol.split('.')
else:
ecolget = [ecol]
errs = _dict_get(lcdict, ecolget)
# normalize here if not using special normalization
if normfunc is None:
ntimes, nmags = normalize_magseries(
times, mags,
magsarefluxes=magsarefluxes
)
times, mags, errs = ntimes, nmags, errs
# run each of the requested period-finder functions
resultdict[mcol] = {}
# check if we have enough non-nan observations to proceed
finmags = mags[np.isfinite(mags)]
if finmags.size < minobservations:
LOGERROR('not enough non-nan observations for '
'this LC. have: %s, required: %s, '
'magcol: %s, skipping...' %
(finmags.size, minobservations, mcol))
continue
pfmkeys = []
            for pfmind, (pfm, pfkw) in enumerate(zip(pfmethods, pfkwargs)):
pf_func = PFMETHODS[pfm]
# get any optional kwargs for this function
                # copy so we don't mutate the caller's pfkwargs dicts in place
                pf_kwargs = dict(pfkw)
pf_kwargs.update({'verbose':False,
'nworkers':nworkers,
'magsarefluxes':magsarefluxes,
'sigclip':sigclip})
# we'll always prefix things with their index to allow multiple
# invocations and results from the same period-finder (for
# different period ranges, for example).
pfmkey = '%s-%s' % (pfmind, pfm)
pfmkeys.append(pfmkey)
# run this period-finder and save its results to the output dict
resultdict[mcol][pfmkey] = pf_func(
times, mags, errs,
**pf_kwargs
)
#
# done with running the period finders
#
# append the pfmkeys list to the magcol dict
resultdict[mcol]['pfmethods'] = pfmkeys
# check if we need to get the SNR from any BLS pfresults
if 'bls' in pfmethods and getblssnr:
# we need to scan thru the pfmethods to get to any BLS pfresults
for pfmk in resultdict[mcol]['pfmethods']:
if 'bls' in pfmk:
try:
bls = resultdict[mcol][pfmk]
# calculate the SNR for the BLS as well
blssnr = bls_snr(bls, times, mags, errs,
magsarefluxes=magsarefluxes,
verbose=False)
# add the SNR results to the BLS result dict
resultdict[mcol][pfmk].update({
'snr':blssnr['snr'],
'transitdepth':blssnr['transitdepth'],
'transitduration':blssnr['transitduration'],
})
# update the BLS result dict with the refit periods
# and epochs using the results from bls_snr
resultdict[mcol][pfmk].update({
'nbestperiods':blssnr['period'],
'epochs':blssnr['epoch']
})
except Exception as e:
LOGEXCEPTION('could not calculate BLS SNR for %s' %
lcfile)
# add the SNR null results to the BLS result dict
resultdict[mcol][pfmk].update({
'snr':[np.nan,np.nan,np.nan,np.nan,np.nan],
'transitdepth':[np.nan,np.nan,np.nan,
np.nan,np.nan],
'transitduration':[np.nan,np.nan,np.nan,
np.nan,np.nan],
})
elif 'bls' in pfmethods:
# we need to scan thru the pfmethods to get to any BLS pfresults
for pfmk in resultdict[mcol]['pfmethods']:
if 'bls' in pfmk:
# add the SNR null results to the BLS result dict
resultdict[mcol][pfmk].update({
'snr':[np.nan,np.nan,np.nan,np.nan,np.nan],
'transitdepth':[np.nan,np.nan,np.nan,
np.nan,np.nan],
'transitduration':[np.nan,np.nan,np.nan,
np.nan,np.nan],
})
# once all mag cols have been processed, write out the pickle
with open(outfile, 'wb') as outfd:
pickle.dump(resultdict, outfd, protocol=pickle.HIGHEST_PROTOCOL)
return outfile
except Exception as e:
LOGEXCEPTION('failed to run for %s, because: %s' % (lcfile, e))
if raiseonfail:
raise
return None
def _runpf_worker(task):
'''
This runs the runpf function.
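
    The single `task` argument is expected to be a tuple of the form::

        (lcfile, outdir, timecols, magcols, errcols, lcformat, lcformatdir,
         pfmethods, pfkwargs, getblssnr, sigclip, nworkers, minobservations,
         excludeprocessed)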
'''
(lcfile, outdir, timecols, magcols, errcols, lcformat, lcformatdir,
pfmethods, pfkwargs, getblssnr, sigclip, nworkers, minobservations,
excludeprocessed) = task
if os.path.exists(lcfile):
pfresult = runpf(lcfile,
outdir,
timecols=timecols,
magcols=magcols,
errcols=errcols,
lcformat=lcformat,
lcformatdir=lcformatdir,
pfmethods=pfmethods,
pfkwargs=pfkwargs,
getblssnr=getblssnr,
sigclip=sigclip,
nworkers=nworkers,
minobservations=minobservations,
excludeprocessed=excludeprocessed)
return pfresult
else:
LOGERROR('LC does not exist for requested file %s' % lcfile)
return None